"""Access the Wikidata API."""

import hashlib
import json
import os
import subprocess
import time
import typing

import requests

# Note: commons_url also points at the Wikidata API endpoint; imageinfo queries
# for Commons-hosted files are resolved through Wikidata's shared file repository.
commons_url = "https://www.wikidata.org/w/api.php"
wikidata_api = "https://www.wikidata.org/w/api.php"
user_agent = "conference-archive/0.1 (contact: edward@4angle.com)"

CallParams = dict[str, str | int]

s = requests.Session()
s.headers.update({"User-Agent": user_agent})


def md5sum(s: str) -> str:
    """Generate hex md5sum."""
    return hashlib.md5(s.encode("utf-8")).hexdigest()


def search(q: str) -> list[dict[str, typing.Any]]:
    """Search Wikidata with caching."""
    q_md5 = md5sum(q)
    cache_filename = os.path.join("cache", q_md5 + ".json")
    if os.path.exists(cache_filename):
        with open(cache_filename) as f:
            data = json.load(f)
    else:
        params: dict[str, str | int] = {
            "action": "query",
            "list": "search",
            "format": "json",
            "formatversion": 2,
            "srsearch": q,
            "srlimit": "10",
        }
        # Use the shared session so the request carries the User-Agent header.
        r = s.get(wikidata_api, params=params)
        with open(cache_filename, "w") as f:
            f.write(r.text)
        data = r.json()
        time.sleep(1)

    return typing.cast(list[dict[str, typing.Any]], data["query"]["search"])


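# A quick usage illustration (assumed, not part of the original module): with
# action=query / list=search against the Wikidata API, each hit's "title" is
# the item's QID, so a result can be fed straight into get_item():
#
#     hits = search("Python (programming language)")
#     qid = hits[0]["title"]  # e.g. "Q28865"
#     item = get_item(qid)

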
def api_image_detail_call(filename: str) -> requests.Response:
    """Call the Commons API."""
    call_params: CallParams = {
        "format": "json",
        "formatversion": 2,
        "action": "query",
        "prop": "imageinfo",
        "iiprop": "url",
        "titles": f"File:{filename}",
    }

    return s.get(commons_url, params=call_params, timeout=5)


def get_item(qid: str) -> typing.Any:
    """Get an item from Wikidata, using the on-disk cache when available."""
    cache_filename = os.path.join("items", qid + ".json")
    if os.path.exists(cache_filename):
        with open(cache_filename) as f:
            item = json.load(f)
    else:
        params: dict[str, str | int] = {
            "action": "wbgetentities",
            "ids": qid,
            "format": "json",
            "formatversion": 2,
        }
        r = s.get(wikidata_api, params=params)
        item = r.json()["entities"][qid]
        with open(cache_filename, "w") as f:
            json.dump(item, f, indent=2)
        time.sleep(0.1)
    return item


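# A minimal sketch, not part of the original module, of one way the entity JSON
# returned by get_item() might be read. The helper name and the use of P18
# ("image") in the example are assumptions.
def first_claim_value(item: dict[str, typing.Any], pid: str) -> typing.Any:
    """Return the first mainsnak value for a property, or None if absent."""
    claims = item.get("claims", {}).get(pid, [])
    if not claims:
        return None
    return claims[0]["mainsnak"].get("datavalue", {}).get("value")


# Example: first_claim_value(get_item("Q42"), "P18") -> a Commons image filename.

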
def download_photo(filename: str) -> None:
    """Download the full-size image for filename from Commons."""
    save_to = os.path.join("static", "wikidata_photo", filename)
    r = api_image_detail_call(filename)
    try:
        pages = r.json()["query"]["pages"]
    except requests.exceptions.JSONDecodeError:
        print(r.text)
        raise
    photo = pages[0]["imageinfo"][0]
    photo_url = photo["url"]
    # Retry until the response is the image itself rather than an HTML error page.
    while True:
        r = s.get(photo_url)
        if not r.content.startswith(b"<!DOCTYPE html>"):
            break
        time.sleep(1)
    with open(save_to, "wb") as out:
        out.write(r.content)
    print(len(r.content), filename)


def get_photo(filename: str) -> None:
    """Download filename and resize."""
    save_to = os.path.join("static", "wikidata_photo", filename)
    thumb = os.path.join("static", "wikidata_photo", "thumb", filename)
    if not os.path.exists(save_to):
        download_photo(filename)
    if not os.path.exists(thumb):
        # Resize to 1024 pixels wide with ImageMagick's convert.
        subprocess.run(["convert", "-resize", "1024x", save_to, thumb])
    if filename.endswith("jpg") or filename.endswith("jpeg"):
        # Optimise JPEG thumbnails in place with jpegoptim.
        subprocess.run(["jpegoptim", "-S1048576", thumb])


wikidata_properties = [
    ("website", "P856", "official website", None),
    ("twitter", "P2002", "Twitter username", "https://twitter.com/$1"),
    ("github", "P2037", "GitHub username", "https://github.com/$1"),
    (
        "linkedin",
        "P6634",
        "LinkedIn personal profile ID",
        "https://www.linkedin.com/in/$1/",
    ),
    ("mastodon_address", "P4033", "Mastodon address", None),
    ("dblp", "P2456", "DBLP author ID", "https://dblp.org/pid/$1"),
    ("blog_url", "P1581", "official blog URL", None),
    (
        "hacker_news",
        "P7171",
        "Hacker News username",
        "https://news.ycombinator.com/user?id=$1",
    ),
    ("reddit", "P4265", "Reddit username", "https://www.reddit.com/user/$1"),
]
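

# A minimal sketch, not part of the original module, of how the
# (key, property ID, label, URL template) tuples above might be applied to an
# item's claims; the helper name and the claim traversal are assumptions.
def profile_links(item: dict[str, typing.Any]) -> dict[str, str]:
    """Map link keys to URLs (or raw values when no template is given)."""
    links: dict[str, str] = {}
    for key, pid, _label, url_template in wikidata_properties:
        claims = item.get("claims", {}).get(pid, [])
        if not claims:
            continue
        value = claims[0]["mainsnak"].get("datavalue", {}).get("value")
        if not isinstance(value, str):
            continue
        # Entries with no URL template (website, blog, Mastodon) keep the raw value.
        links[key] = url_template.replace("$1", value) if url_template else value
    return links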