Reformat code with black

Edward Betts 2024-04-17 09:12:11 +01:00
parent 0ba43ea993
commit 777ede64f8
2 changed files with 86 additions and 72 deletions
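
Black's default style accounts for every change below: single-quoted strings become double-quoted, the redundant u'' prefix is dropped, long calls are wrapped with their arguments on indented continuation lines, and top-level definitions gain the standard two blank lines. The exact invocation isn't recorded in the commit; assuming black is installed in the environment, a command along these lines would reproduce it:

    python -m black .

Re-running black on the formatted files is a no-op, which makes a pure-formatting commit like this one easy to verify.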

get.py

@@ -8,64 +8,71 @@ import lxml.html
 from random import shuffle
 from time import sleep
 
-re_recommend = re.compile(' <a class="actionLinkLite " href="(/recommendations/([^/]*?)/([^/]*?))">')
+re_recommend = re.compile(
+    ' <a class="actionLinkLite " href="(/recommendations/([^/]*?)/([^/]*?))">'
+)
 
 s = requests.Session()
-cookie_dir = '/home/edward/lib/cookies'
-cookie_file = os.path.join(cookie_dir, 'goodreads')
+cookie_dir = "/home/edward/lib/cookies"
+cookie_file = os.path.join(cookie_dir, "goodreads")
 cj = LWPCookieJar(cookie_file)
 if os.path.exists(cookie_file):
     cj.load()
 s.cookies = cj
 
+
 def login():
-    sign_in_page = 'https://www.goodreads.com/user/sign_in'
+    sign_in_page = "https://www.goodreads.com/user/sign_in"
     page = s.get(sign_in_page).text
-    open('sign_in.html', 'w').write(page)
+    open("sign_in.html", "w").write(page)
     if '"name":"Edward Betts"' in page:
         return  # already signed in
-    re_token = re.compile('<input type="hidden" name="authenticity_token" value="([^"]*?)" />')
+    re_token = re.compile(
+        '<input type="hidden" name="authenticity_token" value="([^"]*?)" />'
+    )
     re_n = re.compile("<input name='n' type='hidden' value='(\d+)'>")
     token = re_token.search(page).group(1)
     data = {
-        'utf8': u'\u2713',
-        'authenticity_token': token,
-        'user[email]': 'edward@4angle.com',
-        'user[password]': '8V8~9:3~U!Ly',
-        'remember_me': 1,
-        'next': 'Sign in',
-        'n': re_n.search(page).group(1),
+        "utf8": "\u2713",
+        "authenticity_token": token,
+        "user[email]": "edward@4angle.com",
+        "user[password]": "8V8~9:3~U!Ly",
+        "remember_me": 1,
+        "next": "Sign in",
+        "n": re_n.search(page).group(1),
     }
     print(token)
-    print(data['n'])
-    r = s.post(sign_in_page, data=data, headers={'referer': sign_in_page})
-    open('signed_in.html', 'w').write(r.text)
+    print(data["n"])
+    r = s.post(sign_in_page, data=data, headers={"referer": sign_in_page})
+    open("signed_in.html", "w").write(r.text)
     root = lxml.html.fromstring(r.content)
-    flash = root.find_class('flash')
+    flash = root.find_class("flash")
     if flash:
-        print('flash:', flash[0].text)
+        print("flash:", flash[0].text)
     cj.save(ignore_discard=True)
 
+
 def get_index():
     # url = 'https://www.goodreads.com/recommendations'
-    url = 'https://www.goodreads.com/recommendations/?recs_current_view=list'
+    url = "https://www.goodreads.com/recommendations/?recs_current_view=list"
     r = s.get(url)
-    open('recommendations.html', 'w').write(r.text)
+    open("recommendations.html", "w").write(r.text)
 
+
 def get_individual():
-    for line in open('recommendations.html'):
-        if 'actionLinkLite' not in line:
+    for line in open("recommendations.html"):
+        if "actionLinkLite" not in line:
             continue
         m = re_recommend.match(line)
         if m:
@@ -78,13 +85,13 @@ get_index()
 recommend_list = list(get_individual())
 shuffle(recommend_list)
 
-headers = {'Accept': 'text/html'}
+headers = {"Accept": "text/html"}
 for a, b, c in recommend_list:
     print((b, c))
-    url = 'https://www.goodreads.com' + a
+    url = "https://www.goodreads.com" + a
     r = s.get(url, headers=headers)
-    filename = os.path.join(b, c + '.html')
-    open(filename, 'w').write(r.text)
+    filename = os.path.join(b, c + ".html")
+    open(filename, "w").write(r.text)
     sleep(0.5)

(second changed file, name not shown)

@@ -9,87 +9,92 @@ import requests
 from time import sleep
 from pprint import pprint
 
-parser = lxml.html.HTMLParser(encoding='utf-8')
-env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
+parser = lxml.html.HTMLParser(encoding="utf-8")
+env = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"))
 
-re_book = re.compile('function refreshGroupBox(group_id, book_id) \{(.*?)\n *\}', re.DOTALL)
-re_book = re.compile('function refreshGroupBox\(group_id, book_id\) \{(.*?)\n *\}', re.DOTALL)
-re_tip = re.compile(br'var newTip = new Tip\(\$\(\'[^\']+\'\), "(.*?)", {')
-dirs = ['shelf', 'genre']
-start = 'https://www.goodreads.com/book/show/'
-existing = {book['title'] for book in json.load(open('calibre_book_list.json'))}
+re_book = re.compile(
+    "function refreshGroupBox(group_id, book_id) \{(.*?)\n *\}", re.DOTALL
+)
+re_book = re.compile(
+    "function refreshGroupBox\(group_id, book_id\) \{(.*?)\n *\}", re.DOTALL
+)
+re_tip = re.compile(rb'var newTip = new Tip\(\$\(\'[^\']+\'\), "(.*?)", {')
+dirs = ["shelf", "genre"]
+start = "https://www.goodreads.com/book/show/"
+existing = {book["title"] for book in json.load(open("calibre_book_list.json"))}
 
+
 def iter_books():
     for d in dirs:
         for f in sorted(os.listdir(d)):
             filename = os.path.join(d, f)
             root = lxml.html.parse(filename, parser=parser).getroot()
-            for div in root.find_class('bookInformation'):
-                link = div.find('.//a')
+            for div in root.find_class("bookInformation"):
+                link = div.find(".//a")
                 rating = div.find('.//span[@class="minirating"]')
                 description = div.find('./div[@class="bookDescription"]')
                 r = rating[0].tail.strip()
-                cover = div.getnext().find('.//img').get('src')
+                cover = div.getnext().find(".//img").get("src")
                 book = {
-                    'title': link.text,
-                    'url': link.get('href'),
-                    'rating': r,
-                    'r': float(r[:3]),
-                    'cover': cover,
-                    'authors': [a.text for a in div.find_class('authorName')],
+                    "title": link.text,
+                    "url": link.get("href"),
+                    "rating": r,
+                    "r": float(r[:3]),
+                    "cover": cover,
+                    "authors": [a.text for a in div.find_class("authorName")],
                 }
                 if description is not None:
                     index = 1 if len(description) == 3 else 0
-                    book['description'] = description[index].text
+                    book["description"] = description[index].text
                 yield d, f, book
             continue
             # print(filename)
-            for line in open(filename, 'rb'):
-                if b'var newTip' not in line:
+            for line in open(filename, "rb"):
+                if b"var newTip" not in line:
                     continue
                 print(line)
                 m = re_tip.search(line)
-                tip = m.group(1).decode('unicode_escape').replace('\/', '/')
+                tip = m.group(1).decode("unicode_escape").replace("\/", "/")
                 # tip = m.group(1) # .replace('\/', '/')
                 # print(tip)
                 if '<ul class="formatting_tips recommendation_tip">' in tip:
                     continue
-                if 'Recommendations are disabled for that shelf.' in tip:
+                if "Recommendations are disabled for that shelf." in tip:
                     continue
-                if 'Customize by selecting your' in tip:
+                if "Customize by selecting your" in tip:
                     continue
                 print(tip)
                 yield (d, f, lxml.html.fromstring(tip))
 
 
-template = env.get_template('books.html')
+template = env.get_template("books.html")
 seen = set()
 books = []
 first_authors = set()
-for d, f, book in sorted(iter_books(), key=lambda i: i[2]['r'], reverse=True):
+for d, f, book in sorted(iter_books(), key=lambda i: i[2]["r"], reverse=True):
     # pprint(book)
     # print(repr(book.get('description')))
     # continue
     # title_link = book.find_class('bookTitle')[0]
-    url = book['url']
-    url = url[:url.find('?')]
+    url = book["url"]
+    url = url[: url.find("?")]
     if url in seen:
         continue
     seen.add(url)
-    title = book['title']
-    authors = book['authors']
+    title = book["title"]
+    authors = book["authors"]
     first_authors.add(authors[0])
     main_title = title
     # for sep in ['(']:
-    for sep in ':', ' - ', '(', ',':
+    for sep in ":", " - ", "(", ",":
         if sep in title:
             main_title = title[: title.find(sep)]
             break
@@ -99,29 +104,31 @@ for d, f, book in sorted(iter_books(), key=lambda i: i[2]['r'], reverse=True):
     if main_title in existing:
         continue
     # print(u'{} by {}'.format(main_title, authors[0]).encode('utf-8'))
-    print(u'{}'.format(main_title))
+    print("{}".format(main_title))
     # print(main_title.encode('utf-8'))
     assert url.startswith(start)
-    filename = 'books/' + url[len(start):] + '.html'
+    filename = "books/" + url[len(start) :] + ".html"
     # print(filename)
     if False and not os.path.exists(filename):
-        open(filename, 'w').write(requests.get(url).content)
+        open(filename, "w").write(requests.get(url).content)
         sleep(1)
-    books.append({
-        'dir': d,
-        'file': f[:-5],
-        'title': title,
-        'main_title': main_title,
-        'authors': authors,
-        'url': url,
-        'rating': book['rating'],
-        'cover': book['cover'],
-        'description': book.get('description'),
-    })
+    books.append(
+        {
+            "dir": d,
+            "file": f[:-5],
+            "title": title,
+            "main_title": main_title,
+            "authors": authors,
+            "url": url,
+            "rating": book["rating"],
+            "cover": book["cover"],
+            "description": book.get("description"),
+        }
+    )
 
 page = template.render(books=books)
-open('book_list.html', 'w').write(page)
+open("book_list.html", "w").write(page)
 
 sys.exit(0)
 
 for a in sorted(first_authors):