Reformat code with black
This commit is contained in:
parent 0ba43ea993
commit 777ede64f8
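Both files are rewritten by black with no change in behaviour: string literals are normalised to double quotes and statements longer than the default 88-character limit are wrapped. The diff below should be reproducible by running `black get.py parse.py`. As a minimal sketch, the same normalisation through black's Python API, using a line taken from get.py:

import black

# Default mode: 88-character line length, double-quoted strings.
src = "cookie_dir = '/home/edward/lib/cookies'\n"
print(black.format_str(src, mode=black.Mode()), end="")
# -> cookie_dir = "/home/edward/lib/cookies"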
get.py | 59
@@ -8,64 +8,71 @@ import lxml.html
 from random import shuffle
 from time import sleep
 
-re_recommend = re.compile(' <a class="actionLinkLite " href="(/recommendations/([^/]*?)/([^/]*?))">')
+re_recommend = re.compile(
+    ' <a class="actionLinkLite " href="(/recommendations/([^/]*?)/([^/]*?))">'
+)
 
 s = requests.Session()
 
-cookie_dir = '/home/edward/lib/cookies'
-cookie_file = os.path.join(cookie_dir, 'goodreads')
+cookie_dir = "/home/edward/lib/cookies"
+cookie_file = os.path.join(cookie_dir, "goodreads")
 
 cj = LWPCookieJar(cookie_file)
 if os.path.exists(cookie_file):
     cj.load()
 s.cookies = cj
 
 
 def login():
-    sign_in_page = 'https://www.goodreads.com/user/sign_in'
+    sign_in_page = "https://www.goodreads.com/user/sign_in"
     page = s.get(sign_in_page).text
-    open('sign_in.html', 'w').write(page)
+    open("sign_in.html", "w").write(page)
     if '"name":"Edward Betts"' in page:
         return  # already signed in
 
-    re_token = re.compile('<input type="hidden" name="authenticity_token" value="([^"]*?)" />')
+    re_token = re.compile(
+        '<input type="hidden" name="authenticity_token" value="([^"]*?)" />'
+    )
     re_n = re.compile("<input name='n' type='hidden' value='(\d+)'>")
 
     token = re_token.search(page).group(1)
 
     data = {
-        'utf8': u'\u2713',
-        'authenticity_token': token,
-        'user[email]': 'edward@4angle.com',
-        'user[password]': '8V8~9:3~U!Ly',
-        'remember_me': 1,
-        'next': 'Sign in',
-        'n': re_n.search(page).group(1),
+        "utf8": "\u2713",
+        "authenticity_token": token,
+        "user[email]": "edward@4angle.com",
+        "user[password]": "8V8~9:3~U!Ly",
+        "remember_me": 1,
+        "next": "Sign in",
+        "n": re_n.search(page).group(1),
     }
 
     print(token)
-    print(data['n'])
+    print(data["n"])
 
-    r = s.post(sign_in_page, data=data, headers={'referer': sign_in_page})
+    r = s.post(sign_in_page, data=data, headers={"referer": sign_in_page})
 
-    open('signed_in.html', 'w').write(r.text)
+    open("signed_in.html", "w").write(r.text)
 
     root = lxml.html.fromstring(r.content)
-    flash = root.find_class('flash')
+    flash = root.find_class("flash")
     if flash:
-        print('flash:', flash[0].text)
+        print("flash:", flash[0].text)
 
     cj.save(ignore_discard=True)
 
 
 def get_index():
     # url = 'https://www.goodreads.com/recommendations'
-    url = 'https://www.goodreads.com/recommendations/?recs_current_view=list'
+    url = "https://www.goodreads.com/recommendations/?recs_current_view=list"
 
     r = s.get(url)
-    open('recommendations.html', 'w').write(r.text)
+    open("recommendations.html", "w").write(r.text)
 
 
 def get_individual():
-    for line in open('recommendations.html'):
-        if 'actionLinkLite' not in line:
+    for line in open("recommendations.html"):
+        if "actionLinkLite" not in line:
             continue
         m = re_recommend.match(line)
         if m:
@@ -78,13 +85,13 @@ get_index()
 recommend_list = list(get_individual())
 shuffle(recommend_list)
 
-headers = {'Accept': 'text/html'}
+headers = {"Accept": "text/html"}
 
 for a, b, c in recommend_list:
     print((b, c))
-    url = 'https://www.goodreads.com' + a
+    url = "https://www.goodreads.com" + a
 
     r = s.get(url, headers=headers)
-    filename = os.path.join(b, c + '.html')
-    open(filename, 'w').write(r.text)
+    filename = os.path.join(b, c + ".html")
+    open(filename, "w").write(r.text)
     sleep(0.5)
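For context, get.py keeps its Goodreads login across runs by binding an LWPCookieJar to the requests session, which is why the cookie_file lines appear at the top of the diff. A minimal self-contained sketch of that pattern (the cookie path is this repo's, not a general default):

import os
from http.cookiejar import LWPCookieJar

import requests

# Persist cookies between runs, as get.py does: load the jar if it
# already exists, attach it to the session, and save after logging in.
cookie_file = os.path.join("/home/edward/lib/cookies", "goodreads")

s = requests.Session()
cj = LWPCookieJar(cookie_file)
if os.path.exists(cookie_file):
    cj.load()  # reuse the saved cookies
s.cookies = cj  # requests accepts any cookielib jar

# ... log in with s.post(...) here ...

cj.save(ignore_discard=True)  # keep session cookies that lack an expiry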
parse.py | 99
@@ -9,89 +9,94 @@ import requests
 from time import sleep
 from pprint import pprint
 
-parser = lxml.html.HTMLParser(encoding='utf-8')
+parser = lxml.html.HTMLParser(encoding="utf-8")
 
-env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
+env = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"))
 
-re_book = re.compile('function refreshGroupBox(group_id, book_id) \{(.*?)\n *\}', re.DOTALL)
-re_book = re.compile('function refreshGroupBox\(group_id, book_id\) \{(.*?)\n *\}', re.DOTALL)
+re_book = re.compile(
+    "function refreshGroupBox(group_id, book_id) \{(.*?)\n *\}", re.DOTALL
+)
+re_book = re.compile(
+    "function refreshGroupBox\(group_id, book_id\) \{(.*?)\n *\}", re.DOTALL
+)
 
-re_tip = re.compile(br'var newTip = new Tip\(\$\(\'[^\']+\'\), "(.*?)", {')
+re_tip = re.compile(rb'var newTip = new Tip\(\$\(\'[^\']+\'\), "(.*?)", {')
 
-dirs = ['shelf', 'genre']
-start = 'https://www.goodreads.com/book/show/'
+dirs = ["shelf", "genre"]
+start = "https://www.goodreads.com/book/show/"
 
-existing = {book['title'] for book in json.load(open('calibre_book_list.json'))}
+existing = {book["title"] for book in json.load(open("calibre_book_list.json"))}
+
 
 def iter_books():
     for d in dirs:
         for f in sorted(os.listdir(d)):
             filename = os.path.join(d, f)
             root = lxml.html.parse(filename, parser=parser).getroot()
-            for div in root.find_class('bookInformation'):
-                link = div.find('.//a')
+            for div in root.find_class("bookInformation"):
+                link = div.find(".//a")
                 rating = div.find('.//span[@class="minirating"]')
                 description = div.find('./div[@class="bookDescription"]')
                 r = rating[0].tail.strip()
 
-                cover = div.getnext().find('.//img').get('src')
+                cover = div.getnext().find(".//img").get("src")
 
                 book = {
-                    'title': link.text,
-                    'url': link.get('href'),
-                    'rating': r,
-                    'r': float(r[:3]),
-                    'cover': cover,
-                    'authors': [a.text for a in div.find_class('authorName')],
+                    "title": link.text,
+                    "url": link.get("href"),
+                    "rating": r,
+                    "r": float(r[:3]),
+                    "cover": cover,
+                    "authors": [a.text for a in div.find_class("authorName")],
                 }
                 if description is not None:
                     index = 1 if len(description) == 3 else 0
-                    book['description'] = description[index].text
+                    book["description"] = description[index].text
                 yield d, f, book
             continue
 
             # print(filename)
-            for line in open(filename, 'rb'):
-                if b'var newTip' not in line:
+            for line in open(filename, "rb"):
+                if b"var newTip" not in line:
                     continue
                 print(line)
                 m = re_tip.search(line)
-                tip = m.group(1).decode('unicode_escape').replace('\/', '/')
+                tip = m.group(1).decode("unicode_escape").replace("\/", "/")
                 # tip = m.group(1)  # .replace('\/', '/')
                 # print(tip)
                 if '<ul class="formatting_tips recommendation_tip">' in tip:
                     continue
-                if 'Recommendations are disabled for that shelf.' in tip:
+                if "Recommendations are disabled for that shelf." in tip:
                     continue
-                if 'Customize by selecting your' in tip:
+                if "Customize by selecting your" in tip:
                     continue
                 print(tip)
                 yield (d, f, lxml.html.fromstring(tip))
 
 
-template = env.get_template('books.html')
+template = env.get_template("books.html")
 seen = set()
 books = []
 first_authors = set()
-for d, f, book in sorted(iter_books(), key=lambda i: i[2]['r'], reverse=True):
+for d, f, book in sorted(iter_books(), key=lambda i: i[2]["r"], reverse=True):
     # pprint(book)
     # print(repr(book.get('description')))
     # continue
 
     # title_link = book.find_class('bookTitle')[0]
-    url = book['url']
-    url = url[:url.find('?')]
+    url = book["url"]
+    url = url[: url.find("?")]
     if url in seen:
         continue
     seen.add(url)
-    title = book['title']
-    authors = book['authors']
+    title = book["title"]
+    authors = book["authors"]
     first_authors.add(authors[0])
     main_title = title
     # for sep in ['(']:
-    for sep in ':', ' - ', '(', ',':
+    for sep in ":", " - ", "(", ",":
         if sep in title:
-            main_title = title[:title.find(sep)]
+            main_title = title[: title.find(sep)]
             break
     # print((main_title + ' by ' + u', '.join(authors)).encode('utf-8'))
     if len(main_title) < 10:
@@ -99,29 +104,31 @@ for d, f, book in sorted(iter_books(), key=lambda i: i[2]['r'], reverse=True):
     if main_title in existing:
         continue
     # print(u'{} by {}'.format(main_title, authors[0]).encode('utf-8'))
-    print(u'{}'.format(main_title))
+    print("{}".format(main_title))
     # print(main_title.encode('utf-8'))
     assert url.startswith(start)
 
-    filename = 'books/' + url[len(start):] + '.html'
+    filename = "books/" + url[len(start) :] + ".html"
     # print(filename)
     if False and not os.path.exists(filename):
-        open(filename, 'w').write(requests.get(url).content)
+        open(filename, "w").write(requests.get(url).content)
         sleep(1)
-    books.append({
-        'dir': d,
-        'file': f[:-5],
-        'title': title,
-        'main_title': main_title,
-        'authors': authors,
-        'url': url,
-        'rating': book['rating'],
-        'cover': book['cover'],
-        'description': book.get('description'),
-    })
+    books.append(
+        {
+            "dir": d,
+            "file": f[:-5],
+            "title": title,
+            "main_title": main_title,
+            "authors": authors,
+            "url": url,
+            "rating": book["rating"],
+            "cover": book["cover"],
+            "description": book.get("description"),
+        }
+    )
 
 page = template.render(books=books)
-open('book_list.html', 'w').write(page)
+open("book_list.html", "w").write(page)
 sys.exit(0)
 
 for a in sorted(first_authors):
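One thing the reformatting makes easier to spot: parse.py assigns re_book twice, and only the second definition can ever match. In the first pattern the parentheses after refreshGroupBox are unescaped, so the regex engine reads them as a capture group and never matches the literal "(" and ")" in the JavaScript source. A quick check of that claim, using the two patterns from the diff against a hypothetical snippet of the scraped page:

import re

js = "function refreshGroupBox(group_id, book_id) {\n  ...\n  }"

# Unescaped parens form a capture group, so the literal "(" in the
# JavaScript is never matched; the escaped pattern is the live one.
unescaped = re.compile("function refreshGroupBox(group_id, book_id) \{(.*?)\n *\}", re.DOTALL)
escaped = re.compile("function refreshGroupBox\(group_id, book_id\) \{(.*?)\n *\}", re.DOTALL)

print(unescaped.search(js))  # None
print(escaped.search(js))    # <re.Match object; ...>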