#!/usr/bin/python3
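"""Build book_list.html from saved Goodreads recommendation pages.

Scan the pages saved under shelf/ and genre/, pull out each recommended book
(title, authors, rating, cover, description), drop anything already listed in
calibre_book_list.json, and render the rest with templates/books.html.
"""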
import os
import re
import lxml.html
import jinja2
import json
import sys
import requests
from time import sleep
from pprint import pprint

parser = lxml.html.HTMLParser(encoding="utf-8")
env = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"))

# Matches the inline "refreshGroupBox" JS on a book page (unused below; kept from
# an earlier scraping approach).
re_book = re.compile(
    r"function refreshGroupBox\(group_id, book_id\) \{(.*?)\n *\}", re.DOTALL
)
# Matches the tooltip payload ("var newTip = new Tip(...)") embedded in the saved pages.
re_tip = re.compile(rb'var newTip = new Tip\(\$\(\'[^\']+\'\), "(.*?)", {')

# Directories of saved Goodreads pages to scan, and the URL prefix of a book page.
dirs = ["shelf", "genre"]
start = "https://www.goodreads.com/book/show/"

# Titles already in calibre_book_list.json; these are skipped further down.
existing = {book["title"] for book in json.load(open("calibre_book_list.json"))}


def iter_books():
    """Yield (directory, filename, book) for each recommended book in the saved pages."""
    for d in dirs:
        for f in sorted(os.listdir(d)):
            filename = os.path.join(d, f)
            root = lxml.html.parse(filename, parser=parser).getroot()
            for div in root.find_class("bookInformation"):
                link = div.find(".//a")
                rating = div.find('.//span[@class="minirating"]')
                description = div.find('./div[@class="bookDescription"]')
                r = rating[0].tail.strip()

                cover = div.getnext().find(".//img").get("src")

                book = {
                    "title": link.text,
                    "url": link.get("href"),
                    "rating": r,
                    "r": float(r[:3]),
                    "cover": cover,
                    "authors": [a.text for a in div.find_class("authorName")],
                }
                if description is not None:
                    index = 1 if len(description) == 3 else 0
                    book["description"] = description[index].text
                yield d, f, book
            continue

            # Older scraping path, unreachable because of the "continue" above:
            # parse the JS tooltip payloads instead of the rendered divs.
            # print(filename)
            for line in open(filename, "rb"):
                if b"var newTip" not in line:
                    continue
                print(line)
                m = re_tip.search(line)
                tip = m.group(1).decode("unicode_escape").replace("\\/", "/")
                # tip = m.group(1)  # .replace('\/', '/')
                # print(tip)
                if '<ul class="formatting_tips recommendation_tip">' in tip:
                    continue
                if "Recommendations are disabled for that shelf." in tip:
                    continue
                if "Customize by selecting your" in tip:
                    continue
                print(tip)
                yield (d, f, lxml.html.fromstring(tip))


template = env.get_template("books.html")
seen = set()
books = []
first_authors = set()

# Highest-rated first; dedupe by URL and skip short titles or books already in Calibre.
for d, f, book in sorted(iter_books(), key=lambda i: i[2]["r"], reverse=True):
    # pprint(book)
    # print(repr(book.get('description')))
    # continue

    # title_link = book.find_class('bookTitle')[0]
    url = book["url"]
    url = url.split("?")[0]  # strip any tracking query string
    if url in seen:
        continue
    seen.add(url)
    title = book["title"]
    authors = book["authors"]
    first_authors.add(authors[0])

    # Strip subtitles and series markers from the title before comparing.
    main_title = title
    # for sep in ['(']:
    for sep in ":", " - ", "(", ",":
        if sep in title:
            main_title = title[: title.find(sep)]
            break
    # print((main_title + ' by ' + u', '.join(authors)).encode('utf-8'))
    if len(main_title) < 10:
        continue
    if main_title in existing:
        continue
    # print(u'{} by {}'.format(main_title, authors[0]).encode('utf-8'))
    print(main_title)
    # print(main_title.encode('utf-8'))
    assert url.startswith(start)

    filename = "books/" + url[len(start):] + ".html"
    # print(filename)
    if False and not os.path.exists(filename):  # downloading is currently disabled
        open(filename, "wb").write(requests.get(url).content)
        sleep(1)
    books.append(
        {
            "dir": d,
            "file": f[:-5],  # drop the ".html" extension
            "title": title,
            "main_title": main_title,
            "authors": authors,
            "url": url,
            "rating": book["rating"],
            "cover": book["cover"],
            "description": book.get("description"),
        }
    )

page = template.render(books=books)
open("book_list.html", "w").write(page)
sys.exit(0)

# Unreachable because of sys.exit(0) above; kept for ad-hoc author listing.
for a in sorted(first_authors):
    print(a)

# authors = u' OR '.join(u'"{}"'.format(a) for a in sorted(first_authors) if a not in {'Hugh Howey', 'Elizabeth Moon', 'Max Hastings'})
# print(authors.encode('utf-8'))