goodreads-backup/get.py

#!/usr/bin/python3
import os
import re
from http.cookiejar import LWPCookieJar
from random import shuffle
from time import sleep

import lxml.html
import requests
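
# Each category in the recommendations list view is linked as e.g.
# <a class="actionLinkLite " href="/recommendations/genre/art">; capture the
# full path plus its two segments ("genre", "art").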
re_recommend = re.compile(
    ' <a class="actionLinkLite " href="(/recommendations/([^/]*?)/([^/]*?))">'
)

s = requests.Session()
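
# Store cookies on disk so repeated runs can reuse an existing session.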
cookie_dir = "/home/edward/lib/cookies"
cookie_file = os.path.join(cookie_dir, "goodreads")
cj = LWPCookieJar(cookie_file)
if os.path.exists(cookie_file):
    cj.load(ignore_discard=True)  # keep session cookies, matching save() below
s.cookies = cj
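
# Sign in to Goodreads: scrape the CSRF authenticity_token and the hidden
# "n" field out of the sign-in form, post the credentials, report any flash
# error message, and persist the session cookies for later runs.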
def login():
    sign_in_page = "https://www.goodreads.com/user/sign_in"
    page = s.get(sign_in_page).text
    open("sign_in.html", "w").write(page)
    if '"name":"Edward Betts"' in page:
        return  # already signed in
    re_token = re.compile(
        '<input type="hidden" name="authenticity_token" value="([^"]*?)" />'
    )
    re_n = re.compile(r"<input name='n' type='hidden' value='(\d+)'>")
    token = re_token.search(page).group(1)
    data = {
        "utf8": "\u2713",
        "authenticity_token": token,
        "user[email]": "edward@4angle.com",
        "user[password]": "8V8~9:3~U!Ly",
        "remember_me": 1,
        "next": "Sign in",
        "n": re_n.search(page).group(1),
    }
    print(token)
    print(data["n"])
    r = s.post(sign_in_page, data=data, headers={"referer": sign_in_page})
    open("signed_in.html", "w").write(r.text)
    root = lxml.html.fromstring(r.content)
    flash = root.find_class("flash")
    if flash:
        print("flash:", flash[0].text)
    cj.save(ignore_discard=True)
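
# Download the recommendations index (list view) and cache it as
# recommendations.html for get_individual() to parse.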
def get_index():
    # url = 'https://www.goodreads.com/recommendations'
    url = "https://www.goodreads.com/recommendations/?recs_current_view=list"
    r = s.get(url)
    open("recommendations.html", "w").write(r.text)
def get_individual():
    for line in open("recommendations.html"):
        if "actionLinkLite" not in line:
            continue
        m = re_recommend.match(line)
        if m:
            yield m.groups()


# art = 'https://www.goodreads.com/recommendations/genre/art'
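
# Main flow: sign in, refresh the cached index, then fetch every
# recommendation page in random order, pausing between requests.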
login()
get_index()
recommend_list = list(get_individual())
shuffle(recommend_list)
headers = {"Accept": "text/html"}
for a, b, c in recommend_list:
    print((b, c))
    url = "https://www.goodreads.com" + a
    r = s.get(url, headers=headers)
    os.makedirs(b, exist_ok=True)  # make sure the category directory exists
    filename = os.path.join(b, c + ".html")
    open(filename, "w").write(r.text)
    sleep(0.5)