website_weread.py
# -*- coding: utf-8 -*-
import contextlib
import json
import pathlib
import re
import typing
from itertools import chain

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

from utils import current_date, current_time, get_weread_id, logger, write_text_file
url = "https://weread.qq.com/web/bookListInCategory/rising?rank=1"
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
}
# Retry up to 3 times with exponential backoff on any 4xx/5xx status.
retries = Retry(
    total=3, backoff_factor=1, status_forcelist=list(range(400, 600))
)


@contextlib.contextmanager
def request_session():
    """Yield a requests Session with retry-enabled adapters, closing it on exit."""
    s = requests.Session()
    try:
        s.headers.update(headers)
        s.mount("http://", HTTPAdapter(max_retries=retries))
        s.mount("https://", HTTPAdapter(max_retries=retries))
        yield s
    finally:
        s.close()


class WebSiteWeRead:
    @staticmethod
    def get_raw() -> dict:
        ret = {}
        try:
            with request_session() as s:
                resp = s.get(url)
                ret = resp.json()
        except Exception:
            logger.exception("get data failed")
        return ret
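
    # get_raw returns JSON shaped roughly like the following (assumed from the
    # fields accessed in clean_raw below; not verified against the live endpoint):
    #   {"books": [{"bookInfo": {"bookId": "...", "title": "..."}}, ...]}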

    @staticmethod
    def clean_raw(raw_data: dict) -> typing.List[typing.Dict[str, typing.Any]]:
        ret: typing.List[typing.Dict[str, typing.Any]] = []
        # Guard with .get() so an empty payload from a failed fetch yields [].
        for item in raw_data.get("books", []):
            ret.append(
                {
                    "title": item["bookInfo"]["title"],
                    "url": f"https://weread.qq.com/web/bookDetail/{get_weread_id(item['bookInfo']['bookId'])}",
                }
            )
        return ret

    @staticmethod
    def read_already_download(
        full_path: str,
    ) -> typing.List[typing.Dict[str, typing.Any]]:
        content: typing.List[typing.Dict[str, typing.Any]] = []
        if pathlib.Path(full_path).exists():
            with open(full_path, encoding="utf-8") as fd:
                content = json.loads(fd.read())
        return content

    @staticmethod
    def create_list(content: typing.List[typing.Dict[str, typing.Any]]) -> str:
        topics = []
        # Keep the template lines flush-left: the string is written verbatim
        # into README.md and the archives.
        template = """<!-- BEGIN WEREAD -->
<!-- 最后更新时间 {update_time} -->
{topics}
<!-- END WEREAD -->"""
        for item in content:
            topics.append(f"1. [{item['title']}]({item['url']})")
        template = template.replace("{update_time}", current_time())
        template = template.replace("{topics}", "\n".join(topics))
        return template
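
    # Rendered block (illustrative; the timestamp format comes from
    # utils.current_time and the sample title is made up):
    #   <!-- BEGIN WEREAD -->
    #   <!-- 最后更新时间 2023-10-01 12:00:00 -->
    #   1. [某本书](https://weread.qq.com/web/bookDetail/xxx)
    #   <!-- END WEREAD -->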

    @staticmethod
    def create_raw(full_path: str, raw: str) -> None:
        write_text_file(full_path, raw)

    @staticmethod
    def merge_data(
        cur: typing.List[typing.Dict[str, typing.Any]],
        another: typing.List[typing.Dict[str, typing.Any]],
    ) -> typing.List[typing.Dict[str, typing.Any]]:
        # Deduplicate by url; entries from `another` overwrite those in `cur`.
        merged_dict: typing.Dict[str, typing.Any] = {}
        for item in chain(cur, another):
            merged_dict[item["url"]] = item["title"]
        return [{"url": k, "title": v} for k, v in merged_dict.items()]
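
    # Example (illustrative): on a url collision the entry from `another` wins,
    # while insertion order is preserved.
    #   merge_data([{"url": "u1", "title": "A"}],
    #              [{"url": "u1", "title": "B"}, {"url": "u2", "title": "C"}])
    #   -> [{"url": "u1", "title": "B"}, {"url": "u2", "title": "C"}]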

    def update_readme(self, content: typing.List[typing.Dict[str, typing.Any]]) -> str:
        with open("./README.md", "r", encoding="utf-8") as fd:
            readme = fd.read()
        # Use a callable replacement so backslashes in book titles are not
        # interpreted as regex backreferences by re.sub.
        replacement = self.create_list(content)
        return re.sub(
            r"<!-- BEGIN WEREAD -->[\W\w]*<!-- END WEREAD -->",
            lambda _: replacement,
            readme,
        )

    def create_archive(
        self, content: typing.List[typing.Dict[str, typing.Any]], date: str
    ) -> str:
        # "共 N 条" means "N items in total" in the archive header.
        return f"# {date}\n\n共 {len(content)} 条\n\n{self.create_list(content)}"

    def run(self):
        dir_name = "weread"
        raw_data = self.get_raw()
        cleaned_data = self.clean_raw(raw_data)
        cur_date = current_date()
        # Write the raw data, merged with anything already downloaded today
        self.create_raw_path = f"./raw/{dir_name}/{cur_date}.json"
        raw_path = f"./raw/{dir_name}/{cur_date}.json"
        already_download_data = self.read_already_download(raw_path)
        merged_data = self.merge_data(cleaned_data, already_download_data)
        self.create_raw(raw_path, json.dumps(merged_data, ensure_ascii=False))
        # Update README
        readme_text = self.update_readme(merged_data)
        readme_path = "./README.md"
        write_text_file(readme_path, readme_text)
        # Update the daily archive
        archive_text = self.create_archive(merged_data, cur_date)
        archive_path = f"./archives/{dir_name}/{cur_date}.md"
        write_text_file(archive_path, archive_text)


if __name__ == "__main__":
    weread_obj = WebSiteWeRead()
    weread_obj.run()
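
# Notes (inferred from the paths above, not from project docs): the script
# expects ./raw/weread/ and ./archives/weread/ to be writable (or
# utils.write_text_file to create them), and README.md to already contain the
# <!-- BEGIN WEREAD --> / <!-- END WEREAD --> markers for update_readme to match.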