crawlerpy.py
import os
import random
import time
from urllib.parse import urljoin

import requests
from lxml import html


class Crawler:
    def __init__(self):
        self.session = requests.Session()
        self.visited_pages = set()

    def download_page(self, url, base_url):
        # Derive a flat local file name from the part of the URL after base_url.
        page_name = url[len(base_url):].rstrip("/").replace("/", "_") + ".html"
        page_path = os.path.join(os.getcwd(), page_name)
        if os.path.exists(page_path):
            print("Page already exists, skipping download:", url)
            return page_path
        try:
            response = self.session.get(url)
            response.raise_for_status()
            with open(page_path, 'wb') as f:
                f.write(response.content)
            self.visited_pages.add(url)
            print("Downloaded page:", url)
            # Sleep 1-10 seconds between requests to avoid hammering the server.
            wait_time = random.randint(1, 10)
            time.sleep(wait_time)
            return page_path
        except requests.exceptions.RequestException as e:
            print("Request exception occurred:", e)
        except Exception as e:
            print("Unexpected exception occurred:", e)
        return None

    def crawl_page(self, url, base_url):
        page_path = self.download_page(url, base_url)
        if not page_path:
            return
        try:
            with open(page_path, 'rb') as f:
                tree = html.fromstring(f.read())
            # Follow every <a href> that stays under base_url and has not been visited yet.
            for link in tree.xpath('//a/@href'):
                link = urljoin(url, link)
                if '#' in link or link in self.visited_pages or not link.startswith(base_url):
                    continue
                self.visited_pages.add(link)
                self.crawl_page(link, base_url)
        except Exception as e:
            print("Exception while processing page:", e)

    def close(self):
        self.session.close()
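
# Illustration of the file-name mapping used by download_page (a hypothetical example;
# "https://example.com/" is a placeholder, not a URL from this script): a page under
# base_url is saved as a single flat file in the current working directory.
#
#     "https://example.com/a/b/"[len("https://example.com/"):]   # -> "a/b/"
#     "a/b/".rstrip("/").replace("/", "_") + ".html"             # -> "a_b.html"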

crawler = Crawler()
try:
    # Fill in the start page and the URL prefix that crawled links must share.
    start_url = ""
    base_url = ""
    crawler.crawl_page(start_url, base_url)
finally:
    crawler.close()
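
# Minimal usage sketch (the URLs below are hypothetical placeholders, not values
# from this script): passing the same prefix as both the start page and base_url
# restricts the crawl to pages under that prefix.
#
#     crawler = Crawler()
#     try:
#         crawler.crawl_page("https://example.com/docs/", "https://example.com/docs/")
#     finally:
#         crawler.close()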