import time

import requests
from bs4 import BeautifulSoup

# Base listing URL; the 1-based page number is appended to form each request URL.
BASE_URL = "https://xxx.com/list?page="
START_PAGE = 1
END_PAGE = 100
# Browser-like User-Agent so the server does not reject the default
# python-requests UA outright.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
}


def crawl_page(page):
    """Fetch one listing page and print the title of every item on it.

    Args:
        page: 1-based page number appended to ``BASE_URL``.

    Returns:
        list[str]: the titles found on the page (empty when the request
        fails), so callers can consume the data as well as read stdout.
    """
    url = f"{BASE_URL}{page}"
    titles = []
    try:
        resp = requests.get(url, headers=HEADERS, timeout=10)
        resp.raise_for_status()
        soup = BeautifulSoup(resp.text, "html.parser")
        for item in soup.find_all("div", class_="item"):
            heading = item.find("h3")
            # Guard against items without an <h3>: previously this raised
            # AttributeError, which aborted the rest of the page's items.
            if heading is None:
                continue
            title = heading.get_text(strip=True)
            titles.append(title)
            print(title)

        print(f"第 {page} 页爬取完成")

    except requests.RequestException as e:
        # Narrowed from bare `Exception`: only network/HTTP failures are
        # expected here; programming errors should surface normally.
        print(f"第 {page} 页出错:{e}")
    return titles


if __name__ == "__main__":
    for page in range(START_PAGE, END_PAGE + 1):
        crawl_page(page)
        # Throttle to one request per second to be polite to the server.
        time.sleep(1)