网络数据采集(爬虫基础)

This commit is contained in:
2509165020
2026-03-26 15:14:24 +08:00
parent c6f75566e7
commit 17aa0d9d4c
2 changed files with 35 additions and 29 deletions

35
260324+2509165020.py.py Normal file
View File

@@ -0,0 +1,35 @@
import requests
from bs4 import BeautifulSoup
# Scrape movie titles from a public Douban doulist (list id 3936287).
# Douban paginates the list 25 items per page via the `start` query param;
# we fetch the first 10 pages and print each page's titles to stdout.
base_url = "https://www.douban.com/doulist/3936287/?start={}&sort=time&playable=0&sub_type="
headers = {
    # Browser-like UA + Referer: Douban blocks the default requests UA.
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36',
    'Referer': 'https://www.douban.com/' }
for page in range(10):
    start = page * 25  # offset of the first item on this page
    url = base_url.format(start)
    try:
        response = requests.get(url, headers=headers, timeout=10)
        # Fail fast on 403/404/5xx instead of silently parsing an error page.
        response.raise_for_status()
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')
        items = soup.find_all('div', class_='doulist-item')
        print(f"===== 第 {page+1} 页 =====")
        for item in items:
            title_tag = item.find('div', class_='title')
            # Guard both the wrapper div and its <a>: ad/placeholder items
            # in the list markup may lack either.
            if title_tag and title_tag.a:
                movie_title = title_tag.a.get_text(strip=True)
                print(movie_title)
    except requests.RequestException as e:
        # Narrow catch: network/timeout/HTTP errors only, so genuine bugs
        # in the parsing code above are not mislabelled as request failures.
        print(f"{page+1} 页请求失败:{str(e)}")