Files
task-2-1-data-collection/爬虫3.py.txt
2026-03-24 11:32:21 +08:00

26 lines
792 B
Plaintext

import requests
from bs4 import BeautifulSoup
import time

# Desktop-browser User-Agent so douban.com does not reject the request as a bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
}

# Titles collected across ALL pages.
# BUG FIX: this was re-initialized inside the loop, so every page wiped
# out the results of the previous pages.
movies = []

for page in range(10):
    url = f'https://movie.douban.com/top250?start={page*25}'
    print(f'正在爬取第 {page+1} 页:{url}')
    # BUG FIX: the original issued a single request (to an unrelated
    # doulist URL) once before the loop and re-parsed that same stale
    # response for all 10 pages. Each page must be fetched individually.
    response = requests.get(url, headers=headers, timeout=10)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')
    for a in soup.find_all('a'):
        href = a.get('href', '')
        # BUG FIX: the old `'?' in href` test was a leftover from the
        # doulist crawler; on top250 pages it matches pagination/utility
        # links. Movie entries link to /subject/<id>/. The poster <a>
        # around the <img> has no text, so skip empty titles to avoid
        # blank/duplicate entries.
        if '/subject/' in href:
            title = a.get_text(strip=True)
            if title:
                print(title)
                movies.append(title)
    time.sleep(1)  # be polite: pause between requests to avoid being rate-limited