完成作业二:第三次爬虫py
This commit adds a new file, 爬虫3.py.txt (26 lines).
@@ -0,0 +1,26 @@
|
|||||||
|
"""Scrape movie titles from the Douban Top 250 list (10 pages x 25 entries)."""
import requests
from bs4 import BeautifulSoup

# Browser-like User-Agent so Douban does not reject the request as a bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
}

# Collected link texts across all pages.
movies = []

for page in range(10):
    # Each Top 250 page shows 25 entries; `start` is the zero-based offset.
    url = f'https://movie.douban.com/top250?start={page*25}'
    print(f'正在爬取第 {page+1} 页:{url}')

    # BUG FIX: the request and parse must happen INSIDE the loop. The
    # original fetched a single (unrelated doulist) URL once before the
    # loop, so every iteration printed a new URL but re-parsed the same
    # stale response.
    response = requests.get(url, headers=headers, timeout=10)
    response.encoding = 'utf-8'
    # print(response.status_code)

    soup = BeautifulSoup(response.text, 'html.parser')

    for a in soup.find_all('a'):
        href = a.get('href', '')
        # NOTE(review): '?' in href is a very loose filter kept from the
        # original — it matches many non-movie links too. Consider
        # filtering on '/subject/' in href instead.
        if '?' in href:
            title = a.get_text(strip=True)
            print(title)
            movies.append(title)
|
||||||
|
|
||||||
Reference in New Issue
Block a user