完成作业一
This commit is contained in:
30
爬虫/爬虫.py (2).txt
Normal file
30
爬虫/爬虫.py (2).txt
Normal file
@@ -0,0 +1,30 @@
|
||||
"""Scrape the titles of Douban's Top 250 movies (10 pages of 25 entries)."""
import requests
from bs4 import BeautifulSoup
import time

# Identify as a regular browser: Douban rejects the default `requests` User-Agent.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}

# Collected movie titles, in ranking order.
all_movies = []

# The Top 250 list is paginated 25 per page via the `start` query parameter.
for page in range(0, 250, 25):
    url = f"https://movie.douban.com/top250?start={page}&filter="
    print(f"Fetching page {page//25 + 1}: {url}")

    # `timeout` keeps a stalled connection from hanging the script forever.
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()  # fail loudly on HTTP errors instead of parsing an error page
    response.encoding = "utf-8"
    soup = BeautifulSoup(response.text, "html.parser")

    # Each movie entry on the page is a <div class="item">.
    items = soup.find_all("div", class_="item")
    for item in items:
        title_tag = item.find("span", class_="title")
        if title_tag is None:
            # Skip malformed entries instead of crashing on .get_text(None).
            continue
        title = title_tag.get_text(strip=True)
        all_movies.append(title)
        print(title)

    # Be polite: throttle requests so we don't get rate-limited or banned.
    time.sleep(1)

print(f"\nScraped {len(all_movies)} movie titles in total.")
|
||||
Reference in New Issue
Block a user