Web Data Collection (Crawler Basics)
260324+2509165020.py.py · 35 lines · Normal file
@@ -0,0 +1,35 @@
import requests
from bs4 import BeautifulSoup

# Douban doulist pages are offset-paginated: start=0, 25, 50, ...
base_url = "https://www.douban.com/doulist/3936287/?start={}&sort=time&playable=0&sub_type="

# A browser-like User-Agent plus a Referer reduces the chance of being blocked.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36',
    'Referer': 'https://www.douban.com/'
}

for page in range(10):
    start = page * 25  # 25 entries per page
    url = base_url.format(start)
    try:
        response = requests.get(url, headers=headers, timeout=10)
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')
        items = soup.find_all('div', class_='doulist-item')
        print(f"===== Page {page + 1} =====")
        for item in items:
            title_tag = item.find('div', class_='title')
            # Guard against entries whose title block or link is missing.
            if title_tag and title_tag.a:
                movie_title = title_tag.a.get_text(strip=True)
                print(movie_title)
    except Exception as e:
        print(f"Page {page + 1} request failed: {e}")
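As a side note, the nested find_all/find calls above can be collapsed into a single CSS selector. A minimal sketch, assuming the same doulist markup (a div.doulist-item containing div.title with a link); select() is standard BeautifulSoup and skips entries without a title link automatically:

# Minimal sketch: CSS-selector variant of the extraction step above.
import requests
from bs4 import BeautifulSoup

url = "https://www.douban.com/doulist/3936287/?start=0&sort=time&playable=0&sub_type="
headers = {'User-Agent': 'Mozilla/5.0', 'Referer': 'https://www.douban.com/'}

resp = requests.get(url, headers=headers, timeout=10)
resp.encoding = 'utf-8'
soup = BeautifulSoup(resp.text, 'html.parser')

# One selector replaces the find_all('div', class_='doulist-item') loop
# and the title_tag / title_tag.a guards.
for a in soup.select('div.doulist-item div.title a'):
    print(a.get_text(strip=True))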
@@ -1,29 +0,0 @@
import requests
from bs4 import BeautifulSoup
import time

BASE_URL = "https://www.douban.com/doulist/3936288/"
START_PAGE = 1
END_PAGE = 100
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
}

def crawl_page(page):
    # Appends the page number directly to the list URL; doulist pagination
    # actually uses a start offset, which is why this version was replaced.
    url = f"{BASE_URL}{page}"
    try:
        resp = requests.get(url, headers=HEADERS, timeout=10)
        resp.raise_for_status()
        soup = BeautifulSoup(resp.text, "html.parser")
        items = soup.find_all("div", class_="item")
        for item in items:
            title = item.find("h3").get_text(strip=True)
            print(title)
        print(f"Page {page} fetched")
    except Exception as e:
        print(f"Page {page} failed: {e}")

if __name__ == "__main__":
    for page in range(START_PAGE, END_PAGE + 1):
        crawl_page(page)
        time.sleep(1)  # throttle to one request per second
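The deleted script built each page URL by appending the page number to the list URL; judging from the new script in this commit, doulist pagination uses a start offset instead. A minimal sketch of the corrected URL construction (the 25-per-page size is taken from the new script):

# Offset-based pagination: page 1 -> start=0, page 2 -> start=25, ...
BASE_URL = "https://www.douban.com/doulist/3936288/"

def page_url(page, page_size=25):
    return f"{BASE_URL}?start={(page - 1) * page_size}"

print(page_url(1))  # https://www.douban.com/doulist/3936288/?start=0
print(page_url(3))  # https://www.douban.com/doulist/3936288/?start=50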