网络数据采集(爬虫基础)
This commit is contained in:
29
260324+2509165020.py/260324+2509165020.txt
Normal file
29
260324+2509165020.py/260324+2509165020.txt
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
import requests
|
||||||
|
from bs4 import BeautifulSoup
|
||||||
|
import time
|
||||||
|
# Target listing URL template; the page number is appended at request time.
BASE_URL = "https://xxx.com/list?page="

# Inclusive range of pages to crawl.
START_PAGE = 1
END_PAGE = 100

# Browser-like User-Agent so the server does not reject the default
# python-requests identity outright.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
}
|
||||||
|
|
||||||
|
def crawl_page(page):
    """Fetch one listing page and print the title of every item on it.

    Any network or parsing failure is reported and swallowed so the
    caller's crawl loop can continue with the next page.

    :param page: 1-based page number appended to ``BASE_URL``.
    :returns: None (results are printed as a side effect).
    """
    url = f"{BASE_URL}{page}"
    try:
        resp = requests.get(url, headers=HEADERS, timeout=10)
        # Surface HTTP 4xx/5xx as exceptions instead of parsing an error page.
        resp.raise_for_status()
        soup = BeautifulSoup(resp.text, "html.parser")
        for item in soup.find_all("div", class_="item"):
            heading = item.find("h3")
            # Guard against items without an <h3>: the original called
            # .get_text() unconditionally, so one malformed item raised
            # AttributeError and skipped the rest of the page.
            if heading is not None:
                print(heading.get_text(strip=True))
        # Original status message was mojibake (encoding-corrupted Chinese);
        # restored as a readable English message.
        print(f"Page {page} fetched successfully")
    except Exception as e:
        # Broad catch is deliberate: one bad page must not stop the crawl.
        # (Error message likewise restored from mojibake.)
        print(f"Page {page} failed: {e}")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Walk the configured page range, pausing one second between
    # requests to stay polite to the target server.
    page = START_PAGE
    while page <= END_PAGE:
        crawl_page(page)
        time.sleep(1)
        page += 1
|
||||||
@@ -1,14 +0,0 @@
|
|||||||
import requests
from bs4 import BeautifulSoup

# Browser-like User-Agent: Douban rejects the default requests identity.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
}

# A public Douban doulist (curated list) page to scrape titles from.
url = 'https://www.douban.com/doulist/3936288/'

# timeout added: the original call could block forever on a stalled
# connection; 10 s matches the companion crawler's setting.
response = requests.get(url, headers=headers, timeout=10)
soup = BeautifulSoup(response.text, 'html.parser')

# Each entry's title is an <a> inside an element with class "title".
titles = soup.select('.title a')
for t in titles:
    print(t.text.strip())
|
|
||||||
Reference in New Issue
Block a user