提交爬虫代码 test.py
This commit is contained in:
31
test.py
Normal file
31
test.py
Normal file
@@ -0,0 +1,31 @@
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
# Request headers for douban.com: a browser-like User-Agent so the site
# does not reject the scraper's requests as a bot.
# (The original `a = 0` and first `url` assignment were dead code — both are
# reassigned inside the crawl loop before any use — and have been removed,
# along with leftover commented-out debug prints.)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}
||||
# Crawl the first 10 pages of douban doulist 3936288 and print every <p>
# element found on each page.
for page in range(10):
    # Each doulist page shows 25 items; `start` is the item offset in the URL.
    start = page * 25
    url = (
        'https://www.douban.com/doulist/3936288/'
        f'?start={start}&sort=time&playable=0&sub_type='
    )
    print("爬取第{c}页内容".format(c=page + 1))  # progress: "crawling page N"
    response = requests.get(url, headers=headers, timeout=10)
    # Fail fast on HTTP errors (403/404/5xx) instead of silently parsing an
    # error page as if it were doulist content.
    response.raise_for_status()
    response.encoding = 'utf-8'  # force UTF-8 so Chinese text decodes correctly
    soup = BeautifulSoup(response.text, 'html.parser')
    # Extract every <p> element from the page — presumably where the list
    # entries' text lives; TODO confirm against the actual page markup.
    paragraphs = soup.select('p')
    print(paragraphs)
Reference in New Issue
Block a user