"""Scrape item titles from a Douban doulist (25 items per page, 10 pages)."""
import time

import requests
from bs4 import BeautifulSoup

# Page size is fixed by Douban's doulist pagination: start = page * 25.
_PAGE_SIZE = 25
_BASE_URL = (
    "https://www.douban.com/doulist/3936288/"
    "?start={start}&sort=time&playable=0&sub_type="
)
_HEADERS = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"
    )
}


def build_url(page: int) -> str:
    """Return the doulist URL for a 0-based page index."""
    return _BASE_URL.format(start=page * _PAGE_SIZE)


def extract_titles(html: str) -> list:
    """Parse a doulist page and return item titles.

    Items are identified as anchors whose href contains '/subject/'
    (the Douban item-detail URL pattern); empty anchor texts are skipped.
    """
    soup = BeautifulSoup(html, "html.parser")
    titles = []
    for anchor in soup.find_all("a"):
        if "/subject/" in anchor.get("href", ""):
            text = anchor.get_text(strip=True)
            if text:
                titles.append(text)
    return titles


def main() -> None:
    """Fetch 10 pages of the doulist and print the titles found on each."""
    for page in range(10):
        print("爬取第{c}页内容".format(c=page + 1))
        try:
            response = requests.get(build_url(page), headers=_HEADERS, timeout=10)
            response.raise_for_status()
        except requests.RequestException as exc:
            # Best-effort scrape: report the failure and move on to the next page
            # instead of aborting the whole run.
            print("请求失败: {}".format(exc))
            continue
        response.encoding = "utf-8"
        for title in extract_titles(response.text):
            print(title)
        time.sleep(1)  # be polite: avoid hammering the server between pages


if __name__ == "__main__":
    main()