上传文件至 /

This commit is contained in:
2026-03-26 16:12:54 +08:00
parent 299fd765a2
commit 088289da08

View File

@@ -1,27 +1,24 @@
"""Scrape movie titles from a Douban doulist, 25 entries per page.

Fetches the first 10 pages of doulist 3936288, extracts every title whose
link points at a ``/subject`` detail page, prints them as it goes, and
finally prints the accumulated list.
"""
import requests
from bs4 import BeautifulSoup

# Browser-like User-Agent so Douban does not reject the request outright.
headers = {'user-Agent': 'Mozilla/5.0(Windows NT 10.0;Win64;x64) AppleWebKit/537.36(KHTML,like Gecko) Chrome/120.0.0.0 Safari/537.36'}

movies = []
for page in range(10):
    # BUG FIX: `start` is the item offset (25 per page), not a constant 0 —
    # otherwise every iteration re-fetches the first page.
    url = f'https://www.douban.com/doulist/3936288/?start={page * 25}&sort=time&playable=0&sub_type='
    print('爬取第{c}页内容'.format(c=page + 1))
    response = requests.get(url, headers=headers, timeout=10)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')
    # Titles live in <a> tags whose href points at a /subject/ detail page.
    for link in soup.find_all('a'):
        href = link.get('href', '')
        if '/subject' in href:
            title = link.get_text(strip=True)
            print(title)
            movies.append(title)
    print('----------------')
print(movies)