Compare commits
5 Commits
792d62f71a
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1a3da94247 | ||
|
|
17aa0d9d4c | ||
|
|
c6f75566e7 | ||
|
|
8d36664c99 | ||
|
|
86246ac68e |
35
260324+2509165020.py.py
Normal file
35
260324+2509165020.py.py
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
"""Scrape movie titles from a Douban doulist, 25 items per page."""

import requests
from bs4 import BeautifulSoup

# Page template: `start` is the 0-based offset of the first item on the page.
base_url = "https://www.douban.com/doulist/3936287/?start={}&sort=time&playable=0&sub_type="

# Browser-like headers: Douban rejects requests without a real UA/Referer.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36',
    'Referer': 'https://www.douban.com/' }


def _scrape_page(session, page):
    """Fetch doulist page *page* (0-based) and print every movie title on it.

    Raises requests.RequestException on network or HTTP-status failure.
    """
    url = base_url.format(page * 25)  # 25 entries per page
    response = session.get(url, headers=headers, timeout=10)
    # Fix: the original parsed error pages (403/404/...) as if they were results.
    response.raise_for_status()
    response.encoding = 'utf-8'

    soup = BeautifulSoup(response.text, 'html.parser')
    items = soup.find_all('div', class_='doulist-item')

    print(f"===== 第 {page+1} 页 =====")
    for item in items:
        title_tag = item.find('div', class_='title')
        # Guard: ad/placeholder items may lack a title div or anchor.
        if title_tag and title_tag.a:
            print(title_tag.a.get_text(strip=True))


def main(pages=10):
    """Scrape the first *pages* pages, continuing past per-page failures."""
    # Session reuses the TCP connection across pages (original opened one per page).
    with requests.Session() as session:
        for page in range(pages):
            try:
                _scrape_page(session, page)
            except requests.RequestException as e:
                # Narrowed from `except Exception`: only network/HTTP errors
                # are the expected, recoverable failures here.
                print(f"第 {page+1} 页请求失败:{str(e)}")


if __name__ == "__main__":
    main()
26
260326+2509165020.py.py
Normal file
26
260326+2509165020.py.py
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
"""Download 5 random images from picsum.photos into the working directory."""

from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}

_PAGE_URL = 'https://picsum.photos/'


def _download_image(index):
    """Fetch the picsum landing page and save its image as image_<index>.jpg.

    *index* is 1-based and only used for the filename and log messages.
    Raises requests.RequestException on network/HTTP failure and OSError
    if the file cannot be written.
    """
    response = requests.get(_PAGE_URL, headers=headers, timeout=10)
    # Fix: the original parsed error pages (429/503/...) as if they were HTML results.
    response.raise_for_status()
    response.encoding = 'utf-8'

    soup = BeautifulSoup(response.text, 'html.parser')
    # NOTE(review): assumes the landing page marks its image with class
    # "resize" — verify against the current picsum.photos markup.
    img_tag = soup.select_one('img.resize')
    if not img_tag:
        return  # original silently skipped pages without a matching <img>

    # Fix: resolve a possibly-relative src against the page URL; the
    # original passed it to requests.get() verbatim and would fail on
    # a relative path.
    img_src = urljoin(_PAGE_URL, img_tag.get('src'))
    print(f"第 {index} 张图片链接: {img_src}")

    img_response = requests.get(img_src, timeout=10)
    img_response.raise_for_status()
    with open(f'image_{index}.jpg', 'wb') as f:
        f.write(img_response.content)

    print(f"✅ 第 {index} 张下载完成!\n")


def main():
    """Download 5 images, continuing past per-image failures."""
    for i in range(5):
        try:
            _download_image(i + 1)
        except (requests.RequestException, OSError) as e:
            # Narrowed from `except Exception`: network and file-write
            # errors are the expected, recoverable failures here.
            print(f"❌ 第 {i+1} 张下载失败: {e}\n")

    print("🎉 5 张图片全部下载完毕!")


if __name__ == "__main__":
    main()
Reference in New Issue
Block a user