Compare commits
7 Commits
06e4a6216d
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1a3da94247 | ||
|
|
17aa0d9d4c | ||
|
|
c6f75566e7 | ||
|
|
8d36664c99 | ||
|
|
86246ac68e | ||
|
|
792d62f71a | ||
|
|
7be256b0d6 |
35
260324+2509165020.py.py
Normal file
35
260324+2509165020.py.py
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
"""Scrape movie titles from a Douban doulist, 25 items per page, 10 pages."""
import requests
from bs4 import BeautifulSoup

# Paged doulist URL; `start` is the zero-based item offset within the list.
base_url = "https://www.douban.com/doulist/3936287/?start={}&sort=time&playable=0&sub_type="

# Browser-like headers — Douban rejects requests without a User-Agent.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36',
    'Referer': 'https://www.douban.com/' }

for page in range(10):
    start = page * 25  # each doulist page shows 25 entries
    url = base_url.format(start)
    try:
        response = requests.get(url, headers=headers, timeout=10)
        # Bug fix: fail fast on HTTP 4xx/5xx instead of silently parsing an
        # error page as if it were list content.
        response.raise_for_status()
        response.encoding = 'utf-8'

        soup = BeautifulSoup(response.text, 'html.parser')
        items = soup.find_all('div', class_='doulist-item')

        print(f"===== 第 {page+1} 页 =====")
        for item in items:
            # Each entry's title lives in <div class="title"><a>…</a></div>;
            # guard both levels since ads/placeholders may lack either.
            title_tag = item.find('div', class_='title')
            if title_tag and title_tag.a:
                movie_title = title_tag.a.get_text(strip=True)
                print(movie_title)
    except Exception as e:
        # Top-level boundary of the per-page unit of work: report and
        # continue with the next page rather than aborting the whole run.
        print(f"第 {page+1} 页请求失败:{str(e)}")
|
||||||
26
260326+2509165020.py.py
Normal file
26
260326+2509165020.py.py
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
"""Download 5 random images from picsum.photos into image_1.jpg … image_5.jpg."""
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

# Browser-like User-Agent so the site serves the normal HTML page.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}

for i in range(5):
    try:
        url = 'https://picsum.photos/'
        response = requests.get(url, headers=headers, timeout=10)
        # Bug fix: verify the page request succeeded before parsing it.
        response.raise_for_status()
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')
        # The landing page's sample image carries class "resize".
        img_tag = soup.select_one('img.resize')

        if img_tag:
            # Bug fix: resolve relative / protocol-relative src attributes
            # against the page URL — requests.get cannot fetch a bare
            # "/foo.jpg" or "//host/foo.jpg". Absolute URLs pass through
            # urljoin unchanged, so behavior for them is identical.
            img_src = urljoin(url, img_tag.get('src'))
            print(f"第 {i+1} 张图片链接: {img_src}")
            img_response = requests.get(img_src, timeout=10)
            # Don't write an HTML error body into a .jpg file.
            img_response.raise_for_status()

            with open(f'image_{i+1}.jpg', 'wb') as f:
                f.write(img_response.content)

            print(f"✅ 第 {i+1} 张下载完成!\n")
    except Exception as e:
        # Best-effort per image: report the failure and move on.
        print(f"❌ 第 {i+1} 张下载失败: {e}\n")

print("🎉 5 张图片全部下载完毕!")
|
||||||
19
爬虫2/爬虫课堂作业3.19.py.txt
Normal file
19
爬虫2/爬虫课堂作业3.19.py.txt
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
"""Fetch the first Douban Top 250 page and print its title, links, and movie entries."""
import requests
from bs4 import BeautifulSoup

url = 'https://movie.douban.com/top250'
# start=0 → first page of 25; filter left empty as the site expects.
params = {'start': '0', 'filter': ''}

response = requests.get(url, params=params)

if response.status_code == 200:
    html_content = response.text
    print("请求成功,获取到 HTML 内容")
else:
    print(f"请求失败,状态码: {response.status_code}")
    # Bug fix: the original fell through after a non-200 response and used
    # the undefined name `html_content`, raising NameError. Stop cleanly
    # instead of crashing with an unrelated error.
    raise SystemExit(1)

# NOTE(review): 'lxml' is a third-party parser — the other scripts in this
# change use the stdlib 'html.parser'; confirm lxml is installed.
soup = BeautifulSoup(html_content, 'lxml')

title = soup.find('title').string
print("页面标题:", title)

# Dump every anchor's href (may be None for anchors without one).
links = soup.find_all('a')
for link in links:
    print("链接地址:", link.get('href'))

# Each ranked movie is a <div class="item"> block.
div_elements = soup.select('div.item')
for div in div_elements:
    print("电影条目内容:", div.text)
|
||||||
Reference in New Issue
Block a user