Compare commits

..

7 Commits

Author SHA1 Message Date
2509165020
1a3da94247 网络数据采集(爬虫基础) 2026-03-26 15:52:20 +08:00
2509165020
17aa0d9d4c 网络数据采集(爬虫基础) 2026-03-26 15:14:24 +08:00
2509165020
c6f75566e7 网络数据采集(爬虫基础) 2026-03-24 11:36:20 +08:00
2509165020
8d36664c99 网络数据采集(爬虫基础) 2026-03-24 11:29:57 +08:00
2509165020
86246ac68e 网络数据采集(爬虫基础) 2026-03-24 10:41:27 +08:00
2509165020
792d62f71a 网络数据采集(爬虫基础) 2026-03-19 21:25:46 +08:00
2509165020
7be256b0d6 完成课堂作业爬虫3.19 2026-03-19 20:58:44 +08:00
3 changed files with 80 additions and 0 deletions

35
260324+2509165020.py.py Normal file
View File

@@ -0,0 +1,35 @@
import requests
from bs4 import BeautifulSoup

# Scrape movie titles from a Douban doulist, 10 pages of 25 items each.
base_url = "https://www.douban.com/doulist/3936287/?start={}&sort=time&playable=0&sub_type="
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36',
    'Referer': 'https://www.douban.com/' }

for page in range(10):
    start = page * 25  # doulist paginates in steps of 25
    url = base_url.format(start)
    try:
        response = requests.get(url, headers=headers, timeout=10)
        # Fail fast on HTTP errors: previously a 403/418 anti-bot page
        # parsed as zero items and the failure went unnoticed.
        response.raise_for_status()
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')
        items = soup.find_all('div', class_='doulist-item')
        print(f"===== 第 {page+1} 页 =====")
        for item in items:
            title_tag = item.find('div', class_='title')
            # Guard: some doulist entries have no title anchor.
            if title_tag and title_tag.a:
                movie_title = title_tag.a.get_text(strip=True)
                print(movie_title)
    except Exception as e:
        print(f"{page+1} 页请求失败:{str(e)}")

26
260326+2509165020.py.py Normal file
View File

@@ -0,0 +1,26 @@
import requests
from bs4 import BeautifulSoup

# Download 5 images linked from the picsum.photos landing page.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}

for i in range(5):
    try:
        url = 'https://picsum.photos/'
        response = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors instead of parsing an error page.
        response.raise_for_status()
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')
        img_tag = soup.select_one('img.resize')
        if img_tag:
            # NOTE(review): assumes src is an absolute URL — a
            # protocol-relative "//..." src would need a scheme prefix; verify.
            img_src = img_tag.get('src')
            print(f"{i+1} 张图片链接: {img_src}")
            img_response = requests.get(img_src, timeout=10)
            # Without this check a failed download saved the error body
            # as image_{i+1}.jpg.
            img_response.raise_for_status()
            with open(f'image_{i+1}.jpg', 'wb') as f:
                f.write(img_response.content)
            print(f"✅ 第 {i+1} 张下载完成!\n")
        else:
            # Previously a missing <img class="resize"> was skipped silently
            # and the run still ended with the "all done" message.
            print(f"❌ 第 {i+1} 张未找到图片标签\n")
    except Exception as e:
        print(f"❌ 第 {i+1} 张下载失败: {e}\n")

print("🎉 5 张图片全部下载完毕!")

View File

@@ -0,0 +1,19 @@
import requests
from bs4 import BeautifulSoup

# Fetch the first page of Douban Top 250 and print the page title,
# all link hrefs, and the text of each movie item.
url = 'https://movie.douban.com/top250'
params = {'start': '0', 'filter': ''}
# Douban typically rejects requests that carry no browser User-Agent,
# so send one explicitly.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}
response = requests.get(url, params=params, headers=headers)

if response.status_code == 200:
    html_content = response.text
    print("请求成功,获取到 HTML 内容")
    # BUG FIX: parsing used to run unconditionally, so on a non-200
    # response `html_content` was never assigned and the script crashed
    # with NameError right after printing the failure message.
    soup = BeautifulSoup(html_content, 'lxml')
    title = soup.find('title').string
    print("页面标题:", title)
    links = soup.find_all('a')
    for link in links:
        print("链接地址:", link.get('href'))
    div_elements = soup.select('div.item')
    for div in div_elements:
        print("电影条目内容:", div.text)
else:
    print(f"请求失败,状态码: {response.status_code}")