Python文件操作完全指南
This commit is contained in:
24
0331+2509165015/Text.1.py
Normal file
24
0331+2509165015/Text.1.py
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
"""Fetch the Douban Top250 front page and save the first 10 movie titles."""

import requests
import re

# Browser-like UA so the site does not reject the request outright.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
}

url = "https://movie.douban.com/top250"

try:
    response = requests.get(url=url, headers=headers, timeout=10)
    response.raise_for_status()
    page_source = response.text
    print("页面请求成功!")
except Exception as e:
    print(f"请求失败:{e}")
    exit()

# `[^&]+?` stops before "&nbsp;", so only the Chinese <span class="title">
# of each card is captured (the English title span contains "&nbsp;/").
title_pattern = re.compile(r'<span class="title">([^&]+?)</span>', re.S)
top_titles = title_pattern.findall(page_source)[:10]

with open("movies.txt", "w", encoding="utf-8") as f:
    f.writelines(f"{title}\n" for title in top_titles)

print(f"成功爬取{len(top_titles)}部电影名,已保存到movies.txt!")

print("\n爬取结果预览:")
for i, title in enumerate(top_titles, 1):
    print(f"{i}. {title}")
45
0331+2509165015/Text.2.py
Normal file
45
0331+2509165015/Text.2.py
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
import requests
import re
import csv
import time
import random

# Browser-style request headers; the Referer helps avoid anti-scraping
# checks on paginated requests.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
    "Referer": "https://movie.douban.com/"
}

# One match per movie card: (rank, Chinese title, alias title, rating).
# NOTE(review): `[^&]+?` makes the title group stop before "&nbsp;", so only
# the first (Chinese) <span class="title"> is captured — presumably intended.
movie_pattern = re.compile(
    r'<div class="item">.*?'
    r'<em class="">(\d+)</em>.*?'
    r'<span class="title">([^&]+?)</span>.*?'
    r'<span class="other">/ ([^<]+?)</span>.*?'
    r'<span class="rating_num" property="v:average">(\d+\.\d+)</span>',
    re.S  # DOTALL: each movie card spans many lines of HTML
)
|
def crawl_douban_top250():
    """Crawl all 10 pages of Douban Top250 with the module regex and save
    (rank, Chinese title, alias, rating) rows to movies.csv."""
    collected = []
    for page_index in range(10):
        start = page_index * 25
        page_url = f"https://movie.douban.com/top250?start={start}&filter="
        try:
            # Randomized pause between requests to look less like a bot.
            time.sleep(random.uniform(1, 2))
            resp = requests.get(url=page_url, headers=headers, timeout=15)
            resp.raise_for_status()
            collected.extend(movie_pattern.findall(resp.text))
            print(f"第{page_index + 1}页爬取成功")
        except Exception as e:
            # A failed page is skipped; the rest of the crawl continues.
            print(f"第{page_index + 1}页爬取失败:{e}")

    # newline="" so csv handles row terminators itself (no blank lines on Windows).
    with open("movies.csv", "w", encoding="utf-8", newline="") as out:
        csv_writer = csv.writer(out)
        csv_writer.writerow(["排名", "中文名", "英文名", "评分"])
        csv_writer.writerows(collected)

    print(f"全部爬取完成!共获取{len(collected)}部电影,已保存到movies.csv")
|
# Entry point: run the crawl only when executed as a script, not on import.
if __name__ == "__main__":
    crawl_douban_top250()
73
0331+2509165015/Text.3.py
Normal file
73
0331+2509165015/Text.3.py
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
import requests
from bs4 import BeautifulSoup
import json
import time
import random
import re

# Browser-like headers: UA plus Accept-Language/Referer to reduce the chance
# of the site serving a block page to the scraper.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Referer": "https://movie.douban.com/"
}
|
||||||
|
def parse_movie_info(item):
|
||||||
|
"""解析单部电影信息"""
|
||||||
|
try:
|
||||||
|
rank = item.find("em").text.strip()
|
||||||
|
title = item.find("span", class_="title").text.strip()
|
||||||
|
|
||||||
|
other_span = item.find("span", class_="other")
|
||||||
|
en_title = other_span.text.strip().replace("/ ", "") if other_span else ""
|
||||||
|
|
||||||
|
rating = item.find("span", class_="rating_num").text.strip()
|
||||||
|
|
||||||
|
quote_span = item.find("span", class_="inq")
|
||||||
|
quote = quote_span.text.strip() if quote_span else ""
|
||||||
|
info_p = item.find("div", class_="bd").find("p").text
|
||||||
|
year = re.search(r"(\d{4})", info_p).group(1) if re.search(r"(\d{4})", info_p) else ""
|
||||||
|
|
||||||
|
return {
|
||||||
|
"rank": int(rank),
|
||||||
|
"title": title,
|
||||||
|
"en_title": en_title,
|
||||||
|
"rating": float(rating),
|
||||||
|
"quote": quote,
|
||||||
|
"year": year
|
||||||
|
}
|
||||||
|
except Exception as e:
|
||||||
|
print(f"解析电影信息失败:{e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def crawl_douban_top250():
    """Crawl all 10 pages of Douban Top250, parse each movie card with
    BeautifulSoup via parse_movie_info, and save the results to movies.json.

    Returns the list of parsed movie dicts (possibly fewer than 250 when
    pages fail to download or individual cards fail to parse).
    """
    all_movies = []
    base_url = "https://movie.douban.com/top250"

    for page_num in range(10):  # 10 pages x 25 movies
        url = f"{base_url}?start={page_num * 25}&filter="
        try:
            # Randomized delay between requests to stay polite / undetected.
            time.sleep(random.uniform(1.5, 2.5))

            response = requests.get(url, headers=headers, timeout=15)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, "html.parser")
            movie_items = soup.find_all("div", class_="item")
            # Fix: count cards that actually parsed, not all cards found,
            # so the progress message is accurate when parsing fails.
            parsed = [m for m in (parse_movie_info(i) for i in movie_items) if m]
            all_movies.extend(parsed)

            print(f"✅ 第{page_num + 1}页爬取完成,已获取{len(parsed)}部电影")
        except Exception as e:
            # Truncate long exception text; a failed page is skipped.
            print(f"❌ 第{page_num + 1}页爬取失败:{str(e)[:50]}...")
            continue

    # ensure_ascii=False keeps Chinese titles readable in the output file.
    with open("movies.json", "w", encoding="utf-8") as f:
        json.dump(all_movies, f, ensure_ascii=False, indent=2)

    print(f"\n🎉 爬取完成!共收录{len(all_movies)}部电影")
    print("📄 文件保存路径:movies.json")  # fix: dropped f-prefix with no placeholder
    return all_movies
|
# Entry point: run the crawl only when executed as a script, not on import.
if __name__ == "__main__":
    crawl_douban_top250()
1
0331+2509165015/movies.csv
Normal file
1
0331+2509165015/movies.csv
Normal file
@@ -0,0 +1 @@
|
|||||||
|
排名,中文名,英文名,评分
|
||||||
|
2002
0331+2509165015/movies.json
Normal file
2002
0331+2509165015/movies.json
Normal file
File diff suppressed because it is too large
Load Diff
10
0331+2509165015/movies.txt
Normal file
10
0331+2509165015/movies.txt
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
肖申克的救赎
|
||||||
|
霸王别姬
|
||||||
|
泰坦尼克号
|
||||||
|
阿甘正传
|
||||||
|
千与千寻
|
||||||
|
美丽人生
|
||||||
|
星际穿越
|
||||||
|
这个杀手不太冷
|
||||||
|
盗梦空间
|
||||||
|
楚门的世界
|
||||||
Reference in New Issue
Block a user