BeautifulSoup 网页解析:爬虫内容提取利器
This commit is contained in:
45
0402+2509165015.CSV
Normal file
45
0402+2509165015.CSV
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
"""Scrape the Douban Top250 movie chart and export it to douban_top250.csv.

Walks the 10 paginated list pages (25 movies each), extracts per-movie
fields with BeautifulSoup, and writes one CSV row per movie.
"""
import requests
from bs4 import BeautifulSoup
import csv
import time

# Browser-like UA: Douban returns 418/empty pages to default client UAs.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}

movies = []

for start in range(0, 250, 25):
    url = f"https://movie.douban.com/top250?start={start}"
    # timeout: without it a stalled connection hangs the scrape forever;
    # raise_for_status: fail fast instead of silently parsing an error page.
    res = requests.get(url, headers=headers, timeout=10)
    res.raise_for_status()
    soup = BeautifulSoup(res.text, "html.parser")
    items = soup.find_all("div", class_="item")

    for item in items:
        rank = item.find("em").text
        title = item.find("span", class_="title").text
        rating = item.find("span", class_="rating_num").text
        # Last <span> inside the star block holds e.g. "1234567人评价".
        people = item.find("div", class_="star").find_all("span")[-1].text.replace("人评价", "")
        # Not every movie has a one-line quote; look it up once, default to "无".
        quote_tag = item.find("span", class_="inq")
        quote = quote_tag.text if quote_tag else "无"

        # The info <p> literally has class="" on Douban; line 1 is
        # director/actors, line 2 is "year / area / genre".
        info = item.find("p", class_="").text.strip().split("\n")
        line1 = info[0].strip()
        line2 = info[1].strip() if len(info) > 1 else ""

        director = line1.split("导演: ")[1].split("主演: ")[0].strip() if "导演: " in line1 else "未知"
        actor = line1.split("主演: ")[1].strip() if "主演: " in line1 else "未知"
        parts = line2.split("/") if line2 else []
        year = parts[0].strip() if len(parts) >= 1 else "未知"
        area = parts[1].strip() if len(parts) >= 2 else "未知"
        genre = parts[2].strip() if len(parts) >= 3 else "未知"

        movies.append({
            "排名": rank, "电影名": title, "评分": rating, "评价人数": people, "经典台词": quote,
            "导演": director, "主演": actor, "年份": year, "地区": area, "类型": genre
        })

    time.sleep(1)  # be polite: throttle to one page per second
    print(f"已爬取 {start + 25} 条")

# Explicit fieldnames: movies[0].keys() would raise IndexError if the
# scrape produced nothing (blocked, layout change), and this also pins
# the column order independently of dict contents.
fieldnames = ["排名", "电影名", "评分", "评价人数", "经典台词",
              "导演", "主演", "年份", "地区", "类型"]
with open("douban_top250.csv", "w", encoding="utf-8", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(movies)
print("✅ CSV 导出完成")
|
||||||
|
Can't render this file because it contains an unexpected character in line 7 and column 16.
|
46
0402+2509165015.JSON
Normal file
46
0402+2509165015.JSON
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
"""Scrape the Douban Top250 movie chart and export it to douban_top250.json.

Walks the 10 paginated list pages (25 movies each), extracts per-movie
fields with BeautifulSoup, and dumps the list as pretty-printed JSON.
"""
import requests
from bs4 import BeautifulSoup
import json
import time

# Browser-like UA: Douban returns 418/empty pages to default client UAs.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}

movies = []

for start in range(0, 250, 25):
    url = f"https://movie.douban.com/top250?start={start}"
    # timeout: without it a stalled connection hangs the scrape forever;
    # raise_for_status: fail fast instead of silently parsing an error page.
    res = requests.get(url, headers=headers, timeout=10)
    res.raise_for_status()
    soup = BeautifulSoup(res.text, "html.parser")
    items = soup.find_all("div", class_="item")

    for item in items:
        rank = item.find("em").text
        title = item.find("span", class_="title").text
        rating = item.find("span", class_="rating_num").text
        # Last <span> inside the star block holds e.g. "1234567人评价".
        people = item.find("div", class_="star").find_all("span")[-1].text.replace("人评价", "")
        # Not every movie has a one-line quote; look it up once, default to "无".
        quote_tag = item.find("span", class_="inq")
        quote = quote_tag.text if quote_tag else "无"

        # The info <p> literally has class="" on Douban; line 1 is
        # director/actors, line 2 is "year / area / genre".
        info = item.find("p", class_="").text.strip().split("\n")
        line1 = info[0].strip()
        line2 = info[1].strip() if len(info) > 1 else ""

        director = line1.split("导演: ")[1].split("主演: ")[0].strip() if "导演: " in line1 else "未知"
        actor = line1.split("主演: ")[1].strip() if "主演: " in line1 else "未知"
        parts = line2.split("/") if line2 else []
        year = parts[0].strip() if len(parts) >= 1 else "未知"
        area = parts[1].strip() if len(parts) >= 2 else "未知"
        genre = parts[2].strip() if len(parts) >= 3 else "未知"

        movies.append({
            "排名": rank, "电影名": title, "评分": rating, "评价人数": people, "经典台词": quote,
            "导演": director, "主演": actor, "年份": year, "地区": area, "类型": genre
        })

    time.sleep(1)  # be polite: throttle to one page per second
    print(f"已爬取 {start + 25} 条")

# ensure_ascii=False keeps the Chinese text readable in the output file.
with open("douban_top250.json", "w", encoding="utf-8") as f:
    json.dump(movies, f, ensure_ascii=False, indent=2)

print("✅ JSON 导出完成")
|
||||||
50
0402+2509165015.txt
Normal file
50
0402+2509165015.txt
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
"""Scrape the Douban Top250 movie chart and export it to douban_top250.txt.

Walks the 10 paginated list pages (25 movies each), extracts per-movie
fields with BeautifulSoup, and writes a human-readable text report.

NOTE(review): the original file was mojibake (GBK-encoded source
mis-decoded, leaving U+FFFD runs in every Chinese literal). All parsing
literals and dict keys below were reconstructed from the sibling CSV/JSON
exporters, whose parsing code is byte-identical; the report labels in the
f.write() lines are a best-effort reconstruction — confirm against the
original source if available.
"""
import requests
from bs4 import BeautifulSoup
import time

# Browser-like UA: Douban returns 418/empty pages to default client UAs.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}

movies = []

for start in range(0, 250, 25):
    url = f"https://movie.douban.com/top250?start={start}"
    # timeout: without it a stalled connection hangs the scrape forever;
    # raise_for_status: fail fast instead of silently parsing an error page.
    res = requests.get(url, headers=headers, timeout=10)
    res.raise_for_status()
    soup = BeautifulSoup(res.text, "html.parser")
    items = soup.find_all("div", class_="item")

    for item in items:
        rank = item.find("em").text
        title = item.find("span", class_="title").text
        rating = item.find("span", class_="rating_num").text
        # Last <span> inside the star block holds e.g. "1234567人评价".
        people = item.find("div", class_="star").find_all("span")[-1].text.replace("人评价", "")
        # Not every movie has a one-line quote; look it up once, default to "无".
        quote_tag = item.find("span", class_="inq")
        quote = quote_tag.text if quote_tag else "无"

        # The info <p> literally has class="" on Douban; line 1 is
        # director/actors, line 2 is "year / area / genre".
        info = item.find("p", class_="").text.strip().split("\n")
        line1 = info[0].strip()
        line2 = info[1].strip() if len(info) > 1 else ""

        director = line1.split("导演: ")[1].split("主演: ")[0].strip() if "导演: " in line1 else "未知"
        actor = line1.split("主演: ")[1].strip() if "主演: " in line1 else "未知"
        parts = line2.split("/") if line2 else []
        year = parts[0].strip() if len(parts) >= 1 else "未知"
        area = parts[1].strip() if len(parts) >= 2 else "未知"
        genre = parts[2].strip() if len(parts) >= 3 else "未知"

        movies.append({
            "排名": rank, "电影名": title, "评分": rating, "评价人数": people, "经典台词": quote,
            "导演": director, "主演": actor, "年份": year, "地区": area, "类型": genre
        })

    time.sleep(1)  # be polite: throttle to one page per second
    print(f"已爬取 {start + 25} 条")

# One blank-line-separated stanza per movie.
with open("douban_top250.txt", "w", encoding="utf-8") as f:
    for m in movies:
        f.write(f"《{m['排名']}》{m['电影名']}\n")
        f.write(f"评分：{m['评分']} 评价人数：{m['评价人数']}\n")
        f.write(f"导演：{m['导演']} 主演：{m['主演']}\n")
        f.write(f"年份：{m['年份']} 地区：{m['地区']} 类型：{m['类型']}\n")
        f.write(f"台词：{m['经典台词']}\n\n")

print("✅ TXT 导出完成")
|
||||||
Reference in New Issue
Block a user