网络数据采集(爬虫基础)
This commit is contained in:
19
爬虫2/爬虫课堂作业3.19.py.txt
Normal file
19
爬虫2/爬虫课堂作业3.19.py.txt
Normal file
@@ -0,0 +1,19 @@
|
||||
# Scrape the Douban Top 250 movie page: fetch the first page, then print the
# page title, every anchor href, and the text of each movie entry (div.item).
import requests
from bs4 import BeautifulSoup

url = 'https://movie.douban.com/top250'
# 'start' selects the pagination offset (0 = first page); 'filter' is left empty.
params = {'start': '0', 'filter': ''}

response = requests.get(url, params=params)

if response.status_code == 200:
    html_content = response.text
    print("请求成功,获取到 HTML 内容")

    # BUGFIX: the original parsed html_content OUTSIDE this branch, so a failed
    # request raised NameError instead of stopping after the error message.
    # All parsing now happens only when the request succeeded.
    soup = BeautifulSoup(html_content, 'lxml')

    # NOTE(review): soup.find('title') can return None if the server sends a
    # non-HTML error body (Douban often blocks the default requests User-Agent)
    # — confirm and add a User-Agent header if that happens in practice.
    title = soup.find('title').string
    print("页面标题:", title)

    # Every <a> tag on the page; link.get('href') is None for anchors without one.
    links = soup.find_all('a')
    for link in links:
        print("链接地址:", link.get('href'))

    # Each Top-250 movie entry lives in <div class="item">.
    div_elements = soup.select('div.item')
    for div in div_elements:
        print("电影条目内容:", div.text)
else:
    print(f"请求失败,状态码: {response.status_code}")
|
||||
Reference in New Issue
Block a user