完成作业三:网络数据采集(爬虫基础)
This commit is contained in:
27
2.py
Normal file
27
2.py
Normal file
@@ -0,0 +1,27 @@
|
||||
"""Homework script: fetch the Baidu homepage and do basic scraping.

Steps: GET the page with requests, print the status code and raw HTML,
parse it with BeautifulSoup, then print the <title> text, every anchor's
href, and the text of any ``div.di`` elements.
"""
import requests
from bs4 import BeautifulSoup as bs

print('------------')

url = 'https://www.baidu.com'
params = {'key': 'value'}
# Baidu serves a stripped-down stub page to clients without a browser-like
# User-Agent, so send one to receive the real homepage markup.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}

# timeout prevents the script from hanging forever on a stalled connection.
response = requests.get(url, params=params, headers=headers, timeout=10)
print(response.status_code)

print('------------')

# requests falls back to ISO-8859-1 when the server omits a charset header,
# which garbles Chinese text; trust the content-sniffed encoding instead.
response.encoding = response.apparent_encoding
html_content = response.text
print(html_content)

print('------------')

soup = bs(html_content, 'lxml')
print(soup)

print('============')

# find() returns None when the tag is absent — guard before reading .string
# so a title-less page prints None instead of raising AttributeError.
title_tag = soup.find('title')
title = title_tag.string if title_tag is not None else None
print(title)

print('============')

links = soup.find_all('a')
print(links)

print('============')

# Print the href of every anchor on the page.
for link in links:
    print("链接:", link.get('href'))

# Elements matching the CSS selector div.di (<div class="di">), if any.
div_element = soup.select('div.di')
print(div_element)
for div in div_element:
    print('div:', div.text)
|
||||
Reference in New Issue
Block a user