The requests library
Installation:
$ pip3 install requests
Basic usage:
Code:
import requests
url = 'http://www.httpbin.org/'  # base URL for the requests
data = {'a': 'aa', 'b': 'bb'}    # request parameters
# GET request: the parameters are sent as the query string
response = requests.get(url + 'get', params=data)
print(response.text)  # print the response body as text
# POST request: the parameters are sent as the form body
response = requests.post(url + 'post', data=data)
print(response.json())  # parse the response body as JSON and print it
Console output:
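httpbin echoes each request back as JSON, so the GET call above prints roughly the following (the headers and origin fields vary by client, and are abridged here):

{
  "args": {"a": "aa", "b": "bb"},
  "headers": {...},
  "origin": "...",
  "url": "http://www.httpbin.org/get?a=aa&b=bb"
}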
With regular expressions (scrape images and save them):
Code:
import os
import shutil
import re
import requests
abspath = os.path.abspath('.')  # absolute path of the current directory
keyword = '城市摄影'  # search keyword ("city photography")
url = 'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1594807659507_R&pv=&ic=&nc=1&z=&hd=&latest=&copyright=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&sid=&word=' + keyword
# Download an image
# downloadUrl[String]: source URL
# savePath[String]: destination path
def downloadImg(downloadUrl, savePath):
    print('Start downloading', downloadUrl)
    response = requests.get(downloadUrl, stream=True)  # stream the download
    if response.status_code == 200:  # download succeeded
        with open(savePath, 'wb') as f:  # open the target file
            response.raw.decode_content = True  # decompress gzip/deflate content
            shutil.copyfileobj(response.raw, f)  # write the raw stream to the file
# Scrape image metadata
# url[String]: page to scrape
def craw(url):
    # request headers
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
    }
    response = requests.get(url, headers=headers)  # request the page
    # regular expression matching the image metadata embedded in the page
    pattern = re.compile(
        r'{"thumbURL":"(.*?)".*?"middleURL":"(.*?)".*?"largeTnImageUrl":"(.*?)".*?"fromURL":"(.*?)".*?"fromPageTitle":"(.*?)".*?}',
        re.S)
    result = re.findall(pattern, response.text)  # extract all image entries
    os.makedirs(os.path.join(abspath, 'files'), exist_ok=True)  # make sure the target directory exists
    for x in result:
        thumbURL, middleURL, largeTnImageUrl, fromURL, fromPageTitle = x
        downloadUrl = ''
        if largeTnImageUrl != '':  # high-quality image URL
            downloadUrl = largeTnImageUrl
        elif middleURL != '':  # medium-quality image URL
            downloadUrl = middleURL
        elif thumbURL != '':  # thumbnail URL
            downloadUrl = thumbURL
        if downloadUrl == '':  # no usable URL for this entry
            continue
        # build the image title: strip HTML tags, slashes and a trailing dot
        title = re.sub(r'<.*?>|\\|\/|\.$', '', fromPageTitle)
        # get the file extension (dot escaped so it matches literally)
        ext = re.search(r'\.[a-z]+$', downloadUrl).group(0)
        # build the save path
        savePath = os.path.join(abspath, 'files', title + ext)
        # download the image
        downloadImg(downloadUrl, savePath)

# start scraping
craw(url)
Console output:
Downloaded files:
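A note on the write step: shutil.copyfileobj copies the raw urllib3 stream, which is why decode_content must be set by hand. An alternative sketch uses response.iter_content, which decodes the body automatically (the name downloadImgChunked and the 8192-byte chunk size are illustrative choices, not part of the original script):

import requests

def downloadImgChunked(downloadUrl, savePath):
    response = requests.get(downloadUrl, stream=True)  # stream the download
    if response.status_code == 200:
        with open(savePath, 'wb') as f:
            # iter_content decodes gzip/deflate automatically and yields bytes
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)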
The BeautifulSoup library
Installation:
# install the BeautifulSoup library
pip3 install bs4
# install the lxml parser
pip3 install lxml
Basic usage:
from bs4 import BeautifulSoup
import requests
url = 'https://www.python.org/'  # any HTML page works for this example
response = requests.get(url)  # request the page
soup = BeautifulSoup(response.text, 'lxml')  # parse the response body
soup.title  # the <title> tag
soup.title.string  # the text inside the <title> tag
soup.p['class']  # the class attribute of the first <p> tag
soup.find(id="link1")  # the tag whose id is link1
soup.find_all('a')  # all <a> tags
soup.find('table', {'data-name': 'userList'})  # the <table> tag whose data-name attribute is userList
for link in soup.find_all('a'):  # iterate over all <a> tags
    print(link.get('href'))  # print each tag's href attribute
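These accessors are easiest to see against a fixed document; a minimal, self-contained sketch (the HTML string is made up for illustration):

from bs4 import BeautifulSoup

html = '''<html><head><title>Demo</title></head>
<body><p class="intro">Hello</p>
<a id="link1" href="/one">One</a>
<a id="link2" href="/two">Two</a></body></html>'''

soup = BeautifulSoup(html, 'lxml')
print(soup.title.string)      # Demo
print(soup.p['class'])        # ['intro'] (class is multi-valued, so a list)
print(soup.find(id='link1'))  # <a href="/one" id="link1">One</a>
for link in soup.find_all('a'):
    print(link.get('href'))   # /one, then /two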
Scraping news articles:
Code:
from bs4 import BeautifulSoup
import requests
page = 1  # page number
url = 'http://www.cncga.org/news.asp?page=' + str(page) + '&types=3'  # request URL
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}
response = requests.get(url, headers=headers)  # request the page
response.encoding = 'gb2312'  # the site serves GB2312-encoded pages
soup = BeautifulSoup(response.text, 'lxml')  # parse the response body
soup_table = soup.find('table', {'width': '760'})  # the table containing the news list
for a in soup_table.find_all('a', {'class': 'a2'}):  # iterate over the link tags
    print(a.string, a.get('href'))  # print each article's title and URL
Console output:
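Because the page number is part of the query string, extending the script to walk every page is a small change; a sketch assuming a page count of 5 (the real total would have to be read from the site):

from bs4 import BeautifulSoup
import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}
for page in range(1, 6):  # assumed page count; adjust to the real total
    url = 'http://www.cncga.org/news.asp?page=' + str(page) + '&types=3'
    response = requests.get(url, headers=headers)
    response.encoding = 'gb2312'
    soup = BeautifulSoup(response.text, 'lxml')
    soup_table = soup.find('table', {'width': '760'})
    if soup_table is None:  # past the last page, or the layout changed
        break
    for a in soup_table.find_all('a', {'class': 'a2'}):
        print(page, a.string, a.get('href'))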