urllib basics

import urllib.request
# urlretrieve(url, local_path) downloads a page to a local file; it also works for images
urllib.request.urlretrieve(r"https://www.baidu.com",r"D:\py\简单爬虫\baidu.html")
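# urlretrieve also accepts an optional reporthook callback, handy for showing
# download progress. A quick sketch (the output filename here is just an illustration):
def progress(block_num, block_size, total_size):
    # called with: blocks transferred so far, block size in bytes,
    # and total size (-1 if the server sends no Content-Length)
    if total_size > 0:
        print("%.1f%%" % min(block_num * block_size / total_size * 100, 100))
urllib.request.urlretrieve("https://www.baidu.com", "baidu_progress.html", reporthook=progress)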
# urlcleanup() clears the cache/temporary files left behind by earlier requests
urllib.request.urlcleanup()
# info() returns summary (header) information about the page just fetched
User_Agent={'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
}
req=urllib.request.Request("https://book.douban.com",None,headers=User_Agent)
file= urllib.request.urlopen(req)
print(file.info())
# Date: Thu, 05 Dec 2019 14:35:04 GMT
# Content-Type: text/html; charset=utf-8
# Transfer-Encoding: chunked
# Connection: close
# Vary: Accept-Encoding
# Vary: Accept-Encoding
# X-Xss-Protection: 1; mode=block
# X-Douban-Mobileapp: 0
# Expires: Sun, 1 Jan 2006 01:00:00 GMT
# Pragma: no-cache
# Cache-Control: must-revalidate, no-cache, private
# Set-Cookie: bid=Ej-9Yhco5Ao; Expires=Fri, 04-Dec-20 14:35:04 GMT; Domain=.douban.com; Path=/
# X-DOUBAN-NEWBID: Ej-9Yhco5Ao
# X-DAE-App: book
# X-DAE-Instance: default
# Server: dae
# Strict-Transport-Security: max-age=15552000
# X-Content-Type-Options: nosniff
# getcode() returns the HTTP status code; 200 means success, anything else
# means the page could not be accessed. Useful for skipping unreachable pages.
print(file.getcode())  # 200
# geturl() returns the URL of the page just fetched
print(file.geturl())  # https://book.douban.com
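Note that urlopen actually raises urllib.error.HTTPError for non-200 responses rather than returning them, so skipping broken pages is usually done with try/except. A minimal sketch (the URL list is made up):

import urllib.request, urllib.error

urls = ["https://book.douban.com", "https://book.douban.com/no-such-page"]  # illustrative
for url in urls:
    try:
        resp = urllib.request.urlopen(url)
        print(url, resp.getcode())  # 200 if we get this far
    except urllib.error.HTTPError as e:
        print("skipping", url, "- status", e.code)
    except urllib.error.URLError as e:
        print("skipping", url, "- reason", e.reason)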
Timeout settings

# Timeout test: request the same page in a loop, hoping the server's responses
# slow down enough to trigger the 1-second timeout; in my test it never did
import urllib.request
for i in range(0, 100):
    try:
        file = urllib.request.urlopen("http://www.taocloud.com.cn/", timeout=1)
        print(len(file.read().decode("utf-8")))
    except Exception:
        print("an exception occurred")
Simulating HTTP requests automatically


Following the rules above, we can construct a GET request.

Then find the titles by their tags in the HTML.

The code should look like the following, but it scrapes nothing. I went through every post I could find; even the closest one, from June 2019, no longer works.
Without exception they all get back a Baidu security-verification page, so Baidu has probably added more advanced anti-scraping measures.
# GET request in practice: automating a Baidu search
import urllib.request, urllib.parse, re
User_Agent={'User-Agent':"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"}
keywd="Python"
# URLs cannot contain Chinese characters directly, so percent-encode the keyword
keywd = urllib.parse.quote(keywd)
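# For example, a Chinese keyword is percent-encoded into its UTF-8 bytes
# (a quick illustration, not part of the original script):
print(urllib.parse.quote("爬虫"))  # -> %E7%88%AC%E8%99%AB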
url="https://www.baidu.com/s?wd="+keywd
req=urllib.request.Request(url,None,headers=User_Agent)
file=urllib.request.urlopen(req)
print(file.getcode())
data=file.read().decode("utf-8")
print(data)
# Extract the titles for analysis and look for a pattern, as in the earlier exercise
pat=r"'title':'(.*?)',"
rst=re.compile(pat).findall(data)
print(rst)
Because of this trap, the first attempt produced no results.
https://www.jianshu.com/p/e58123bddd49
People there recommend the third-party requests library instead (requests is built on top of urllib3; it is not the same thing as urllib).
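For comparison, a minimal sketch of the same search using requests (a third-party package, installed with pip install requests); whether it gets past the security verification presumably still depends on the cookies you send:

import requests  # third-party: pip install requests

headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
                         "(KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"}
# params percent-encodes the keyword automatically, no quote() needed
resp = requests.get("https://www.baidu.com/s", params={"wd": "Python"}, headers=headers)
print(resp.status_code)
print(len(resp.text))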
# GET request in practice, take two: automating a Baidu search with a logged-in cookie
import urllib.request, urllib.parse, re
User_Agent={'User-Agent':"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
'Cookie':"BAIDUID=88CA88B2B799200DC1A23164314D74A2:FG=1; BDUSS=G1CbFJ6STNYLVJoT3ZVUzNIUnZZVGRTSmh6TTRFT3pORzZ4dURBRXFnTW44UWhkRVFBQUFBJCQAAAAAAAAAAAEAAACZmVsHMTAwNDAxOTI2NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACdk4VwnZOFcM; BIDUPSID=88CA88B2B799200DC1A23164314D74A2; PSTM=1563629371; BD_UPN=12314753; BDORZ=FFFB88E999055A3F8A630C64834BD6D0; __guid=136081015.3291552332271329000.1576146419876.8496; BD_HOME=1; BDRCVFR[BLowsRiZ163]=mk3SLVN4HKm; H_PS_PSSID=; delPer=0; BD_CK_SAM=1; PSINO=2; monitor_count=12; sugstore=0; H_PS_645EC=e5625rKi84izdZ8Ogq%2B%2BTh3RXIMn7r3cZnvR8pdPblN3s3EDheunixTWOlkXgNFks3Hf9vCa%2FnNm; BDSVRTM=184; COOKIE_SESSION=854_5_9_8_10_40_0_0_9_7_100_0_0_0_100_32_1576143892_1576130780_1576227666%7C9%23144751_327_1576130812%7C9"
}
keywd="Python"
# URLs cannot contain Chinese characters directly, so percent-encode the keyword
keywd = urllib.parse.quote(keywd)
# If any of these trailing query parameters is missing, the request is bounced back to Baidu
url="https://www.baidu.com/s?wd="+keywd+"&usm=3&rsv_idx=2&rsv_page=1"
req=urllib.request.Request(url,None,headers=User_Agent)
file=urllib.request.urlopen(req)
print(file.getcode())
data=file.read().decode("utf-8")
# Extract the titles for analysis and look for a pattern, as before
pat=r"data-tools='(.*?)'"
rst=re.findall(pat,data,re.S)
print(rst)
pat=r"\"title\":\"(.*?)\""
rst1=[]
for i in rst:
    rst1.append(re.findall(pat, i))
print(rst1)
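The data-tools attribute actually holds a JSON string, so json.loads is sturdier than a second regex pass. A sketch reusing the rst list from above:

import json

titles = []
for item in rst:  # each item looks like {"title":"...","url":"..."}
    try:
        titles.append(json.loads(item)["title"])
    except (ValueError, KeyError):
        pass  # skip malformed entries
print(titles)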