urllib.request
The urllib.request module defines functions and classes for opening URLs.
urllib.request.urlopen(url, data=None, [timeout, ]*, cafile=None, capath=None, cadefault=False, context=None)
<li>url: either a URL string or a Request object
<li>data: the data to send when accessing the URL
<li>timeout: timeout setting for the request
<pre>
# -*- coding: UTF-8 -*-
import urllib.request                                      # import the module
response = urllib.request.urlopen('https://baidu.com')     # open the page
print(response.read())                                     # print the content
</pre>
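urlopen also accepts the timeout parameter listed above, and the returned response object exposes the status code and headers. A minimal sketch (the 5-second timeout and the example.com URL are arbitrary choices for illustration):
<pre>
# -*- coding: UTF-8 -*-
import urllib.request

# give up after 5 seconds if the server does not respond
response = urllib.request.urlopen('https://www.example.com', timeout=5)
print(response.getcode())                     # HTTP status code, e.g. 200
print(response.getheader('Content-Type'))     # a single response header value
print(response.read().decode('utf-8'))        # body decoded as text
</pre>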
Constructing a Request instance for the access
class urllib.request.Request(url, data=None, headers={}, origin_req_host=None, unverifiable=False, method=None)
<pre>
# -*- coding: UTF-8 -*-
import urllib.request
request = urllib.request.Request('https://baidu.com')
response = urllib.request.urlopen(request)
print(response.read())
</pre>
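As the signature above shows, the Request constructor also accepts headers and method directly. A minimal sketch (the User-Agent value is just an illustrative placeholder):
<pre>
# -*- coding: UTF-8 -*-
import urllib.request

request = urllib.request.Request(
    'https://baidu.com',
    headers={'User-Agent': 'Mozilla/5.0'},   # request headers set at construction time
    method='GET'                              # explicit HTTP method
)
response = urllib.request.urlopen(request)
print(response.read())
</pre>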
Passing data with GET
http://www.guancha.cn/Search/?k=一带一路
This is the URL the browser shows when searching guancha.cn for 一带一路 (the Belt and Road Initiative).
<pre>
# -*- coding: UTF-8 -*-
import urllib.request
import urllib.parse

values = {}
values['k'] = '一带一路'
data = urllib.parse.urlencode(values)        # percent-encode the query string
print(data)                                  # k=%E4%B8%80%E5%B8%A6%E4%B8%80%E8%B7%AF
url = 'http://www.guancha.cn/Search/?' + data
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
con = open('g.html', 'wb')                   # save the raw bytes to a local file
co = response.read()
con.write(co)
con.close()
</pre>
The contents of g.html are then:
<pre>
<!DOCTYPE html>
<html lang="zh-cmn-Hans">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta http-equiv="content-language" content="zh-CN">
<title>搜索结果页</title>
<link rel="stylesheet" type="text/css" href="../css/public.css">
<link rel="stylesheet" type="text/css" href="../css/main.css">
<meta name="Description" content="观察者网,致力于荟萃中外思想者精华,鼓励青年学人探索,建中西文化交流平台,为崛起中的精英提供决策参考。 " />
<meta name="Keywords" content="观察者网,观察者,春秋综合研究院,新闻,新媒体,观察,中国模式,政治,军事,历史,评论" />
<title>观察者网-中国关怀 全球视野</title> <link rel="shortcut icon" href="http://i.guancha.cn/images/favorite.ico" />
<script type="text/javascript" src="../js/jquery-1.8.2.min.js"></script>
<script type="text/javascript" src="../js/jquery.pagination.js"></script>
</head>
<body>
<div class="header">
......
</pre>
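Instead of writing the raw bytes to disk, the response can also be decoded to text directly; the charset can usually be read from the Content-Type header. A minimal sketch under that assumption (falling back to UTF-8 when the header does not declare a charset):
<pre>
# -*- coding: UTF-8 -*-
import urllib.request
import urllib.parse

data = urllib.parse.urlencode({'k': '一带一路'})
response = urllib.request.urlopen('http://www.guancha.cn/Search/?' + data)
charset = response.headers.get_content_charset() or 'utf-8'   # charset from Content-Type, default UTF-8
html = response.read().decode(charset)
print(html[:200])                                             # first 200 characters of the page
</pre>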
POST requests
Passing parameters uses the data parameter described above (data must be a bytes object).
<pre>
import urllib.request,urllib.parse
url = 'http://www.xxx.com'
postdata = urllib.parse.urlencode({
    'name': 'diyinqianchang',
    'pass': '88888'
}).encode('UTF-8')    # note: the encoded parameters must be bytes
req = urllib.request.Request(url,postdata)
data = urllib.request.urlopen(req).read()
print(data)
</pre>
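To check what such a POST actually sends, the request can be pointed at an echo service. A minimal sketch, assuming the external test endpoint http://httpbin.org/post (not part of the original example), which returns the submitted form as JSON:
<pre>
# -*- coding: UTF-8 -*-
import json
import urllib.request, urllib.parse

# httpbin.org/post echoes the submitted form back as JSON
url = 'http://httpbin.org/post'
postdata = urllib.parse.urlencode({'name': 'diyinqianchang', 'pass': '88888'}).encode('UTF-8')
req = urllib.request.Request(url, postdata)
body = urllib.request.urlopen(req).read().decode('utf-8')
print(json.loads(body)['form'])        # {'name': 'diyinqianchang', 'pass': '88888'}
</pre>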
Setting Headers
<pre>
import urllib.request,urllib.parse
url = 'http://www.zhihu.com/#signin'
postdata = urllib.parse.urlencode({
    'username': '188****8091',
    'password': '88888888'
}).encode('UTF-8')
req = urllib.request.Request(url, postdata)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36')  # set the User-Agent
req.add_header('Referer', 'https://www.zhihu.com/')  # set the Referer (anti-hotlinking check)
data = urllib.request.urlopen(req).read()
con = open('zhihu.html','wb')
con.write(data)
con.close()
print(data)
</pre>
Another way to set headers
<pre>
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
data = urllib.parse.urlencode(values)
request = urllib.request.Request(url, data.encode('utf-8'), headers)
</pre>
Here the headers dict is passed directly as the third parameter (headers={}) of Request.
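To confirm which headers a Request will actually carry, the Request object itself can be queried; urllib.request stores header names in capitalized form. A minimal sketch (the URL and User-Agent value are placeholders):
<pre>
# -*- coding: UTF-8 -*-
import urllib.request

req = urllib.request.Request('http://www.example.com')
req.add_header('User-Agent', 'Mozilla/5.0')
req.add_header('Referer', 'http://www.example.com/')
print(req.header_items())               # [('User-agent', 'Mozilla/5.0'), ('Referer', ...)]
print(req.get_header('User-agent'))     # note urllib stores the name as 'User-agent'
</pre>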
Setting up a proxy server
<pre>
# -*- coding: UTF-8 -*-
import urllib.request
"""
If proxies is given, it must be a dictionary mapping protocol names to URLs of proxies
"""
def use_proxy(proxy_addr, url):
    # route both http and https requests through the proxy
    proxy = urllib.request.ProxyHandler({'http': proxy_addr, 'https': proxy_addr})
    opener = urllib.request.build_opener(proxy)
    urllib.request.install_opener(opener)        # make this opener the global default
    data = urllib.request.urlopen(url).read().decode('utf-8')
    return data

proxy_addr = '119.**.**.60:7777'
data = use_proxy(proxy_addr, 'https://www.baidu.com')
print(len(data))    # 227
</pre>
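install_opener changes the global default for every later urlopen call. If the proxy should apply only to specific requests, the opener can also be used directly without installing it. A minimal sketch reusing the placeholder proxy address above:
<pre>
# -*- coding: UTF-8 -*-
import urllib.request

proxy = urllib.request.ProxyHandler({'http': '119.**.**.60:7777', 'https': '119.**.**.60:7777'})
opener = urllib.request.build_opener(proxy)
# opener.open() uses the proxy for this request only;
# urllib.request.urlopen() elsewhere is unaffected
response = opener.open('https://www.baidu.com')
print(len(response.read()))
</pre>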
URLError
<pre>
# -*- coding: UTF-8 -*-
"""
Two classes handle network errors: URLError and HTTPError. HTTPError is a subclass of URLError,
and URLError covers the broader range of failures.
URLError handles: failure to connect to the server, a non-existent remote URL, no network
connection, and HTTPError (as its parent class).
"""
import urllib.request
import urllib.error
try:
    urllib.request.urlopen('http://blog.baiduss.net')
except urllib.error.HTTPError as e:
    print(e.code)
    print(e.reason)
except urllib.error.URLError as e:
    print(e.reason)
</pre>
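HTTPError is not only an exception but also a response-like object, so the error page itself can be inspected. A minimal sketch, assuming the external test URL http://httpbin.org/status/404 (not part of the original example), which always returns a 404:
<pre>
# -*- coding: UTF-8 -*-
import urllib.request
import urllib.error

try:
    urllib.request.urlopen('http://httpbin.org/status/404')
except urllib.error.HTTPError as e:
    # HTTPError also carries the status code, the response headers,
    # and a readable body, just like a normal response object
    print(e.code)                           # 404
    print(e.headers.get('Content-Type'))    # headers of the error response
    print(e.read())                         # error page body, as bytes
except urllib.error.URLError as e:
    print(e.reason)
</pre>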