多线程爬虫
1、创建一个任务队列 : 存放待爬取的url地址
2、创建爬取线程发起请求,执行任务下载
3、创建数据队列 : 存放爬取线程获取的页面源码
4、创建解析线程 : 解析HTML页面源码,提取目标数据,存储本地,进行数据持久化
注意 :队列是线程之间常用的数据交换形式,因为队列在线程间,是线程安全的
多线程的创建和使用
from threading import Thread
import threading
import time
data = []
def download_image(url, num):
    """Simulate downloading an image, then record *num* in the shared list.

    :param url: image URL (only echoed to stdout here).
    :param num: marker value appended to the module-level ``data`` list.
    """
    global data
    # Simulated network latency.
    time.sleep(2)
    print(url, num)
    data.append(num)
def read_data():
    """Print every item collected so far in the shared ``data`` list."""
    global data
    for item in data:
        print(item)
if __name__ == '__main__':
    # threading.current_thread() returns the running Thread object; the
    # camelCase alias currentThread() used originally is deprecated
    # (removal scheduled since Python 3.10).
    print('主线程开启', threading.current_thread().name)
    # Create the child threads.
    #   target : the callable the thread will execute
    #   name   : a custom, human-readable thread name
    #   args   : positional arguments for target (a tuple)
    thread_sub1 = Thread(
        target=download_image,
        name='下载线程',
        args=('https://image.baidu.com/search/detail?ct=503316480&z=0&ipn=d&word=%E7%BE%8E%E5%9B%BE&step_word=&hs=0&pn=13&spn=0&di=23690654460&pi=0&rn=1&tn=baiduimagedetail&is=0%2C0&istype=0&ie=utf-8&oe=utf-8&in=&cl=2&lm=-1&st=undefined&cs=4166284843%2C3507985054&os=1920790833%2C1534825563&simid=0%2C0&adpicid=0&lpn=0&ln=1443&fr=&fmq=1545631775914_R&fm=&ic=undefined&s=undefined&hd=undefined&latest=undefined©right=undefined&se=&sme=&tab=0&width=undefined&height=undefined&face=undefined&ist=&jit=&cg=&bdtype=0&oriquery=&objurl=http%3A%2F%2Fimg5.duitang.com%2Fuploads%2Fitem%2F201602%2F12%2F20160212180334_KWfXe.thumb.700_0.jpeg&fromurl=ippr_z2C%24qAzdH3FAzdH3Fooo_z%26e3B17tpwg2_z%26e3Bv54AzdH3Fks52AzdH3F%3Ft1%3Dcna0bambm&gsm=0&rpstart=0&rpnum=0&islist=&querylist=', 1),
    )
    thread_sub2 = Thread(
        target=read_data,
        name='读取线程',
    )
    # Daemon flag (must be set BEFORE start(); defaults to False):
    #   daemon=False : on exit, the interpreter waits for this thread's
    #                  task to finish normally.
    #   daemon=True  : the thread is terminated together with the main
    #                  thread when it exits.
    # e.g. thread_sub1.daemon = True
    thread_sub1.start()
    # join() blocks the main thread until the download thread finishes,
    # so read_data (started afterwards) sees everything it appended.
    thread_sub1.join()
    thread_sub2.start()
    thread_sub2.join()
    print('主线程结束', threading.current_thread().name)
线程池爬虫
from concurrent.futures import ThreadPoolExecutor
import requests
from lxml.html import etree
import threading
# 线程池的目的 :创建一个线程池,里面有指定数量的线程,让线程执行任务
def down_load_data(page):
    """Download one article-list page from blog.jobbole.com.

    :param page: 1-based page number to fetch.
    :return: tuple ``(html_text, status_code)``.  ``html_text`` is the
             empty string when the request did not return 200, so the
             done-callback can test it before parsing.  (The original
             implicitly returned ``None`` on failure, which crashed the
             callback on ``futures.result()[0]``.)
    """
    print(page)
    print('正在下载第' + str(page) + '页', threading.current_thread().name)
    full_url = 'http://blog.jobbole.com/all-posts/page/%s/' % str(page)
    req_header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
    }
    response = requests.get(full_url, headers=req_header)
    if response.status_code == 200:
        print('请求成功')
        return response.text, response.status_code
    # Failed request: still hand back a tuple so callers never index None.
    return '', response.status_code
def download_done(futures):
    """Done-callback: parse a finished page download and print each article.

    :param futures: Future whose result is ``(html_text, status_code)``.
    """
    html = futures.result()[0]
    if not html:
        # Failed download (None or '') — nothing to parse.
        return
    html_element = etree.HTML(html)
    articles = html_element.xpath('//div[@class="post floated-thumb"]')
    for article in articles:
        article_info = {}
        # Title
        article_info['title'] = article.xpath('.//a[@class="archive-title"]/text()')[0]
        # Cover image (optional)
        img_element = article.xpath('.//div[@class="post-thumb"]/a/img')
        if img_element:
            article_info['coverImage'] = img_element[0].xpath('./@src')[0]
        else:
            article_info['coverImage'] = '暂无图片'
        p_as = article.xpath('.//div[@class="post-meta"]/p[1]//a')
        # Bug fix: the original read p_as[1] unconditionally in its else
        # branch, raising IndexError when fewer than two <a> tags exist.
        # Tag category (second <a>, when present)
        article_info['tag'] = p_as[1].xpath('./text()')[0] if len(p_as) > 1 else ''
        # Comment count (third <a>, when present)
        article_info['commentNum'] = p_as[2].xpath('./text()')[0] if len(p_as) > 2 else '0'
        # Summary / excerpt
        article_info['content'] = article.xpath('.//span[@class="excerpt"]/p/text()')[0]
        # Publish time: join the text nodes and strip whitespace, line
        # breaks and the '·' separator that surround the date.
        article_info['publishTime'] = ''.join(
            article.xpath('.//div[@class="post-meta"]/p[1]/text()')
        ).replace('\n', '').replace(' ', '').replace('\r', '').replace('·', '')
        print(article_info)
if __name__ == '__main__':
    # Thread pool: max_workers is the number of worker threads kept alive.
    # The with-block calls pool.shutdown() on exit, which waits (joins)
    # until every submitted task has finished — same effect as the
    # explicit shutdown() call, without the risk of skipping it.
    with ThreadPoolExecutor(max_workers=10) as pool:
        for page in range(1, 201):
            # Queue one download task per page.
            handler = pool.submit(down_load_data, page)
            # Parse each page as soon as its download completes.
            handler.add_done_callback(download_done)
网友评论