A concrete crawler implementation using a coroutine pool
"""
@author:Rudy
@time : 12月5日
@message:采用协程池爬取糗事百科热门上的段子用户昵称
"""
import gevent.monkey
gevent.monkey.patch_all()
from gevent.pool import Pool
import time
from queue import Queue
import requests
from lxml import etree
# from multiprocessing.dummy import Pool
class QiuBai():
def __init__(self):
self.temp_url = "https://www.qiushibaike.com/8hr/page{}"
self.headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36"}
self.queue = Queue()
self.pool = Pool(5)
self.is_runnig = True
self.total_request_num = 0
self.total_response_num = 0
def get_url_list(self):
for i in range(1, 14):
self.queue.put(self.temp_url.format(i))
self.total_request_num += 1
def parse_url(self, url):
response = requests.get(url, headers=self.headers)
return response.content.decode()
def get_content_list(self, html_str):
html = etree.HTML(html_str)
div_list = html.xpath("//div[@id='content-left']/div")
content_list = []
for div in div_list:
item = {}
item["username"] = div.xpath(".//h2/text()")[0]
item["content"] = [i.strip() for i in div.xpath(".//div[@class='content']/span/text()")] # 敲黑板
content_list.append(item)
return content_list
def save_content_list(self, content_list):
for content in content_list:
print(content)
def _execete_request_content_save(self):
url = self.queue.get() # 进行一次url的地址的请求和保存
html_str = self.parse_url(url)
# 3 提取数据
content_list = self.get_content_list(html_str)
# 4 保存
self.save_content_list(content_list)
self.total_response_num += 1
def _callback(self, temp):
if self.is_runnig:
self.pool.apply_async(self._execete_request_content_save, callback=self._callback)
def run(self): # 实现主要的逻辑
# 1 获取URL
self.get_url_list()
for i in range(3): # 设置并发数为3
# 2 发送请求 获取响应
self.pool.apply_async(self._execete_request_content_save, callback=self._callback)
while True:
time.sleep(0.0001)
if self.total_response_num >= self.total_request_num:
self.is_runnig = False
break
if __name__ == '__main__':
qiubai = QiuBai()
qiubai.run()
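
The key to the design is the apply_async + callback loop: whenever a task finishes, its callback submits the next one, so the pool stays busy without spawning a greenlet for every URL up front. Below is a minimal, self-contained sketch of just that pattern; the work function and the task count TOTAL are placeholders standing in for the request/parse/save step of the crawler above.

import gevent.monkey
gevent.monkey.patch_all()

import time
from queue import Queue
from gevent.pool import Pool

queue = Queue()
pool = Pool(5)
done = 0
TOTAL = 10

for n in range(TOTAL):  # enqueue the dummy tasks
    queue.put(n)

def work():  # placeholder for the real request + parse + save step
    global done
    n = queue.get()
    gevent.sleep(0.01)  # simulate network I/O; the crawler calls requests.get here
    print("finished task", n)
    done += 1

def callback(result):  # reschedule while tasks remain
    if done < TOTAL:
        pool.apply_async(work, callback=callback)

for _ in range(3):  # seed three concurrent workers
    pool.apply_async(work, callback=callback)

while done < TOTAL:  # the main greenlet waits for all tasks to finish
    time.sleep(0.0001)
# a worker left blocked on the empty queue is simply discarded when the
# process exits, just like in the crawler above

Seeding three workers against a pool of five leaves headroom; raising the seed count toward the pool size increases concurrency at the cost of more simultaneous requests to the target site.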