
Python Web Scraping: Using an IP Proxy Pool with Scrapy

Author: 我为峰2014 | Published 2018-01-14 20:31

    Automatically refreshing the IP pool

    Write a class that fetches proxy IPs automatically in proxies.py; running it saves the harvested IPs to a txt file:

    Code

    # -*- coding: utf-8 -*-
    import random
    from multiprocessing import Process, Queue

    import requests
    from bs4 import BeautifulSoup  # the 'lxml' parser also needs to be installed
      
    class Proxies(object):  
      
      
        """docstring for Proxies"""  
        def __init__(self, page=3):  
            self.proxies = []  
            self.verify_pro = []  
            self.page = page  
            self.headers = {  
            'Accept': '*/*',  
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',  
            'Accept-Encoding': 'gzip, deflate, sdch',  
            'Accept-Language': 'zh-CN,zh;q=0.8'  
            }  
            self.get_proxies()  
            self.get_proxies_nn()  
      
        def get_proxies(self):
            # scrape self.page consecutive pages of the regular ("nt") proxy list,
            # starting from a random page
            page = random.randint(1,10)  
            page_stop = page + self.page  
            while page < page_stop:  
                url = 'http://www.xicidaili.com/nt/%d' % page  
                html = requests.get(url, headers=self.headers).content  
                soup = BeautifulSoup(html, 'lxml')  
                ip_list = soup.find(id='ip_list')  
                for odd in ip_list.find_all(class_='odd'):  
                    protocol = odd.find_all('td')[5].get_text().lower()+'://'  
                    self.proxies.append(protocol + ':'.join([x.get_text() for x in odd.find_all('td')[1:3]]))  
                page += 1  
      
        def get_proxies_nn(self):
            # same as get_proxies, but for the high-anonymity ("nn") proxy list
            page = random.randint(1,10)  
            page_stop = page + self.page  
            while page < page_stop:  
                url = 'http://www.xicidaili.com/nn/%d' % page  
                html = requests.get(url, headers=self.headers).content  
                soup = BeautifulSoup(html, 'lxml')  
                ip_list = soup.find(id='ip_list')  
                for odd in ip_list.find_all(class_='odd'):  
                    protocol = odd.find_all('td')[5].get_text().lower() + '://'  
                    self.proxies.append(protocol + ':'.join([x.get_text() for x in odd.find_all('td')[1:3]]))  
                page += 1  
      
        def verify_proxies(self):
            # queue of proxies waiting to be verified
            old_queue = Queue()
            # queue of proxies that passed verification
            new_queue = Queue()
            print ('verify proxy........')  
            works = []  
            for _ in range(15):  
                works.append(Process(target=self.verify_one_proxy, args=(old_queue,new_queue)))  
            for work in works:  
                work.start()  
            for proxy in self.proxies:  
                old_queue.put(proxy)  
            for work in works:  
                old_queue.put(0)  
            for work in works:  
                work.join()  
            self.proxies = []  
            while 1:  
                try:  
                    self.proxies.append(new_queue.get(timeout=1))  
                except:  
                    break  
            print ('verify_proxies done!')  
      
      
        def verify_one_proxy(self, old_queue, new_queue):
            # worker process: pull proxies off old_queue until the 0 sentinel arrives
            while 1:
                proxy = old_queue.get()
                if proxy == 0:
                    break
                protocol = 'https' if 'https' in proxy else 'http'
                proxies = {protocol: proxy}
                try:  
                    if requests.get('http://www.baidu.com', proxies=proxies, timeout=2).status_code == 200:  
                        print ('success %s' % proxy)  
                        new_queue.put(proxy)  
                except:  
                    print ('fail %s' % proxy)  
      
      
    if __name__ == '__main__':
        a = Proxies()
        a.verify_proxies()
        print(a.proxies)
        # append the verified proxies to proxies.txt, one per line
        with open('proxies.txt', 'a') as f:
            for proxy in a.proxies:
                f.write(proxy + '\n')
    
    

    Run it:

    python proxies.py
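
    If requests, BeautifulSoup and lxml are not installed yet, something along these lines should pull them in (package names are inferred from the imports above):

    pip install requests beautifulsoup4 lxml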
    

    The proxies that pass verification are appended to proxies.txt.
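
    Each line in that file is a full proxy URL (scheme, host and port). The addresses below are made up, purely to illustrate the format:

    http://123.57.76.102:80
    https://61.135.217.7:8080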


    Change the contents of the middleware file middlewares.py to the following:

    import random
    import time
      
    class ProxyMiddleWare(object):
        """Downloader middleware that attaches a random proxy to every request."""
        def process_request(self, request, spider):
            '''Add a proxy to the outgoing request.'''
            proxy = self.get_random_proxy()
            print("this is request ip:" + proxy)
            request.meta['proxy'] = proxy
      
      
        def process_response(self, request, response, spider):
            '''Handle the returned response.'''
            # if the response status is not 200, re-issue the current request
            if response.status != 200:
                proxy = self.get_random_proxy()
                print("this is response ip:" + proxy)
                # attach a fresh proxy to the request and return it for a retry
                request.meta['proxy'] = proxy
                return request
            return response
      
        def get_random_proxy(self):
            '''Pick a random proxy from the file.'''
            while 1:
                # path to the proxies.txt you generated earlier
                with open('path/to/your/proxies.txt', 'r') as f:
                    proxies = f.readlines()
                if proxies:
                    break
                else:
                    time.sleep(1)
            proxy = random.choice(proxies).strip()
            return proxy
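
    As a quick sanity check outside of Scrapy, the class can be exercised on its own, since get_random_proxy only reads the file. A minimal sketch, assuming middlewares.py is importable and the path inside get_random_proxy points at an existing proxies.txt:

    # quick_check.py -- hypothetical helper, not part of the Scrapy project itself
    from middlewares import ProxyMiddleWare

    mw = ProxyMiddleWare()
    print(mw.get_random_proxy())  # should print something like http://1.2.3.4:8080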
    

    Then update the settings file:

    DOWNLOADER_MIDDLEWARES = {
        'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
        'myproxies.middlewares.ProxyMiddleWare': 125,
        'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': None
    }
    

    Here, myproxies is the project name, middlewares is the name of the .py file, and ProxyMiddleWare is the class name, as in the layout sketched below.
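
    For reference, this corresponds to the usual layout that scrapy startproject generates (the project name myproxies is taken from the settings entry above):

    myproxies/
        scrapy.cfg
        myproxies/
            __init__.py
            settings.py
            middlewares.py    # ProxyMiddleWare lives here
            spiders/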

    Alternatively, use Crawlera (a paid service); search online for the details.

    If money is no object, you can of course just buy some proxy IPs on Taobao; they are stable and not particularly expensive.

    Searching GitHub for "proxy ip" will turn up plenty more options.
