sun scrapy_redis

Author: Aedda | Published 2019-04-10 20:10
    • spiders——Sun1.py
    import scrapy
    from scrapy.spiders import CrawlSpider,Rule
    from scrapy.linkextractors import LinkExtractor
    from SunDistributeSpider.items import SundistributespiderItem
    
    class Sun1Spider(CrawlSpider):
        name = 'Sun1'
        allowed_domains = ['wz.sun0769.com']
        start_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=']
    
        # Match the "next page" pagination link
        pagelink = LinkExtractor(restrict_xpaths=('//div[@class="pagination"]/a[text()=">"]',))
        # Match the link to each post
        contentlink = LinkExtractor(restrict_xpaths=('//a[@class="news14"]',))
        rules = [
            Rule(pagelink, follow=True),
            Rule(contentlink, callback='parse_item')
        ]
    
    
    
        def parse_item(self,response):
            print('url:',response.url)
            item = SundistributespiderItem()
            # Title
            item['name'] = response.xpath('//div[@class="wzy1"]/table[1]//td[2]/span[1]/text()').extract()[0]
            # Number
            item['number'] = response.xpath('//div[@class="wzy1"]/table[1]//td[2]/span[2]/text()').extract()[0].strip().split(':')[-1]
            # URL
            item['url'] = response.url
            # Content
            item['content'] = response.xpath('//div[@class="wzy1"]/table[2]//td[@class="txt16_3"]//text()').extract()
            item['content'] = ''.join(item['content']).strip()
    
            tmp = response.xpath('//div[@class="wzy3_2"]/span/text()').extract()[0].strip().split(' ')
            item['author'] = tmp[0].split(':')[-1]
            item['pub_date'] = tmp[1].split(':')[-1] + ' '+ tmp[2]
    
            yield item
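
    Sun1 is a plain CrawlSpider with hard-coded start_urls, so nothing needs to be seeded in Redis before it starts (although, with the scrapy_redis scheduler configured in settings.py, a Redis server must still be running). A minimal sketch of launching it from a script, assuming it is executed from the project root; `scrapy crawl Sun1` on the command line works just as well:

    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    # Run the Sun1 spider in-process with the project's settings.py applied.
    process = CrawlerProcess(get_project_settings())
    process.crawl('Sun1')
    process.start()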
    
    • spiders——Sun2.py
    # -*- coding: utf-8 -*-
    import scrapy
    from scrapy_redis.spiders import RedisSpider
    from ..items import SundistributespiderItem
    
    
    class Sun2Spider(RedisSpider):
        name = 'Sun2'
        # allowed_domains = ['wz.sun0769.com']
        # start_urls = ['http://wz.sun0769.com/']
        redis_key = 'Sun2:start_urls'
        offset = 0
        # Base listing URL; parse() appends the page offset to it when paginating.
        url = 'http://wz.sun0769.com/index.php/question/questionType?type=4&page='
    
        def __init__(self, *args, **kwargs):
            # Dynamically define the allowed domains list.
            domain = kwargs.pop('domain', '')
            self.allowed_domains = list(filter(None, domain.split(',')))
            super(Sun2Spider, self).__init__(*args, **kwargs)
    
        def parse(self, response):
            # Extract the links to the posts on this page
            links = response.xpath("//a[@class='news14']/@href").extract()
            print('len:',len(links))
            # Iterate over the links and request each detail page
            for link in links:
                yield scrapy.Request(link,callback=self.parse_item)
    
            # Stop paginating once the offset limit is reached
            if self.offset < 30000:
                self.offset += 30
                yield scrapy.Request(self.url+str(self.offset),callback=self.parse)
    
    
        def parse_item(self,response):
            item = SundistributespiderItem()
            # Title
            item['name'] = response.xpath('//div[@class="wzy1"]/table[1]//td[2]/span[1]/text()').extract()[0]
            # Number
            item['number'] = response.xpath('//div[@class="wzy1"]/table[1]//td[2]/span[2]/text()').extract()[0].strip().split(':')[-1]
            # URL
            item['url'] = response.url
            # Content
            item['content'] = response.xpath('//div[@class="wzy1"]/table[2]//td[@class="txt16_3"]//text()').extract()
            item['content'] = ''.join(item['content']).strip()
    
            tmp = response.xpath('//div[@class="wzy3_2"]/span/text()').extract()[0].strip().split(' ')
            item['author'] = tmp[0].split(':')[-1]
            item['pub_date'] = tmp[1].split(':')[-1] + ' '+ tmp[2]
            print(tmp)
    
            yield item
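
    Sun2 is a RedisSpider, so it defines no start_urls: the crawler sits idle until a URL is pushed to its redis_key ('Sun2:start_urls'). A minimal sketch of seeding that key with the redis-py client (an assumption; any Redis client, e.g. redis-cli with lpush, works the same), using the host and port from settings.py:

    import redis

    # Push the listing page URL onto the key that Sun2 polls for start requests.
    r = redis.StrictRedis(host='127.0.0.1', port=6379)
    r.lpush('Sun2:start_urls',
            'http://wz.sun0769.com/index.php/question/questionType?type=4&page=')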
    
    • spiders——Sun3.py
    # -*- coding: utf-8 -*-
    import scrapy
    from scrapy.linkextractors import LinkExtractor
    from scrapy.spiders import Rule
    from scrapy_redis.spiders import RedisCrawlSpider
    from ..items import SundistributespiderItem
    
    class Sun3Spider(RedisCrawlSpider):
        name = 'Sun3'
        # allowed_domains = ['wz.sun0769.com']
        # start_urls = ['http://wz.sun0769.com/']
        redis_key = 'Sun3:start_urls'
    
        def __init__(self, *args, **kwargs):
            # Dynamically define the allowed domains list.
            domain = kwargs.pop('domain', '')
            self.allowed_domains = list(filter(None, domain.split(',')))
            super(Sun3Spider, self).__init__(*args, **kwargs)
    
        # Match the "next page" pagination link
        pagelink = LinkExtractor(restrict_xpaths=('//div[@class="pagination"]/a[text()=">"]',))
        # Match the link to each post
        contentlink = LinkExtractor(restrict_xpaths=('//a[@class="news14"]',))
        rules = [
            Rule(pagelink, follow=True),
            Rule(contentlink, callback='parse_item')
        ]
    
    
        def parse_item(self,response):
            print('url:',response.url)
            item = SundistributespiderItem()
            # Title
            item['name'] = response.xpath('//div[@class="wzy1"]/table[1]//td[2]/span[1]/text()').extract()[0]
            # Number
            item['number'] = response.xpath('//div[@class="wzy1"]/table[1]//td[2]/span[2]/text()').extract()[0].strip().split(':')[-1]
            # URL
            item['url'] = response.url
            # Content
            item['content'] = response.xpath('//div[@class="wzy1"]/table[2]//td[@class="txt16_3"]//text()').extract()
            item['content'] = ''.join(item['content']).strip()
    
            tmp = response.xpath('//div[@class="wzy3_2"]/span/text()').extract()[0].strip().split(' ')
            item['author'] = tmp[0].split(':')[-1]
            item['pub_date'] = tmp[1].split(':')[-1] + ' '+ tmp[2]
    
            yield item
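
    Sun3 combines RedisCrawlSpider with the same Rule/LinkExtractor setup as Sun1, so pagination is handled by the rules instead of a manual offset. Like Sun2 it waits for 'Sun3:start_urls' to be seeded in Redis, and its allowed_domains come from the `domain` spider argument, which Scrapy fills from -a options. A sketch of launching it that way (the domain value here is an assumption; adjust as needed):

    from scrapy import cmdline

    # -a domain=... is passed into Sun3Spider.__init__ as the 'domain' kwarg.
    cmdline.execute('scrapy crawl Sun3 -a domain=wz.sun0769.com'.split())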
    
    • items.py
    import scrapy
    
    
    class SundistributespiderItem(scrapy.Item):
        # define the fields for your item here like:
        # Title, number, URL, content, complainant, complaint time
        name = scrapy.Field()
        number = scrapy.Field()
        url = scrapy.Field()
        content = scrapy.Field()
        author = scrapy.Field()
        pub_date = scrapy.Field()
    
    
    • middlewares.py
    # -*- coding: utf-8 -*-
    
    # Define here the models for your spider middleware
    #
    # See documentation in:
    # https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    
    from scrapy import signals
    
    
    class SundistributespiderSpiderMiddleware(object):
        # Not all methods need to be defined. If a method is not defined,
        # scrapy acts as if the spider middleware does not modify the
        # passed objects.
    
        @classmethod
        def from_crawler(cls, crawler):
            # This method is used by Scrapy to create your spiders.
            s = cls()
            crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
            return s
    
        def process_spider_input(self, response, spider):
            # Called for each response that goes through the spider
            # middleware and into the spider.
    
            # Should return None or raise an exception.
            return None
    
        def process_spider_output(self, response, result, spider):
            # Called with the results returned from the Spider, after
            # it has processed the response.
    
            # Must return an iterable of Request, dict or Item objects.
            for i in result:
                yield i
    
        def process_spider_exception(self, response, exception, spider):
            # Called when a spider or process_spider_input() method
            # (from other spider middleware) raises an exception.
    
            # Should return either None or an iterable of Response, dict
            # or Item objects.
            pass
    
        def process_start_requests(self, start_requests, spider):
            # Called with the start requests of the spider, and works
            # similarly to the process_spider_output() method, except
            # that it doesn’t have a response associated.
    
            # Must return only requests (not items).
            for r in start_requests:
                yield r
    
        def spider_opened(self, spider):
            spider.logger.info('Spider opened: %s' % spider.name)
    
    
    class SundistributespiderDownloaderMiddleware(object):
        # Not all methods need to be defined. If a method is not defined,
        # scrapy acts as if the downloader middleware does not modify the
        # passed objects.
    
        @classmethod
        def from_crawler(cls, crawler):
            # This method is used by Scrapy to create your spiders.
            s = cls()
            crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
            return s
    
        def process_request(self, request, spider):
            # Called for each request that goes through the downloader
            # middleware.
    
            # Must either:
            # - return None: continue processing this request
            # - or return a Response object
            # - or return a Request object
            # - or raise IgnoreRequest: process_exception() methods of
            #   installed downloader middleware will be called
            return None
    
        def process_response(self, request, response, spider):
            # Called with the response returned from the downloader.
    
            # Must either:
            # - return a Response object
            # - return a Request object
            # - or raise IgnoreRequest
            return response
    
        def process_exception(self, request, exception, spider):
            # Called when a download handler or a process_request()
            # (from other downloader middleware) raises an exception.
    
            # Must either:
            # - return None: continue processing this exception
            # - return a Response object: stops process_exception() chain
            # - return a Request object: stops process_exception() chain
            pass
    
        def spider_opened(self, spider):
            spider.logger.info('Spider opened: %s' % spider.name)
    
    • main.py
    from scrapy import cmdline
    name = 'Sun2'
    cmd = 'scrapy crawl {0}'.format(name)
    
    cmdline.execute(cmd.split())
    
    • settings.py
    # -*- coding: utf-8 -*-
    
    # Scrapy settings for SunDistributeSpider project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    #     https://doc.scrapy.org/en/latest/topics/settings.html
    #     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
    #     https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    
    BOT_NAME = 'SunDistributeSpider'
    
    SPIDER_MODULES = ['SunDistributeSpider.spiders']
    NEWSPIDER_MODULE = 'SunDistributeSpider.spiders'
    
    
    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    #USER_AGENT = 'SunDistributeSpider (+http://www.yourdomain.com)'
    # Use the scrapy_redis dupefilter and scheduler so the request queue and
    # fingerprint set are shared through Redis by every crawler instance
    DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
    SCHEDULER = "scrapy_redis.scheduler.Scheduler"
    # Keep the Redis queues after the crawl finishes (allows pausing/resuming)
    SCHEDULER_PERSIST = True
    
    # Push every scraped item into Redis via scrapy_redis' RedisPipeline
    ITEM_PIPELINES = {
        'scrapy_redis.pipelines.RedisPipeline': 400,
    }
    
    DOWNLOAD_DELAY = 1
    # Redis server used by the scheduler, dupefilter and item pipeline
    #REDIS_HOST='192.168.10.132'
    REDIS_HOST='127.0.0.1'
    REDIS_PORT=6379
    # REDIS_PARAMS = {
    #     'password': '123',
    # }
    
    # Disable Scrapy's built-in offsite filtering middleware
    SPIDER_MIDDLEWARES = {
        'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': None,
    }
    
    # Obey robots.txt rules
    ROBOTSTXT_OBEY = False
    
    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    #CONCURRENT_REQUESTS = 32
    
    # Configure a delay for requests for the same website (default: 0)
    # See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    #DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    #CONCURRENT_REQUESTS_PER_DOMAIN = 16
    #CONCURRENT_REQUESTS_PER_IP = 16
    
    # Disable cookies (enabled by default)
    #COOKIES_ENABLED = False
    
    # Disable Telnet Console (enabled by default)
    #TELNETCONSOLE_ENABLED = False
    
    # Override the default request headers:
    #DEFAULT_REQUEST_HEADERS = {
    #   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #   'Accept-Language': 'en',
    #}
    
    # Enable or disable spider middlewares
    # See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    #SPIDER_MIDDLEWARES = {
    #    'SunDistributeSpider.middlewares.SundistributespiderSpiderMiddleware': 543,
    #}
    
    # Enable or disable downloader middlewares
    # See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
    #DOWNLOADER_MIDDLEWARES = {
    #    'SunDistributeSpider.middlewares.SundistributespiderDownloaderMiddleware': 543,
    #}
    
    # Enable or disable extensions
    # See https://doc.scrapy.org/en/latest/topics/extensions.html
    #EXTENSIONS = {
    #    'scrapy.extensions.telnet.TelnetConsole': None,
    #}
    
    # Configure item pipelines
    # See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
    #ITEM_PIPELINES = {
    #    'SunDistributeSpider.pipelines.SundistributespiderPipeline': 300,
    #}
    
    # Enable and configure the AutoThrottle extension (disabled by default)
    # See https://doc.scrapy.org/en/latest/topics/autothrottle.html
    #AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    #AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    #AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    #AUTOTHROTTLE_DEBUG = False
    
    # Enable and configure HTTP caching (disabled by default)
    # See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    #HTTPCACHE_ENABLED = True
    #HTTPCACHE_EXPIRATION_SECS = 0
    #HTTPCACHE_DIR = 'httpcache'
    #HTTPCACHE_IGNORE_HTTP_CODES = []
    #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
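
    With RedisPipeline enabled, scraped items are not written to local files: they are serialized to JSON and pushed onto a Redis list, by default named '<spider>:items' (so 'Sun2:items' here). A minimal sketch of draining that list from a separate consumer process, assuming the redis-py client and the default key name:

    import json
    import redis

    r = redis.StrictRedis(host='127.0.0.1', port=6379)

    # Block-pop items that RedisPipeline pushed, decode them, and print two fields.
    while True:
        popped = r.blpop('Sun2:items', timeout=30)
        if popped is None:
            break  # no new items for 30 seconds
        _, data = popped
        item = json.loads(data)
        print(item['number'], item['name'])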
    
