
Crawling Information from a Government Website by Keyword

Author: 不吃肉饼只喝汤 | Published 2017-06-22 23:08

    Development environment: Spyder (Python 3.6) on Windows 10
    Runtime environment: Linux
    I built this with the Scrapy framework, which made it quite easy to write; explanatory comments are inline in the code.
    The full code follows:
    settings.py

    BOT_NAME = 'bidinfo'
    
    SPIDER_MODULES = ['bidinfo.spiders']
    NEWSPIDER_MODULE = 'bidinfo.spiders'
    LOG_LEVEL = 'INFO'
    
    
    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    #USER_AGENT = 'bidinfo (+http://www.yourdomain.com)'
    
    # Obey robots.txt rules
    ROBOTSTXT_OBEY = True
    # Wait 1 second between requests to avoid hammering the site
    DOWNLOAD_DELAY = 1
    
    # Route scraped items through the file-writing pipeline (priority 300)
    ITEM_PIPELINES = {
        'bidinfo.pipelines.BidinfoPipeline': 300,
    }
    

    items.py

    import scrapy
    
    
    class BidinfoItem(scrapy.Item):
        title = scrapy.Field()      # announcement title
        url = scrapy.Field()        # detail-page URL
        label = scrapy.Field()      # category labels from the page's breadcrumb nav
        post_time = scrapy.Field()  # publication time
        content = scrapy.Field()    # full announcement text
    

    pipelines.py

    class BidinfoPipeline(object):
        """Write each scraped item to its own text file, named after its title."""
    
        def process_item(self, item, spider):
            file_name = str(item['title']) + '.txt'
            path = '/media/BidCrawler/bid/' + file_name
            print(path)
            # Use a context manager so the file is closed even on error, and
            # write UTF-8 explicitly since the content is Chinese.
            with open(path, 'w', encoding='utf-8') as fp:
                fp.write(item['title'] + '\n')
                fp.write(item['url'] + '\n')
                fp.write(item['label'] + '\n')
                fp.write(item['post_time'] + '\n')
                fp.write(item['content'])
            return item
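
    The file name comes straight from the page title, so any character that is illegal in a file name would crash the pipeline (the spider below only strips '/'). A minimal sanitizer sketch; the safe_name helper and its length cap are hypothetical, not part of the original code:

    import re
    
    def safe_name(title, max_len=100):
        # Replace filesystem-hostile characters and cap the length (assumed limit)
        return re.sub(r'[\\/:*?"<>|\s]+', '_', title)[:max_len]
    
    # usage: file_name = safe_name(item['title']) + '.txt'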
    

    spiders/bidnews.py

    import scrapy
    import sys
    import re
    from bidinfo.items import BidinfoItem
    from scrapy.http import Request
    
    
    class BidnewsSpider(scrapy.Spider):
        name = 'bidnews'
        allowed_domains = ['ccgp.gov.cn']
    
        # Example keyword list:
        # ['客流预测', 'wifi探针', '公交线网优化', '公交线网', '公交运行', '公交走廊',
        #  '公交专用道', 'OD', '智慧交通', '智能交通', '公共交通', '智能交通管理',
        #  '智慧城市顶层设计', '运行指数', '智慧城市规划', '多规合一', '出行特征',
        #  '人流应急管理', '交通枢纽', '交通仿真', '交通优化', 'TransCAD']
    
        def start_requests(self):
            # Launched via a wrapper script (see below): sys.argv carries a
            # Python-list-style keyword string, a start date, and an end date.
            kws = sys.argv[1]
            kws = kws.strip("[']").split("', '")  # parse "['a', 'b']" into ['a', 'b']
            # The search API takes dates as 2017%3A06%3A01 (':' URL-encoded as %3A)
            start_time = sys.argv[2].replace('-', '%3A')
            end_time = sys.argv[3].replace('-', '%3A')
            all_urls = ["http://search.ccgp.gov.cn/bxsearch?searchtype=1&page_index=1&bidSort=0&buyerName=&projectId=&pinMu=0&bidType=0&dbselect=bidx&kw={0}&start_time={1}&end_time={2}&timeType=2&displayZone=&zoneId=&pppStatus=0&agentName=".format(i, start_time, end_time) for i in kws]
            for url in all_urls:
                yield Request(url, self.parse)
    
        def parse(self, response):
            # Results come 20 per page; the second <span> holds the total count.
            total = int(response.xpath('//div[@class="vT_z"]/div[1]/div/p[1]/span[2]/text()').extract()[0])
            pages = total // 20 + 2  # +2 so the exclusive range() reaches the last partial page
            for i in range(1, pages):
                # Rewrite the page_index parameter of the search URL
                url = re.sub(r'page_index=\d+', 'page_index=' + str(i), str(response.url))
                yield Request(url, callback=self.parse_list)
    
        def parse_list(self, response):
            # The original post sent result-list pages straight to get_message,
            # which parses detail-page markup, so a link-extraction step is
            # needed here. The selector below is an assumption about the
            # result-list markup, not taken from the original post.
            for href in response.xpath('//ul[@class="vT-srch-result-list-bid"]/li/a/@href').extract():
                yield Request(response.urljoin(href), callback=self.get_message)
    
        def get_message(self, response):
            item = BidinfoItem()
            # Strip '/' from the title because it is later used as a file name
            item['title'] = str(response.xpath('//h2[@class="tc"]/text()').extract()[0]).replace('/', '')
            item['url'] = str(response.url)
            # Breadcrumb links 2-3 serve as category labels
            item['label'] = '|'.join(response.xpath('//div[@class="vT_detail_nav_lks"]/a/text()').extract()[1:3])
            item['post_time'] = str(response.xpath('//span[@id="pubTime"]/text()').extract()[0])
            item['content'] = ''.join([i.strip() for i in response.xpath('//div[@class="vT_detail_content w760c"]//text()').extract()])
            yield item
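
    Because the spider reads sys.argv directly, running plain "scrapy crawl bidnews" would leave 'crawl' in sys.argv[1]. A minimal launcher sketch, assuming a wrapper script next to scrapy.cfg; the script name run_bidnews.py and the exact invocation are assumptions, not from the original post:

    # run_bidnews.py
    # Usage: python run_bidnews.py "['智慧交通', '公交线网']" 2017-06-01 2017-06-22
    from scrapy.cmdline import execute
    
    # Give Scrapy's CLI an explicit argv; the spider still reads this
    # process's sys.argv, which carries the keywords and the date range.
    execute(['scrapy', 'crawl', 'bidnews'])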
    
    
