1. scrapy startproject circ
2. cd circ
3. scrapy genspider -t crawl cf circ.gov.cn
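The first command generates the standard Scrapy project skeleton, roughly the layout below for recent Scrapy releases (older versions may lack middlewares.py):

circ/
    scrapy.cfg
    circ/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py

The third command creates spiders/cf.py from the CrawlSpider template; the edited version follows.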
# -*- coding: utf-8 -*-
import re

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class CfSpider(CrawlSpider):
    name = 'cf'
    allowed_domains = ['circ.gov.cn']
    start_urls = ['http://bxjg.circ.gov.cn/web/site0/tab5241/']

    # Rules for extracting URLs:
    # - LinkExtractor extracts every URL matching the allow pattern.
    # - The response for each extracted URL is handed to the callback.
    # - follow controls whether that response is run through the rules again
    #   to extract more URLs; it defaults to False when a callback is given,
    #   otherwise to True.
    rules = (
        # detail pages
        Rule(LinkExtractor(allow=r'/web/site0/tab5241/info\d+\.htm'), callback='parse_item'),
        # pagination
        Rule(LinkExtractor(allow=r'/web/site0/tab5241/module14458/page\d+\.htm'), follow=True),
    )

    # CrawlSpider implements parse() internally, so never override it;
    # use a differently named callback such as parse_item.
    def parse_item(self, response):
        html = response.body.decode()
        item = {}
        item["title"] = re.findall(r"<!--TitleStart-->(.*?)<!--TitleEnd-->", html)[0]
        item["publish_date"] = re.findall(r"发布时间:(20\d{2}-\d{2}-\d{2})", html)[0]
        print(item)
        # To fetch extra fields from another page, pass the item along via meta:
        # yield scrapy.Request(
        #     url,
        #     callback=self.parse_detail,
        #     meta={"item": item},
        # )

    # def parse_detail(self, response):
    #     item = response.meta["item"]
    #     item["price"] = "///"
    #     yield item
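To see what a rule's LinkExtractor actually matches, you can run it by hand against a response object. A minimal sketch; the HTML fragment is made up, while extract_links is the real Scrapy API:

from scrapy.http import HtmlResponse
from scrapy.linkextractors import LinkExtractor

# Hypothetical fragment mimicking a listing page on bxjg.circ.gov.cn
body = b'''
<a href="/web/site0/tab5241/info123456.htm">penalty notice</a>
<a href="/web/site0/tab5241/module14458/page2.htm">next page</a>
<a href="/other/page.htm">unrelated</a>
'''
response = HtmlResponse(
    url='http://bxjg.circ.gov.cn/web/site0/tab5241/',
    body=body,
    encoding='utf-8',
)

detail = LinkExtractor(allow=r'/web/site0/tab5241/info\d+\.htm')
print([link.url for link in detail.extract_links(response)])
# only the info123456.htm link matches; the other two are filtered out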
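The two regular expressions in parse_item can likewise be tested offline. A small sketch, assuming a made-up detail-page fragment that carries the same TitleStart/TitleEnd markers and 发布时间 label used above:

import re

# Made-up fragment in the format the detail pages use
html = '<!--TitleStart-->某保险公司行政处罚决定书<!--TitleEnd--> ... 发布时间:2018-07-16'

title = re.findall(r"<!--TitleStart-->(.*?)<!--TitleEnd-->", html)[0]
publish_date = re.findall(r"发布时间:(20\d{2}-\d{2}-\d{2})", html)[0]
print(title, publish_date)  # 某保险公司行政处罚决定书 2018-07-16

Run the finished spider with scrapy crawl cf from the project root; each matched detail page prints one item dict.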