声明:
1、本博客所涉及爬虫及其爬取的数据仅用于爬虫技术研究,请勿用于商业用途。
2、本博文中涉及的网址均已隐去关键字段。
基于python3.5版本
源代码
spidertest.py
# -*- coding: utf-8 -*-
import scrapy
from testscrapy01.items import QIBEBT_Zhuanli_Item
from scrapy.selector import Selector
import re
class QibebtZhuanliSpider(scrapy.Spider):
    """Scrape the institute's patent index pages.

    Crawls the first index page plus the paginated ``index_1.html`` ..
    ``index_14.html`` pages, and yields one populated item per patent
    table row (title, category, application number/date, grant date,
    first inventor, other inventors).
    """

    name = "xxxx_zhuanli_01"
    allowed_domains = ["qibebt.cas.cn"]
    start_urls = ['http://www.xxxx.cas.cn/kycg/zl/index.html']
    # Paginated index pages index_1.html .. index_14.html.
    for i in range(1, 15):
        start_urls.append("http://www.xxxx.cas.cn/kycg/zl/index_" + str(i) + ".html")

    def parse(self, response):
        """Parse one index page; yield one item per patent row.

        Each data row is a ``<tr bgcolor="#f2f7f1">``; its centered cells
        hold (category, application number, application date[, grant
        date]) and its left-aligned cell holds the inventors separated
        by a non-breaking space (\\xa0).
        """
        print("目标网页为:" + response.url)
        rows = response.xpath("//tr[@bgcolor='#f2f7f1']")
        for row in rows:
            # Create a fresh item per row: the original reused one item
            # instance built before the loop and never assigned any
            # field to it, so every yield emitted the same empty item.
            qitem = QIBEBT_Zhuanli_Item()
            # Patent title.
            patent_name = row.xpath(
                "td[@height='26']/a[@target='_blank']/text()").extract()[0]
            # Extract the centered cells once instead of once per field.
            centered = row.xpath("td[@align='center']/text()").extract()
            # Patent category ("type" renamed: it shadowed the builtin).
            patent_type = centered[0]
            # Application number.
            number = centered[1]
            # Application date.
            apply_date = centered[2]
            # Grant date is absent for pending patents; explicit length
            # check instead of a bare except that swallowed everything.
            sq_date = centered[3] if len(centered) > 3 else "无"
            # Inventors: first inventor and the rest are separated by
            # \xa0. Split the raw text directly — the original round-trip
            # through repr() left escape artifacts in the data, and
            # indexing [1] crashed when there was a single inventor.
            inventor = row.xpath("td[@align='left']/text()").extract()[0]
            parts = inventor.split('\xa0', 1)
            main_inventor = parts[0]
            other_inventors = parts[1] if len(parts) > 1 else ""
            # NOTE(review): field names assumed to mirror the scraped
            # variables — confirm against
            # testscrapy01.items.QIBEBT_Zhuanli_Item.
            qitem['name'] = patent_name
            qitem['type'] = patent_type
            qitem['number'] = number
            qitem['apply_date'] = apply_date
            qitem['sq_date'] = sq_date
            qitem['main_inventor'] = main_inventor
            qitem['other_inventors'] = other_inventors
            yield qitem
网友评论