
Day 8: Scrapy crawling dushu.com, storing into Redis and MySQL

Author: 是东东 | Published 2018-08-22 23:54

dushu.py

# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from CrawlSpiderDemo.items import CrawlspiderdemoItem
from scrapy_redis.spiders import RedisCrawlSpider

# The scrapy framework ships two kinds of spiders: Spider (the basic spider) and CrawlSpider.
# CrawlSpider is a subclass of Spider. A plain Spider is designed to crawl only the pages listed in start_urls, while CrawlSpider defines rules for extracting and following links, so it can keep crawling a whole set of URLs.


class DushuSpider(RedisCrawlSpider):
    name = 'dushu'
    allowed_domains = ['dushu.com']
    # start_urls = ['http://www.dushu.com/book/1163.html']
    redis_key = "book:start_urls"
    # http://www.dushu.com/book/1163_3.html
    # This is the URL matching rule; every paging URL of the site that matches it will be extracted and crawled
    rules = (
        Rule(LinkExtractor(allow=r'/book/1163_\d+\.html'), callback='parse_item', follow=True),
    )
    # rules contains one or more Rule objects. Each Rule describes one crawling action: links matched by its LinkExtractor are extracted from every response, followed one by one, and each response is passed to the specified callback.

    # A LinkExtractor can be built in three ways (see the sketch after this file):
    # 1) with a regex: LinkExtractor(allow=r"some regex")
    # 2) with XPath: LinkExtractor(restrict_xpaths="some xpath")
    # 3) with CSS: LinkExtractor(restrict_css="some css selector")

    def parse_item(self, response):
        # print(response)
        # extract every book block on the page
        books = response.xpath("//div[@class='book-info']")
        # print(books)
        for book in books:
            item = CrawlspiderdemoItem()
            item["title"] = book.xpath(".//h3/a/text()").extract_first()
            item["author"] = book.xpath(".//p[1]/a/text()").extract_first()
            item["info"] = book.xpath(".//p[2]/text()").extract_first()
            item["img_url"] = book.xpath(".//img/@data-original").extract_first()

            # build the URL of the book's detail page (second-level page)
            book_url = "https://www.dushu.com" + book.xpath(".//h3/a/@href").extract_first()
            yield scrapy.Request(url=book_url, callback=self.parse_book, meta={"book_item": item})

    def parse_book(self, response):
        item = response.meta["book_item"]
        item["price"] = response.xpath("//p[@class='price']/span/text()").extract_first()
        item["summary"] = response.xpath("//div[starts-with(@class,'text txtsummary')]/text()").extract()[2]

        yield item
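
The comments above list three ways to construct a LinkExtractor. Here is a minimal sketch of all three side by side; the XPath and CSS expressions are illustrative placeholders, not selectors taken from dushu.com:

from scrapy.linkextractors import LinkExtractor

# 1) regex: extract every link whose URL matches the pattern
by_regex = LinkExtractor(allow=r'/book/1163_\d+\.html')
# 2) XPath: only extract links found inside the page region matched by the XPath
by_xpath = LinkExtractor(restrict_xpaths="//div[@class='pages']")
# 3) CSS: same idea, but the region is selected with a CSS selector
by_css = LinkExtractor(restrict_css="div.pages")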

items.py

import scrapy


class CrawlspiderdemoItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    author = scrapy.Field()
    info = scrapy.Field()
    img_url = scrapy.Field()
    price = scrapy.Field()
    summary = scrapy.Field()

pipelines.py

import pymysql


class CrawlspiderdemoPipeline(object):
    def open_spider(self, spider):
        self.conn = pymysql.connect(
            host="127.0.0.1",
            port=3306,
            user="root",
            password="9998",
            db="bookdb",
            charset="utf8",
        )
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # use a parameterized INSERT so quotes in the scraped text cannot break the SQL
        sql = "INSERT INTO books VALUES(NULL, %s, %s, %s, %s, %s, %s)"
        self.cursor.execute(sql, (item["title"], item["author"], item["info"], item["img_url"], item["price"], item["summary"]))
        self.conn.commit()

        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()
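
The INSERT above assumes a books table with an auto-increment primary key followed by six text columns in the same order as the item fields. The article does not show the real table definition, so the following is only a sketch of such a schema, created once with pymysql before crawling (column names and sizes are assumptions):

import pymysql

# one-off helper to create the assumed table; run once before starting the crawl
conn = pymysql.connect(host="127.0.0.1", port=3306, user="root",
                       password="9998", db="bookdb", charset="utf8")
with conn.cursor() as cursor:
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS books (
            id INT PRIMARY KEY AUTO_INCREMENT,
            title VARCHAR(255),
            author VARCHAR(255),
            info VARCHAR(255),
            img_url VARCHAR(512),
            price VARCHAR(64),
            summary TEXT
        ) DEFAULT CHARSET=utf8
    """)
conn.commit()
conn.close()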

settings.py (around line 67)

ITEM_PIPELINES = {
   'CrawlSpiderDemo.pipelines.CrawlspiderdemoPipeline': 300,
   # scrapy_redis pipeline that also writes every item into Redis
   'scrapy_redis.pipelines.RedisPipeline': 400,
}

# scrapy_redis components
# Redis-backed deduplication filter
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# scrapy_redis scheduler, so requests are queued in Redis and shared between spider processes
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# keep the request queue and dupefilter in Redis so the crawl can be paused and resumed
SCHEDULER_PERSIST = True

# Redis connection settings
REDIS_HOST = "10.36.131.78"
REDIS_PORT = 6379
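
Because start_urls is commented out in dushu.py and redis_key = "book:start_urls", the spider starts idle and waits until a start URL is pushed into that Redis list. A minimal sketch using redis-py against the host configured above (and assuming scrapy_redis's default item key pattern "<spider>:items" for the RedisPipeline output):

import redis

# connect to the same Redis instance the spider uses (see REDIS_HOST / REDIS_PORT above)
r = redis.StrictRedis(host="10.36.131.78", port=6379, db=0)

# kick off the distributed crawl: every idle spider waiting on book:start_urls will pick this up
r.lpush("book:start_urls", "http://www.dushu.com/book/1163.html")

# items serialized by RedisPipeline land in a list named after the spider (assumed default key)
print(r.llen("dushu:items"))

Run scrapy crawl dushu in as many terminals or machines as needed; because the scheduler queue and dupefilter live in Redis, requests are deduplicated and shared across all of them.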
