
Full-site crawling of Jianshu with Scrapy

Author: sixkery | Published 2018-09-25 22:33
    • Store scraped data in MySQL, both synchronously and asynchronously
    • Use Selenium to help load and parse Ajax-rendered data
    • Define URL extraction rules for crawling the whole site
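
    For reference, here is a minimal sketch of how such a project can be scaffolded and run with Scrapy's standard command-line tool; the project and spider names match the code below, and the commands assume Scrapy is already installed:

    scrapy startproject jianshu_spider
    cd jianshu_spider
    scrapy genspider -t crawl jianshu jianshu.com
    scrapy crawl jianshu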

    jianshu.py file

    import scrapy
    from scrapy.linkextractors import LinkExtractor
    from scrapy.spiders import CrawlSpider, Rule
    from jianshu_spider.items import JianshuSpiderItem
    
    
    class JianshuSpider(CrawlSpider):
        name = 'jianshu'
        allowed_domains = ['jianshu.com']
        start_urls = ['https://www.jianshu.com/']
    
        rules = (
            # Looking at the URLs, the prefix is always the same and the suffix is a
            # 12-character combination of digits and lowercase letters.
            Rule(LinkExtractor(allow=r'.*/p/[0-9a-z]{12}.*'), callback='parse_detail', follow=True),
        )

        def parse_detail(self, response):
            title = response.xpath('//h1[@class="title"]/text()').extract_first('')  # extract the title
            avatar = response.xpath('//a[@class="avatar"]/img/@src').extract_first('')  # extract the avatar
            author = response.xpath('//span[@class="name"]/a/text()').extract_first('')  # extract the author
            publish_time = response.xpath('//span[@class="publish-time"]/text()').extract_first('')  # extract the publish time
            content = response.xpath('//div[@class="show-content"]').get()  # extract the article body
            # Extract the article id, i.e. the part of the URL that differs between articles
            process_url = response.url.split('?')[0]  # split on '?' and keep the part before it
            article_id = process_url.split('/')[-1]  # split on '/'; the last segment is the article id
            origin_url = response.url
            print(title)
            item = JianshuSpiderItem(title=title, avatar=avatar, author=author, publish_time=publish_time,
                                     content=content, article_id=article_id, origin_url=origin_url)
            return item
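
    As a quick sanity check on the Rule above, the allow pattern only matches article detail pages whose URL contains a 12-character id; both URLs below are made-up examples:

    import re

    # The same pattern as in the LinkExtractor rule; the article id here is hypothetical.
    article_pattern = re.compile(r'.*/p/[0-9a-z]{12}.*')
    print(bool(article_pattern.match('https://www.jianshu.com/p/0a1b2c3d4e5f')))  # True
    print(bool(article_pattern.match('https://www.jianshu.com/u/sixkery')))       # False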
    
    

    items.py file

    import scrapy
    
    
    class JianshuSpiderItem(scrapy.Item):
        # define the fields for your item here like:
        # name = scrapy.Field()
        title = scrapy.Field()
        avatar = scrapy.Field()
        author = scrapy.Field()
        publish_time = scrapy.Field()
        content = scrapy.Field()
        article_id = scrapy.Field()
        origin_url = scrapy.Field()
    
    

    settings.py file

    ROBOTSTXT_OBEY = False
    DOWNLOAD_DELAY = 1
    DEFAULT_REQUEST_HEADERS = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
    }
    DOWNLOADER_MIDDLEWARES = {
        # 'jianshu_spider.middlewares.JianshuSpiderDownloaderMiddleware': 543,
        'jianshu_spider.middlewares.SeleniumDownloadMiddleware': 543,
    }
    ITEM_PIPELINES = {
        'jianshu_spider.pipelines.JianshuSpiderPipeline': 300,
        # 'jianshu_spider.pipelines.JianshuTwistedPipeline': 300,
    }
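
    If you prefer to launch the crawl from a Python script instead of the scrapy crawl command (convenient for debugging in an IDE), a minimal sketch follows; the run.py file name and the spiders.jianshu module path are assumptions based on the default project layout:

    # run.py (hypothetical helper script placed next to scrapy.cfg)
    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    from jianshu_spider.spiders.jianshu import JianshuSpider  # assumed module path

    process = CrawlerProcess(get_project_settings())  # picks up the settings.py above
    process.crawl(JianshuSpider)
    process.start()  # blocks until the crawl finishes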
    

    pipelines.py file

    import pymysql
    from twisted.enterprise import adbapi  # asynchronous database connection pool
    from pymysql import cursors  # database cursor classes (DictCursor is used below)

    class JianshuSpiderPipeline(object):
        def __init__(self):
            params = {
                'host': '127.0.0.1',
                'port': 3306,
                'user': 'root',
                'password': '1326628437',
                'database': 'jianshu',
                'charset': 'utf8'
            }
            self.conn = pymysql.connect(**params)
            self.cursor = self.conn.cursor()
            self._sql = None

        @property  # expose the statement as a property so it can be read like an attribute
        def sql(self):
            if not self._sql:
                self._sql = '''insert into article(title,author,avatar,publish_time,article_id,
                origin_url,content) values(%s,%s,%s,%s,%s,%s,%s)'''
            return self._sql

        def process_item(self, item, spider):
            self.cursor.execute(self.sql, (item['title'], item['author'], item['avatar'], item['publish_time'],
                                           item['article_id'], item['origin_url'], item['content']))
            self.conn.commit()
            return item
    
    
    # Asynchronous inserts: the insert is an I/O operation, and with a large amount of
    # data the synchronous version above can block, so inserting asynchronously is worthwhile.

    class JianshuTwistedPipeline(object):
        def __init__(self):
            params = {
                'host': '127.0.0.1',
                'port': 3306,
                'user': 'root',
                'password': '1326628437',
                'database': 'jianshu',
                'charset': 'utf8',
                'cursorclass': cursors.DictCursor
            }
            # Use an asynchronous connection pool so inserts do not block the crawl
            self.dbpool = adbapi.ConnectionPool("pymysql", **params)
            self._sql = None

        @property
        def sql(self):
            if not self._sql:
                self._sql = '''insert into article(title,author,avatar,publish_time,article_id,
                origin_url,content) values(%s,%s,%s,%s,%s,%s,%s)'''
            return self._sql

        def process_item(self, item, spider):
            # Insert the item asynchronously
            defer = self.dbpool.runInteraction(self.insert_item, item)
            # Error handling
            defer.addErrback(self.handle_error, item, spider)
            return item

        def insert_item(self, cursor, item):
            # runInteraction passes the transaction (cursor) first, then the item
            cursor.execute(self.sql, (item['title'], item['author'], item['avatar'], item['publish_time'],
                                      item['article_id'], item['origin_url'], item['content']))

        def handle_error(self, error, item, spider):
            # The failure comes first, followed by the extra arguments from addErrback
            print('+' * 30 + 'error' + '+' * 30)
            print(error)
            print('+' * 30 + 'error' + '+' * 30)
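
    Both pipelines assume that an article table already exists in the jianshu database. A minimal sketch of a one-off script that creates it is shown below; the column types and lengths are assumptions, so adjust them to your data:

    # create_table.py (hypothetical one-off script; column types are assumptions)
    import pymysql

    CREATE_SQL = '''
    CREATE TABLE IF NOT EXISTS article (
        id INT AUTO_INCREMENT PRIMARY KEY,
        title VARCHAR(255),
        author VARCHAR(255),
        avatar VARCHAR(512),
        publish_time VARCHAR(64),
        article_id VARCHAR(16),
        origin_url VARCHAR(512),
        content LONGTEXT
    ) DEFAULT CHARSET=utf8
    '''

    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                           password='1326628437', database='jianshu', charset='utf8')
    with conn.cursor() as cursor:
        cursor.execute(CREATE_SQL)
    conn.commit()
    conn.close()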
    
    

    middlewares.py file

    from selenium import webdriver
    import time
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.wait import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    from scrapy.http.response.html import HtmlResponse
    
    # Rewrite the request handling with selenium so that pages whose data is loaded
    # via Ajax can still be scraped: like counts, comment counts, favorite counts and
    # the "recommended reading" links are all loaded via Ajax.
    class SeleniumDownloadMiddleware(object):
        def __init__(self):
            self.browser = webdriver.Chrome()  # a local chromedriver must be available
            self.wait = WebDriverWait(self.browser, 10)

        def process_request(self, request, spider):
            self.browser.get(request.url)
            print('Downloading the url with selenium')
            time.sleep(1)
            try:
                while True:
                    # Some articles have several "show more" buttons (e.g. in the
                    # "included in collections" section), so keep clicking until the
                    # button can no longer be found.
                    showmore = self.browser.find_element_by_class_name('show-more')
                    showmore.click()
                    time.sleep(0.3)
            except Exception:
                # find_element_by_class_name raises NoSuchElementException once there
                # is nothing left to expand, which ends the loop.
                pass
            source = self.browser.page_source
            response = HtmlResponse(url=self.browser.current_url, request=request, body=source, encoding='utf-8')
            return response
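
    The middleware above never closes the Chrome instance it starts. One possible refinement, sketched below under the assumption that it is added to the same middlewares.py (the ManagedSeleniumDownloadMiddleware name is made up), is to quit the browser when the spider closes by hooking Scrapy's spider_closed signal:

    from scrapy import signals


    class ManagedSeleniumDownloadMiddleware(SeleniumDownloadMiddleware):
        @classmethod
        def from_crawler(cls, crawler):
            # Scrapy calls from_crawler when building the middleware, which gives us
            # access to the crawler's signal manager.
            middleware = cls()
            crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
            return middleware

        def spider_closed(self, spider):
            # Release the Chrome process so finished crawls do not leave windows open.
            self.browser.quit()

    To use it, point the DOWNLOADER_MIDDLEWARES entry in settings.py at ManagedSeleniumDownloadMiddleware instead of SeleniumDownloadMiddleware.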
    
    
