day7: scrapy - scraping Weibo info after login

Author: 是东东 | Published 2018-08-22 01:17

Weibo's pages are fairly involved, so the login flow and the page markup need to be analyzed before writing the crawl. The spider below first POSTs the login form to passport.weibo.cn, checks the JSON response for success, and only then requests and parses the timeline pages.
login.py

# -*- coding: utf-8 -*-
import scrapy
import json
from sevenweibospider.items import SevenweibospiderItem


class LoginSpider(scrapy.Spider):
    name = 'login'
    allowed_domains = ['weibo.cn']
    # start_urls = ['http://weibo.cn/']
    # start_urls would only issue plain GET requests;
    # log in with a POST request instead
    def start_requests(self):
        login_url = "https://passport.weibo.cn/sso/login"
        formdata = {
                'username': 'your_weibo_account',   # replace with your own account
                'password': 'your_password',
                'savestate': '1',
                'r': 'https://weibo.cn/',
                'ec': '0',
                'pagerefer': 'https://weibo.cn/pub/',
                'entry': 'mweibo',
                'wentry': '',
                'loginfrom': '',
                'client_id': '',
                'code': '',
                'qq': '',
                'mainpageflag': '1',
                'hff': '',
                'hfp': '',
        }
        yield scrapy.FormRequest(url=login_url, formdata=formdata, callback=self.parse_login)

    def parse_login(self, response):
        # check the response body to see whether the login succeeded
        json_res = json.loads(response.text)
        if json_res["retcode"] == 20000000:
            # login succeeded: fetch the timeline pages. The since_id/max_id
            # values below are session-specific; copy them from your own
            # logged-in weibo.cn page source.
            info_url = "https://weibo.cn/?since_id=GvDAIAMb0&max_id=GvCRr7fmv&prev_page=%d&page=%d"
            for i in range(1, 2):  # only the first page; widen the range for more
                url = info_url % (i-1, i)
                yield scrapy.Request(url=url, callback=self.parse_info)
        else:
            print('Login failed!')

    def parse_info(self, response):
        weibo_list = response.xpath("//div[@class='c' and @id]")
        for weibo in weibo_list:
            item = SevenweibospiderItem()
            div = weibo.xpath("./div")
            if len(div) == 1:
                # a single div: original post without picture
                item["category"] = "Original"
                item["name"] = weibo.xpath("./div/a[@class='nk']/text()").extract_first()
                item["content"] = weibo.xpath("./div/span[@class='ctt']/text()").extract_first()
                item["dianzan"] = weibo.xpath("./div/a[2]/text()").extract_first()
                item["relay"] = weibo.xpath("./div/a[3]/text()").extract_first()
                item["comment"] = weibo.xpath("./div/a[4]/text()").extract_first()
            elif len(div) == 2:
                # two divs: repost without picture, or original with picture;
                # decided below by whether an image is present
                item["category"] = ""
                item["name"] = weibo.xpath("./div[1]/a[@class='nk']/text()").extract_first()
                item["content"] = weibo.xpath("./div[1]/span[@class='ctt']/text()").extract_first()
                item["dianzan"] = weibo.xpath("./div[2]/a/text()").extract()[-4]
                item["relay"] = weibo.xpath("./div[2]/a/text()").extract()[-3]
                item["comment"] = weibo.xpath("./div[2]/a/text()").extract()[-2]

                # picture vs. repost reason
                img = weibo.xpath("./div[2]//img[@class='ib']/@src")
                if len(img) == 0:
                    # repost without picture
                    item['category'] = "Relay"
                    item["reason"] = weibo.xpath("./div[2]/text()").extract_first()
                else:
                    # original with picture
                    item['category'] = "Original and Picture"
                    item["img_url"] = img.extract_first()
            else:
                # len(div) == 3: repost with picture
                item["category"] = "Relay and Picture"
                item["name"] = weibo.xpath("./div[1]/a[@class='nk']/text()").extract_first()
                item["content"] = weibo.xpath("./div[1]/span[@class='ctt']/text()").extract_first()
                item["dianzan"] = weibo.xpath("./div[3]/a/text()").extract()[-4]
                item["relay"] = weibo.xpath("./div[3]/a/text()").extract()[-3]
                item["comment"] = weibo.xpath("./div[3]/a/text()").extract()[-2]

                # picture and repost reason
                item["img_url"] = weibo.xpath("./div[2]//img[@class='ib']/@src").extract_first()
                item["reason"] = weibo.xpath("./div[3]/text()").extract_first()
            yield item
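
One caveat on parse_info: the like/repost/comment counts are pulled out by position (extract()[-4], [-3], [-2]), which breaks as soon as the trailing links shift. A more defensive sketch matches the link text instead; extract_count is a hypothetical helper, not part of the original spider:

import re


def extract_count(weibo, div_index, label):
    # Return the trailing action link whose text starts with `label`
    # (e.g. u"赞", u"转发", u"评论") and carries a bracketed count such
    # as "赞[12]". Hypothetical helper, not in the original post.
    texts = weibo.xpath("./div[%d]/a/text()" % div_index).extract()
    for text in texts:
        if text.startswith(label) and re.search(r"\[\d+\]", text):
            return text
    return None

With it, item["dianzan"] = weibo.xpath("./div[2]/a/text()").extract()[-4] would become item["dianzan"] = extract_count(weibo, 2, u"赞").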

items.py

import scrapy


class SevenweibospiderItem(scrapy.Item):
    # Inspecting the page elements shows four kinds of weibo div:
    # 1. original without picture   2. original with picture
    # 3. repost without picture     4. repost with picture

    # fields shared by all four kinds
    # category of post
    category = scrapy.Field()
    # poster's name
    name = scrapy.Field()
    # post content
    content = scrapy.Field()
    # likes
    dianzan = scrapy.Field()
    # reposts
    relay = scrapy.Field()
    # comments
    comment = scrapy.Field()

    # kind-specific fields
    # picture URL
    img_url = scrapy.Field()
    # repost reason
    reason = scrapy.Field()
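
The original post stops at the item definition; for completeness, here is a minimal sketch of a pipeline that persists each item as one JSON line. The file name and class are assumptions, not shown in the original post:

# pipelines.py (hypothetical; the original post does not show a pipeline)
import json


class SevenweibospiderPipeline(object):

    def open_spider(self, spider):
        # one JSON object per line keeps the output easy to stream and diff
        self.file = open("weibo_items.jl", "w", encoding="utf-8")

    def process_item(self, item, spider):
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + "\n")
        return item

    def close_spider(self, spider):
        self.file.close()

It would be enabled in settings.py with ITEM_PIPELINES = {'sevenweibospider.pipelines.SevenweibospiderPipeline': 300}.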

settings.py (around line 42: uncomment DEFAULT_REQUEST_HEADERS and extend it)

DEFAULT_REQUEST_HEADERS = {
    'Accept': '*/*',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Referer': 'https://passport.weibo.cn/signin/login?entry=mweibo&r=https%3A%2F%2Fweibo.cn%2F&backTitle=%CE%A2%B2%A9&vt=',
    'Origin': 'https://passport.weibo.cn',
    'Connection': 'keep-alive',
}
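
Two further settings matter for the logged-in session to survive; the values below are assumptions about a default scrapy startproject template rather than part of the original post:

# settings.py (additional entries; assumed, not shown in the original post)
ROBOTSTXT_OBEY = False    # with the default True, disallowed weibo.cn paths would be dropped
COOKIES_ENABLED = True    # Scrapy's default; the login cookie must be replayed on later requests

The spider can then be run from the project root with scrapy crawl login, optionally adding -o weibo.jl to export the yielded items.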
