python scrapy simulated login (entering the captcha by hand)

Author: SkTj | Published 2019-05-14 15:45

    scrapy startproject yelloweb
    vi items.py
    import scrapy

    class YellowebItem(scrapy.Item):
        # define the fields for your item here like:
        # name = scrapy.Field()
        title = scrapy.Field()  # video title
        link = scrapy.Field()   # video link
        img = scrapy.Field()    # cover image link
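
These three fields are everything the crawl keeps per video, and they map onto the title/img/link columns written by the MySQL pipeline further down.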

    vi spiders/yellowebSpider.py
    import urllib.request
    from urllib.parse import urljoin

    import scrapy
    from scrapy.http import Request, FormRequest
    from scrapy.selector import Selector

    from yelloweb.items import YellowebItem

    class yellowebSpider(scrapy.Spider):
        name = "webdata"  # the spider's identifier; it must be unique
        allowed_domains = ["91.91p17.space"]

        def start_requests(self):
            return [Request("http://91.91p17.space/login.php", callback=self.login, meta={"cookiejar": 1})]

        headers = {
            "Host": "91.91p17.space",
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Referer": "http://91.91p17.space/login.php",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.8"
        }
        def login(self, response):
            print("Starting the simulated login!")
            captcha_image = response.xpath('//*[@id="safecode"]/@src').extract()
            if len(captcha_image) > 0:
                print(urljoin("http://91.91p17.space", captcha_image[0]))
                # pick a file name and local path to save the captcha image to
                localpath = r"D:\SoftWare\Soft\WorkSpace\Python\scrapy\code\captcha.png"

                opener = urllib.request.build_opener()
                opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
                urllib.request.install_opener(opener)
                urllib.request.urlretrieve(urljoin("http://91.91p17.space", captcha_image[0]), localpath)

                print("This login has a captcha; open the local captcha image and type its value:")
                captcha_value = input()
                data = {
                    "username": "your username here",
                    "password": "your password here",
                    "fingerprint": "1838373130",
                    "fingerprint2": "1a694ef42547498d2142328d89e38c22",
                    "captcha_input": captcha_value,
                    "action_login": "Log In",
                    "x": "54",
                    "y": "21"
                }
            else:
                print("No captcha found on the login page -- the code must be wrong again!")
                return
            # print(data)
            print("Captcha entered, submitting the login form!")
            return [FormRequest.from_response(response,
                                              # carry the session cookies along
                                              meta={'cookiejar': response.meta['cookiejar']},
                                              # set headers to mimic a browser
                                              headers=self.headers,
                                              formdata=data,
                                              callback=self.next
                                              )]
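
        # Note: FormRequest.from_response() rebuilds the login <form> from the page that was
        # just fetched, so hidden form fields are carried over automatically and the formdata
        # dict only needs to supply or override the visible ones. The 'cookiejar' meta key keeps
        # the session cookies, which is why it is forwarded on every follow-up request below.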
    

        def next(self, response):
            href = response.xpath('//*[@id="tab-featured"]/div/a/@href').extract()
            url = urljoin("http://91.91p17.space", href[0])
            # print("\n\n\n\n\n\n" + url + "\n\n\n\n\n\n")
            yield scrapy.http.Request(url, meta={'cookiejar': response.meta['cookiejar']},
                                      # set headers to mimic a browser
                                      headers=response.headers, callback=self.parse)

        def parse(self, response):
            sel = Selector(response)
            print("Reached the listing page with more videos")

            web_list = sel.css('.listchannel')
            for web in web_list:
                item = YellowebItem()
                try:
                    item['link'] = web.xpath('a/@href').extract()[0]
                    url = response.urljoin(item['link'])
                    yield scrapy.Request(url, meta={'cookiejar': response.meta['cookiejar']}, callback=self.parse_content, dont_filter=True)
                except:
                    print("Failed to extract the detail-page link...")

            # follow the link to the next listing page (outside the per-video loop)
            href = response.xpath('//*[@id="paging"]/div/form/a[6]/@href').extract()
            nextPage = urljoin("http://91.91p17.space/video.php", href[0])
            print(nextPage)
            if nextPage:
                yield scrapy.http.Request(nextPage, meta={'cookiejar': response.meta['cookiejar']},
                                          # set headers to mimic a browser
                                          headers=response.headers, callback=self.parse)
    
    
        def parse_content(self, response):
            try:
                name = response.xpath('//*[@id="head"]/h3/a[1]/text()').extract()[0]

                item = YellowebItem()
                item['link'] = response.xpath('//*[@id="vid"]//@src').extract()[0]
                item['title'] = response.xpath('//*[@id="viewvideo-title"]/text()').extract()[0].strip()
                item['img'] = response.xpath('//*[@id="vid"]/@poster').extract()[0]
                yield item
            except:
                print("Failed... could not scrape this detail page...")
    

    vi pipelines.py
    import pymysql as db

    class YellowebPipeline(object):
        def __init__(self):
            self.con = db.connect(user="root", passwd="root", host="localhost", db="python", charset="utf8")
            self.cur = self.con.cursor()
            self.cur.execute('drop table if exists 91pron_content')
            self.cur.execute("create table 91pron_content(id int auto_increment primary key, title varchar(200), img varchar(244), link varchar(244))")

        def process_item(self, item, spider):
            self.cur.execute("insert into 91pron_content(id,title,img,link) values(NULL,%s,%s,%s)", (item['title'], item['img'], item['link']))
            self.con.commit()
            return item
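
The pipeline connects to a database named "python" but only creates the table, not the database itself, so the database has to exist before the first crawl. A minimal one-off setup sketch, assuming the same root/root MySQL credentials used above:

    import pymysql as db

    # create the target database once, so YellowebPipeline can connect to it
    con = db.connect(user="root", passwd="root", host="localhost", charset="utf8")
    cur = con.cursor()
    cur.execute("create database if not exists python default character set utf8")
    con.close()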
    

    vi settings.py
    DOWNLOADER_MIDDLEWARES = {
        'yelloweb.middlewares.MyCustomDownloaderMiddleware': None,
    }
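
For YellowebPipeline to actually receive the scraped items, it also has to be enabled in settings.py. A minimal sketch, assuming the default module path that scrapy startproject generates (the number is just an ordering priority):

    ITEM_PIPELINES = {
        'yelloweb.pipelines.YellowebPipeline': 300,
    }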

    scrapy crawl webdata   # the crawl command takes the spider's name ("webdata"), not the class name
