为什么要重写start_requests?
比如模板默认的是start_urls = ['http://books.toscrape.com/']
如果想添加headers伪装浏览器,此时就无从下手了;而重写start_requests可以方便地做到这一点,书上做了重点说明,以下是完整代码:
# -*- coding: utf-8 -*-
import scrapy
class BooksSpider(scrapy.Spider):
    """Spider for http://books.toscrape.com that yields one item per book
    (its name and price) and follows the "next" pagination link.

    ``start_requests`` is overridden instead of using the template's
    ``start_urls`` so a browser-like ``User-Agent`` header can be attached
    to the initial request.
    """

    name = 'books'
    allowed_domains = ['books.toscrape.com']
    # start_urls = ['http://books.toscrape.com/']  # replaced by start_requests

    def start_requests(self):
        # The callback is parse_book, not the default parse(), and the
        # request carries a spoofed User-Agent header.
        yield scrapy.Request(
            'http://books.toscrape.com',
            callback=self.parse_book,
            headers={'User-Agent': 'Mozilla/5.0'},
            # Duplicate filtering makes no difference for this single URL.
            dont_filter=True,
        )

    def parse_book(self, response):
        """Extract every book on the page, then schedule the next page
        with this same method as the callback."""
        path = response.xpath(
            '//li[@class="col-xs-6 col-sm-4 col-md-3 col-lg-3"]/article')
        for book in path:
            name = book.xpath('./h3/a/text()').extract()
            price = book.xpath('./div[2]/p[1]/text()').extract()
            yield {
                'name': name,
                'price': price
            }
        next_page = response.xpath('//li[@class="next"]/a/@href').extract_first()
        if next_page:
            next_page = response.urljoin(next_page)
            # BUG FIX: the original had a bare ``yield`` on one line and the
            # Request expression on the next, which yielded None (an error in
            # Scrapy) and silently discarded the Request — pagination never
            # ran. The Request must be the yield's operand. Note the callback
            # is parse_book, not the default self.parse.
            yield scrapy.Request(next_page, callback=self.parse_book)
网友评论