2019-05-30 Price-Comparison Tool Development

Author: 年画儿 | Published 2019-05-30 20:21

    Currently up to lesson 2-11 of the course.

    """当当网爬取书籍"""
    import requests
    from lxml import html
    
    def spider(sn, book_list=[]):
        """ 爬取当当网的数据 """
        url = 'http://search.dangdang.com/?key={sn}&act=input'.format(sn=sn)
        # 获取html内容
        html_data = requests.get(url).text
    
        # xpath对象
        selector = html.fromstring(html_data)
    
        # 找到书本列表
        ul_list = selector.xpath('//div[@id="search_nature_rg"]/ul/li')
        print(len(ul_list))
        for li in ul_list:
            # 标题
            title = li.xpath('a/@title')
            print(title[0])
            # 购买链接
            link = li.xpath('a/@href')
            print(link[0])
            # 价格
            price = li.xpath('p[@class="price"]/span[@class="search_now_price"]/text()')
            print(price[0].replace('¥', ''))
    
            # 商家
            store = li.xpath('p[@class="search_shangjia"]/a/text()')
            store = '当当自营' if len(store) == 0 else store[0]
            print(store)
            print('-----------------------')
    
            book_list.append({
                'title': title[0],
                'price': price[0].replace('¥', ''),
                'link': link[0],
                'store': store[0]
            })
    
    
    if __name__ == '__main__':
        sn = '9787115428028'
        spider(sn)
    
    
    """京东爬取书籍"""
    import requests
    from lxml import html
    
    
    def spider(sn, book_list=[]):
        """ 爬取京东的图书数据 """
        url = 'https://search.jd.com/Search?keyword={0}'.format(sn)
        # 获取HTML文档
    
        resp = requests.get(url)
        print(resp.encoding)
        resp.encoding = 'utf-8'
    
        html_doc = resp.text
    
        # 获取xpath对象
        selector = html.fromstring(html_doc)
    
        # 找到列表的集合
        ul_list = selector.xpath('//div[@id="J_goodsList"]/ul/li')
        print(len(ul_list))
    
        # 解析对应的内容,标题,价格,链接
        for li in ul_list:
            # 标题
            title = li.xpath('div/div[@class="p-name"]/a/@title')
            print(title[0])
            # 购买链接
            link = li.xpath('div/div[@class="p-name"]/a/@href')
            print(link[0])
    
            # 价格
            price = li.xpath('div/div[@class="p-price"]/strong/i/text()')
            print(price[0])
    
            # 店铺
            store = li.xpath('div//a[@class="curr-shop"]/@title')
            print(store[0])
    
            book_list.append({
                'title': title[0],
                'price': price[0],
                'link': link[0],
                'store': store[0]
            })
    
    
    if __name__ == '__main__':
        spider('9787115428028')
    
    """一号店爬取书籍"""
    import requests
    from lxml import html
    
    
    def spider(sn, book_list=[]):
        """ 爬取1号店的图书数据 """
        url = 'https://search.yhd.com/c0-0/k{0}/'.format(sn)
        # 获取到html源码
        html_doc = requests.get(url).text
    
        # xpath对象
        selector = html.fromstring(html_doc)
    
        # 书籍列表
        ul_list = selector.xpath('//div[@id="itemSearchList"]/div')
        print(len(ul_list))
    
        # 解析数据
        for li in ul_list:
            # 标题
            title = li.xpath('div/p[@class="proName clearfix"]/a/@title')
            print(title[0])
            # 价格
            price = li.xpath('div//p[@class="proPrice"]/em/@yhdprice')
            print(price[0])
            # 购买链接
            link = li.xpath('div/p[@class="proName clearfix"]/a/@href')
            print(link[0])
            # 店铺
            store = li.xpath('div/p[@class="storeName limit_width"]/a/@title')
            print(store)
    
            book_list.append({
                'title': title[0],
                'price': price[0],
                'link': link[0],
                'store': store[0]
            })
    
    
    
    if __name__ == '__main__':
        spider('9787115428028')
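

    """Price-comparison entry point: combine the three spiders"""
    # The three scripts above each append their results into a shared book_list,
    # so comparing prices is just a matter of collecting all three result sets
    # and sorting them. This is a minimal sketch, assuming the scripts are saved
    # as spider_dangdang.py, spider_jd.py and spider_yhd.py (the module names
    # are an assumption, not from the original post).
    from spider_dangdang import spider as dangdang_spider
    from spider_jd import spider as jd_spider
    from spider_yhd import spider as yhd_spider


    def main(sn):
        book_list = []
        # Each spider appends dicts with title/price/link/store keys
        dangdang_spider(sn, book_list)
        jd_spider(sn, book_list)
        yhd_spider(sn, book_list)

        # Sort by price, cheapest offer first (prices were stored as numeric strings)
        book_list.sort(key=lambda book: float(book['price']))
        for book in book_list:
            print('{store}: {price} -> {link}'.format(**book))


    if __name__ == '__main__':
        main('9787115428028')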
    
    
