
week2 - Scraping 100,000 Item Listings from Ganji.com (赶集网)

Author: littleY | Published 2016-05-06 23:40

    In the second week of learning Python web scraping, I crawled 100,000 item listings from the Ganji.com (赶集网) second-hand marketplace.

    Results (screenshots of the two MongoDB collections): url_list.png, item_info.png

    Code:

    channel_extract.py

    from bs4 import BeautifulSoup
    import requests
    
    # Spider 1: collect the URL of every second-hand category channel
    start_url = 'http://bj.ganji.com/wu/'
    url_host = 'http://bj.ganji.com'

    def get_channel_urls(url):
        wb_data = requests.get(url)
        soup = BeautifulSoup(wb_data.text, 'lxml')
        links = soup.select('dl.fenlei dt > a')
        for link in links:
            page_url = url_host + link.get('href')
            print(page_url)

    get_channel_urls(start_url)
    
    channel_list = '''
    http://bj.ganji.com/jiaju/
    http://bj.ganji.com/rirongbaihuo/
    http://bj.ganji.com/shouji/
    http://bj.ganji.com/shoujihaoma/
    http://bj.ganji.com/bangong/
    http://bj.ganji.com/nongyongpin/
    http://bj.ganji.com/jiadian/
    http://bj.ganji.com/ershoubijibendiannao/
    http://bj.ganji.com/ruanjiantushu/
    http://bj.ganji.com/yingyouyunfu/
    http://bj.ganji.com/diannao/
    http://bj.ganji.com/xianzhilipin/
    http://bj.ganji.com/fushixiaobaxuemao/
    http://bj.ganji.com/meironghuazhuang/
    http://bj.ganji.com/shuma/
    http://bj.ganji.com/laonianyongpin/
    http://bj.ganji.com/xuniwupin/
    http://bj.ganji.com/qitawupin/
    http://bj.ganji.com/ershoufree/
    http://bj.ganji.com/wupinjiaohuan/
    '''
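
    A small variant (my own sketch, not in the original post) that returns the channel URLs as a list instead of printing them, which makes them easier to paste into channel_list or to feed straight into the crawl:

    # Sketch: same selector and URL prefix as get_channel_urls above, but returning a list
    def collect_channel_urls(url, host='http://bj.ganji.com'):
        wb_data = requests.get(url)
        soup = BeautifulSoup(wb_data.text, 'lxml')
        return [host + link.get('href') for link in soup.select('dl.fenlei dt > a')]

    # channels = collect_channel_urls('http://bj.ganji.com/wu/')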
    

    page_parsing.py

    from bs4 import BeautifulSoup
    import requests
    import random
    import time
    import pymongo
    
    client = pymongo.MongoClient('localhost', 27017)
    ceshi = client['ceshi']
    url_list = ceshi['url_list1']
    item_info = ceshi['item_info1']
    
    # Pick one random User-Agent when the module is imported
    userAgent = random.choice(['Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.23 Mobile Safari/537.36', 
                               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.86 Safari/537.36', 
                               'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.23 Mobile Safari/537.36', 
                               'Mozilla/5.0 (Linux; Android 5.1.1; Nexus 6 Build/LYZ28E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.23 Mobile Safari/537.36',  
                               'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1',   
                               'Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1'
                                ])
    # Spider 2: collect item links from one list page of a channel and hand each one to get_item_info
    def get_links_from(channel, pages, who_sells='o'):
        headers = {
            'User-Agent': userAgent,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive'
        }
        # Reconnect inside the function so each worker process gets its own MongoDB client
        client = pymongo.MongoClient('localhost', 27017)
        ceshi = client['ceshi']
        url_list = ceshi['url_list1']
        list_view = '{}{}{}/'.format(channel, str(who_sells), str(pages))
        wb_data = requests.get(list_view, headers=headers)
    
        # Random delay of 0-2 seconds between requests
        i = random.randrange(0, 3)
        time.sleep(i)
        soup = BeautifulSoup(wb_data.text, 'lxml')
        links = soup.select('li.js-item > a')
        # Only parse list pages that really exist (they contain a pagination bar)
        if soup.find('ul', 'pageLink'):
            for link in links:
                item_link = link.get('href')
                # Some listings redirect; read the real URL from the Location response header
                head_resp = requests.head(item_link, allow_redirects=False)
                if 'Location' in head_resp.headers:
                    item_link = head_resp.headers['Location']
                url_list.insert_one({'url': item_link})
                get_item_info(item_link)
        else:
            pass
            
    def get_item_info(url):
        headers = {
            'User-Agent': userAgent,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive'
        }
        # Reconnect per call so the function is safe to run in worker processes
        client = pymongo.MongoClient('localhost', 27017)
        ceshi = client['ceshi']
        item_info = ceshi['item_info1']
        wb_data = requests.get(url, headers=headers)
        soup = BeautifulSoup(wb_data.text, 'lxml')
        # Listings that have been taken down render an error page; skip them
        no_longer_exist = soup.find('div', 'error')
        if no_longer_exist:
            pass
        else:
            title = soup.select('.title-name')[0].text
            date = soup.select('.pr-5')[0].text.split('发布')[0].strip() if soup.find('i', 'pr-5') else None
            cate = soup.select('.det-infor > li > span > a')[0].text
            price = soup.select('.f22.fc-orange.f-type')[0].text
            loco_list = soup.select('div.leftBox > div:nth-of-type(3) > div > ul > li:nth-of-type(3) > a')
            area = [loco.text for loco in loco_list]
            # The condition field ("新旧程度") is only present on some listings
            second_infor = soup.select('ul.second-det-infor.clearfix > li')
            state = second_infor[0].text.split(':')[-1].strip() if second_infor and second_infor[0].text.split(':')[0].strip() == '新旧程度' else None
            item_info.insert_one({'title': title, 'date': date, 'cate': cate, 'price': price,
                                  'area': area, 'state': state, 'url': url})
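
    A design note: userAgent above is chosen once when the module is imported, so every request from a worker reuses the same string. A small sketch (my own addition; the user_agents parameter is hypothetical) of building fresh headers per request so the User-Agent rotates:

    # Hypothetical helper, not part of the original code: rotate the User-Agent per request
    def random_headers(user_agents):
        return {
            'User-Agent': random.choice(user_agents),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
            'Connection': 'keep-alive',
        }

    # Usage sketch inside get_links_from / get_item_info:
    # wb_data = requests.get(list_view, headers=random_headers([...list of UA strings...]))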
    

    main.py

    from multiprocessing import Pool
    from channel_extract import channel_list
    from page_parsing import get_links_from, url_list, item_info, get_item_info
    
    def get_all_links_from(channel):
        # Crawl list pages 1-99 for each channel
        for num in range(1, 100):
            get_links_from(channel, num)

    if __name__ == '__main__':
        pool = Pool()
        pool.map(get_all_links_from, channel_list.split())
        # Resume from a breakpoint: only fetch details for URLs that have not been scraped yet
        db_urls = [item['url'] for item in url_list.find()]
        index_urls = [item['url'] for item in item_info.find()]
        x = set(db_urls)
        y = set(index_urls)
        rest_of_urls = x - y
        pool.map(get_item_info, rest_of_urls)
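
    On a single-core machine, or when the work is dominated by network I/O, a thread pool can stand in for the process pool. A hedged sketch (my own addition) using multiprocessing.dummy, which exposes the same Pool API backed by threads:

    # Sketch, not from the original post: thread-based pool instead of one process per CPU core
    from multiprocessing.dummy import Pool as ThreadPool
    from channel_extract import channel_list
    from page_parsing import get_links_from

    def get_all_links_from(channel):
        for num in range(1, 100):
            get_links_from(channel, num)

    if __name__ == '__main__':
        thread_pool = ThreadPool(4)  # 4 worker threads; adjust as needed
        thread_pool.map(get_all_links_from, channel_list.split())
        thread_pool.close()
        thread_pool.join()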
    

    counts.py

    import time
    from page_parsing import url_list, item_info
    
    # Simple progress monitor: print the size of both collections every 10 seconds
    while True:
        print(url_list.find().count())
        print(item_info.find().count())
        print('\n')
        time.sleep(10)
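
    Note: Cursor.count() as used above works with the PyMongo releases of that era but was removed in later versions; with PyMongo 3.7+ or 4.x the equivalent monitor would look roughly like this (a sketch, same collections as above):

    import time
    from page_parsing import url_list, item_info

    # Same monitor, but using Collection.count_documents() instead of Cursor.count()
    while True:
        print(url_list.count_documents({}))
        print(item_info.count_documents({}))
        print('\n')
        time.sleep(10)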
    
    Summary:
    • Before a large-scale crawl, design the spider workflow first: use separate spiders, one for collecting the URL links and one for scraping each link's detail page, and use two collections, one for the URLs and one for the item details.
    • Some pages are redirected. requests' head method fetches only the response headers, and the real URL can be read from the 'Location' header. Note that requests.head() does not follow redirects by default; pass allow_redirects=True to follow them (requests.get(), by contrast, follows redirects by default). See the sketch after this list.
    • To speed up crawling, use multithreading or multiprocessing. Multiprocessing pays off when there are enough CPU cores, since each worker process runs on its own core; on a single-core machine, multithreading is the practical choice.
    • Network problems will inevitably interrupt the crawl, so a resume-from-breakpoint mechanism is needed to keep the database free of duplicates. The idea: store each item's URL together with its details; if the program stops, take the set difference between all collected links and the links already present in the item-details collection, and crawl only the remainder.
    • By counting the records in the database, a small monitoring script can report how many items have been scraped so far.
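
    A minimal sketch of the redirect resolution described above (my own illustration; it follows the same requests.head idea as page_parsing.py, and the listing URL in the comment is hypothetical):

    import requests

    def resolve_redirect(url):
        # HEAD request; requests.head() does not follow redirects unless told to
        resp = requests.head(url, allow_redirects=False)
        # A 3xx response carries the real target in its Location header
        if resp.is_redirect:
            return resp.headers['Location']
        return url

    # Hypothetical listing URL, for illustration only:
    # print(resolve_redirect('http://bj.ganji.com/jiaju/123456.htm'))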
