Python实战计划: Week 2 Major Assignment

Author: 唐宗宋祖 | Published 2016-05-28 13:38

    channel.py: scrapes the link for each channel category

        import requests
        from bs4 import BeautifulSoup
        
        
        def get_channel_list(url):
            # Collect the category links (dl.fenlei) from the index page
            # and print them, one per line.
            host = 'http://bj.ganji.com'
            wb_data = requests.get(url)
            soup = BeautifulSoup(wb_data.text, 'lxml')
            hrefs = soup.select('dl.fenlei > dt > a')
        
            for href in hrefs:
                fenlei_link = host + href.get('href')  # hrefs are relative, so prepend the host
                print(fenlei_link)
        
        
        start_url = 'http://bj.ganji.com/wu/'
        get_channel_list(start_url)
        
        channel_list = '''
            http://bj.ganji.com/jiaju/
            http://bj.ganji.com/rirongbaihuo/
            http://bj.ganji.com/shouji/
            http://bj.ganji.com/shoujihaoma/
            http://bj.ganji.com/bangong/
            http://bj.ganji.com/nongyongpin/
            http://bj.ganji.com/jiadian/
            http://bj.ganji.com/ershoubijibendiannao/
            http://bj.ganji.com/ruanjiantushu/
            http://bj.ganji.com/yingyouyunfu/
            http://bj.ganji.com/diannao/
            http://bj.ganji.com/xianzhilipin/
            http://bj.ganji.com/fushixiaobaxuemao/
            http://bj.ganji.com/meironghuazhuang/
            http://bj.ganji.com/shuma/
            http://bj.ganji.com/laonianyongpin/
            http://bj.ganji.com/xuniwupin/
            http://bj.ganji.com/qitawupin/
            http://bj.ganji.com/ershoufree/
            http://bj.ganji.com/wupinjiaohuan/
        '''
    
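    The channel_list string above was pasted by hand from what get_channel_list printed. A minimal variant that returns the links as a Python list instead, so the copy-and-paste step can be skipped, might look like this (get_channel_links is a name introduced here, not part of the original code):

        def get_channel_links(url):
            # Same selector as get_channel_list, but collect the links
            # into a list instead of printing them.
            host = 'http://bj.ganji.com'
            wb_data = requests.get(url)
            soup = BeautifulSoup(wb_data.text, 'lxml')
            return [host + a.get('href') for a in soup.select('dl.fenlei > dt > a')]

        # channel_links = get_channel_links('http://bj.ganji.com/wu/')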

    pages_prasing_zouye.py contains two functions. Function 1 scrapes the detailed information for a single item; function 2 scrapes the item links from each list page and calls function 1. (Function 2, spider 1 in the code, does two main jobs: it stores each item link in the database, and it passes the link to spider 2, which scrapes and saves the item details.)

        import requests
        import pymongo
        import time
        from bs4 import BeautifulSoup
        
        client = pymongo.MongoClient('localhost', 27017)
        gan_ji = client['ganji']
        url_list = gan_ji['url_list']
        iterm_info = gan_ji['iterm_info']
        
        # Sample URLs for the two spiders below.
        url = 'http://bj.ganji.com/jiaju/o1'
        url1 = 'http://bj.ganji.com/jiaju/2125004571x.htm'
        headers = {'Host': 'bj.ganji.com',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2723.3 Safari/537.36',
                   'Cookie': 'statistics_clientid=me; ganji_uuid=2850164659715942462518; citydomain=bj; ganji_xuuid=fcb80e61-aea0-4c5c-cb78-1d6055c096ff.1464314118300; GANJISESSID=669d9cafe447e6fcd543e4f60d01efda; STA_DS=1; lg=1; _gl_tracker=%7B%22ca_source%22%3A%22www.google.com%22%2C%22ca_name%22%3A%22-%22%2C%22ca_kw%22%3A%22-%22%2C%22ca_id%22%3A%22-%22%2C%22ca_s%22%3A%22seo_google%22%2C%22ca_n%22%3A%22-%22%2C%22ca_i%22%3A%22-%22%2C%22sid%22%3A35661161609%7D; __utma=32156897.1317419774.1464314061.1464314061.1464314061.1; __utmb=32156897.11.10.1464314061; __utmc=32156897; __utmz=32156897.1464314061.1.1.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided)',
                   'DNT': '1',
                   'Connection': 'keep-alive'
                   }
        
        
        # Spider 1: scrape the item links on each list page and store them,
        # then hand each link to spider 2 to fetch the details
        def from_channel_store_link(channel, pages):
            url = '{}o{}'.format(channel, pages)  # list pages look like <channel>o<page>
            wb_data = requests.get(url, headers=headers)
            time.sleep(1)  # throttle requests
            if wb_data.status_code == 200:
                soup = BeautifulSoup(wb_data.text, 'lxml')
                links = soup.select('#wrapper > div.leftBox > div.layoutlist > dl > dd.feature > div > ul > li > a')
                for link in links:
                    lk = link.get('href')
                    if lk.rfind('http://bj.ganji.com/') != -1:  # keep only bj.ganji.com links
                        url_list.insert_one({'iterm_list': lk})  # store the link in MongoDB
                        from_lk_get_iterminfo(lk)
                        print('Done')
        
        
        # from_channel_store_link(url)
        # Spider 2: scrape the details of a single item
        def from_lk_get_iterminfo(url1):
            wb_data = requests.get(url1, headers=headers)
            soup = BeautifulSoup(wb_data.text, 'lxml')
            titles = soup.select('h1.title-name')
            press_dates = soup.select('i.pr-5')  # the first token of the text is the posting date
            # views = soup.select('i.pl-5')
            leixings = soup.select('#wrapper > div.content.clearfix > div.leftBox > div:nth-of-type(3) > div > ul > li > span > a')
            prices = soup.select(
                '#wrapper > div.content.clearfix > div.leftBox > div > div > ul > li:nth-of-type(2) > i.f22.fc-orange.f-type')
            adds = soup.select(
                '#wrapper > div.content.clearfix > div.leftBox > div:nth-of-type(3) > div > ul > li:nth-of-type(3)')
            xinjius = soup.select(
                '#wrapper > div.content.clearfix > div.leftBox > div > div.det-summary > div > div.second-dt-bewrite > ul > li')
            for title, press_date, price, add, xinjiu, leixing in zip(titles, press_dates, prices, adds, xinjius, leixings):
                data = {
                    'title': title.get_text(),
                    'press_date': press_date.get_text().split()[0],  # first token is the posting date
                    'price': price.get_text().split()[0],
                    'add': list(add.stripped_strings),
                    'xinjiu': xinjiu.get_text().split()[-1],
                    'leixing': leixing.text
                }
                print(data)
                iterm_info.insert_one(data)
    
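    Because spider 1 records every link in url_list before spider 2 scrapes it, the two collections can also drive a restart after a crash. A minimal sketch, assuming each iterm_info document were extended with a 'url' field (the original data dict does not store one):

        def rescrape_missing():
            # Re-run spider 2 only for the links whose details were never saved.
            # Assumes iterm_info documents carry a 'url' field, which the
            # original data dict does not include.
            stored = {doc['iterm_list'] for doc in url_list.find()}
            scraped = {doc.get('url') for doc in iterm_info.find()}
            for lk in stored - scraped:
                from_lk_get_iterminfo(lk)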

    main.py can be run from the terminal and uses multiple processes

    from multiprocessing import Pool
    from pages_prasing_zouye import from_channel_store_link
    from channel import channel_list  # importing channel also runs its module-level get_channel_list(start_url) call
    
    def get_all_links(channel):
        # Walk list pages 1-99 of one channel; pages that do not exist
        # return a non-200 status and are skipped inside the spider.
        for i in range(1, 100):
            from_channel_store_link(channel, i)
    
    if __name__ == '__main__':
        pool = Pool()
        pool.map(get_all_links, channel_list.split())
        pool.close()
        pool.join()
    
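    range(1, 100) requests up to 99 list pages per channel even when a channel has far fewer. One way to stop early, sketched here under the assumption that from_channel_store_link is changed to return True on a 200 response and False otherwise (the original returns nothing):

    def get_all_links_stop_early(channel):
        # Hypothetical variant: stop paging a channel at the first
        # page that fails to load, assuming the spider reports success.
        for i in range(1, 100):
            if not from_channel_store_link(channel, i):
                break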

    jishu.py: counts how many links have been stored

    import time
    from pages_prasing_zouye import url_list
    
    # Print the number of stored links every five seconds while the
    # spiders run in another terminal.
    while True:
        print(url_list.find().count())
        time.sleep(5)
    
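    find().count() works on the pymongo of this era, but Cursor.count() has since been removed, so on pymongo 4 and later the same counter would be written with count_documents:

    while True:
        print(url_list.count_documents({}))  # modern replacement for find().count()
        time.sleep(5)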
