美文网首页
爬取全国各大城市的公交车信息

爬取全国各大城市的公交车信息

作者: 李小萌mmm | 来源:发表于2019-01-24 16:52 被阅读0次

    city_name.py
    爬取城市(市级)的各个名称,大概270多个

    image.png
    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    import requests
    from lxml import etree
    import re
    
    
    def get_city_page():
        """Fetch the blog page that lists Chinese city names with their pinyin.

        Returns:
            The UTF-8 decoded HTML body on HTTP 200, otherwise None.
        """
        url = 'https://blog.csdn.net/Only_Tan/article/details/30459325'
        headers = {
            # Browser-like User-Agent so the server does not reject the scraper.
            "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6"
        }
        html = requests.get(url=url, headers=headers)
        if html.status_code == 200:
            return html.content.decode('utf-8')
        # Make the failure path explicit instead of falling through to an
        # implicit None.
        return None
    def parse_city_html(html):
        """Extract lowercase pinyin city names from the blog page HTML.

        The page lists cities as '中文名(pinyin)' paragraphs; the text inside
        the parentheses is the subdomain used by 8684.cn URLs.

        Args:
            html: page HTML string, or None if the fetch failed.

        Returns:
            A list of lowercase pinyin strings; empty when nothing matched
            (the original implicitly returned None in that case, which made
            callers crash on iteration).
        """
        result_list = []
        if html is None:
            # Upstream fetch failed; an empty list keeps callers iterable.
            return result_list
        etree_html = etree.HTML(html)
        channel_result = etree_html.xpath('//div[@class="htmledit_views"]')
        if not channel_result:
            return result_list
        # NOTE(review): '//p' is an absolute xpath, so a single pass over the
        # first matched div covers the whole document. The [43:] slice skips
        # leading non-city paragraphs — specific to this blog page's layout.
        names = str(channel_result[0].xpath('//p/text()')[43:])
        pattern = re.compile(r'\((.*?)\)')
        # The original shadowed its own iterable ('for city_name in
        # city_name') and returned from inside the outer loop; fixed here.
        for city in re.findall(pattern, names):
            result_list.append(city.lower())
        return result_list
    if __name__ == '__main__':
        # Fetch the city listing page, parse out the pinyin names, show them.
        page_html = get_city_page()
        cities = parse_city_html(page_html)
        print(cities)
    
    
    
    

    bus.py(主程序)

    1.运行主程序从city_name.py获得城市名(列表), 因为爬取的网页地址规律是'https://guangzhou.8684.cn/list1'只需要替换掉城市的名字比如guangzhou 变成chengdu 就是成都的公交车线路
    import requests
    from lxml import etree
    import re
    import json
    from day09.city_name import *
    
    # Module-level accumulators shared by the parse functions below; main()
    # resets both at the end of each city's iteration.
    detail_href =[]  # per listing page: a list of relative detail paths ('/x_????????')
    bus_info = []  # one dict of parsed route details per bus line
    def get_page(url):
        """GET *url* and return its body decoded as UTF-8.

        Args:
            url: absolute URL of an 8684.cn listing or detail page.

        Returns:
            The decoded HTML on HTTP 200, otherwise None.
        """
        headers = {
            # Browser-like User-Agent keeps the site from blocking the crawler.
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36"
        }
        html = requests.get(url=url, headers=headers)
        if html.status_code == 200:
            return html.content.decode('utf-8')
        # Explicit failure value instead of an implicit None fall-through.
        return None
    
    def parse_html(html):
        """Collect bus-route detail-page paths from one listing page.

        Appends to the module-level ``detail_href`` list one list of relative
        paths of the form '/x_????????' per matched listing container.
        """
        etree_html = etree.HTML(html)
        channel_result = etree_html.xpath('//div[@class="stie_list"]')
        # Raw string: '\w' in a plain string literal is an invalid escape
        # (DeprecationWarning on modern Pythons). Compiled once, outside the
        # loop, instead of per iteration.
        pattern = re.compile(r'/x_\w{8}')
        for result in channel_result:
            # NOTE(review): '//a/@href' is an absolute xpath, so each
            # iteration scans the whole document; kept as-is since the site
            # appears to have a single 'stie_list' block per page — confirm.
            href = result.xpath('//a/@href')
            href = re.findall(pattern, str(href))
            detail_href.append(href)
    
    def parse_detail_html(html):
        """Parse one bus-route detail page and append a dict of its fields
        to the module-level ``bus_info`` list.

        Fields: route title, running hours, fare info, last-update time,
        operating company, total stop count, and the ordered stop names.
        All xpaths here are absolute ('//...'), so they scan the whole page
        regardless of the outer loop variable — behavior relies on there
        being a single 'bus_line_content' block per page (TODO confirm).
        """
        etree_html = etree.HTML(html)
        channel_result = etree_html.xpath('//div[@class="bus_line_content"]')
        for result in channel_result:
            bus_info_dict ={}
            # [:-5] strips a fixed-length suffix from the <h1> title text —
            # presumably a trailing label on the page; verify against the site.
            bus_title = result.xpath('//div[@class="bus_i_t1"]/h1/text()')[0][:-5]
            # The 'bus_i_t4' paragraphs hold, in page order: running hours,
            # fare info, company label, ... , last-update time.
            run_time = result.xpath('//p[@class="bus_i_t4"]/text()')[0]
            info = result.xpath('//p[@class="bus_i_t4"]/text()')[1]
            update_time =result.xpath('//p[@class="bus_i_t4"]/text()')[-1]
            company = result.xpath('//p[@class="bus_i_t4"]/text()')[2]
            # Company name lives in a nested <a>; concatenate label + name.
            company_name =result.xpath('//p[@class="bus_i_t4"]/a/text()')[0]
            company_info = company + company_name
            bus_info_dict['bus_title'] = bus_title
            bus_info_dict['run_time'] = run_time
            bus_info_dict['info'] = info
            bus_info_dict['update_time'] = update_time
            bus_info_dict['company_info'] = company_info
    
    
    
            # Total number of stops: characters [2:4] of the 'bus_line_no'
            # text are assumed to be the two-digit count — fragile; would
            # break on counts outside 10-99 (TODO confirm page format).
            total = result.xpath('//span[@class="bus_line_no"]/text()')[0]
            total = str(total)[2:4]
            total_num =int(total)
            bus_info_dict['total_num'] = total_num
            # Use the stop count to take only the outbound direction's stop
            # names (the page lists both directions consecutively).
            line_name = result.xpath('//div[@class="bus_site_layer"]//a/text()')[:total_num]
            bus_info_dict['line_name'] = line_name
            bus_info.append(bus_info_dict)
    
    
    
    def write_to_json(file, filename):
        """Append *file* serialized as JSON, plus ',\\n', to '<filename>.json'.

        Args:
            file: any JSON-serializable object (here, one route dict).
            filename: output file stem; '.json' is appended.

        The trailing comma/newline layout matches the article's later
        ``mongoimport --jsonArray`` import step.
        """
        result_json_str = json.dumps(file, ensure_ascii=False)
        with open(str(filename) + '.json', 'a', encoding='utf-8') as f:
            # Write the whole string at once — the original shadowed
            # result_json_str with a loop over its own characters and wrote
            # the file one character at a time.
            f.write(result_json_str)
            f.write(',')
            f.write('\n')
    
    
    
    def main():
        """Crawl every city's bus listing pages, then each route's detail
        page, and append the parsed route dicts to bus_info.json.

        Relies on the module-level ``detail_href`` / ``bus_info``
        accumulators, which are reset after each city.
        """
        global detail_href
        global bus_info
        html = get_city_page()
        city_lists = parse_city_html(html)
        # The total is invariant; print it once, not once per city.
        print('一共有{0}个城市'.format(len(city_lists)))
        # enumerate() replaces the original O(n) list.index() lookup, which
        # was also wrong for duplicate city names.
        for index, city_list in enumerate(city_lists):
            print('正在爬取第{0}个'.format(index))

            # Listing pages are paginated as /list1 .. /list9.
            for num in range(1, 10):
                print(num)
                url = 'https://' + str(city_list) + '.8684.cn/list' + str(num)
                try:
                    print(url)
                    html = get_page(url)
                    parse_html(html)
                except Exception:
                    # Missing page numbers / fetch failures are expected
                    # for small cities; skip and keep crawling.
                    continue

            for detail in detail_href:
                for url in detail:
                    try:
                        url = 'https://' + str(city_list) + '.8684.cn' + url
                        print(url)
                        detail_html = get_page(url)
                        parse_detail_html(detail_html)
                    except Exception:
                        continue
            print(bus_info)
            try:
                # Do NOT shadow the global list with the loop variable — the
                # original 'for bus_info in bus_info' clobbered the global.
                for info in bus_info:
                    write_to_json(info, 'bus_info')
            except Exception:
                # Best-effort persistence: a bad record must not abort the
                # whole crawl.
                pass

            # Reset the shared accumulators before the next city.
            detail_href = []
            bus_info = []
    
    if __name__ == '__main__':
        # Script entry point: crawl all cities and dump route data to JSON.
        main()
    
    
    
    

    2.运行完成后,json文件格式,大致有公交路线名,发车停车时间,票价信息,所属公交公司,运行的路线.大概爬取了有20000多条数据

    image.png

    3.把json文件保存进入MongoDB数据库
    (1).启动mongodb服务器
    mongod --dbpath 配置的保存数据文件的路径

    image.png

    (2).进入到MongoDB的bin路径
    mongoimport --db 数据库名 --collection 集合名 --jsonArray json文件路径

    image.png

    保存成功


    image.png

    相关文章

      网友评论

          本文标题:爬取全国各大城市的公交车信息

          本文链接:https://www.haomeiwen.com/subject/qshujqtx.html