美文网首页
Python实战计划学习笔记(3)服务器网页解析

Python实战计划学习笔记(3)服务器网页解析

作者: 如恒河沙 | 来源:发表于2016-08-25 21:51 被阅读0次

    基本步骤

    1. 服务器与本地的交换机制
    • 请求:get,post,head,put,options,connect,trace,delete
      GET /page_one.html HTTP/1.1 Host:www.sample.com
    • 回应:status_code, 网页内容
    2. 解析真实网页获取数据的办法

    练习1代码

    # Scrape title, thumbnail and category tags for each attraction on
    # TripAdvisor's New York attractions listing page.
    from bs4 import BeautifulSoup
    import requests
    url = 'https://cn.tripadvisor.com/Attractions-g60763-Activities-New_York_City_New_York.html'
    web_data = requests.get(url)
    soup = BeautifulSoup(web_data.text, 'lxml')
    titles = soup.select('div.property_title > a[target="_blank"]')
    # Fixed selector: CSS attribute filters are written img[width="160"]; the
    # original 'img["width=160"]' quotes the whole name=value pair, which is
    # invalid CSS and matches nothing, so no rows were ever printed.
    imgs = soup.select('img[width="160"]')
    cates = soup.select('div.p13n_reasoning_v2')

    for title, img, cate in zip(titles, imgs, cates):
        data = {
            'title': title.get_text(),
            'img': img.get('src'),
            'cate': list(cate.stripped_strings),
        }
        print(data)
    

    练习2代码

    from bs4 import BeautifulSoup
    import requests
    headers = {
        # Desktop Chrome UA so TripAdvisor serves the normal desktop page.
        'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
        # The logged-in session cookie was wrapped onto a second source line,
        # which splits the string literal and is a SyntaxError; rejoined here
        # with implicit string-literal concatenation (value unchanged).
        'Cookie':'ServerPool=B; TASSK=enc%3AJlqgR8kGqJx%2BQ5G1prQO4VrB7mIO388KW48MPK8YGrx3oNqxz4Qrivyzv1qAOmJq4wzYHg6Nnt8%3D; TAUnique=%1%enc%3A4gvUQzUyygu%2FGv6dv3X4cs0eZt5Hvxu2QJt2GYFC5BMrYJESy8uhPw%3D%3D; TART=%1%enc%3Avxr%2Bnb91%2BHKnfVAUrIqY9Rvc7dTzOqGqhbyXR%2Btwq8%2Ffn34Q98xKWNHL2kWDcnWEugT2h1gxo1I%3D; __gads=ID=53237a462a95aa19:T=1472095261:S=ALNI_MaslV_D02aAx6nI-TrYwNCRLdRJJA; CommercePopunder=SuppressAll*1472095465187; SecureLogin2=3.4%3AAKSSWi38zqX%2BT3fkhPOSQgr0%2BHVz2LPSSZT5FanVs%2BO9tX1QLwadr4feDj8yNpzz3uRopyCCn1UeieXMis6p3M12s0WhvKsBKWKxbi5iisSFJ4%2FB1hTiSJBywPkN2evAQV1fqvwLYjrYEIANCVhebx%2BoNZxBfn3Q1sQLgcxhOChP3dqYNr3RlT7JY7dmyyahZd3PL%2FygRCLK4VhJQpo0Rn0%3D; TAAuth3=3%3Ad211cc1d8ec2dbe465e6227d613e4b4f%3AANmawThSa8sZUNHt%2FPQhS4VfIohzg%2BI%2FXTEedryGnHGUxlf24WWO%2B4hIm9t%2FBy8QhhzEPyUBHPvIgrXBGUiUWk2zCazoznlOv%2BSbuPz7IjzR0J%2F1wA0Ij%2FB8csfM2j%2BWFYJOY6hMqy1gAlYgQxq8upgWUnnppTDLQ08pl4ldgCs%2FeY72l8FFJVqJjWbBbkcu3Mh8DKobkbyjHkVwYomJyNx2MGRcEwRclHlmIE%2B5M0fk; TATravelInfo=V2*AC.TYO*A.2*MG.-1*HP.2*FL.3*RVL.60763_237l105127_238*RS.1; TAReturnTo=%1%%2FAttraction_Review-g60763-d105127-Reviews-Central_Park-New_York_City_New_York.html; roybatty=AHPxj2pWAnbIF7xdTtIzIDHe3hrkF9avYgmphcOELIFfjx9L%2Fx0w1fK35vHYbPylGbzpQxRasDfzRQ%2F12Wrsp7F7rLNDaXOhee5ChmK5XW2%2FXL%2BlyUlaCvssO%2Fa3zWvSiwIUilKmrD%2FHtv2e%2BAlh1SJ7V14c5PlxUuJ6bCV6TiQj%2C1; NPID=; TASession=%1%V2ID.E9C70A2D8C9F87BA04BD09846AECE4B9*SQ.48*PR.427%7C*LS.Saves*GR.52*TCPAR.51*TBR.82*EXEX.78*ABTR.51*PPRP.62*PHTB.28*FS.25*CPU.71*HS.popularity*ES.popularity*AS.popularity*DS.5*SAS.popularity*FPS.oldFirst*TS.87561D8E010E069629F3426C834EF8AF*LF.zhCN*FA.1*DF.0*LP.%2F*FBH.2*MS.-1*RMS.-1*FLO.60763*TRA.true*LD.105127; '
                 'CM=%1%HanaPersist%2C%2C-1%7Cpu_vr2%2C%2C-1%7Ct4b-pc%2C%2C-1%7CHanaSession%2C%2C-1%7CRCPers%2C%2C-1%7CWShadeSeen%2C%2C-1%7Cpu_vr1%2C%2C-1%7CFtrPers%2C%2C-1%7CHomeASess%2C1%2C-1%7CAWPUPers%2C%2C-1%7Ccatchsess%2C5%2C-1%7Cbrandsess%2C%2C-1%7Csesscoestorem%2C%2C-1%7CCCSess%2C%2C-1%7CViatorMCPers%2C%2C-1%7Csesssticker%2C%2C-1%7C%24%2C%2C-1%7Ct4b-sc%2C%2C-1%7CMC_IB_UPSELL_IB_LOGOS2%2C%2C-1%7Cb2bmcpers%2C%2C-1%7CMC_IB_UPSELL_IB_LOGOS%2C%2C-1%7Csess_rev%2C2%2C-1%7Csessamex%2C%2C-1%7Cperscoestorem%2C%2C-1%7CSaveFtrPers%2C%2C-1%7Cpers_rev%2C%2C-1%7CMetaFtrSess%2C%2C-1%7CRBAPers%2C%2C-1%7CWAR_RESTAURANT_FOOTER_PERSISTANT%2C%2C-1%7CFtrSess%2C%2C-1%7CHomeAPers%2C%2C-1%7C+r_lf_1%2C%2C-1%7CRCSess%2C%2C-1%7C+r_lf_2%2C%2C-1%7Ccatchpers%2C3%2C1472700068%7CAWPUSess%2C%2C-1%7Cvr_npu2%2C%2C-1%7Csh%2C%2C-1%7CLastPopunderId%2C104-771-null%2C-1%7Cpssamex%2C%2C-1%7C2016sticksess%2C%2C-1%7Cvr_npu1%2C%2C-1%7CCCPers%2C%2C-1%7CWAR_RESTAURANT_FOOTER_SESSION%2C%2C-1%7Cbrandpers%2C%2C-1%7Cb2bmcsess%2C%2C-1%7C2016stickpers%2C%2C-1%7CViatorMCSess%2C%2C-1%7CWarPopunder_Session%2C%2C-1%7CWarPopunder_Persist%2C%2C-1%7CTakeOver%2C%2C-1%7Cr_ta_2%2C%2C-1%7Cr_ta_1%2C%2C-1%7CSaveFtrSess%2C%2C-1%7CRBASess%2C%2C-1%7Cperssticker%2C%2C-1%7CMetaFtrPers%2C%2C-1%7C; TAUD=LA-1472095257697-1*LG-22093740-2.1.F.*LD-22093742-.....'
    }
    # Personal "Saves" page; requires the logged-in Cookie above.
    url_saves = 'https://cn.tripadvisor.com/Saves#52709824'
    def get_favs(url_saves, data=None):
        """Fetch the logged-in Saves page and print one dict per saved place.

        Each dict carries 'title', 'img' and 'meta' keys; `data` is unused
        and kept only for signature compatibility.
        """
        resp = requests.get(url_saves, headers=headers)
        dom = BeautifulSoup(resp.text, 'lxml')
        link_tags = dom.select('a.location-name')
        photo_tags = dom.select('img.photo_image')
        address_tags = dom.select('span.format_address')
        for link_tag, photo_tag, address_tag in zip(link_tags, photo_tags, address_tags):
            record = {
                'title': link_tag.get_text(),
                'img': photo_tag.get('src'),
                'meta': list(address_tag.stripped_strings),
            }
            print(record)
    

    练习3代码

    两个函数

    from bs4 import BeautifulSoup
    import requests
    
    headers = {
        # Desktop Chrome UA so TripAdvisor serves the normal desktop page.
        'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
        # The logged-in session cookie was wrapped onto a second source line,
        # which splits the string literal and is a SyntaxError; rejoined here
        # with implicit string-literal concatenation (value unchanged).
        'Cookie':'ServerPool=B; TASSK=enc%3AJlqgR8kGqJx%2BQ5G1prQO4VrB7mIO388KW48MPK8YGrx3oNqxz4Qrivyzv1qAOmJq4wzYHg6Nnt8%3D; TAUnique=%1%enc%3A4gvUQzUyygu%2FGv6dv3X4cs0eZt5Hvxu2QJt2GYFC5BMrYJESy8uhPw%3D%3D; TART=%1%enc%3Avxr%2Bnb91%2BHKnfVAUrIqY9Rvc7dTzOqGqhbyXR%2Btwq8%2Ffn34Q98xKWNHL2kWDcnWEugT2h1gxo1I%3D; __gads=ID=53237a462a95aa19:T=1472095261:S=ALNI_MaslV_D02aAx6nI-TrYwNCRLdRJJA; CommercePopunder=SuppressAll*1472095465187; SecureLogin2=3.4%3AAKSSWi38zqX%2BT3fkhPOSQgr0%2BHVz2LPSSZT5FanVs%2BO9tX1QLwadr4feDj8yNpzz3uRopyCCn1UeieXMis6p3M12s0WhvKsBKWKxbi5iisSFJ4%2FB1hTiSJBywPkN2evAQV1fqvwLYjrYEIANCVhebx%2BoNZxBfn3Q1sQLgcxhOChP3dqYNr3RlT7JY7dmyyahZd3PL%2FygRCLK4VhJQpo0Rn0%3D; TAAuth3=3%3Ad211cc1d8ec2dbe465e6227d613e4b4f%3AANmawThSa8sZUNHt%2FPQhS4VfIohzg%2BI%2FXTEedryGnHGUxlf24WWO%2B4hIm9t%2FBy8QhhzEPyUBHPvIgrXBGUiUWk2zCazoznlOv%2BSbuPz7IjzR0J%2F1wA0Ij%2FB8csfM2j%2BWFYJOY6hMqy1gAlYgQxq8upgWUnnppTDLQ08pl4ldgCs%2FeY72l8FFJVqJjWbBbkcu3Mh8DKobkbyjHkVwYomJyNx2MGRcEwRclHlmIE%2B5M0fk; TATravelInfo=V2*AC.TYO*A.2*MG.-1*HP.2*FL.3*RVL.60763_237l105127_238*RS.1; TAReturnTo=%1%%2FAttraction_Review-g60763-d105127-Reviews-Central_Park-New_York_City_New_York.html; roybatty=AHPxj2pWAnbIF7xdTtIzIDHe3hrkF9avYgmphcOELIFfjx9L%2Fx0w1fK35vHYbPylGbzpQxRasDfzRQ%2F12Wrsp7F7rLNDaXOhee5ChmK5XW2%2FXL%2BlyUlaCvssO%2Fa3zWvSiwIUilKmrD%2FHtv2e%2BAlh1SJ7V14c5PlxUuJ6bCV6TiQj%2C1; NPID=; TASession=%1%V2ID.E9C70A2D8C9F87BA04BD09846AECE4B9*SQ.48*PR.427%7C*LS.Saves*GR.52*TCPAR.51*TBR.82*EXEX.78*ABTR.51*PPRP.62*PHTB.28*FS.25*CPU.71*HS.popularity*ES.popularity*AS.popularity*DS.5*SAS.popularity*FPS.oldFirst*TS.87561D8E010E069629F3426C834EF8AF*LF.zhCN*FA.1*DF.0*LP.%2F*FBH.2*MS.-1*RMS.-1*FLO.60763*TRA.true*LD.105127; '
                 'CM=%1%HanaPersist%2C%2C-1%7Cpu_vr2%2C%2C-1%7Ct4b-pc%2C%2C-1%7CHanaSession%2C%2C-1%7CRCPers%2C%2C-1%7CWShadeSeen%2C%2C-1%7Cpu_vr1%2C%2C-1%7CFtrPers%2C%2C-1%7CHomeASess%2C1%2C-1%7CAWPUPers%2C%2C-1%7Ccatchsess%2C5%2C-1%7Cbrandsess%2C%2C-1%7Csesscoestorem%2C%2C-1%7CCCSess%2C%2C-1%7CViatorMCPers%2C%2C-1%7Csesssticker%2C%2C-1%7C%24%2C%2C-1%7Ct4b-sc%2C%2C-1%7CMC_IB_UPSELL_IB_LOGOS2%2C%2C-1%7Cb2bmcpers%2C%2C-1%7CMC_IB_UPSELL_IB_LOGOS%2C%2C-1%7Csess_rev%2C2%2C-1%7Csessamex%2C%2C-1%7Cperscoestorem%2C%2C-1%7CSaveFtrPers%2C%2C-1%7Cpers_rev%2C%2C-1%7CMetaFtrSess%2C%2C-1%7CRBAPers%2C%2C-1%7CWAR_RESTAURANT_FOOTER_PERSISTANT%2C%2C-1%7CFtrSess%2C%2C-1%7CHomeAPers%2C%2C-1%7C+r_lf_1%2C%2C-1%7CRCSess%2C%2C-1%7C+r_lf_2%2C%2C-1%7Ccatchpers%2C3%2C1472700068%7CAWPUSess%2C%2C-1%7Cvr_npu2%2C%2C-1%7Csh%2C%2C-1%7CLastPopunderId%2C104-771-null%2C-1%7Cpssamex%2C%2C-1%7C2016sticksess%2C%2C-1%7Cvr_npu1%2C%2C-1%7CCCPers%2C%2C-1%7CWAR_RESTAURANT_FOOTER_SESSION%2C%2C-1%7Cbrandpers%2C%2C-1%7Cb2bmcsess%2C%2C-1%7C2016stickpers%2C%2C-1%7CViatorMCSess%2C%2C-1%7CWarPopunder_Session%2C%2C-1%7CWarPopunder_Persist%2C%2C-1%7CTakeOver%2C%2C-1%7Cr_ta_2%2C%2C-1%7Cr_ta_1%2C%2C-1%7CSaveFtrSess%2C%2C-1%7CRBASess%2C%2C-1%7Cperssticker%2C%2C-1%7CMetaFtrPers%2C%2C-1%7C; TAUD=LA-1472095257697-1*LG-22093740-2.1.F.*LD-22093742-.....'
    }
    # Personal "Saves" page (needs the Cookie) and the public listing page.
    url_saves = 'https://cn.tripadvisor.com/Saves#52709824'
    url='https://cn.tripadvisor.com/Attractions-g60763-Activities-New_York_City_New_York.html'
    
    def get_attractions(url, data=None):
        """Scrape one attractions listing page and print a dict per attraction.

        url: listing-page URL to fetch.
        data: unused; kept for signature compatibility with callers.
        """
        web_data = requests.get(url)
        soup = BeautifulSoup(web_data.text, 'lxml')
        titles = soup.select('div.property_title > a[target="_blank"]')
        # Fixed selector: attribute filters are written img[width="160"]; the
        # original 'img["width=160"]' is invalid CSS and matches nothing, so
        # zip() produced no rows at all.
        imgs = soup.select('img[width="160"]')
        cates = soup.select('div.p13n_reasoning_v2')
        for title, img, cate in zip(titles, imgs, cates):
            data = {
                'title': title.get_text(),
                'img': img.get('src'),
                'cate': list(cate.stripped_strings),
            }
            print(data)
    
    def get_favs(url_saves, data=None):
        """Fetch the logged-in Saves page and print title/img/meta per entry.

        `data` is unused; it is retained so the signature matches callers.
        """
        response = requests.get(url_saves, headers=headers)
        page = BeautifulSoup(response.text, 'lxml')
        names = page.select('a.location-name')
        photos = page.select('img.photo_image')
        addresses = page.select('span.format_address')
        for name, photo, address in zip(names, photos, addresses):
            entry = {
                'title': name.get_text(),
                'img': photo.get('src'),
                'meta': list(address.stripped_strings),
            }
            print(entry)
    
    # Run both scrapers: the public attractions listing, then the saved places.
    get_attractions(url)
    get_favs(url_saves)
    

    练习4代码

    延时爬取所有条目

    from bs4 import BeautifulSoup
    import requests
    import time
    
    # First listing page, plus the paginated pages at offsets oa30..oa900
    # (30 items per page -> 30 follow-up pages). str.format converts the int
    # itself, so the redundant str(i) wrapper is dropped.
    url = 'https://cn.tripadvisor.com/Attractions-g60763-Activities-New_York_City_New_York.html'
    urls = ['https://cn.tripadvisor.com/Attractions-g60763-Activities-oa{}-New_York_City_New_York.html#ATTRACTION_LIST'.format(i)
            for i in range(30, 930, 30)]
    
    def get_attractions(url, data=None):
        """Scrape one attractions listing page and print a dict per attraction.

        url: listing-page URL to fetch.
        data: unused; kept for signature compatibility with callers.
        """
        web_data = requests.get(url)
        # Pause between requests so the site does not blacklist the crawler.
        time.sleep(4)
        soup = BeautifulSoup(web_data.text, 'lxml')
        titles = soup.select('div.property_title > a[target="_blank"]')
        # Fixed selector: attribute filters are written img[width="160"]; the
        # original 'img["width=160"]' is invalid CSS and matches nothing, so
        # zip() produced no rows at all.
        imgs = soup.select('img[width="160"]')
        cates = soup.select('div.p13n_reasoning_v2')
        for title, img, cate in zip(titles, imgs, cates):
            data = {
                'title': title.get_text(),
                'img': img.get('src'),
                'cate': list(cate.stripped_strings),
            }
            print(data)
    
    # Crawl every paginated listing page; get_attractions sleeps 4s per page.
    # NOTE(review): the first page (`url`, no oa offset) is not in `urls`, so
    # this loop skips it — presumably intentional, but worth confirming.
    for single_url in urls:
        get_attractions(single_url)
    

    练习5代码

    伪装移动端爬取真实图片地址

    from bs4 import BeautifulSoup
    import requests
    
    # Spoof an iPhone User-Agent so TripAdvisor serves its mobile page, where
    # the real image URLs are exposed in the data-thumburl attribute.
    headers = {
        'User-Agent':'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1'
    }
    url = 'https://cn.tripadvisor.com/Attractions-g60763-Activities-New_York_City_New_York.html'

    info = []
    response = requests.get(url, headers=headers)
    page = BeautifulSoup(response.text, 'lxml')
    locations = page.select('div.location')
    thumbs = page.select('div.thumb.thumbLLR.soThumb > div.missing.lazyMiss')
    for location, thumb in zip(locations, thumbs):
        record = {
            'title': location.get_text()[1:-1],  # trim the surrounding newline chars
            'img': thumb.get('data-thumburl'),   # real image URL on the mobile page
        }
        print(record)
        info.append(record)
    print(info)
    print('共有', len(info), '条记录')
    

    运行结果


    1.jpg

    相关文章

      网友评论

          本文标题:Python实战计划学习笔记(3)服务器网页解析

          本文链接:https://www.haomeiwen.com/subject/rwzosttx.html