美文网首页Python四期爬虫作业
【Python爬虫】-第四期课后练习14

【Python爬虫】-第四期课后练习14

作者: 困困harper | 来源:发表于2017-08-31 10:47 被阅读8次
    # Imports
    import requests
    from lxml import etree
    from urllib.parse import urljoin

    # Entry page of the movie site to crawl.
    url = 'http://www.ygdy8.com/'
    # Browser-like request headers so the site serves the normal HTML page.
    headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Cookie': 'UM_distinctid=15c5ec4f20e377-0798b30518d6b4-5393662-c0000-15c5ec4f20f28b; CNZZDATA5783118=cnzz_eid%3D1150691004-1496237600-%26ntime%3D1496237600; 37cs_user=37cs10138604998; cscpvrich4016_fidx=1; 37cs_show=69',
    'Host': 'www.ygdy8.com',
    'If-Modified-Since': 'Sun, 27 Aug 2017 15:18:27 GMT',
    'If-None-Match': "802356bb471fd31:530",
    'Referer': 'https://www.baidu.com/link?url=cnL9usny1BIZEe-NZUkUbeUE4m9CM23KIysNUsVvzlK&wd=&eqid=c50f090f0001d9880000000259a2e4b0',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'
    }
    # Fetch the front page; a timeout keeps the script from hanging forever
    # on a dead connection.
    req = requests.get(url, headers=headers, timeout=10)
    # Report the HTTP status so a failed fetch is visible immediately.
    print(req.status_code)
    # The site serves GBK-family Chinese text; decode it as gb2312.
    req.encoding = 'gb2312'
    selector = etree.HTML(req.text)
    # Menu links: div#menu > div.contain > ul > first 13 li > a
    infos = selector.xpath('//div[@id="menu"]/div[@class="contain"]/ul/li[position()<14]/a')

    # Menu entries that are not paginated category listings; skip them.
    # (urljoin below yields normalized single-slash URLs, so these are
    # spelled without the double slash the old string concatenation made.)
    skip_urls = {
        'http://www.ygdy8.com/html/gndy/index.html',
        'http://www.ygdy8.com/html/gndy/jddy/20160320/50541.html',
    }

    for info in infos:
        # urljoin handles hrefs with or without a leading slash and avoids
        # the "http://www.ygdy8.com//html/..." double-slash URLs that
        # naive `url + href` concatenation produced.
        menu_url = urljoin(url, info.xpath('@href')[0])
        menu_name = info.xpath('text()')[0]
        print(menu_name, menu_url)
        if menu_url in skip_urls:
            continue

        res2 = requests.get(menu_url, timeout=10)
        res2.encoding = 'gb2312'
        selector2 = etree.HTML(res2.text)
        # Pagination bar text looks like "共165页/...": strip the
        # surrounding characters to get the page count as a number.
        page_total = selector2.xpath('//div[@class="x"]/td/text()')[0].strip().split('/')[0].replace('共', '').replace(
        '页', '')
        # First pager link, e.g. "list_23_2.html" -> keep the "list_23_"
        # prefix so any page number can be appended.
        right_url = selector2.xpath('//div[@class="x"]//a[1]/@href')[0].replace('2.html', '')

        # Build one URL per listing page of this menu (e.g. 165 pages for
        # "最新电影", 93 for "国内影片") and collect them in order.
        left_url = menu_url.replace('index.html', '')
        new_menu_url = left_url + right_url
        url_list = [new_menu_url + str(page) + '.html'
                    for page in range(1, int(page_total) + 1)]
        print(len(url_list))
        # NOTE: renamed from `list`, which shadowed the builtin.
        for page_url in url_list:
            print(page_url)

    相关文章

      网友评论

        本文标题:【Python爬虫】-第四期课后练习14

        本文链接:https://www.haomeiwen.com/subject/hitqjxtx.html