Scraping Meizitu

Author: 交易狗二哈 · Published 2017-03-24 20:43

    Scraping Meizitu (mzitu.com) with selenium + PhantomJS.

    import requests
    import time
    import os
    from selenium import webdriver
    
    class Spider(object):
    
        def __init__(self):
            self.driver = webdriver.PhantomJS()
    
        def Get_pages(self, maxpage):
            # number of listing pages to crawl
            for page in range(1, maxpage+1):
                url = "http://www.mzitu.com/page/" + str(page)
                self.Get_content_page(url)
    
        def Get_content_page(self, url):
            # collect the detail-page URL behind each cover image
            self.driver.get(url)
            pins = self.driver.find_elements_by_id('pins')
            anchors = pins[0].find_elements_by_tag_name('a')
            links = []
            for anchor in anchors:
                if anchor.get_attribute('href') not in links:  # each URL appears twice in the page source, so de-duplicate
                    links.append(anchor.get_attribute('href'))
            for link in links:
                self.Get_picture_page(link)
    
        def Get_picture_page(self, url):
            # find the number of images in this set and each image's page URL
            self.driver.get(url)
            title_site = self.driver.find_element_by_tag_name('h2')          # element that holds the set's title
            title = title_site.text                                          # the set's title
            os.makedirs(title, exist_ok=True)                                # create a folder named after the set (don't fail if it exists)
            pages_site = self.driver.find_element_by_class_name('pagenavi')  # pagination bar, used to read the max page number
            all_sites = pages_site.find_elements_by_tag_name('a')
            page = []
            for i in all_sites:
                page.append(i.text)
            picture_max_number = int(page[-2]) + 1                           # the max page number is the second-to-last link; +1 so range() includes it
            for i in range(1, picture_max_number):
                picture_link = url + '/' + str(i)
                self.Download_picture(picture_link, title, i)
            print('Done one')
    
        def Download_picture(self, link, filename, picture_number):
            # download a single image
    
            headers = {                                                         # request headers for the image download
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Accept-Encoding': 'gzip, deflate, sdch',
                'Accept-Language': 'zh-CN,zh;q=0.8',
                'Cache-Control': 'max-age=0',
                'Connection': 'keep-alive',
                'Host': 'i.meizitu.net',
                # the conditional headers copied from the browser (If-Modified-Since,
                # If-None-Match) are dropped: they can trigger a 304 response with an empty body
                'Referer': link,                                                # the image host may reject requests that lack a Referer
                'Upgrade-Insecure-Requests': '1',
                'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
            }
            time.sleep(1)                                                       # throttle requests a little
            self.driver.get(link)
            path = os.path.join(os.getcwd(), filename, str(picture_number) + '.jpg')
            p_tag = self.driver.find_element_by_tag_name('p')
            img = p_tag.find_element_by_tag_name('img')
            picture_download_link = img.get_attribute('src')
            picture = requests.get(picture_download_link, headers=headers)
            if picture.status_code == 200:
                with open(path, 'wb') as f:
                    f.write(picture.content)
    
    if __name__ == '__main__':
        spider = Spider()
        spider.Get_pages(3)
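
    PhantomJS development has since been suspended, and recent Selenium releases remove support for it along with the find_element_by_* helper methods. Below is a minimal sketch of the same browser setup on headless Chrome, assuming chromedriver is installed and on your PATH; the selectors mirror the ones used above, rewritten in Selenium 4's By-locator style:

    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options
    from selenium.webdriver.common.by import By

    options = Options()
    options.add_argument('--headless')                     # run Chrome without opening a window
    driver = webdriver.Chrome(options=options)             # assumes chromedriver is on the PATH

    driver.get('http://www.mzitu.com/page/1')
    pins = driver.find_elements(By.ID, 'pins')             # Selenium 4 replacement for find_elements_by_id
    anchors = pins[0].find_elements(By.TAG_NAME, 'a')      # ... and for find_elements_by_tag_name
    print(len(anchors))
    driver.quit()

    The rest of the spider works unchanged once self.driver is built this way; only the find_element calls need the same By-style rewrite.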
    
    

    By the way: my nutrition can't keep up.
