An old hand shows you how to scrape p*hub with a crawler

Author: 曾柏超 | Published 2018-01-24 14:38
    
    
    # Python 2 script: download a video from a p*hub view_video page.
    import urllib2
    import datetime
    import re
    import os.path
    
    to_find_string="https://bd.phncdn.com/videos/"   # CDN prefix of the video files (not referenced below)
    big_path=""                                       # directory prefix for saved files
    
    def save_file(this_download_url,path):
        # Download this_download_url to path; skip files that already exist on disk.
        print"- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "
        time1=datetime.datetime.now()
        print str(time1)[:-7],
        if (os.path.isfile(path)):
            file_size=os.path.getsize(path)/1024/1024
            print "File "+path+" ("+str(file_size)+" MB) already exists."
            return
        else:   
            print "Downloading "+path+"..."
            f = urllib2.urlopen(this_download_url)
            data = f.read()               # note: buffers the whole video in memory before writing
            with open(path, "wb") as code:     
                code.write(data)  
            time2=datetime.datetime.now()
            print str(time2)[:-7],
            print path+" Done."
            use_time=time2-time1
            print "Time used: "+str(use_time)[:-7]+", ",
            file_size=os.path.getsize(path)/1024/1024
            print "File size: "+str(file_size)+" MB, Speed: "+str(file_size/(use_time.total_seconds()))[:4]+"MB/s"
    
    
    def download_the_av(url):
        # Fetch the video page, pull the title and the embedded "videoUrl", then download the file.
        req = urllib2.Request(url)
        content = urllib2.urlopen(req).read()
        while len(content)<100:                # retry if the page came back (nearly) empty
            print "try again..."
            content = urllib2.urlopen(req).read()
        print "All length: "+str(len(content))
    
        # Build a safe file name from the page title.
        title_begin=content.find("<title>")
        title_end=content.find("</title>")
        title=content[title_begin+7:title_end-14]      # drop the leading "<title>" and the trailing site-name suffix
        title=title.replace('/','_')
        title=filter(lambda x:x in "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ _-",title)
    
        # Find the highest available quality marker, then search near it for the video URL.
        quality=['720','480','240']
        find_position=-1
        for i in quality:
            find_position=content.find("\"quality\":\""+i+"\"")
            if find_position>0:
                print "Quality: "+i+"P"
                break
        if find_position<0:
            print "No quality marker found."
            return
        to_find=content[find_position:find_position+4000]
    
        pattern=re.compile(r"\"videoUrl\":\"[^\"]*\"")
        match = pattern.search(to_find)
        if not match:
            print "No video URL found."
            return
        the_url=match.group()
        the_url=the_url[12:-1]                 # strip the leading "videoUrl":" and the trailing quote
        the_url=the_url.replace("\\/","/")     # unescape the JSON-escaped slashes
        save_file(the_url,big_path+title+".mp4")
    
    
    
    # List the video page URLs to download here.
    urls=["https://www.p***hub.com/view_video.php?viewkey=ph592ef8731630a",]
    print len(urls),
    print "videos to download..."
    count=0
    
    for url in urls:
        print count
        count+=1
        download_the_av(url)
    print "All done"
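
For newer environments, here is a minimal Python 3 sketch of the same flow: fetch the page, pull out the JSON-escaped "videoUrl" field with a regex, then stream the file to disk. It assumes the page still embeds a "videoUrl":"..." field as the script above expects; the function name, User-Agent string, and chunk size are illustrative, not part of the original code.

    import re
    import requests
    
    def download_video(page_url, out_path):
        # Some sites reject the default library User-Agent, so send a browser-like one.
        headers = {"User-Agent": "Mozilla/5.0"}
        page = requests.get(page_url, headers=headers).text
        match = re.search(r'"videoUrl":"([^"]*)"', page)
        if not match:
            print("No video URL found.")
            return
        video_url = match.group(1).replace("\\/", "/")   # unescape the JSON-escaped slashes
        with requests.get(video_url, headers=headers, stream=True) as r:
            r.raise_for_status()
            with open(out_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=1024 * 1024):   # write in 1 MB chunks
                    f.write(chunk)
        print(out_path + " done.")

Called as download_video(url, "video.mp4") for each entry in urls, it would play the role of both download_the_av and save_file above.
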
    
    
    # Python 2 script: the same idea for another site, using requests for the file download.
    import urllib2
    import datetime
    import re
    import os.path
    import requests
    
    def save_file(this_download_url,path):
        # Download this_download_url to path with requests; skip files that already exist on disk.
        print"- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "
        time1=datetime.datetime.now()
        print str(time1)[:-7],
        if (os.path.isfile(path)):
            file_size=os.path.getsize(path)/1024/1024
            print "File "+path+" ("+str(file_size)+" MB) already exists."
            return
        else:
            print "Downloading "+path+"..."
        r = requests.get(this_download_url,stream=True)
        with open(path.encode('utf-8'), "wb") as code:
            # Write in chunks; r.content would buffer the whole video in memory despite stream=True.
            for chunk in r.iter_content(chunk_size=1024*1024):
                code.write(chunk)
            time2=datetime.datetime.now()
            print str(time2)[:-7],
            print path+" Done."
            use_time=time2-time1
            print "Time used: "+str(use_time)[:-7]+", ",
            file_size=os.path.getsize(path)/1024/1024
            print "File size: "+str(file_size)+" MB, Speed: "+str(file_size/(use_time.total_seconds()))[:4]+"MB/s"
    
    def download_url(website_url):
        # Fetch the page with a browser-like User-Agent, find the direct .mp4 link, and download it.
        ua_header={'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
        req = urllib2.Request(website_url,headers=ua_header)
        content = urllib2.urlopen(req).read()
        while len(content)<100:                # retry if the page came back (nearly) empty
            print "try again..."
            content = urllib2.urlopen(req).read()
        print "Web page all length: "+str(len(content))
    
        # The direct .mp4 links on these pages point at m4.26ts.com; grab the first one.
        pattern=re.compile(r"http://m4\.26ts\.com/[.0-9a-zA-Z-]*\.mp4")
        match = pattern.search(content)
    
        if match:
            the_url=match.group()
            save_file(the_url,the_url[19:])    # file name: everything after "http://m4.26ts.com/"
        else:
            print "No video found."
    
    # List the page URLs to download here.
    urls=["http://www.46ek.com/view/22133.html",]
    count=0
    print len(urls),
    print "videos to download..."
    for i in urls:
        count+=1
        print count
        download_url(i)
    print "All done"
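
One caveat with both scripts: the while len(content)<100 retry loops never give up if the site keeps returning an empty page. A bounded retry with a short delay is sketched below; the helper name and limits are illustrative (not from the original), it uses requests (already imported by the second script), and it runs under both Python 2 and 3.

    import time
    import requests
    
    def fetch_page(url, headers=None, tries=5, delay=2, min_length=100):
        # Retry a few times instead of looping forever when the page looks empty.
        for attempt in range(1, tries + 1):
            text = requests.get(url, headers=headers).text
            if len(text) >= min_length:
                return text
            print("Empty-looking page, retrying (%d/%d)..." % (attempt, tries))
            time.sleep(delay)
        raise IOError("Page still looks empty after %d attempts" % tries)
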
    

    Reproduced from: https://www.zhihu.com/question/20799742
