Data Collection: Web Scraping in Practice (Advanced)

Author: CoderInsight | Published 2023-02-15 08:29

8. Scraping in Practice: Batch Saving and Downloading Large Files

When saving files, the earlier examples read the entire response into memory and only wrote it to a local file after the whole download had finished. For videos or other large files, buffering everything in memory this way can lead to memory exhaustion. For streaming media and other large files you can instead set the size of the data chunks transferred (chunk_size): rather than waiting for the whole file to land in memory before saving it, the content is written to the local file chunk by chunk.
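Before the full example below, here is a minimal sketch of just that pattern, with a placeholder URL, file name, and chunk size; the essential pieces are stream=True on the request and Response.iter_content() for writing the body in chunks.

import requests

# Placeholder URL; substitute the large file you actually want to download
file_url = "https://example.com/big-file.mp4"

# stream=True defers downloading the body; iter_content() then yields it in chunks
with requests.get(file_url, stream=True, timeout=30) as response:
    with open("big-file.mp4", "wb") as f:
        for chunk in response.iter_content(chunk_size=1024 * 64):  # 64 KB per chunk
            if chunk:  # skip keep-alive chunks
                f.write(chunk)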

# Import the requests module and BeautifulSoup from bs4
import requests
from bs4 import BeautifulSoup

# Add a request header so the site does not flag us as a crawler; for pages that require
# login, also add "'Cookie': '<your cookie>'" to the header dict
header = {
 "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
}
# URL of the page to scrape
url = 'http://www.weather.com.cn/weather/101120101.shtml'

# 1. Fetch the page
# url, request headers, and a request timeout
response = requests.get(url, headers=header, timeout=30)
# 2. After the page is fetched, set the response encoding to avoid garbled Chinese text
response.encoding = 'utf-8'

# 3. Get the page source from the response as a string
data = response.text
print(data)

# Parse the page string with BeautifulSoup
soup = BeautifulSoup(data, 'lxml')
# Find all img tags
images = soup.find_all("img")

# 1. Test one: save a single image
# Request the image with streaming enabled
response = requests.get(images[0]["src"], stream=True)
# stream loading
with open(r'./picture/单张图片测试下载.png', 'wb') as f:
    # Write the download in 32-byte chunks (tiny, but enough for a demo)
    for chunk in response.iter_content(chunk_size=32):
        f.write(chunk)


# 2. Test two: save multiple files
# Counter used to name the output files; it must be initialized before the loop
count = 0
for image in images:
    # Get the image URL
    url = image["src"]
    # Download the file with streaming enabled
    response = requests.get(url, stream=True)
    count = count + 1
    # Build the output path with C-style (placeholder) string formatting
    # (the ./picture directory must already exist, or create it first with the os module)
    path = r'./picture/%s.jpg' % (count)
    # Open the file for binary writing; the file itself is created automatically
    with open(path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=32):
            f.write(chunk)
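As the comment above notes, the ./picture directory has to exist before the loop runs, and some src attributes may be relative rather than absolute URLs. A small, hedged variant (reusing the images list from above; the directory name and chunk size are only illustrative) creates the folder with os.makedirs and resolves relative links with urllib.parse.urljoin:

import os
from urllib.parse import urljoin

# Create the output directory up front instead of requiring it to exist already
os.makedirs('./picture', exist_ok=True)

page_url = 'http://www.weather.com.cn/weather/101120101.shtml'
for count, image in enumerate(images, start=1):
    # Some src values are relative; urljoin resolves them against the page URL
    img_url = urljoin(page_url, image.get("src", ""))
    if not img_url.startswith("http"):
        continue  # skip empty src values and data URIs
    response = requests.get(img_url, stream=True, timeout=30)
    with open(os.path.join('./picture', '%s.jpg' % count), 'wb') as f:
        for chunk in response.iter_content(chunk_size=1024 * 64):  # 64 KB chunks
            f.write(chunk)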

9. Scraping in Practice: A Multi-Process Crawler

The pages are still parsed with XPath, exactly as in the "Scraping in practice: Douban Books Top 250" exercise, but here the crawl is driven by the multiprocessing module (a per-URL variant is sketched after the code).

import requests
from lxml import etree
import time
import multiprocessing

def resolveHtml(urls):
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36"
    }
    for url in urls:
        # 1. Fetch the page
        # url, request headers, and a request timeout
        response = requests.get(url, headers=header, timeout=10)
        # 2. After the page is fetched, set the response encoding to avoid garbled Chinese text
        response.encoding = 'utf-8'

        # 3. Get the page source from the response as a string
        data = response.text

        # 4. Parse the page source with etree
        s = etree.HTML(data)

        # 5. Select every table on the current page
        books = s.xpath("//*[@id='content']/div/div[1]/div/table")

        # Pause after each URL so there is a delay before the next request
        time.sleep(2)

        # 6. Within each table, use relative XPaths to pull out the title, link, score and
        #    description, then print them in a formatted line
        for div in books:
            title = div.xpath("./tr/td[2]/div[1]/a/@title")[0]
            href = div.xpath("./tr/td[2]/div[1]/a/@href")
            score = div.xpath("./tr/td[2]/div[2]/span[2]/text()")[0]
            scrible = div.xpath("./tr/td[2]/p[2]/span/text()")
            # Some books have no description in the page source, so check before indexing
            if len(scrible) > 0:
                print("{}: {},{},{}\n".format(title, score, href, scrible[0]))
            else:
                print("{}: {},{}\n".format(title, score, href))


if __name__ == '__main__':
    start = time.time()
    urls = []
    # Print how many CPU cores this machine has
    print(multiprocessing.cpu_count())
    # Size the pool to taste; here the pool uses 2 worker processes
    pool = multiprocessing.Pool(2)

    # Build the URLs following the paging pattern seen when clicking through the site
    for a in range(10):
        # The url changes with each value in the range
        url = 'https://book.douban.com/top250?start={}'.format(a*25)
        urls.append(url)

    # The crawl may raise an exception, so wrap it in a simple try/except
    try:
        # Split the URLs into one chunk per worker and hand each chunk to the pool
        # (pass the function itself, not its return value, so the work runs in the workers)
        pool.map(resolveHtml, [urls[0::2], urls[1::2]])
    except BaseException:
        print("An exception occurred!")
    # Close the pool when done and wait for the workers to finish
    pool.close()
    pool.join()
    print("Elapsed: {:.1f} seconds".format(time.time() - start))
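A hedged alternative sketch: hand each task a single URL instead of the whole list, so pool.map can spread the ten pages evenly across the workers. fetchPage is a hypothetical name; the XPaths are the same as above.

import multiprocessing
import requests
from lxml import etree

def fetchPage(url):
    # Same parsing as resolveHtml, but for a single page
    header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
    response = requests.get(url, headers=header, timeout=10)
    response.encoding = 'utf-8'
    s = etree.HTML(response.text)
    for div in s.xpath("//*[@id='content']/div/div[1]/div/table"):
        title = div.xpath("./tr/td[2]/div[1]/a/@title")[0]
        score = div.xpath("./tr/td[2]/div[2]/span[2]/text()")[0]
        print("{}: {}".format(title, score))

if __name__ == '__main__':
    urls = ['https://book.douban.com/top250?start={}'.format(a * 25) for a in range(10)]
    with multiprocessing.Pool(2) as pool:
        # One URL per task; the pool schedules the ten tasks across the two workers
        pool.map(fetchPage, urls)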

10. Scraping in Practice: Crawling the 51job Recruitment Site (前程无忧)

######################### Follow the "next page" link to crawl page after page #########################
import requests
from bs4 import BeautifulSoup
import time
from lxml import etree
import os


# 1. Manually grab the URL of the first results page
url = "https://search.51job.com/list/000000,000000,0000,01%252C38,9,99,%25E5%25A4%25A7%25E6%2595%25B0%25E6%258D%25AE%25E5%25BC%2580%25E5%258F%2591%25E5%25B7%25A5%25E7%25A8%258B%25E5%25B8%2588,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare="
# 2. Copy the request headers from the page's request in the browser's dev tools
header = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Cache-Control": "max-age=0",
"Connection": "keep-alive",
"Cookie": "guid=a0f2e3f163cd28d84d034e2fe891cd6f; adv=adsnew%3D1%26%7C%26adsresume%3D1%26%7C%26adsfrom%3Dhttps%253A%252F%252Fwww.baidu.com%252Fother.php%253Fsc.Ks0000j16i1eItCE74s-XvROuaQ5-x95MTVIlTUjmLl2QcLFPm6h9y2KGXHBCQx8eMIhG356dkXmHZBRNYMRv6RwmiK0HJbLE-Do1WAYce0zBCL3x0qW4V28edNSdTOB0IDtn9KoXim9yLrOzJbqZJ4F-4VpkXZZUePBEIfixQjPcpi1w3Lxoy0NjLbuT5ns2UDcg-2ZT4quouOn5gCoULQb2jVS.7b_NR2Ar5Od66CHnsGtVdXNdlc2D1n2xx81IZ76Y_uQQr1F_zIyT8P9MqOOgujSOODlxdlPqKMWSxKSgqjlSzOFqtZOmzUlZlS5S8QqxZtVAOtIO0hWEzxkZeMgxJNkOhzxzP7Si1xOvP5dkOz5LOSQ6HJmmlqoZHYqrVMuIo9oEvpSMG34QQQYLgFLIW2IlXk2-muCyr1FkzTf.TLFWgv-b5HDkrfK1ThPGujYknHb0THY0IAYqkea11neXYtT0IgP-T-qYXgK-5H00mywxIZ-suHY10ZIEThfqkea11neXYtT0ThPv5HD0IgF_gv-b5HDdnHm4Pj6drj60UgNxpyfqnHDYrHmsPWD0UNqGujYknjR3rHR1nfKVIZK_gv-b5HDkPHnY0ZKvgv-b5H00mLFW5HDvrjTY%2526ck%253D4915.1.60.331.154.332.162.187%2526dt%253D1587972417%2526wd%253D%2526tpl%253Dtpl_11534_21264_17382%2526l%253D1516948588%2526us%253DlinkName%25253D%252525E6%252525A0%25252587%252525E5%25252587%25252586%252525E5%252525A4%252525B4%252525E9%25252583%252525A8-%252525E4%252525B8%252525BB%252525E6%252525A0%25252587%252525E9%252525A2%25252598%252526linkText%25253D%252525E3%25252580%25252590%252525E5%25252589%2525258D%252525E7%252525A8%2525258B%252525E6%25252597%252525A0%252525E5%252525BF%252525A751Job%252525E3%25252580%25252591-%25252520%252525E5%252525A5%252525BD%252525E5%252525B7%252525A5%252525E4%252525BD%2525259C%252525E5%252525B0%252525BD%252525E5%2525259C%252525A8%252525E5%25252589%2525258D%252525E7%252525A8%2525258B%252525E6%25252597%252525A0%252525E5%252525BF%252525A7%2521%252526linkType%25253D%26%7C%26adsnum%3D2004282; nsearch=jobarea%3D%26%7C%26ord_field%3D%26%7C%26recentSearch0%3D%26%7C%26recentSearch1%3D%26%7C%26recentSearch2%3D%26%7C%26recentSearch3%3D%26%7C%26recentSearch4%3D%26%7C%26collapse_expansion%3D; search=jobarea%7E%60000000%7C%21ord_field%7E%600%7C%21recentSearch0%7E%60000000%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA01%2C38%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FA%B4%F3%CA%FD%BE%DD%BF%AA%B7%A2%B9%A4%B3%CC%CA%A6%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21recentSearch1%7E%60000000%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA01%2C38%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FA%B4%F3%CA%FD%BE%DD%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21recentSearch2%7E%60000000%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA01%2C38%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FA%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21recentSearch3%7E%60120200%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FAHadoop%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21recentSearch4%7E%60120200%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FA%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21",
"Host": "search.51job.com",
"Referer": "https://search.51job.com/list/000000,000000,0000,01%252C38,9,99,%25E5%25A4%25A7%25E6%2595%25B0%25E6%258D%25AE%25E5%25BC%2580%25E5%258F%2591%25E5%25B7%25A5%25E7%25A8%258B%25E5%25B8%2588,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare=",
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-User": "?1",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36",
}



# 3. Request the URL we grabbed above, with the headers and a request timeout
response = requests.get(url, headers=header, timeout=30)
# 4. Set the response text encoding to gbk
response.encoding = "gbk"
# 5. Get the page string from the response object
data = response.text
# 6. Parse the page string with BeautifulSoup
soup = BeautifulSoup(data, 'lxml')

# 7. Counter for the number of job records scraped (incremented once per record)
count = 0


# 8. The scraped text will be saved to the local file system
# Open a file for writing with gbk encoding
myfile = open('./zhaopin2.csv', 'w', encoding='gbk')




# 9. Parse the page: get all of the job entries
jobs = soup.find(id="resultList").select('div[class="el"]')
# 10. Loop over the jobs on the first page
for job in jobs:
    # 11. Pull out the fields and links we want from the listing
    url = job.find("a")["href"]  # URL of the "job details" page
    title = job.find("a")["title"]  # job title
    city = job.find(class_="t3").text  # city
    salary = job.find(class_="t4").text  # salary
    updateTime = job.find(class_="t5").text  # date the posting was updated

    time.sleep(2)  # wait 2 seconds between requests

    # 12. Request the "job details" page using the URL extracted above
    response = requests.get(url, headers=header, timeout=10)
    # 13. Set the encoding here as well to avoid garbled text
    response.encoding = "gbk"
    # 14. Get the "job details" page as a string
    data = response.text
    # 15. This page is parsed with XPath, so build an etree HTML object
    details = etree.HTML(data)
    # 16. Locate the description element (right-click > Copy XPath in the browser) to get
    #     the "job details" text; note that xpath() returns a list
    detail = details.xpath("/html/body/div[3]/div[2]/div[3]/div[1]/div/p/text()")

    # 17. Join all the values into one comma-separated string; str() turns the list into a string
    outData = title + "," + city + "," + salary + "," + updateTime + "," + str(detail)
#     print(outData)
    # 18. Write the joined string to the output file
    myfile.write(outData)
    # End each record with a newline
    myfile.write("\n")

    # 19. Count this record toward the running total
    count = count + 1

# When the loop ends, the first page has been scraped
print("Finished scraping the jobs on the first page")



# 20. Now crawl the remaining pages (only the first 10 here)

# 21. Parse the first page again to get the href of the "next page" button
page_next_url = soup.find(class_="p_in").select('a')[-1]["href"]
# print(page_next_url)

for page in range(10):
    # 22. This time request the URL taken from the "next page" button
    response_pages = requests.get(page_next_url, headers=header, timeout=10)
    # Set the encoding here as well
    response_pages.encoding = "gbk"

    # 23. Get the page string for this request
    data_pages = response_pages.text
    # 24. Parse it with BeautifulSoup, as before
    soup_pages = BeautifulSoup(data_pages, 'lxml')
    # 25. Get all of the job entries on this page
    jobs_pages = soup_pages.find(id="resultList").select('div[class="el"]')

    # 26. Take the "next page" link from the current page and reassign it to page_next_url
    page_next_url = soup_pages.find(class_="p_in").select('a')[-1]["href"]
    
    # 27. As on the first page, pull out the job fields and the job description
    for job in jobs_pages:
        url = job.find("a")["href"]
        title = job.find("a")["title"]
        city = job.find(class_="t3").text
        salary = job.find(class_="t4").text
        updateTime = job.find(class_="t5").text

        time.sleep(2)
        response = requests.get(url, headers=header, timeout=10)
        response.encoding = "gbk"

        data = response.text

        details = etree.HTML(data)

        detail = details.xpath("/html/body/div[3]/div[2]/div[3]/div[1]/div/p/text()")
#         print(url)
#         print("==========")
        outData = title + "," + city + "," + salary + "," + updateTime + "," + str(detail)
#         print(outData)

        myfile.write(outData)
        myfile.write("\n")

#         print("=-------- wrote record to file --------=")

        count = count + 1


    print("Currently on page {}".format(page + 1))
    print("{} jobs scraped so far\n".format(count))

# Close the file only after all the data has been written
myfile.close()
print("Finished writing the file!")
print("----------------------------------------------------------------------")
print(count)
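One caveat about the output format above: job descriptions and even salary strings can contain commas, which breaks a hand-joined CSV line. A hedged sketch using Python's csv module to handle the quoting instead (scraped_rows is a hypothetical list holding the tuples collected in the loops above):

import csv

# scraped_rows is assumed to hold (title, city, salary, updateTime, str(detail)) tuples
with open('./zhaopin2.csv', 'w', encoding='gbk', errors='ignore', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(["title", "city", "salary", "updated", "detail"])  # header row
    for row in scraped_rows:
        writer.writerow(row)  # csv.writer quotes fields that contain commas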

11. Supplement: Downloading Videos

Manually download a video by resolving its .m3u8 playlist: ffmpeg fetches the split segments and merges them into a single file.
ffmpeg -protocol_whitelist "file,http,https,tcp,tls" ^
-i "https://3.mhbobo.com/20190805/dePnS34U/1200kb/hls/index.m3u8" ^
-c copy "D:\video\test-31.mp4"

ffmpeg -protocol_whitelist "file,http,https,tcp,tls" ^
-i "https://v4.szjal.cn/20200704/dYkxCVpi/1000kb/hls/index.m3u8" ^
-c copy "E:\jqdz\test.mp4"
