04_Crawling Strategies & bs4

Author: 郑元吉 | Published 2018-11-28 12:03

    1. Crawling Strategies

    In a crawler system, the queue of URLs waiting to be fetched (the frontier) is a critical component. The order in which URLs in this queue are arranged matters, because it decides which pages are fetched first and which later. The method that determines this ordering is called the crawling strategy. Several common strategies are described below.
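
    Both strategies below keep such a frontier of pending URLs and differ only in how the next URL is taken out of it. A minimal sketch of that idea (get_links here is a hypothetical stand-in for whatever link-extraction routine the crawler uses):

    from collections import deque

    def crawl(start_url, get_links, depth_first=False, max_pages=100):
        frontier = deque([start_url])   # URLs waiting to be fetched
        seen = {start_url}              # visited set for deduplication
        while frontier and max_pages > 0:
            # popping from the end (LIFO) gives depth-first order,
            # popping from the front (FIFO) gives breadth-first order
            url = frontier.pop() if depth_first else frontier.popleft()
            max_pages -= 1
            print("fetching", url)
            for link in get_links(url):
                if link not in seen:
                    seen.add(link)
                    frontier.append(link)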

    • Depth-first (recursive) traversal strategy
      Starting from a seed page, the crawler follows one chain of links all the way down; only after finishing that chain does it move on to the next seed page and continue following links.
    import re
    import requests
    
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"}
    
    hrefre = "<a.*href=\"(https?://.*?)\".*>"
    
    def getPage(url):
        '''
        Fetch a page.
        :param url:
        :return: HTML source
        '''
        html = requests.get(url, headers=header)
        return html.text
    
    def getUrl(url):
        '''
        Extract URLs from a page.
        :param url:
        :return: list of URLs
        '''
        html = getPage(url)
        urllist = re.findall(hrefre, html)
        return urllist
    
    def deepSpider(url, depth):
        '''
        Depth-first crawler
        :param url:
        :param depth: depth limit
        :return:
        '''
        if depthDict[url] > depth:
            return  # stop once the depth limit is exceeded
        print("\t\t\t" * depthDict[url], "Crawled level-%d page: %s" % (depthDict[url], url))

        sonlist = getUrl(url)
        for i in sonlist:
            if i not in depthDict:  # skip URLs we have already seen
                depthDict[i] = depthDict[url] + 1  # child is one level deeper
                deepSpider(i, depth)
    
    if __name__ == '__main__':
        depthDict = {}  # depth of every URL seen; doubles as the visited set
        # seed URL
        startUrl = "https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&tn=baidu&wd=岛国邮箱"
        depthDict[startUrl] = 1
        deepSpider(startUrl, 4)
    
    
    • Breadth-first (queue-based) traversal strategy
      The basic idea of breadth-first traversal is that links discovered in a newly downloaded page are appended to the end of the frontier queue. In other words, the crawler first fetches every page linked from the seed page, then picks one of those linked pages and fetches everything linked from it, and so on. Using the same example page graph as before, the traversal path is A-B-C-D-E-F-G-H-I.
    import re
    import requests
    
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"}
    
    hrefre = "<a.*href=\"(https?://.*?)\".*>"
    
    def getUrl(url):
        '''
        Extract every URL from a page.
        :param url:
        :return: list of URLs
        '''
        html = getPage(url)
        # example of the markup the regex below is meant to match:
        # <a data-click="{}" href="http://www.baidu.com/" fasdf>...</a>
        urlre = "<a.*href=\"(https?://.*?)\".*>"
        urllist = re.findall(urlre, html)
        return urllist
    
    def getPage(url):
        '''
        Fetch a page's HTML.
        :param url:
        :return: HTML source
        '''
        html = requests.get(url, headers=header).text
        return html
    
    def vastSpider(depth):
        '''
        Breadth-first crawler
        :param depth: depth limit
        '''
        while len(urlList) > 0:
            url = urlList.pop(0)  # pop the URL at the head of the queue (FIFO)
            print("\t\t\t" * depthDict[url], "Crawled level-%d page: %s" % (depthDict[url], url))

            if depthDict[url] < depth:
                sonList = getUrl(url)
                for s in sonList:
                    if s not in depthDict:  # skip URLs we have already seen
                        depthDict[s] = depthDict[url] + 1
                        urlList.append(s)
    
    if __name__ == '__main__':
        urlList = []  # FIFO queue of URLs waiting to be crawled

        depthDict = {}  # depth of every URL seen; doubles as the visited set
        startUrl = "https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&tn=baidu&wd=岛国邮箱"
        depthDict[startUrl] = 1
        urlList.append(startUrl)
        vastSpider(4)
    
    

    2. Page Parsing and Data Extraction

    Generally speaking, what we want to scrape is the content of some website or application and extract the valuable parts from it. That content falls into two broad categories: unstructured data and structured data.

    • Unstructured data: the data comes first, and structure is imposed on it afterwards.
    • Structured data: the structure is defined first, and the data is filled into it.

    Different types of data call for different handling, as the short sketch after the list below illustrates.

    • Handling unstructured data:
      HTML

    • Handling structured data:
      JSON
      XML
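
    As a rough illustration of the difference (the JSON and HTML snippets here are made-up examples): structured data maps straight onto native objects, while unstructured HTML has to be parsed and the interesting pieces located, which is what Beautiful Soup is for.

    import json
    from bs4 import BeautifulSoup

    # Structured data: the schema is known up front, so parsing is mechanical.
    json_text = '{"job": "python", "count": 42}'
    record = json.loads(json_text)
    print(record["count"])  # -> 42

    # Unstructured data: the structure has to be discovered in the markup.
    html_text = '<div class="rt">42 positions found</div>'
    soup = BeautifulSoup(html_text, "lxml")
    print(soup.find("div", class_="rt").text)  # -> 42 positions found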
    

    Beautiful Soup 4.2.0 documentation (Chinese edition):

    https://www.crummy.com/software/BeautifulSoup/bs4/doc/index.zh.html
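
    The examples below lean on a small set of Beautiful Soup entry points: find / find_all for tag searches, select for CSS selectors, dict-style attribute access, and get_text. A minimal sketch on a made-up snippet:

    from bs4 import BeautifulSoup

    html = '''
    <ul class="squareli">
        <li><a href="https://example.com/a">Job A</a></li>
        <li><a href="https://example.com/b">Job B</a></li>
    </ul>
    '''
    soup = BeautifulSoup(html, "lxml")

    first = soup.find("li")                          # first matching tag
    items = soup.find_all("li")                      # all matching tags
    links = soup.select('ul[class="squareli"] a')    # CSS selector
    print(first.get_text())                          # text of a tag and its children
    print([a["href"] for a in links])                # attributes behave like a dict
    print(len(items))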

    Example: scraping the number of job postings from 前程无忧 (51job)

    from bs4 import BeautifulSoup
    import re  # only needed for the commented-out regex approach below
    import requests
    
    def download(url):
        headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);"}
        response = requests.get(url, headers=headers)
        html = response.content.decode('gbk')  # the page is served as GBK, so decode it explicitly

        soup = BeautifulSoup(html, 'lxml')
        # Several ways to locate the element holding the job count
        # Approach 1: find_all
        jobnum = soup.find_all('div', class_='rt')
        print(jobnum[0].text)

        # Approach 2: CSS selector via select
        jobnum = soup.select('.rt')[0].string
        print(jobnum.strip())  # strip leading/trailing whitespace

        # Approach 3: regex matching with re
        # jobnum_re = '<div class="rt">(.*?)</div>'
        # jobnum_comp = re.compile(jobnum_re, re.S)
        # jobnums = jobnum_comp.findall(html)
        # print(jobnums[0])
    
    download(url = "https://search.51job.com/list/000000,000000,0000,00,9,99,python,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare=")
    
    

    Example: scraping stock fund data

    import urllib
    from urllib import request
    from bs4 import BeautifulSoup
    
    stockList = []
    
    def download(url):
        headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);"}
        req = urllib.request.Request(url, headers=headers)  # build the request with browser-like headers
        data = urllib.request.urlopen(req).read()  # send the request and read the raw response

        soup = BeautifulSoup(data, "html5lib", from_encoding="gb2312")
        mytable = soup.select("#datalist")
        for line in mytable[0].find_all("tr"):
            print(line.get_text())  # full text of the row
            print(line.select("td:nth-of-type(3)")[0].text)  # a specific cell (3rd column) of the row
    
    if __name__ == '__main__':
        download("http://quote.stockstar.com/fund/stock_3_1_2.html")
    
    

    Example: scraping a Tencent job description

    import urllib
    from urllib import request
    from bs4 import BeautifulSoup
    
    def download(url):
        headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);"}
        req = urllib.request.Request(url, headers=headers)  # build the request with browser-like headers
        data = urllib.request.urlopen(req).read()  # send the request and read the raw response

        soup = BeautifulSoup(data, "html5lib")
        # print(soup)  # uncomment to dump the parsed document while debugging
        # Approach 1: find_all
        data = soup.find_all("ul", class_="squareli")
        for dataline in data:
            for linedata in dataline.find_all("li"):
                print(linedata.string)

        # Approach 2: CSS selector via select
        data = soup.select('ul[class="squareli"]')
        for dataline in data:
            for linedata in dataline.select("li"):
                print(linedata.get_text())
    
    download("https://hr.tencent.com/position_detail.php?id=43940&keywords=%E7%88%AC%E8%99%AB&tid=0&lid=0")
    

    Example: scraping the Tencent job listings

    import urllib
    from urllib import request
    from bs4 import BeautifulSoup
    
    def download(url):
        headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);"}
        req = urllib.request.Request(url, headers=headers)  # build the request with browser-like headers
        data = urllib.request.urlopen(req).read()  # send the request and read the raw response
        soup = BeautifulSoup(data, "lxml")

        data = soup.find_all("table", class_="tablelist")
        # each job row carries class "even" or "odd"
        for line in data[0].find_all("tr", class_=["even", "odd"]):
            print(line.find_all("td")[0].a["href"])  # link to the job detail page
            for cell in line.find_all("td"):
                print(cell.string)
    
    download("https://hr.tencent.com/position.php?keywords=python&lid=0&tid=0#a")
    
    

    Saving to the database

    import pymysql
    
    # Save the scraped jobs into MySQL
    def save_job(tencent_job_list):

        # connect to the database
        db = pymysql.connect(host="127.0.0.1", port=3306, user='root', password="root", database='tencent1', charset='utf8')
        # cursor for executing SQL
        cursor = db.cursor()

        # insert each job; let the driver escape the values instead of formatting the SQL string by hand
        for job in tencent_job_list:
            sql = 'insert into job(name, address, type, num) values (%s, %s, %s, %s)'
            cursor.execute(sql, (job["name"], job["address"], job["type"], job["num"]))
            db.commit()
    
        cursor.close()
        db.close()
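
    The insert above assumes that a job table already exists in the tencent1 database. The original post does not show its schema; a minimal guess that matches the insert statement could be created like this:

    import pymysql

    def create_job_table():
        '''Create the job table assumed by save_job (the schema here is a guess).'''
        db = pymysql.connect(host="127.0.0.1", port=3306, user='root', password="root", database='tencent1', charset='utf8')
        cursor = db.cursor()
        cursor.execute('''
            create table if not exists job (
                id int primary key auto_increment,
                name varchar(255),
                address varchar(255),
                type varchar(255),
                num varchar(32)
            ) default charset=utf8
        ''')
        db.commit()
        cursor.close()
        db.close()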
    
