Web Crawler

Author: royluck | Published 2020-10-06 21:10
    Crawler warm-up:
    • Clarify the goal, find the page that contains the data, and analyze the page structure to locate the tags the data lives in
    • Simulate an HTTP request, send it to the server, and get back the HTML the server returns
    • Use regular expressions to extract the data we want (name, popularity)
    import re
    from urllib import request
    # Breakpoint debugging
    # Higher-level alternatives: BeautifulSoup, scrapy
    # Crawlers, anti-crawling measures, and countermeasures against them
    # IP bans
    # Proxy IP pools
    
    class Spider():
        url = 'https://www.panda.tv/cate/lol'
        # Regex notes: () captures a group; [\s\S]*? matches anything, non-greedily
        root_pattern = r'<div class="video-info">([\s\S]*?)</div>'
        name_pattern = r'</i>([\s\S]*?)</span>'
        number_pattern = r'<span class="video-number">([\s\S]*?)</span>'
        # Fetch the page content
        def __fetch_content(self):
            r = request.urlopen(Spider.url)
    
            htmls = r.read()
            htmls = str(htmls, encoding='utf-8')
            return htmls
    
        # Parse the HTML into a list of anchors
        def __analysis(self, htmls):
            root_html = re.findall(Spider.root_pattern, htmls)
    
            anchors = []
            for html in root_html:
                name = re.findall(Spider.name_pattern, html)
                number = re.findall(Spider.number_pattern, html)
                anchor = {'name': name, 'number': number}
                anchors.append(anchor)
    
            return anchors
    
        # Refine the data
        def __refine(self, anchors):
            l = lambda anchor: {
                'name': anchor['name'][0].strip(), # strip whitespace and newlines
                'number': anchor['number'][0]
            }
            return map(l, anchors)
    
        # Sort anchors by popularity, highest first
        def __sort(self, anchors):
            anchors = sorted(anchors, key = self.__sort_seed, reverse=True)
            return anchors
    
        # Sort key: convert the popularity string (e.g. '2.5万') to a number
        def __sort_seed(self, anchor):
            r = re.findall(r'\d+\.?\d*', anchor['number'])
            number = float(r[0])
            if '万' in anchor['number']:   # '万' means ten thousand
                number *= 10000
            return number
    
        # Print the ranked results
        def __show(self, anchors):
            # for anchor in anchors:
            #     print(anchor['name']+ '----' + anchor['number'])
    
            for rank, anchor in enumerate(anchors, start=1):
                print('rank ' + str(rank)
                      + '   :  ' + anchor['name']
                      + '   ' + anchor['number'])
    
        # Entry point: fetch -> parse -> refine -> sort -> show
        def go(self):
            htmls = self.__fetch_content()
            anchors = self.__analysis(htmls)
            anchors = list(self.__refine(anchors))
            anchors = self.__sort(anchors)
            self.__show(anchors)
    
    spider = Spider()
    spider.go()
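
    The comment at the top mentions BeautifulSoup and scrapy as higher-level alternatives to hand-written regular expressions. Below is a minimal parsing sketch with BeautifulSoup (pip install beautifulsoup4); the 'video-info' and 'video-number' class names are taken from the regex patterns above, while the exact tag holding the anchor's name is only an assumption about the page's markup.

    from bs4 import BeautifulSoup

    def analysis_with_bs4(htmls):
        soup = BeautifulSoup(htmls, 'html.parser')
        anchors = []
        for info in soup.find_all('div', class_='video-info'):
            number_tag = info.find('span', class_='video-number')
            i_tag = info.find('i')
            # Assumption (mirroring name_pattern): the name span contains an <i> icon,
            # so the <i> tag's parent is the span holding the anchor's name
            name = i_tag.parent.get_text(strip=True) if i_tag else ''
            anchors.append({
                'name': name,
                'number': number_tag.get_text(strip=True) if number_tag else ''
            })
        return anchors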
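
    The comments about anti-crawling, IP bans, and proxy IP pools point at the usual first countermeasures: sending a browser-like User-Agent and routing requests through a proxy. A minimal sketch is below, reusing the `request` import from the top of the file; the User-Agent string and the proxy address are placeholders, not values from the original post.

    def fetch_with_headers_and_proxy(url):
        # Pretend to be a normal browser; many sites block the default urllib User-Agent
        headers = {'User-Agent': 'Mozilla/5.0'}
        req = request.Request(url, headers=headers)
        # Placeholder proxy address; in practice this would come from a proxy IP pool
        proxy = request.ProxyHandler({'http': 'http://127.0.0.1:8888',
                                      'https': 'http://127.0.0.1:8888'})
        opener = request.build_opener(proxy)
        return opener.open(req).read().decode('utf-8')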
    
