2017/5/3 Scraping Zhihu Search Results

Author: Carpe | Published 2017-05-03 18:58

    Approach

    Exams are about to drive me crazy... my head aches from all the cramming, so for lack of time I won't go through the analysis in detail; the concrete approach is in the code.
    I picked a different angle for scraping answers: scraping the search results instead. That touches a few more topics than usual: submitting query parameters, JSON whose payload is stored as HTML, and a first attempt at saving everything to a database.
    One more thing to watch out for: some usernames are irregular. The username and the personal bio are merged into a single hyperlink, which is fairly hard to parse, so I simplified the handling (see the sketch below).
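
    A minimal sketch of how that merged author/bio anchor might be split field by field; the HTML snippet is hypothetical, reconstructed from the class names used in the code below (author-link-line, bio), not captured from Zhihu:

    # -*- coding: utf-8 -*-
    from bs4 import BeautifulSoup

    # Hypothetical markup: username and bio rendered inside a single <a> tag
    html = '<a class="author-link"><span class="author-link-line">SomeUser</span><span class="bio">a one-line bio</span></a>'
    soup = BeautifulSoup(html, "lxml")
    link = soup.find("a", {"class": "author-link"})

    # Look for the inner spans first, and only fall back to the whole anchor text,
    # rather than treating name + bio as one opaque string
    name_tag = link.find("span", {"class": "author-link-line"})
    bio_tag = link.find("span", {"class": "bio"})
    name = name_tag.get_text() if name_tag else link.get_text()
    bio = bio_tag.get_text() if bio_tag else ""
    print name, bio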

    The page being scraped

    Results


    Code

    # -*- coding: utf-8 -*-
    
    """
    项目目标
    1.问题的标题
    2.点赞数
    3.回答的简略说明
    4.回答的作者(要区分于匿名用户)
    5.评论数
    
    """
    import requests
    import sys
    reload(sys)
    sys.setdefaultencoding("utf-8")  # Python 2: make implicit str/unicode conversions use utf-8
    import json
    from bs4 import BeautifulSoup
    import MySQLdb
    
    def getdata():
    
        headers = {"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36"}
        cookies = {  # copied from a logged-in browser session; replace with your own
            'd_c0': '"AHACIj6lFguPTkEZTVmz8MPJ8UkcuI03hag=|1483278273"',
            '_zap': '108369f2-93e6-407e-8928-b57f44168838',
            'q_c1': '0d3d690c67bc4b12a163beb462e17d67|1492571575000|1483278273000',
            'cap_id': '"NmFlNDgyZmMxNWQ0NGFiYmFhZThmMGM2MDVkZDM1MWM=|1493684435|1958b2bb9fb6f09116056cc4bc5af76e00dbaff7"',
            'l_cap_id': '"ZjViYmQ0MzBkNTFjNGJjOWE5YTExZWRjMjlkNjM2YjQ=|1493684435|1204e5c054805d8bebf3b49a8a2fc09e2f1bd870"',
            'auth_type': '"c2luYQ==|1493684515|245a0f575f151c486829c687ba3d3c90fc5fa558"',
            'token': '"Mi4wMGljWjNYRkVBNzIyRGJjZDZkMjMyZmMwM2JtMWs=|1493684515|a76491c048b7e825a77983ef0a272bdc9e721853"',
            'client_i': '"NTA3MzY2MzQwNA==|1493684515|9a08a2c9a6c02e86da4810cb5bc4437f6d7b1028"',
            'aliyungf_tc': 'AQAAAFOsE04F5QQAHVcc2i2qHTGdX1x0',
            'acw_tc': 'AQAAAAnnBnQS0AcAHVcc2omDblHld+wt',
            '_xsrf': '398e0b041f39355dadfc06b76ba18cdb',
            's-q': 'python',
            's-i': '1',
            'sid': 'ob2ttqkg',
            's-t': 'autocomplete',
            'z_c0': 'Mi4wQUFBQ3hmbTFzUXNBY0FJaVBxVVdDeGNBQUFCaEFsVk5OVjR2V1FETW9QS19PRlExWGVjUHNOcFVoTG5mYWg1LUtR|1493701061|4258e8f2ddc800c42c8b7d94385f578ca97f34d5',
            '__utma': '51854390.213172690.1484833633.1493684449.1493701029.4',
            '__utmb': '51854390.0.10.1493701029',
            '__utmc': '51854390',
            '__utmz': '51854390.1493701029.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic',
            '__utmv': '51854390.100--|2=registration_date=20170502=1^3=entry_date=20170101=1'
        }
        total = []
        for i in range(0,17):
            page = 10 * i
    
            # Build the query parameters; plenty of options can be set here,
            # e.g. removing "sort" falls back to the default ordering by time
            data = {"q": "python",
                    "sort": "upvote",
                    "correction": "0",
                    "type": "content",
                    "offset": page}
            # Query parameters of a GET request belong in params=, not data=
            r = requests.get("https://www.zhihu.com/r/search", headers=headers, params=data, cookies=cookies)
    
            # The response is JSON, but each entry in it is an HTML fragment,
            # so BeautifulSoup is still needed to pull out the fields.
            # Inferred shape, based on the loop below: {"htmls": ["<div ...>", ...], ...}
            totaldata = json.loads(r.text)
    
    
            for one in totaldata["htmls"]:
                item = {}
                soup = BeautifulSoup(one, "lxml")
    
                item["title"] = soup.find("div", {"class": "title"}).get_text() #标题
                try:
                    item["author"] = soup.find("span", {"class": "author-link-line"}).get_text() #作者名
                except:
                    item["author"] = "匿名用户"
    
                try:
                    item["bio"] = soup.find("span", {"class": "bio"}).get_text() #标签
                except:
                    item["bio"] = "没有介绍"
    
                try:
                    item["like"] = soup.find("span", {"class": "count"}).get_text() #点赞数
                except:
                    item["like"] = 0
    
                try:
                    item["comment"] = soup.find("a", {"class": "action-item js-toggleCommentBox"}).get_text() #评论数
                except:
                    item["comment"] = 0
    
                try:
                    item["zaiyao"] = soup.find("div", {"class": "summary hidden-expanded"}).get_text()
                except:
                    item["zaiyao"] = "没有回答"
    
                total.append(item)
        return total
    
    # Create a database table to store the scraped records

    # Connect to the database
    def mysql_conn():
        conn = MySQLdb.connect(
            host='127.0.0.1',
            user='root',
            passwd='882645',
            charset='utf8',
            db='Lagou',  # an existing database; the zhihu table is created inside it
            port=3306
        )
        return conn
    
    # Create the table
    def create_table():
        create_sql='''
        CREATE TABLE `zhihu`(
        id INT(11) NOT NULL AUTO_INCREMENT,
        TITLE VARCHAR(855),
        LIKES INT(255),
        AUTHOR VARCHAR(255),
        BIO VARCHAR(255),
        ZAIYAO VARCHAR(855),
        COMMENTS VARCHAR(255),
        PRIMARY KEY (`id`)
        ) ENGINE=InnoDB DEFAULT CHARSET=utf8
        '''
        conn=mysql_conn()
        with conn:
            cursor = conn.cursor()
            cursor.execute(create_sql)
            conn.commit()
    
    # Insert one record
    def insert(item):
        sql = 'insert into zhihu (TITLE,LIKES,AUTHOR,BIO,ZAIYAO,COMMENTS) values(%s,%s,%s,%s,%s,%s)'
        conn = mysql_conn()
        with conn:
            cursor = conn.cursor()
            try:
                cursor.execute(sql, (item['title'], item['like'], item['author'],
                                     item['bio'], item['zaiyao'], item['comment']))
                conn.commit()
            except MySQLdb.Error as e:
                print u"error here >>>>", e, u"<<<< error here"
    
    if __name__ == '__main__':
        create_table()
        a = getdata()
        for i in a:
            insert(i)
        print u"处理完毕"
    
