Python Crawler: Scraping Jianshu's Trending Content

Author: 蝈蝈大人 | Published 2017-03-28 15:18

    I'm new to web scraping, so just a brief note: the code below scrapes Jianshu's 30-day trending list. Because the request URL was captured with a packet sniffer, the script can fetch any page of that listing. I haven't learned to wrap things into functions yet, so this is a bit rough; bear with me.

    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    import csv
    import time

    import requests
    from bs4 import BeautifulSoup
    headers = {'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
    url = 'http://www.jianshu.com/trending/monthly?seen_snote_ids%5B%5D=9417518&seen_snote_ids%5B%5D=9975670&seen_snote_ids%5B%5D=9983984&seen_snote_ids%5B%5D=9707970&seen_snote_ids%5B%5D=9650477&seen_snote_ids%5B%5D=10065620&seen_snote_ids%5B%5D=10239288&seen_snote_ids%5B%5D=9917498&seen_snote_ids%5B%5D=10066091&seen_snote_ids%5B%5D=10050042&seen_snote_ids%5B%5D=9417837&seen_snote_ids%5B%5D=10133511&seen_snote_ids%5B%5D=9587458&seen_snote_ids%5B%5D=10189199&seen_snote_ids%5B%5D=10443321&seen_snote_ids%5B%5D=10094112&seen_snote_ids%5B%5D=10270938&seen_snote_ids%5B%5D=9654829&seen_snote_ids%5B%5D=8446458&seen_snote_ids%5B%5D=10465818&page='
    for a in range(1,100):  # crawl listing pages 1 through 99
        new_url = url + str(a)
        page = requests.get(new_url,headers=headers)
        soup = BeautifulSoup(page.text,'lxml')
        text = soup.find_all('div',{'class':'content'})
        for i in text:
            authorName = i.find_all('a')[1].get_text()  # author name
            pageTitle = i.find_all('a')[2].get_text()  # article title
            peopleRead = i.find_all('a')[3].get_text()  # read count
            peopleRead = peopleRead.strip()
            peopleComment = i.find_all('a')[4].get_text()  # comment count
            peopleComment = peopleComment.strip()
            peopleLike = i.find_all('span')[1].get_text()  # like count
            peopleLike = peopleLike.strip()
            pageAbstract = i.find('p', {'class': 'abstract'}).get_text()  # abstract
            pageAbstract = pageAbstract.strip()
            getMoney = i.find_all('span')[-1].get_text()  # reward count (it is the last span; a positive index kept raising errors, so index from the end)
    
            authorName = list(authorName.strip().split(','))  # wrap each field in a (usually one-item) list
            pageTitle = list(pageTitle.strip().split(','))
            peopleRead = list(peopleRead.strip().split(','))
            peopleComment = list(peopleComment.strip().split(','))
            peopleLike = list(peopleLike.strip().split(','))
            pageAbstract = list(pageAbstract.strip().split(','))
            getMoney = list(getMoney.strip().split(','))
            item = [[u, v, w, x, y, z, m] for u, v, w, x, y, z, m in zip(authorName, pageTitle, pageAbstract, peopleRead, peopleComment, peopleLike, getMoney)]  # combine the seven lists into one 2-D list
            #print(item)
            #item_name = ['Author', 'Title', 'Abstract', 'Reads', 'Comments', 'Likes', 'Rewards']
            with open('简书.csv', 'a+', newline='', encoding='utf_8_sig') as csvfile:  # newline='' avoids blank lines between rows; utf_8_sig writes the BOM, so Excel opens the file without mojibake
                spamwriter = csv.writer(csvfile, delimiter=' ',
                                        quotechar='|', quoting=csv.QUOTE_MINIMAL)
                spamwriter.writerows(item)  # item is a 2-D list, so writerows rather than writerow

        time.sleep(1)  # pause between pages to lower the risk of an IP ban
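
    Since the script above is not yet wrapped into functions, here is a minimal refactoring sketch (purely an illustration, not the author's code) of how the same crawl might be factored: one function per step, the header row written once, and the long captured URL replaced by a hypothetical BASE_URL placeholder. The field order matches the original item list.

    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    # Refactoring sketch. Assumptions: BASE_URL is a shortened stand-in for the
    # captured URL above, and the pages have the same structure the original
    # selectors target.
    import csv
    import time

    import requests
    from bs4 import BeautifulSoup

    HEADERS = {'User-Agent': 'Mozilla/5.0'}
    BASE_URL = 'http://www.jianshu.com/trending/monthly?page='  # hypothetical placeholder


    def fetch_page(page_number):
        """Download one listing page and return its parsed soup."""
        response = requests.get(BASE_URL + str(page_number), headers=HEADERS)
        response.raise_for_status()
        return BeautifulSoup(response.text, 'lxml')


    def parse_articles(soup):
        """Yield one [author, title, abstract, reads, comments, likes, rewards] row per article card."""
        for card in soup.find_all('div', {'class': 'content'}):
            links = card.find_all('a')
            spans = card.find_all('span')
            abstract = card.find('p', {'class': 'abstract'})
            yield [
                links[1].get_text().strip(),                      # author
                links[2].get_text().strip(),                      # title
                abstract.get_text().strip() if abstract else '',  # abstract
                links[3].get_text().strip(),                      # reads
                links[4].get_text().strip(),                      # comments
                spans[1].get_text().strip(),                      # likes
                spans[-1].get_text().strip(),                     # rewards (last span, as above)
            ]


    def crawl(pages, out_path='简书.csv'):
        """Crawl the given number of listing pages into one UTF-8 CSV with a single header row."""
        with open(out_path, 'w', newline='', encoding='utf_8_sig') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['Author', 'Title', 'Abstract', 'Reads', 'Comments', 'Likes', 'Rewards'])
            for page_number in range(1, pages + 1):
                for row in parse_articles(fetch_page(page_number)):
                    writer.writerow(row)
                time.sleep(1)  # pause between pages to lower the risk of an IP ban


    if __name__ == '__main__':
        crawl(3)  # e.g. fetch the first three listing pages

    Opening the file once in 'w' mode and writing the header a single time avoids the repeated BOMs and interleaved headers that re-opening the file in 'a+' mode inside the loop would produce.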
    
    
    
