Scraping the US Stocks Forum: Final Revision

Author: Snow__ | Published 2017-06-03 23:07

First, the code:

# -*- coding:utf-8 -*-
import requests
from lxml import etree
import csv

import sys

# Python 2 only: force the default string encoding to UTF-8 so implicit
# str/unicode conversions (e.g. inside the csv module) don't raise
# UnicodeDecodeError. This hack does not exist in Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')

start_url = "http://guba.eastmoney.com/list,meigu_1.html"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) "
                  "AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/58.0.3029.110 Safari/537.36"
}


# def get_total_page(start_url):
#    html = requests.get(url=start_url, headers=headers).content
#    selector = etree.HTML(html)
#    sum_page = selector.xpath("//span[@class='sumpage']/text()")
#    return sum_page
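# A working version of the idea above would also convert the scraped text
# to an int (a sketch, assuming the list page still carries a
# <span class="sumpage"> element):
# def get_total_page(start_url):
#     html = requests.get(url=start_url, headers=headers).content
#     selector = etree.HTML(html)
#     sum_page = selector.xpath("//span[@class='sumpage']/text()")
#     return int(sum_page[0]) if sum_page else 1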


def parse_title():
    # sum_page = get_total_page(start_url)
    rows = []
    for num in range(1, 23):  # the forum had 22 list pages at the time of writing
        url = "http://guba.eastmoney.com/list,meigu_" + str(num) + ".html"
        html = requests.get(url=url, headers=headers).content
        selector = etree.HTML(html)
        items = selector.xpath("//div[@id='articlelistnew']/div[position()>1 and position()<last()]")
        for item in items:
            title = item.xpath("span[@class='l3']/a/text()")[0]
            # Some posts have no author link; fall back to "anonymous user".
            authors = item.xpath("span[@class='l4']/a/text()")
            author = authors[0] if authors else u'匿名网友'
            read = item.xpath("span[@class='l1']/text()")[0]
            comment_num = item.xpath("span[@class='l2']/text()")[0]
            post_time = item.xpath("span[@class='l6']/text()")[0]
            last_update = item.xpath("span[@class='l5']/text()")[0]
            # Hrefs on the list page may or may not start with '/'; normalize both.
            link = item.xpath("span[@class='l3']/a/@href")[0]
            complete_link = 'http://guba.eastmoney.com' + link if link.startswith('/') else 'http://guba.eastmoney.com/' + link
            rows.append(
                {'title': title, 'author': author, 'read': read, 'comment_num': comment_num, 'post_time': post_time,
                 'last_update': last_update, 'link': link, 'complete_link': complete_link})
    return rows


def parse_comments(selector, content):
    # One row per comment; if a post has no comments yet, emit a placeholder
    # row so the CSV columns stay aligned.
    comment_divs = selector.xpath("//div[@id='zwlist']/div[@class='zwli clearfix']")
    if not comment_divs:
        return [{'content': content, 'name': 'none', 'comment': 'none', 'time': 'none'}]
    rows = []
    for div in comment_divs:
        name = ''.join(div.xpath(".//div[@class='zwlianame']/span/a/text()"))
        comment = ''.join(t.strip() for t in div.xpath(".//div[@class='zwlitext stockcodec']/text()"))
        time = ''.join(div.xpath(".//div[@class='zwlitime']/text()"))
        rows.append({'content': content, 'name': name, 'comment': comment, 'time': time})
    return rows


def parse_content_comment(posts):
    links = [item['complete_link'] for item in posts]
    rows = []
    # The first eight posts use a different page template (body text inside
    # div#zw_body) from the rest, so their content is extracted separately.
    for link in links[0:8]:
        # .text is already unicode -- do not call .decode() on it again.
        html = requests.get(url=link, headers=headers).text
        selector = etree.HTML(html)
        contents = selector.xpath("//div[@class='stockcodec']/div[@id='zw_body']/p/text()") or [u'none']
        content = ''.join(contents)
        rows.extend(parse_comments(selector, content))
    for link in links[8:]:
        html = requests.get(url=link, headers=headers).text
        selector = etree.HTML(html)
        contents = selector.xpath("//div[@class='stockcodec']/text()") or [u'none']
        content = ''.join(co.strip() for co in contents)
        rows.extend(parse_comments(selector, content))
    return rows


if __name__ == "__main__":
    headlines1 = ['title', 'author', 'read', 'comment_num', 'post_time', 'last_update', 'link', 'complete_link']
    headlines2 = ['content', 'name', 'comment', 'time']
    rows1 = parse_title()
    # Reuse rows1 so the list pages are fetched only once.
    rows2 = parse_content_comment(rows1)
    # Python 2: the csv module wants files opened in binary mode.
    with open('eastmoney1.csv', 'wb') as f:
        f_csv = csv.DictWriter(f, headlines1)
        f_csv.writeheader()
        f_csv.writerows(rows1)
    with open('eastmoney2.csv', 'wb') as f:
        f_csv = csv.DictWriter(f, headlines2)
        f_csv.writeheader()
        f_csv.writerows(rows2)

Results

I had no idea why the output kept coming out garbled, and this assignment nagged at me the whole week. Today I calmly worked through the cause and finally found the problem. You can't imagine how happy I was.

The problem was right here:
html = requests.get(url=url, headers=headers).text
html = requests.get(url=url, headers=headers).content

Look at the requests source:

    @property
    def text(self):
        """Content of the response, in unicode.

        If Response.encoding is None, encoding will be guessed using
        ``chardet``.

        The encoding of the response content is determined based solely on HTTP
        headers, following RFC 2616 to the letter. If you can take advantage of
        non-HTTP knowledge to make a better guess at the encoding, you should
        set ``r.encoding`` appropriately before accessing this property.
        """

    # The full implementation of content is not shown here.
    @property
    def content(self):
        """Content of the response, in bytes."""

.text returns Unicode data.
.content returns bytes, i.e. raw binary data.
I had been parsing with .content the whole time, which is why it kept failing.
That thorn in my side is finally gone, and I've leveled up again!
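
To see the difference concretely, here is a minimal check (my own sketch, not part of the original script; it fetches the same list page as above):

# -*- coding:utf-8 -*-
# Demonstration of .text vs .content under Python 2.
import requests

r = requests.get("http://guba.eastmoney.com/list,meigu_1.html")
print type(r.text)     # <type 'unicode'>: decoded using the detected encoding
print type(r.content)  # <type 'str'>: the raw bytes exactly as received
# If requests guesses the encoding wrong, set it explicitly before reading .text:
# r.encoding = 'utf-8'

The rule of thumb: hand .content (bytes) to APIs that expect raw bytes, and use .text once you know the encoding is right.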
