"""获取股票在东财股吧的评论情况 (collect per-day post/comment counts for stocks from the Eastmoney Guba forum)."""
import time
import datetime
import requests
import re
from bs4 import BeautifulSoup
from collections import OrderedDict
import hashlib
import tushare as ts
import logging
# import pandas as pd
# from pandas import DataFrame
# 最大查几天
rangemax = 7
def sortedDictValues3(adict):
    """Return the values of *adict* as a list, ordered by sorted key.

    Fix: the original Python 2 idiom ``keys = adict.keys(); keys.sort()``
    fails on Python 3, where ``dict.keys()`` is a view with no ``sort``
    method, and ``map`` there returns a lazy iterator rather than a list.
    """
    return [adict[key] for key in sorted(adict)]
def md5(str):
    """Return the uppercase hexadecimal MD5 digest of *str*.

    Accepts ``str`` or ``bytes``; text is encoded as UTF-8 first, because
    ``hashlib`` only hashes bytes (the original Python 2 code passed the
    text straight in, which raises TypeError on Python 3).

    The parameter name shadows the ``str`` builtin; it is kept unchanged
    for backward compatibility with any keyword-argument callers.
    """
    m = hashlib.md5()
    m.update(str if isinstance(str, bytes) else str.encode('utf-8'))
    return m.hexdigest().upper()
def matchguba_news(htmlcontent, allpost, allcomment):
    """Parse one Guba list page and tally posts/comments per date.

    Args:
        htmlcontent: raw HTML (str or bytes) of a guba.eastmoney.com list page.
        allpost: dict mapping 'MMDD' date -> post count; updated in place.
        allcomment: dict mapping 'MMDD' date -> total comment count; updated
            in place (a key is always created alongside ``allpost``'s, so
            callers may index both with the same dates).

    Returns:
        The 'MMDD' string of the last post row parsed on this page,
        or '' if no row carried a date.
    """
    soup = BeautifulSoup(htmlcontent, "lxml")
    divs = soup.find_all(u"div", class_="articleh")
    date = ''
    for divitem in divs:
        a = str(divitem)
        # Skip site-generated rows (financial news / Eastmoney official posts).
        if a.find("财经评论") != -1 or a.find("东方财富网") != -1:
            continue
        match = re.findall(r"<span class=\"l6\">(\d\d-\d\d)</span>", a)
        if not match:
            # Fix: originally a row without a date still incremented the
            # counters under the previous row's date (or '' for the first).
            continue
        date = match[0].replace('-', '')
        allpost[date] = allpost.get(date, 0) + 1
        matchcomment = re.findall(r"<span class=\"l2\">(\d+)</span>", a)
        if matchcomment:
            allcomment[date] = allcomment.get(date, 0) + int(matchcomment[0])
        else:
            # Fix: the original indexed matchcomment[0] unconditionally,
            # raising IndexError on rows with no comment-count span.
            allcomment.setdefault(date, 0)
    return date
# Browser-emulation request headers.
# NOTE(review): these are never passed to the requests.get calls below,
# and 'Host' targets xueqiu.com rather than the Guba host — confirm intent.
agent = ('Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 '
         '(KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36')
headers = {
    'User-Agent': agent,
    'Host': "xueqiu.com",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate, sdch, br",
    "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.6",
    "Connection": "keep-alive",
}
def beginguba(stockname, stockscode, rangemaxgb):
    """Crawl Guba list pages for one stock and log per-day post/comment counts.

    Args:
        stockname: display name of the stock (currently unused beyond the API).
        stockscode: stock code string used to build the Guba list URL.
        rangemaxgb: list of 'MMDD' strings, newest first; the last element
            is the oldest date of interest and stops the crawl.
    """
    logging.info("analyse stockcode:%s" % stockscode)
    logging.info(datetime.datetime.now())
    allpost = OrderedDict()
    allcomment = OrderedDict()
    # NOTE(review): comparing 'MMDD' ints breaks across a year boundary
    # (e.g. looking back from early January into December) — confirm.
    oldest = int(rangemaxgb[-1])
    for page in range(1, 100):  # hard cap of 100 pages per stock
        urla = 'http://guba.eastmoney.com/list,%s,f_%d.html' % (stockscode, page)
        try:
            r = requests.get(url=urla)
            time.sleep(0.5)  # be polite to the server
            if r.status_code == 200:
                curdate = matchguba_news(r.content, allpost, allcomment)
                # Fix: curdate may be '' when a page has no parsable rows;
                # int('') raised ValueError and aborted this page's handling.
                if curdate and int(curdate) <= oldest:
                    break  # crawled past the oldest date we care about
                time.sleep(1)
        except (requests.RequestException, ValueError):
            # Fix: the original caught only ValueError, so any network
            # error from requests.get escaped and killed the whole run.
            logging.info('analyse stock %s failed' % stockscode)
    if allpost:
        for datekey in allpost:
            if int(datekey) > oldest:
                logging.info(
                    "issued_date:%s,number_of_posts:%d,general_comment_number:%d"
                    % (datekey, allpost[datekey], allcomment.get(datekey, 0)))
    else:
        logging.info(u"no data,or analyse failed.")
def getstocknewscount(stockscode):
    """Build the crawl date window for *stockscode* and start the Guba crawl.

    The window covers today plus the previous ``rangemax`` days as 'MMDD'
    strings (newest first); the crawl stops at the oldest, up to 100 pages.
    """
    # Fix: capture time.time() once so repeated calls can't straddle a
    # day boundary and produce an inconsistent window.
    now = time.time()
    rangemaxgb = [time.strftime('%m%d', time.localtime(now))]
    for i in range(1, rangemax + 1):
        rangemaxgb.append(time.strftime('%m%d', time.localtime(now - i * 86400)))
    urlbegin = 'http://guba.eastmoney.com/list,%s.html' % stockscode
    r = requests.get(url=urlbegin)
    soup = BeautifulSoup(r.content, "lxml")
    # NOTE(review): soup.title may be None on an error page, which would
    # raise AttributeError here — confirm whether that needs guarding.
    stockname = soup.title.string.split(u"_")[0]
    beginguba(stockname, stockscode, rangemaxgb)
def main():
    """Configure logging, then count Guba posts for every listed stock."""
    # File log: DEBUG and above go to stocknews.log, truncated each run.
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S',
        filename='stocknews.log',
        filemode='w')
    # Mirror INFO and above to the console as well.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(
        logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s'))
    logging.getLogger('').addHandler(console)
    logging.info('start guba comment count')
    # Crawl every stock in tushare's area-classified universe.
    # (Alternatives seen in earlier revisions: codes from sys.argv,
    # or a hard-coded stocklist.)
    logging.info(ts.__version__)
    df = ts.get_area_classified()
    for stockcode in df['code']:
        getstocknewscount(stockcode)
    logging.info('finished.')
# Script entry point.
if __name__ == '__main__':
    main()
# 赞赏是最真诚的认可 (blog-scrape residue: "appreciation is the sincerest recognition")
# 网友评论 (blog-scrape residue: "reader comments")