Today my senior labmate asked me to help her scrape some Weibo data...
Since she only needs posts from a specific time range,
we took a closer look at Weibo's search page
and found that it offers an advanced search.
Weibo is quite thoughtful after all.
Wait, it only returns 50 pages of results??
I take back what I just said, hmph~
Then I found some code online that someone had already written.....
Time to copy it.
The link is here: https://www.jianshu.com/p/28553c1789f9
Hold on, a few things look different now, so let's tweak it a bit.
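The main thing to adapt is the advanced-search URL: the keyword and the custom time window are both carried in the query string, with the window passed as timescope=custom:start:end in Weibo's YYYY-MM-DD-H format. Below is a minimal sketch of how that URL can be assembled; build_search_url is my own hypothetical helper, and the urllib.parse.quote call is an extra precaution for non-ASCII keywords (the script itself just uses plain string formatting):

from urllib.parse import quote

def build_search_url(keyword, start, end, page=1):
    # start/end use Weibo's "YYYY-MM-DD-H" format, e.g. "2019-09-01-0"
    return ("https://s.weibo.com/weibo?q=%s&typeall=1&suball=1"
            "&timescope=custom:%s:%s&Refer=g&page=%s"
            % (quote(keyword), start, end, page))

print(build_search_url("抑郁", "2019-09-01-0", "2019-11-30-0"))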
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date   : 2020-02-28 13:32:33
# @Author : Muxiaoxiong
# @email  : xiongweinie@foxmail.com
import time
import xlrd
import xlwt
from xlutils.copy import copy
from selenium import webdriver
from selenium.webdriver.common.keys import Keys


def write_excel_xls(path, sheet_name, value):
    index = len(value)  # number of rows to write
    workbook = xlwt.Workbook()  # create a new workbook
    sheet = workbook.add_sheet(sheet_name)  # add a sheet to the workbook
    for i in range(0, index):
        for j in range(0, len(value[i])):
            sheet.write(i, j, value[i][j])  # write the value into the matching row and column
    workbook.save(path)  # save the workbook


def write_excel_xls_append(path, value):
    index = len(value)  # number of rows to append
    workbook = xlrd.open_workbook(path)  # open the existing workbook
    sheets = workbook.sheet_names()  # list all sheets in the workbook
    worksheet = workbook.sheet_by_name(sheets[0])  # take the first sheet
    rows_old = worksheet.nrows  # number of rows already present
    new_workbook = copy(workbook)  # convert the xlrd object into a writable xlwt object
    new_worksheet = new_workbook.get_sheet(0)  # first sheet of the converted workbook
    for i in range(0, index):
        for j in range(0, len(value[i])):
            new_worksheet.write(i + rows_old, j, value[i][j])  # append, starting at row i + rows_old
    new_workbook.save(path)  # save the workbook


def read_excel_xls(path):
    workbook = xlrd.open_workbook(path)  # open the workbook
    sheets = workbook.sheet_names()  # list all sheets in the workbook
    worksheet = workbook.sheet_by_name(sheets[0])  # take the first sheet
    for i in range(0, worksheet.nrows):
        for j in range(0, worksheet.ncols):
            print(worksheet.cell_value(i, j), "\t", end="")  # print cell by cell, row by row
        print()


def spider(startime, endtime, book_name_xls, sheet_name_xls, keywords, maxWeibo):
    driver = webdriver.Chrome()
    driver.set_window_size(1400, 800)
    driver.get("https://s.weibo.com/weibo?q=%s&typeall=1&suball=1&timescope=custom:%s:%s&Refer=g&display=0&retcode=6102" % (keywords, startime, endtime))
    # pause for 60 seconds after opening the first results page
    for i in range(60):
        time.sleep(1)
        print('Waiting, %s seconds remaining' % (60 - i), end='\r')
    value_title = [["rid", "username", "post text", "reposts", "comments", "likes", "post time", "search keyword", "original or repost", "content type"], ]
    write_excel_xls(book_name_xls, sheet_name_xls, value_title)
    rid = 0
    page = 0
    while rid < maxWeibo:
        page += 1
        driver.get("https://s.weibo.com/weibo?q=%s&typeall=1&suball=1&timescope=custom:%s:%s&Refer=g&page=%s" % (keywords, startime, endtime, page))
        elems = driver.find_elements_by_css_selector('div.card')
        for elem in elems:
            try:
                rid = rid + 1
                # username
                weibo_username = elem.find_elements_by_css_selector('a.name')[0].text
                # post text (may be truncated with a "full text" link or shown in full)
                weibo_content = elem.find_elements_by_css_selector('p.txt')[0].text
                shares = elem.find_elements_by_css_selector('div.card-act > ul > li:nth-child(2)')[0].text
                shares = shares.replace("转发", "")  # strip the Chinese label "转发" (repost) from the count
                comments = elem.find_elements_by_css_selector('div.card-act > ul > li:nth-child(3)')[0].text
                comments = comments.replace("评论", "")  # strip the Chinese label "评论" (comment) from the count
                likes = elem.find_elements_by_css_selector('div.card-act > ul > li:nth-child(4)')[0].text
                try:
                    elem.find_element_by_css_selector('div[node-type="feed_list_media_prev"]')
                    content_type = 'text + image'
                except:
                    content_type = 'text'
                try:
                    elem.find_element_by_css_selector('div.con')
                    yczf = 'repost'
                except:
                    yczf = 'original'
                # publish time
                weibo_time = elem.find_elements_by_css_selector('p.from > a:nth-child(1)')[0].text
                print("Scraping post #%s" % rid)
                value1 = [[rid, weibo_username, weibo_content, shares, comments, likes, weibo_time, keywords, yczf, content_type], ]
                write_excel_xls_append(book_name_xls, value1)
            except:
                pass


if __name__ == '__main__':
    startime = "2019-09-01-0"  # start time
    endtime = "2019-11-30-0"  # end time
    book_name_xls = "G:/selenium+爬虫/爬取NCBI/weibo.xls"  # path of the Excel file; it is created automatically if it does not exist
    sheet_name_xls = '微博数据'  # sheet name
    maxWeibo = 20000  # maximum number of posts; if fewer are available, only the posts actually parsed are saved
    keywords = "抑郁"  # the keyword to search ("抑郁" = depression); if it has a super-topic, wrapping it as #keyword# is recommended, otherwise leave the # off
    spider(startime, endtime, book_name_xls, sheet_name_xls, keywords, maxWeibo)
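Since each search is capped at 50 result pages, one way to cover a long period (a hedged sketch, not part of the original script) is to split the overall range into day-sized timescope windows and run one search per window. The daily_windows helper below is hypothetical and only prints the window strings it would generate:

from datetime import date, timedelta

def daily_windows(start, end):
    # Yield (start, end) pairs in Weibo's "YYYY-MM-DD-H" format, one per day.
    day = start
    while day < end:
        nxt = day + timedelta(days=1)
        yield ("%s-0" % day.isoformat(), "%s-0" % nxt.isoformat())
        day = nxt

for win_start, win_end in daily_windows(date(2019, 9, 1), date(2019, 9, 4)):
    print(win_start, win_end)  # e.g. 2019-09-01-0 2019-09-02-0

Note that feeding each window straight into spider() would recreate the workbook every time, because write_excel_xls writes a fresh header row on every call; the header write would need to move outside the per-window loop first.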
Another happy day~