The problem this article tackles: how to give articles pushed on the campus network titles that feel "relatable" and are "popular".
Word frequency reflects how relatable the vocabulary feels, while cluster analysis picks out which titles are actually popular.
The approach: use Python to scrape the titles, read counts, and reply counts of the articles in a particular section of the campus network, segment all of the titles with jieba, and then run a cluster analysis on the read and reply counts. As a reference corpus, the titles of all articles by the popular WeChat account Mimeng (咪蒙) are also scraped from Chuansongmen (传送门).
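To make the segmentation step concrete before it appears in later parts, here is a minimal sketch of what jieba does to a title and how word frequencies fall out of it; the sample titles below are invented for illustration.
# Minimal sketch of the jieba segmentation step (pip install jieba).
# The sample titles are invented for illustration only.
import jieba
from collections import Counter

sample_titles = ['期末复习资料大放送', '食堂新窗口试吃报告']
words = []
for t in sample_titles:
    words.extend(jieba.lcut(t))  # cut each title into a list of words
print(Counter(words).most_common(5))  # the most frequent words hint at "relatable" vocabulary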
The series has three parts:
1. Use Python to scrape the campus network data and all of Mimeng's article titles from Chuansongmen, and save them locally
2. Draw word clouds for both
3. Run a cluster analysis on the campus network titles
This post covers the first part.
Part 1: Scraping the campus network data into a local file with Python
# Scrape every article title, read count, and reply count from the
# "文章天地" (Article World) board of Ruisi (睿思)
from bs4 import BeautifulSoup
import requests
import csv
import time
import codecs
# Request headers: a browser-like User-Agent disguises the script as a browser
headers = {
    'Host': 'rs.xidian.edu.cn',  # must match the site being scraped
    'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'zh-CN,zh;q=0.8',
}
data = []
# Every page of the board, from the first to the last (14 pages)
urls = ['http://rs.xidian.edu.cn/forum.php?mod=forumdisplay&fid=549&page={}'.format(i) for i in range(1, 15)]
# Given a page URL, collect all article titles, read counts, and reply counts on it
def get_attractions(url):
    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    # Pause between requests so the server is not hammered
    time.sleep(2)
    # Pick out the titles, read counts, and reply counts with CSS selectors
    titles = soup.select('tbody > tr > th > a.s.xst')
    nums_readers = soup.select('tbody > tr > td.num > em')
    nums_backs = soup.select('tbody > tr > td.num > a')
    for title, num_read, num_back in zip(titles, nums_readers, nums_backs):
        info = {
            'title': title.get_text(),
            'num_read': num_read.get_text(),
            'num_back': num_back.get_text()
        }
        data.append(info)  # one dict per article, collected into data
# Fetch every page in turn
for single_url in urls:
    get_attractions(single_url)
# Write the results to a local CSV file (utf_8_sig adds a BOM so Excel opens it correctly)
with codecs.open('/Users/zhangyi/Desktop/2.csv', 'w+', 'utf_8_sig') as csv_file:
    fieldnames = list(data[0])  # column names taken from the first record
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    writer.writeheader()
    for dictionary in data:
        writer.writerow(dictionary)
Article titles, read counts, and reply counts
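Before moving on, it is worth a quick sanity check that the CSV was written correctly. A minimal read-back sketch, using the same path as above:
# Quick sanity check on the saved CSV (same path as above)
import csv
with open('/Users/zhangyi/Desktop/2.csv', encoding='utf_8_sig') as f:
    rows = list(csv.DictReader(f))
print(len(rows))   # number of scraped articles
print(rows[0])     # e.g. {'title': ..., 'num_read': ..., 'num_back': ...}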
Scraping all of Mimeng's article titles from Chuansongmen into a local file with Python
# Scrape Mimeng's article titles
from bs4 import BeautifulSoup
import requests
import csv
import time
import codecs
# Request headers: a browser-like User-Agent disguises the script as a browser
headers = {
    'Host': 'chuansong.me',  # must match the site being scraped
    'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'zh-CN,zh;q=0.8',
}
data = []
# Every page of Mimeng's archive on Chuansongmen (12 articles per page)
urls_mm = ['http://chuansong.me/account/mimeng7?start={}'.format(i) for i in range(0, 624, 12)]
# Given a page URL, collect every article title, post time, and article URL on it
def get_attractions(url):
    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    # Pause between requests so the server is not hammered
    time.sleep(2)
    titles = soup.select('a.question_link')
    time_post = soup.select('span.timestamp')
    # Build each article's full URL from its relative href; doing this per page
    # keeps titles and URLs aligned in the zip below
    urls = ['http://chuansong.me{}'.format(link.get('href')) for link in titles]
    for title, time_start, url_s in zip(titles, time_post, urls):
        info = {
            'title': title.get_text(),
            'time_start': time_start.get_text(),
            'url_s': url_s
        }
        data.append(info)
# Fetch every page in turn
for single_url in urls_mm:
    get_attractions(single_url)
# Write the results to a local CSV file (utf_8_sig adds a BOM so Excel opens it correctly)
with codecs.open('/Users/zhangyi/Desktop/csvs/mm.csv', 'w+', 'utf_8_sig') as csv_file:
    heads = list(data[0])  # column names taken from the first record
    writer = csv.DictWriter(csv_file, fieldnames=heads)
    writer.writeheader()
    for dictionary in data:
        writer.writerow(dictionary)
Mimeng article titles
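As a small preview of how the two corpora can be compared (the real comparison comes in the later parts), here is a hedged sketch contrasting one simple surface feature, average title length; it assumes both CSVs were saved at the paths used above.
# Compare average title length across the two scraped corpora.
# Assumes the CSV paths used earlier in this post.
import csv

def load_titles(path):
    with open(path, encoding='utf_8_sig') as f:
        return [row['title'] for row in csv.DictReader(f)]

campus = load_titles('/Users/zhangyi/Desktop/2.csv')
mimeng = load_titles('/Users/zhangyi/Desktop/csvs/mm.csv')
print(sum(len(t) for t in campus) / len(campus))  # average campus title length
print(sum(len(t) for t in mimeng) / len(mimeng))  # average Mimeng title length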