# coding: utf-8
import jieba
from matplotlib import collections, scale
import mysql.connector
from wordcloud import WordCloud
from imageio import imread # 处理图像的函数
import matplotlib.pyplot as plt
# Database configuration.
# NOTE(review): host "11111" and passwd "2222" look like scrubbed placeholders —
# replace with the real host/credentials before running.
mydb = mysql.connector.connect(
host="11111",
user="root",
passwd="2222",
database="DB"
)
# Shared cursor used by fun_get_user() below.
mycursor = mydb.cursor()
def fun_get_user():
    """Fetch every sName value from TbUser.

    Returns the list of row tuples produced by the module-level cursor;
    each row is a 1-tuple containing the user's name.
    """
    query = "SELECT sName FROM TbUser"
    mycursor.execute(query)
    return mycursor.fetchall()
# Read the words from the database (replaces an earlier file-based reader
# that loaded D:/Study/word.txt).
textList = []
results = fun_get_user()
for row in results:
    full_name = row[0]  # sName column
    # Drop the first character (the surname) and keep the remaining
    # given-name characters, one list entry per character.
    textList.extend(str(ch) for ch in full_name[1:])

# Segment the text with jieba and join with spaces: WordCloud expects
# whitespace/punctuation-separated tokens.  The previous ''.join undid the
# segmentation entirely (reassembling the exact input string); ' ' keeps it.
# For this comma-delimited single-character input the token stream WordCloud
# sees is unchanged, so downstream behavior is preserved.
text = ','.join(textList)
cut_text = ' '.join(jieba.cut(text))
# Optional image mask (disabled):
#color_mask = imread('D:/Study/app2.png')

# Configure and render the word cloud.
wordLen = len(textList)  # cap the cloud at one word per extracted character
cloud = WordCloud(
    font_path='D:/Study/HanYiYanKaiW-2.ttf',  # TTF that contains CJK glyphs
    background_color="white",
    min_font_size=14,
    max_font_size=120,
    width=1680,
    height=1050,
    scale=10,
    collocations=False,  # do not merge adjacent words into bigrams
    #mask=color_mask,
    max_words=wordLen,
)
word_cloud = cloud.generate(cut_text)
# Show the rendered cloud on screen, then persist it to disk.
plt.figure()
plt.imshow(word_cloud)
plt.axis('off')
plt.show()
word_cloud.to_file('D:/Study/14.jpg')
# Stray text from the original web page ("网友评论" = "netizen comments");
# kept as a comment so it no longer raises NameError at runtime.