# ===== word2vec =====
from gensim.models import word2vec
# Train a Word2Vec model on the pre-tokenized corpus `sentences`
# (assumed to be an iterable of token lists — defined earlier, not shown here).
# NOTE(review): `size=` is the gensim < 4.0 keyword; gensim >= 4.0 renamed it
# to `vector_size=` — confirm the installed version before running.
model = word2vec.Word2Vec(sentences, size=300, window=2, min_count=1)
# Persist only the word vectors in the plain-text (original C word2vec) format.
model.wv.save_word2vec_format('model/comment.zh.vec', binary=False)
# ===== 训练结果测试 =====
# Cosine similarity between two words.
model.wv.similarity('好看','不错')
# Top-n words most similar to the given word.
model.wv.most_similar('好看',topn=100)
# Word vectors are stored in the KeyedVectors instance at model.wv and can be
# looked up directly by word.
model.wv['裤子']
# Analogy query: positive words pull the result closer, negative words push it away.
model.wv.most_similar(positive=['女式', '羽绒服'], negative=['男士'],topn=7)
[('羽绒衣', 0.7207906246185303),...]
# Same analogy using the multiplicative (cosmul) combination method.
model.wv.most_similar_cosmul(positive=['女式', '羽绒服'], negative=['男士'],topn=7)
[('羽绒衣', 0.9567407369613647),...]
# Pick the word that does not belong with the others.
model.wv.doesnt_match("外套 裤子 男士".split())
'男士'
# Score the log probability of sentences under the model
# (NOTE(review): requires a model trained with hierarchical softmax, hs=1 — confirm).
model.score(["The fox jumped over a lazy dog".split()])
# Given context words, report the probability distribution of the center word;
# returns the most likely words and their probabilities.
model.predict_output_word(['棉服','男士','羽绒服'], topn=10)
# ===== 模型保存和加载 =====
# Method 1: gensim's native format — keeps the full training state, so the
# model can be re-loaded and training can be resumed.
model.save('text8.model')
model1 = word2vec.Word2Vec.load('text8.model')
# Method 2 (untested): original C word2vec text format — vectors only,
# training state is lost.
# C text format
from gensim.keyedvectors import KeyedVectors
model.wv.save_word2vec_format('model/comment.zh.txt', binary=False)
model1 = KeyedVectors.load_word2vec_format('model/comment.zh.txt', binary=False)
# Method 3 (untested): original C word2vec binary format.
# C binary format
model.wv.save_word2vec_format('model/text.model.bin', binary=True)
model1 = KeyedVectors.load_word2vec_format('model/text.model.bin', binary=True)
# 如果模型训练完成（不再更新），可以把 model.wv 转成独立的
# gensim.models.KeyedVectors 实例来避免不必要的内存消耗
# ===== doc2vec =====
# Train a Doc2Vec model: each comment (s[1], a token list) is tagged with its
# score (s[2]) so document vectors can later be compared / looked up by tag.
# Fixes vs. the original notes:
#  - import TaggedDocument directly (the original referenced
#    `gensim.models.doc2vec.TaggedDocument` without `import gensim` -> NameError,
#    and imported the deprecated, unused `LabeledSentence` alias);
#  - restored the lost loop-body indentation;
#  - `tags` must be a list — a bare string tag is iterated per character.
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

train = []          # TaggedDocument training examples
comment_list = []   # raw comment token lists (s[1])
score_list = []     # corresponding scores (s[2])
for s in raw_sentences:
    comment_list.append(s[1])
    score_list.append(s[2])
    train.append(TaggedDocument(s[1], tags=[s[2]]))

size = 400
# NOTE(review): `size=` is the gensim < 4.0 keyword (renamed `vector_size=`
# in gensim >= 4.0); kept to match the word2vec section above — confirm the
# installed version.
model_dm = Doc2Vec(train, min_count=1, window=3, size=size,
                   sample=1e-3, negative=5, workers=4)
model_dm.save('model/model_dm_doc2vec')
# Infer a vector for an unseen tokenized document (`test_text`, defined
# elsewhere) and report the 10 most similar training documents by tag.
inferred_vector_dm = model_dm.infer_vector(test_text)
sims = model_dm.docvecs.most_similar([inferred_vector_dm], topn=10)
# ===== 网友评论 =====