Import the required libraries
#coding=utf8
import sys
import jieba.posseg as pseg
import jieba
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
import multiprocessing
Segmentation class
Segments the Chinese corpus with jieba as a preprocessing step.
class Seg:
    """Segment a raw Chinese corpus into whitespace-separated tokens."""
    def __init__(self, corpus_file, seg_file):
        self.corpus_file = corpus_file
        self.seg_file = seg_file

    def seg(self):
        with open(self.corpus_file, "r", encoding="utf-8") as fin:
            with open(self.seg_file, "w", encoding="utf-8") as fout:
                for line in fin:
                    line = line.strip()
                    if line == "":
                        continue
                    # jieba.cut returns a generator of tokens
                    segs = jieba.cut(line)
                    fout.write(" ".join(segs) + "\n")
Train the word vectors
class GenSim:
    """Train a Word2Vec model from a pre-segmented text file."""
    def __init__(self, input_text, model, word_vector):
        self.input_text = input_text
        self.model = model
        self.word_vector = word_vector

    def train(self):
        # vector_size: dimensionality of the word vectors (named "size" in gensim < 4.0)
        # window: context window size
        # min_count: ignore words whose total frequency is below this threshold
        model = Word2Vec(LineSentence(self.input_text),
                         vector_size=512,
                         window=5,
                         min_count=5,
                         workers=multiprocessing.cpu_count())
        # save the full model
        model.save(self.model)
        # save the word vectors in plain-text word2vec format
        model.wv.save_word2vec_format(self.word_vector, binary=False)
Main function
if __name__ == "__main__":
    # argv: corpus_file seg_file model_file word_vector_file
    s = Seg(sys.argv[1], sys.argv[2])
    s.seg()
    g = GenSim(sys.argv[2], sys.argv[3], sys.argv[4])
    g.train()
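Assuming the training code above is saved as train_word2vec.py (a hypothetical file name), it can be run from the command line with the four paths as positional arguments, for example:

python train_word2vec.py corpus.txt corpus_seg.txt word2vec.model word_vector.txt

Here corpus.txt is the raw Chinese corpus, corpus_seg.txt the segmented output, word2vec.model the saved model, and word_vector.txt the plain-text word vectors; all of these names are only illustrative.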
Test
#coding=utf8
import sys
from gensim.models import Word2Vec

model = Word2Vec.load(sys.argv[1])
words = ["初一", "初二"]
for word in words:
    # query the most similar words by cosine similarity
    res = model.wv.most_similar(word)
    print(word)
    print(res)
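The test script loads the full model with Word2Vec.load, but since the word vectors were also exported in plain-text word2vec format, they can be reloaded on their own with gensim's KeyedVectors. A minimal sketch, assuming the exported file is word_vector.txt as in the example above:

from gensim.models import KeyedVectors

# load only the exported vectors (no training state needed)
wv = KeyedVectors.load_word2vec_format("word_vector.txt", binary=False)

# query nearest neighbours directly on the KeyedVectors object
print(wv.most_similar("初一"))

Loading only the vectors is usually lighter than loading the full model when all you need is similarity queries.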