"""Train a Word2Vec model on hlm.txt (Dream of the Red Chamber).

Reads the novel (gb18030-encoded), segments it with jieba, trains a
gensim Word2Vec model, round-trips it through disk, and prints the
embedding vector for the word '礼义'.
"""
import jieba
import pandas as pd  # NOTE(review): unused here — kept in case another chunk of this file needs it
from gensim.models import Word2Vec
from gensim.test.utils import common_texts, get_tmpfile  # NOTE(review): unused here — kept for safety

# Read the whole novel; the context manager closes the file deterministically.
with open("hlm.txt", "r", encoding="gb18030") as f:
    txt = f.read()

# BUG FIX: Word2Vec expects an iterable of *sentences*, each a list of
# tokens.  The original code passed a flat list of word strings, so gensim
# iterated every word character-by-character and built a character-level
# vocabulary — the later lookup of the two-character word '礼义' would
# raise KeyError.  Segment line-by-line so each non-empty line becomes one
# token-list "sentence"; keep only tokens of length >= 2 as before.
jieba.enable_parallel(2)  # NOTE: parallel cut is not supported on Windows
sentences = [
    [w for w in jieba.cut_for_search(line) if len(w) >= 2]
    for line in txt.splitlines()
    if line.strip()
]
jieba.disable_parallel()

# Train the model.  `size` is the pre-gensim-4.0 keyword (renamed to
# `vector_size` in gensim >= 4.0) — presumably this project pins an older
# gensim; verify against requirements before upgrading.
model = Word2Vec(sentences, size=100, window=5, min_count=1, workers=4)

# Persist and reload to demonstrate model round-tripping.
model.save("word2vec.model")
model = Word2Vec.load("word2vec.model")

vector = model.wv['礼义']  # 100-dimensional embedding for the word
print('#' * 100)
print(vector)