# -*- coding: utf-8 -*-
import gensim
import os
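# MySentences streams the corpus from disk: each call to __iter__ walks the
# directory again, yielding one tokenized sentence (one line) at a time, so the
# whole corpus never has to fit in memory. gensim iterates over the corpus more
# than once (a vocabulary scan plus each training epoch), which is why this is a
# restartable iterable rather than a one-shot generator.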
class MySentences(object):
    def __init__(self, dirname):
        self.dirname = dirname

    def __iter__(self):
        for fname in os.listdir(self.dirname):
            for line in open(os.path.join(self.dirname, fname), encoding='utf-8'):
                yield line.split()
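# Word2Vec accepts any restartable iterable of token lists, so for a toy corpus a
# plain in-memory list works just as well (a hypothetical example, not the data
# used below):
#sentences = [['天真', '可爱'], ['丑陋', '卑鄙']]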
sentences = MySentences('D:/bigdata/word2vec/sens')  # 'sens' is a directory; put the pre-tokenized (word-segmented) files in it
# each file is UTF-8 encoded, with the first line left blank
model = gensim.models.Word2Vec(sentences, min_count=0, vector_size=50, workers=2)  # requires gensim >= 4; in older versions vector_size was called size
# min_count truncates the vocabulary: words seen fewer than min_count times are dropped (the default is 5)
# workers only takes effect when the compiled (Cython) extensions are available; without them training runs on a single core
#######################
#each of the following steps is optional and can simply stay commented out
#model.save('c:/sens/mymodel')  # save the model; 'sens' is again a directory and 'mymodel' the file created in it
#new_model = gensim.models.Word2Vec.load('c:/sens/mymodel')  # load the model back
#model.wv.save_word2vec_format('/sens/vectors.txt', binary=False)  # export the vectors in the original C word2vec format
#wv = gensim.models.KeyedVectors.load_word2vec_format('/sens/vectors.bin.gz', binary=True)  # load C-format vectors (query only, cannot be trained further)
#model = gensim.models.Word2Vec.load('/sens/mymodel')  # load a model and continue training it on more sentences
#more_sentences = MySentences('C:/sens/')
#model.build_vocab(more_sentences, update=True)  # register any new words before the extra training pass
#model.train(more_sentences, total_examples=model.corpus_count, epochs=model.epochs)
#######################
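# A quick sanity check before querying (gensim >= 4 API): with min_count=0 every
# token ends up in the vocabulary, so this prints the number of distinct tokens
# seen in the corpus.
print(len(model.wv))  # vocabulary size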
for i in model.wv.most_similar('可爱', topn=10):  # the 10 words nearest to '可爱' ("cute")
    for k in i:  # each result is a (word, cosine similarity) pair
        print(k)
    print('\n')
print(model.wv.similarity('可爱', '天真'))  # similarity between two words
print('\n')
print(model.wv.most_similar(positive=['女王', '公主'], negative=['国王'], topn=1)[0][0])  # word vector arithmetic: 女王 + 公主 - 国王
print('\n')
print(model.wv.n_similarity(['天真', '可爱'], ['丑陋', '卑鄙']))  # similarity between two sets of words
print('\n')
print(model.wv.doesnt_match('天真 善良 丑陋'.split()))  # pick out the word that does not belong
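# The trained vectors themselves are also directly accessible (gensim >= 4 API);
# a small sketch of raw-vector and vocabulary access:
print(model.wv['可爱'])            # the 50-dimensional numpy vector for the word
print(model.wv.index_to_key[:10])  # the 10 most frequent words in the vocabulary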