# Split an article into comma-separated "sentences", then find the sentences in
# the article that are similar to a given reference sentence (i.e. share words).
# Date: 2021-07-29 12:23:34
#-*- coding:utf-8 -*-
import sys
# Python 2 hack: reload(sys) re-exposes sys.setdefaultencoding (deleted during
# interpreter startup) so implicit str<->unicode conversions use GBK instead of
# ASCII. Fragile and process-global; avoid in new code (gone in Python 3).
reload(sys)
sys.setdefaultencoding("gbk")
# Original author/source markers:
#code:myhaspl@qq.com
#12-4.py

import numpy as np
import jieba
import copy


def get_cossimi(x, y):
    """Return the cosine similarity of two equal-length numeric vectors.

    Args:
        x, y: sequences of numbers (word-frequency vectors in this script).

    Returns:
        float: dot(x, y) / (|x| * |y|); 0.0 when either vector has zero
        norm (the original code divided by zero in that case, which the
        caller can trigger with a fragment sharing no words with the
        reference sentence).
    """
    vx = np.asarray(x, dtype=float)
    vy = np.asarray(y, dtype=float)
    # Use np.sum consistently (the original mixed builtin sum with np.sum).
    denom = np.sqrt(np.sum(vx * vx)) * np.sqrt(np.sum(vy * vy))
    if denom == 0:
        return 0.0  # guard: all-zero vector has undefined cosine; report no similarity
    return float(np.sum(vx * vy) / denom)




# Reference sentence (a Chinese news headline) to compare every fragment of
# the article against. Runtime string — do not re-encode or alter.
f1_text='瑞典税务局改称*为中国一省:按国际惯例修正'


    
if __name__ == '__main__':



   f1 = file('testk.txt','r')
   lines = f1.read()  
   
   #lines=lines.split(u'。')
   lines=lines.split(u',')

   #lines=re.split(',', lines)
 #for
   for  i  in  lines :
    #print i
    #print "ok"
    if not len(i) ==1 :
     f1_seg_list = jieba.cut(f1_text)

    #第一个待测试数据

     ftest1_seg_list = jieba.cut(i)


    #读取样本文本
    #去除停用词,同时构造样本词的字典
     f_stop = open('stopwords.txt')  
     try:  
        f_stop_text = f_stop.read( )
        f_stop_text=unicode(f_stop_text,'utf-8')
     finally:  
        f_stop.close( )
     f_stop_seg_list=f_stop_text.split('\n')

     test_words={}
     all_words={}
     for  myword in f1_seg_list:
        #print ".",
        if not(myword.strip() in f_stop_seg_list):
            test_words.setdefault(myword,0)
            all_words.setdefault(myword,0)
            all_words[myword]+=1
            
            
    #读取待测试文本
     mytest1_words=copy.deepcopy(test_words)
     for  myword in ftest1_seg_list:
        #print ".",
        if not(myword.strip() in f_stop_seg_list):
            if mytest1_words.has_key(myword):
                mytest1_words[myword]+=1
    
 

            
    #计算样本与待测试文本的余弦相似度
     sampdata=[]
     test1data=[]


     for key in all_words.keys():
        sampdata.append(all_words[key])
        test1data.append(mytest1_words[key])


     test1simi=get_cossimi(sampdata,test1data)

    
     print "%s   %f  %s "%(chr(10)+i+u'。'+chr(10),test1simi,chr(10))

    
    else:
       continue          
    
                
   f1.close()