Lucene handles index creation and retrieval, and IKAnalyzer handles Chinese word segmentation. That alone already gives you Chinese full-text search, but for many projects it is not enough: search should also handle synonyms. For example, if the index contains entries like "计算机" and "电脑", a search for "笔记本" should match those entries too. That is where synonym-aware indexing and retrieval comes in.
Two possible approaches:
1. Handle synonyms at index time: when the text is segmented for indexing, add each term's synonyms to the index as well; at search time, simply segment the input and search.
2. Leave synonyms out of the index entirely: at search time, first segment the input, then match each resulting token (heh, my own made-up term for them: "lexeme"), i.e. each keyword, against the synonym list, join the matched synonyms into one new keyword string, and search the index with that string.
Personally I think approach 2 beats approach 1, for two reasons: handling synonyms at index time inflates the index and slows indexing down; and if the synonym list is later extended (say a word that used to have 2 synonyms now has 3), the whole index has to be rebuilt, which is a real hassle!
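To make approach 2 concrete: the query gets expanded before it ever touches the index. With the synonym file shown later in this post, the intended transformation looks like this (the exact tokens depend on the segmenter's dictionary):

    user input:               电脑
    after segmentation:       电脑
    after synonym expansion:  超极本 计算机 电脑   (space-separated, searched as an OR query)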
The code is roughly as follows.
Lucene version: 4.10.3; IKAnalyzer: IKAnalyzer2012_hf.jar
Every Lucene release seems to change quite a lot; methods and the like keep getting reworked. Anyone else have that feeling?
Creating the index:
/**
 * MyIndexer.java
 * V1.0
 * 2015-1-28 8:53:37 PM
 * Copyright (c) 宜昌***有限公司 - all rights reserved
 */
package com.x.same;

import java.io.File;
import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

/**
 * Builds the sample index; synonyms are deliberately NOT handled here (approach 2).
 * @author yax 2015-1-28 8:53:37 PM
 * @version v1.0
 */
public class MyIndexer {

    public static void createIndex(String indexPath) throws IOException {
        Directory directory = FSDirectory.open(new File(indexPath));
        // IKAnalyzer segments the Chinese titles at index time.
        Analyzer analyzer = new IKAnalyzer();
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_4_10_3, analyzer);
        IndexWriter indexWriter = new IndexWriter(directory, config);

        Document document1 = new Document();
        document1.add(new TextField("title", "thinkpad超极本笔记本中的战斗机", Store.YES));
        indexWriter.addDocument(document1);

        Document document2 = new Document();
        document2.add(new TextField("title", "用户可以在这里配置自己的扩展字典", Store.YES));
        indexWriter.addDocument(document2);

        Document document3 = new Document();
        document3.add(new TextField("title", "您可以参考分词器源码", Store.YES));
        indexWriter.addDocument(document3);

        Document document4 = new Document();
        document4.add(new TextField("title", "第一台计算机是美国军方定制,专门为了计算弹道和射击特性表面而研制的,承担开发任务的“莫尔小组”由四位科学家和工程师埃克特、莫克利、戈尔斯坦、博克斯组成。1946年这台计算机主要元器件采用的是电子管。该机使用了1500" +
                "个继电器,18800个电子管,占地170m2,重量重达30多吨,耗电150KW,造价48万美元。这台计算机每秒能完成5000次加法运算,400次乘法运算,比当时最快的计算工具快300倍,是继电器计算机的1000倍、手工计算的20万倍。" +
                "用今天的标准看,它是那样的“笨拙”和“低级”,其功能远不如一只掌上可编程计算器,但它使科学家们从复杂的计算中解脱出来,它的诞生标志着人类进入了一个崭新的信息革命时代。", Store.YES));
        indexWriter.addDocument(document4);

        indexWriter.close();
    }
}
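If you want to build the index on its own rather than through the commented-out call in main() below, a minimal driver is enough. A sketch (IndexerDemo is a hypothetical class name; the path is the same one used below):

import java.io.IOException;

public class IndexerDemo {
    public static void main(String[] args) throws IOException {
        // Build the index once. Re-running this appends the same documents again,
        // since IndexWriterConfig defaults to OpenMode.CREATE_OR_APPEND.
        MyIndexer.createIndex("D:\\search\\test");
    }
}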
/**
 * AnalyzerUtil.java
 * V1.0
 * 2015-1-28 8:42:24 PM
 * Copyright (c) 宜昌**有限公司 - all rights reserved
 */
package com.x.same;

import java.io.IOException;
import java.io.StringReader;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.synonym.SynonymFilterFactory;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.FilesystemResourceLoader;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

/**
 * Query-side pipeline: segment, expand synonyms, flatten back to a keyword string.
 * @author yax 2015-1-28 8:42:24 PM
 * @version v1.0
 */
public class AnalyzerUtil {

    /**
     * Segments a Chinese string with IKSegmenter and returns the tokens
     * joined by single spaces.
     */
    public static String analyzeChinese(String input, boolean useSmart) throws IOException {
        StringBuffer sb = new StringBuffer();
        StringReader reader = new StringReader(input.trim());
        IKSegmenter ikSeg = new IKSegmenter(reader, useSmart); // true = smart segmentation, false = fine-grained
        for (Lexeme lexeme = ikSeg.next(); lexeme != null; lexeme = ikSeg.next()) {
            sb.append(lexeme.getLexemeText()).append(" ");
        }
        return sb.toString();
    }
    /**
     * Runs the tokens produced by the method above through a SynonymFilter
     * and returns the resulting TokenStream.
     */
    public static TokenStream convertSynonym(String input) throws IOException {
        Version ver = Version.LUCENE_4_10_3;
        Map<String, String> filterArgs = new HashMap<String, String>();
        filterArgs.put("luceneMatchVersion", ver.toString());
        filterArgs.put("synonyms", "config/synonyms.txt"); // resolved relative to the working directory
        filterArgs.put("expand", "true");
        SynonymFilterFactory factory = new SynonymFilterFactory(filterArgs);
        factory.inform(new FilesystemResourceLoader());
        // The input is already segmented, so plain whitespace tokenization is enough here.
        Analyzer whitespaceAnalyzer = new WhitespaceAnalyzer();
        TokenStream ts = factory.create(whitespaceAnalyzer.tokenStream("someField", input));
        return ts;
    }
    /**
     * Flattens the TokenStream into a space-separated string in a form
     * the IndexSearcher can work with, printing each token as it goes.
     */
    public static String displayTokens(TokenStream ts) throws IOException {
        StringBuffer sb = new StringBuffer();
        CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            String token = termAttr.toString();
            sb.append(token).append(" ");
            System.out.print(token + "|");
        }
        System.out.println();
        ts.end();
        ts.close();
        return sb.toString();
    }
    public static void main(String[] args) {
        String indexPath = "D:\\search\\test";
        String input = "超级";
        System.out.println("**********************");
        try {
            // segment -> expand synonyms -> flatten back into one keyword string
            String result = displayTokens(convertSynonym(analyzeChinese(input, true)));
            // MyIndexer.createIndex(indexPath); // run once first to build the index
            List<String> docs = MySearcher.searchIndex(result, indexPath);
            for (String string : docs) {
                System.out.println(string);
            }
        } catch (IOException e) {
            e.printStackTrace();
        } catch (ParseException e) {
            e.printStackTrace();
        }
    }
}
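A sanity check you can run: the input "超级" above is not in the synonyms file, so it passes through unexpanded; try a word that is, e.g. "电脑". Assuming the synonyms file shown below and expand=true, the console output should look roughly like this (exact tokens and ranking depend on your IK dictionary; document 1 only matches if IK kept 超极本 as a single token at index time):

**********************
超极本|计算机|电脑|
第一台计算机是美国军方定制,...
thinkpad超极本笔记本中的战斗机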
The search-side class:
/**
 * MySearcher.java
 * V1.0
 * 2015-1-28 9:02:32 PM
 * Copyright (c) 宜昌**有限公司 - all rights reserved
 */
package com.x.same;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;

/**
 * Searches the index with the already-expanded keyword string.
 * @author yax 2015-1-28 9:02:32 PM
 * @version v1.0
 */
public class MySearcher {

    public static List<String> searchIndex(String keyword, String indexPath) throws IOException, ParseException {
        List<String> result = new ArrayList<>();
        IndexReader indexReader = DirectoryReader.open(FSDirectory.open(new File(indexPath)));
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        // The keyword is already segmented and synonym-expanded, so a plain
        // WhitespaceAnalyzer is enough; QueryParser ORs the terms by default.
        Analyzer analyzer = new WhitespaceAnalyzer();
        QueryParser queryParser = new QueryParser("title", analyzer);
        Query query = queryParser.parse(keyword);
        TopDocs td = indexSearcher.search(query, 10);
        // Iterate over scoreDocs, not totalHits: totalHits can exceed the
        // 10 results actually returned, which would throw out of bounds.
        for (ScoreDoc scoreDoc : td.scoreDocs) {
            Document document = indexSearcher.doc(scoreDoc.doc);
            result.add(document.get("title"));
        }
        return result;
    }
}
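Since the expanded keyword is just a space-separated list of terms, you could also skip QueryParser (and its special-character pitfalls) and assemble the OR query by hand. A sketch against the same "title" field, using the plain Lucene 4.x query API:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

// One SHOULD clause per token: a document matches if it contains any of them.
BooleanQuery bq = new BooleanQuery();
for (String term : keyword.trim().split("\\s+")) {
    bq.add(new TermQuery(new Term("title", term)), Occur.SHOULD);
}
TopDocs td = indexSearcher.search(bq, 10);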
Synonym file format:
我,俺,hankcs
似,is,are => 是
好人,好心人,热心人
超极本,计算机,电脑
Lines 1, 3 and 4 of the file are plain synonym groups; line 2 means that 似, is and are all get rewritten to 是, i.e. the same mechanism doubles as a correction feature.
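Note what the expand parameter (set to "true" in convertSynonym above) does to the comma lines: with expand=true every word in a group is rewritten to the whole group, while with expand=false every word collapses to the group's first entry. For the group 超极本,计算机,电脑:

    input token: 计算机
    expand=true  → 超极本 计算机 电脑
    expand=false → 超极本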
I later found a cleaner way to do this: hand the whole segment / synonym-expand / merge pipeline to a custom Analyzer. The code is as follows:
import java.io.IOException;
import java.io.Reader;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.synonym.SynonymFilterFactory;
import org.apache.lucene.analysis.util.FilesystemResourceLoader;
import org.wltea.analyzer.lucene.IKTokenizer;

public class IKSynonymsAnalyzer extends Analyzer {

    @Override
    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        // false = fine-grained segmentation (pass true for smart mode)
        Tokenizer tokenizer = new IKTokenizer(reader, false);
        Map<String, String> paramsMap = new HashMap<String, String>();
        paramsMap.put("luceneMatchVersion", "LUCENE_43");
        paramsMap.put("synonyms", "C:\\同义词\\synonyms.txt");
        SynonymFilterFactory factory = new SynonymFilterFactory(paramsMap);
        FilesystemResourceLoader loader = new FilesystemResourceLoader();
        try {
            factory.inform(loader);
        } catch (IOException e) {
            e.printStackTrace();
        }
        return new TokenStreamComponents(tokenizer, factory.create(tokenizer));
    }
}
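With this analyzer, the manual segment/expand/flatten steps disappear: you can hand raw user input straight to QueryParser and let the analyzer do the work at parse time. A sketch of the intended usage (indexSearcher set up as in MySearcher above):

Analyzer analyzer = new IKSynonymsAnalyzer();
QueryParser parser = new QueryParser("title", analyzer);
Query query = parser.parse(QueryParser.escape("电脑")); // escape raw user input before parsing
TopDocs td = indexSearcher.search(query, 10);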
Thanks to Spring_LGF for the inspiration; reference: http://blog.csdn.net/winnerspring/article/details/37567739
The code is for reference only; discussion, comments, and corrections are all welcome.
Please credit the source when reposting.