IK Analyzer is an open-source Chinese word-segmentation framework built on top of Lucene.
Download: http://so.csdn.net/so/search/s.do?q=IKAnalyzer2012.jar&t=doc&o=&s=all&l=null
The following jars must be added to the project (both need to be on the compile and runtime classpath):
IKAnalyzer2012.jar
lucene-core-3.6.0.jar
There are two ways to implement segmentation:
Approach 1: using the IK core API (IKSegmenter) directly:
import java.io.IOException;
import java.io.StringReader;

import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

public class Fenci1 {
    public static void main(String[] args) throws IOException {
        String text = "你好,我的世界!";
        StringReader sr = new StringReader(text);
        // The second argument selects the mode: true = smart mode, false = finest-grained.
        IKSegmenter ik = new IKSegmenter(sr, true);
        // Pull lexemes off the segmenter one at a time until it is exhausted.
        Lexeme lex = null;
        while ((lex = ik.next()) != null) {
            System.out.print(lex.getLexemeText() + ",");
        }
    }
}
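The boolean passed to IKSegmenter selects the segmentation granularity: true enables smart mode (used in the examples here), while false selects the finest-grained mode, which typically emits more and shorter tokens. Below is a minimal sketch comparing the two modes; the class name FenciModes and the helper method segment are illustrative, not part of the original examples:

import java.io.IOException;
import java.io.StringReader;

import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

public class FenciModes {
    // Illustrative helper: segments text with the given mode and joins the tokens.
    static String segment(String text, boolean useSmart) throws IOException {
        StringBuilder sb = new StringBuilder();
        IKSegmenter ik = new IKSegmenter(new StringReader(text), useSmart);
        Lexeme lex;
        while ((lex = ik.next()) != null) {
            sb.append(lex.getLexemeText()).append(",");
        }
        return sb.toString();
    }

    public static void main(String[] args) throws IOException {
        String text = "你好,我的世界!";
        System.out.println("smart mode  : " + segment(text, true));
        System.out.println("fine-grained: " + segment(text, false));
    }
}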
Approach 2: using the Lucene Analyzer interface (IKAnalyzer):
import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class Fenci {
    public static void main(String[] args) throws IOException {
        String text = "你好,我的世界!";
        // Create the analyzer (true = smart mode)
        Analyzer anal = new IKAnalyzer(true);
        StringReader reader = new StringReader(text);
        // Tokenize; the field name is irrelevant here, so pass ""
        TokenStream ts = anal.tokenStream("", reader);
        CharTermAttribute term = ts.getAttribute(CharTermAttribute.class);
        // Walk the token stream and print each term
        while (ts.incrementToken()) {
            System.out.print(term.toString() + ",");
        }
        reader.close();
        System.out.println();
    }
}
Output after running:
你好,我,的,世界,
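Because IKAnalyzer implements the standard Lucene Analyzer interface, it can be handed to any Lucene component that expects an Analyzer. The sketch below, assuming lucene-core-3.6.0, indexes one document into an in-memory directory; the class name IndexWithIK and the field name content are illustrative:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class IndexWithIK {
    public static void main(String[] args) throws Exception {
        // IK plugs in wherever Lucene expects an Analyzer.
        Analyzer analyzer = new IKAnalyzer(true);
        RAMDirectory dir = new RAMDirectory();
        IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_36, analyzer);
        IndexWriter writer = new IndexWriter(dir, cfg);
        // The "content" field is analyzed by IK at index time.
        Document doc = new Document();
        doc.add(new Field("content", "你好,我的世界!", Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);
        writer.close();
    }
}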