How to use IKAnalyzer easily
IKAnalyzer is used to segment a passage of text into words (Chinese word tokenization).
The IKAnalyzer.cfg.xml file must be placed at the root of the classpath. The location can be changed in the source code, but there is no real need to.
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
    <comment>IK Analyzer extension configuration</comment>
    <!-- ext_keyword.dic under the ikdic directory at the classpath root: the extension dictionary -->
    <entry key="ext_dict">/ikdic/ext_keyword.dic</entry>
    <!-- ext_stopword.dic under the ikdic directory at the classpath root: the stop-word dictionary -->
    <entry key="ext_stopwords">/ikdic/ext_stopword.dic</entry>
</properties>
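
Both dictionary files referenced above are plain text, one entry per line, saved as UTF-8 (without a BOM, to be safe). ext_keyword.dic holds domain terms the built-in dictionary does not know, and ext_stopword.dic holds tokens to discard. The entries below are purely illustrative:

ext_keyword.dic:
文嘉
分词器

ext_stopword.dic:
的
了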
Calling it is then straightforward:
package org.bdp.util;

import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

public class CZIKAnaUtil {

    public static void main(String[] args) {
        String str = "文嘉(1501-1583)";
        IKAnalysis(str);
    }

    public static List<String> IKAnalysis(String str) {
        List<String> keywordList = new ArrayList<String>();
        try {
            // A StringReader avoids the byte[] round trip (and its platform-default
            // charset pitfall) of new InputStreamReader(new ByteArrayInputStream(...)).
            Reader reader = new StringReader(str);
            // true enables smart segmentation mode; the default (false) is fine-grained segmentation
            IKSegmenter iks = new IKSegmenter(reader, true);
            Lexeme t;
            while ((t = iks.next()) != null) {
                keywordList.add(t.getLexemeText());
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        /* Optional post-processing: deduplicate, then sort by descending length.
           (Uncommenting requires java.util.Collections and java.util.Comparator imports.)
        for (int i = 0; i < keywordList.size() - 1; i++) {
            for (int j = keywordList.size() - 1; j > i; j--) {
                if (keywordList.get(j).equals(keywordList.get(i))) {
                    keywordList.remove(j);
                }
            }
        }
        Collections.sort(keywordList, new Comparator<String>() {
            @Override
            public int compare(String o1, String o2) {
                return o2.length() - o1.length();
            }
        });
        */
        System.out.println(keywordList);
        return keywordList;
    }
}
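
The commented-out post-processing above removes duplicates in O(n²) before sorting. If deduplication is all you need, a LinkedHashSet does it in one pass while preserving the original token order. A minimal sketch, not part of the original answer (the helper name is hypothetical):

// Requires: import java.util.LinkedHashSet;
private static List<String> dedupe(List<String> keywords) {
    // LinkedHashSet drops duplicates while keeping first-seen order.
    return new ArrayList<String>(new LinkedHashSet<String>(keywords));
}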
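
If you use IK through Lucene rather than calling IKSegmenter directly, the distribution also ships a Lucene Analyzer wrapper. A minimal sketch, assuming the org.wltea.analyzer.lucene.IKAnalyzer class and Lucene 4.x-era TokenStream APIs (adjust to your Lucene version):

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class IKLuceneDemo {
    public static void main(String[] args) throws IOException {
        Analyzer analyzer = new IKAnalyzer(true); // true = smart mode, as above
        TokenStream ts = analyzer.tokenStream("content", new StringReader("文嘉(1501-1583)"));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset(); // mandatory before incrementToken() in Lucene 4+
        while (ts.incrementToken()) {
            System.out.println(term.toString());
        }
        ts.end();
        ts.close();
    }
}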