

Java Configuration Class Code Examples

This article collects typical usage examples of the Java class org.wltea.analyzer.cfg.Configuration. If you have been wondering what the Configuration class is for, how to use it, or what real-world usage looks like, the curated examples below should help.


The Configuration class belongs to the org.wltea.analyzer.cfg package. Fourteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps surface better Java code examples.

Example 1: getIKAnalyzerResult

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
public static List<String> getIKAnalyzerResult(String originTxt, boolean useSmart, Collection<String> words) throws Exception{
	if(originTxt == null || originTxt.trim().isEmpty()){
		return null;
	}
	
	//The code below adds new words dynamically; it could be extended into a dynamic-loading interface
	if(null != words && !words.isEmpty()){
		Configuration cfg = DefaultConfig.getInstance();
		//initial() builds the dictionary singleton on first use and returns it afterwards
		Dictionary dic = Dictionary.initial(cfg);
		dic.addWords(words);
	}
	
	//use an explicit charset so the result does not depend on the platform default
	InputStream in = new ByteArrayInputStream(originTxt.getBytes(StandardCharsets.UTF_8));
	IKSegmenter ik = new IKSegmenter(new InputStreamReader(in, StandardCharsets.UTF_8), useSmart);
	
	List<String> result = new ArrayList<String>();
	Lexeme t = null;
	while( (t=ik.next()) != null){
		result.add(t.getLexemeText());
	}
	return result;
}
 
Developer: clw87, Project: fileadmin, Lines: 24, Source: AlgorithmUtils.java
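
For context, a minimal usage sketch of the helper above might look like this; the sample sentence and the extra word are illustrative values, and the AlgorithmUtils class name is taken from the attribution line:

import java.util.Arrays;
import java.util.List;

public class GetIKAnalyzerResultDemo {
	public static void main(String[] args) throws Exception {
		//register one illustrative user word, then tokenize with smart mode enabled
		List<String> tokens = AlgorithmUtils.getIKAnalyzerResult(
				"IK Analyzer是一个开源的中文分词工具包", true, Arrays.asList("分词工具包"));
		System.out.println(tokens); //prints the lexeme texts produced by IKSegmenter
	}
}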

Example 2: createComponents

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
@Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
    Tokenizer token = new IKTokenizer(reader, useSmart);
    Map<String, String> paramsMap = new HashMap<String, String>();
    Configuration cfg = DefaultConfig.getInstance();
    paramsMap.put("luceneMatchVersion", luceneMatchVersion.toString());
    paramsMap.put("synonyms", cfg.getExtSynonymDictionarys().get(0));
    paramsMap.put("ignoreCase", "true");
    SynonymFilterFactory factory = new SynonymFilterFactory(paramsMap);
    ResourceLoader loader = new ClasspathResourceLoader();
    try {
        factory.inform(loader);
    } catch (IOException e) {
        e.printStackTrace();
    }
    return new TokenStreamComponents(token, factory.create(token));
}
 
Developer: xindongzhang, Project: IK-Analyzer-2012FF, Lines: 18, Source: IKSynonymAnalyzer.java

Example 3: initial

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
/**
 * Dictionary initialization.
 * IK Analyzer initializes its dictionaries through static methods of the Dictionary class,
 * so the dictionaries are only loaded when the Dictionary class is actually used for the first time,
 * which lengthens the first tokenization operation.
 * This method provides a way to initialize the dictionaries during application startup instead.
 * @return Dictionary
 */
public static Dictionary initial(Configuration cfg){
	if(singleton == null){
		synchronized(Dictionary.class){
			if(singleton == null){
				singleton = new Dictionary(cfg);
				return singleton;
			}
		}
	}
	return singleton;
}
 
Developer: navis87, Project: IKAnalyzer, Lines: 20, Source: Dictionary.java
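
Building on the Javadoc above, a minimal sketch of eager initialization at application startup, assuming the default configuration from Example 1 (DefaultConfig.getInstance(), typically backed by IKAnalyzer.cfg.xml on the classpath) and the standard IK Analyzer package layout:

import org.wltea.analyzer.cfg.Configuration;
import org.wltea.analyzer.cfg.DefaultConfig;
import org.wltea.analyzer.dic.Dictionary;

public class DictionaryBootstrap {
	public static void main(String[] args) {
		//build the dictionary singleton eagerly so the first real
		//tokenization request does not pay the dictionary-loading cost
		Configuration cfg = DefaultConfig.getInstance();
		Dictionary.initial(cfg);
	}
}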

Example 4: AnalyzeContext

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
public AnalyzeContext(Configuration cfg){
	this.cfg = cfg;
	this.segmentBuff = new char[BUFF_SIZE];
	this.charTypes = new int[BUFF_SIZE];
	this.buffLocker = new HashSet<String>();
	this.orgLexemes = new QuickSortSet();
	this.pathMap = new HashMap<Integer , LexemePath>();    	
	this.results = new LinkedList<Lexeme>();
}
 
Developer: navis87, Project: IKAnalyzer, Lines: 10, Source: AnalyzeContext.java

Example 5: IKTokenizer

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
/**
 * Constructor for the Lucene 4.0 Tokenizer adapter class
 */
public IKTokenizer(Configuration configuration){
    super();
    offsetAtt = addAttribute(OffsetAttribute.class);
    termAtt = addAttribute(CharTermAttribute.class);
    typeAtt = addAttribute(TypeAttribute.class);
    posIncrAtt = addAttribute(PositionIncrementAttribute.class);

    _IKImplement = new IKSegmenter(input, configuration);
}
 
Developer: judasn, Project: Elasticsearch-Tutorial-zh-CN, Lines: 13, Source: IKTokenizer.java

Example 6: AnalyzeContext

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
public AnalyzeContext(Configuration configuration){
	this.cfg = configuration;
	this.segmentBuff = new char[BUFF_SIZE];
	this.charTypes = new int[BUFF_SIZE];
	this.buffLocker = new HashSet<String>();
	this.orgLexemes = new QuickSortSet();
	this.pathMap = new HashMap<Integer , LexemePath>();    	
	this.results = new LinkedList<Lexeme>();
}
 
Developer: judasn, Project: Elasticsearch-Tutorial-zh-CN, Lines: 10, Source: AnalyzeContext.java

Example 7: Dictionary

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
private Dictionary(Configuration cfg){
	this.cfg = cfg;
	this.loadMainDict();
	this.loadExtDict();
	this.loadStopWordDict();
	this.loadQuantifierDict();
}
 
Developer: TFdream, Project: lucene-analyzer-ik, Lines: 8, Source: Dictionary.java

Example 8: initial

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
/**
 * Dictionary initialization.
 * IK Analyzer initializes its dictionaries through static methods of the Dictionary class,
 * so the dictionaries are only loaded when the Dictionary class is actually used for the first time,
 * which lengthens the first tokenization operation.
 * This method provides a way to initialize the dictionaries during application startup instead.
 * @return Dictionary
 */
public static Dictionary initial(Configuration cfg){
	if(singleton == null){
		synchronized(Dictionary.class){
			if(singleton == null){
				singleton = new Dictionary(cfg);
			}
		}
	}
	return singleton;
}
 
Developer: TFdream, Project: lucene-analyzer-ik, Lines: 19, Source: Dictionary.java

Example 9: Dictionary

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
private Dictionary(Configuration cfg){
	this.cfg = cfg;
	this.loadMainDict();
	this.loadStopWordDict();
	this.loadQuantifierDict();
}
 
Developer: navis87, Project: IKAnalyzer, Lines: 7, Source: Dictionary.java

Example 10: IKSegmenter

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
/**
 * IK segmenter constructor
 * @param input
 * @param configuration
 */
public IKSegmenter(Reader input , Configuration configuration){
	this.input = input;
	this.configuration = configuration;
	this.init();
}
 
Developer: judasn, Project: Elasticsearch-Tutorial-zh-CN, Lines: 10, Source: IKSegmenter.java

Example 11: IkTokenizerFactory

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
public IkTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name, settings);
    configuration = new Configuration(env, settings);
}
 
Developer: judasn, Project: Elasticsearch-Tutorial-zh-CN, Lines: 5, Source: IkTokenizerFactory.java
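
The excerpt shows only the constructor. Under the Elasticsearch TokenizerFactory contract, the stored Configuration would normally be handed to the tokenizer in a create() override; a sketch of such a companion method (not part of the excerpt above, signature assumed) could be:

@Override
public Tokenizer create() {
    //hand the Configuration built in the constructor to the IKTokenizer shown in Example 5
    return new IKTokenizer(configuration);
}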

Example 12: IkAnalyzerProvider

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
public IkAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings, boolean useSmart) {
    super(indexSettings, name, settings);

    Configuration configuration = new Configuration(env, settings).setUseSmart(useSmart);

    analyzer = new IKAnalyzer(configuration);
}
 
Developer: judasn, Project: Elasticsearch-Tutorial-zh-CN, Lines: 8, Source: IkAnalyzerProvider.java
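
Similarly, this excerpt stops at the constructor. An Elasticsearch AnalyzerProvider normally exposes the built analyzer through a get() override; a sketch under that assumption (not shown in the original):

@Override
public IKAnalyzer get() {
    //return the analyzer instance built in the constructor
    return this.analyzer;
}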

Example 13: IKSegmenter

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
/**
 * IK segmenter constructor
 * @param input
 * @param cfg a custom Configuration used to build the segmenter
 * 
 */
public IKSegmenter(Reader input , Configuration cfg){
	this.input = input;
	this.cfg = cfg;
	this.init();
}
 
Developer: navis87, Project: IKAnalyzer, Lines: 12, Source: IKSegmenter.java

Example 14: IKAnalyzer

import org.wltea.analyzer.cfg.Configuration; //import the required package/class
/**
 * Lucene Analyzer implementation of the IK tokenizer
 * 
 * @param configuration the IK configuration
 */
public IKAnalyzer(Configuration configuration){
	super();
	this.configuration = configuration;
}
 
Developer: judasn, Project: Elasticsearch-Tutorial-zh-CN, Lines: 10, Source: IKAnalyzer.java
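
To show how this analyzer might be driven through the standard Lucene TokenStream API, here is a minimal sketch. The Configuration instance is assumed to be built as in Examples 11 and 12, the IKAnalyzer package path is assumed to be org.wltea.analyzer.lucene, and the field name and text are illustrative:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.cfg.Configuration;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class IKAnalyzerUsage {
	public static void printTokens(Configuration configuration, String text) throws Exception {
		Analyzer analyzer = new IKAnalyzer(configuration);
		try (TokenStream ts = analyzer.tokenStream("content", text)) {
			CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
			ts.reset();
			while (ts.incrementToken()) {
				System.out.println(term.toString()); //one lexeme per line
			}
			ts.end();
		}
	}
}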


Note: The org.wltea.analyzer.cfg.Configuration class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; the source code is copyrighted by its original authors, and distribution or use should follow the license of the corresponding project. Do not reproduce without permission.