本文整理汇总了Java中edu.stanford.nlp.process.WordSegmenter类的典型用法代码示例。如果您正苦于以下问题:Java WordSegmenter类的具体用法?Java WordSegmenter怎么用?Java WordSegmenter使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
WordSegmenter类属于edu.stanford.nlp.process包,在下文中一共展示了WordSegmenter类的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: factory
import edu.stanford.nlp.process.WordSegmenter; //导入依赖的package包/类
/**
 * Creates a tokenizer factory whose tokenizers re-segment text using the
 * given {@link WordSegmenter}.
 *
 * @param wordSegmenter the segmenter used to split raw text into words
 * @return a {@code TokenizerFactory} backed by {@code wordSegmenter}
 */
public static TokenizerFactory<HasWord> factory(WordSegmenter wordSegmenter) {
  TokenizerFactory<HasWord> result = new WordSegmentingTokenizerFactory(wordSegmenter);
  return result;
}
示例2: lex
import edu.stanford.nlp.process.WordSegmenter; //导入依赖的package包/类
/**
 * Returns the lexicon used for Chinese parsing: either a character-based
 * lexicon, or a word-based {@link ChineseLexicon} — the latter coupled
 * with a word segmenter when one has been configured.
 */
@Override
public Lexicon lex(Options.LexOptions op) {
  // The character-based model short-circuits all word-based setup.
  if (useCharacterBasedLexicon) {
    lex = new ChineseCharacterBasedLexicon();
    return lex;
  }
  // Fall back to the standard Chinese unknown-word model if none was set.
  if (op.uwModel == null) {
    op.uwModel = "edu.stanford.nlp.parser.lexparser.ChineseUnknownWordModel";
  }
  ChineseLexicon wordLexicon = new ChineseLexicon(op);
  WordSegmenter seg = segmenter;
  // With a segmenter available, pair it with the lexicon; otherwise the
  // plain word-based lexicon is used on its own.
  lex = (seg == null) ? wordLexicon : new ChineseLexiconAndWordSegmenter(wordLexicon, seg);
  return lex;
}
示例3: ChineseLexiconAndWordSegmenter
import edu.stanford.nlp.process.WordSegmenter; //导入依赖的package包/类
/**
 * Combines a word-based Chinese lexicon with a word segmenter, and installs
 * a matching word-segmenting tokenizer factory on the Chinese treebank
 * language pack so tokenization stays in sync with the segmenter.
 */
public ChineseLexiconAndWordSegmenter(ChineseLexicon lex, WordSegmenter seg) {
  wordSegmenter = seg;
  chineseLexicon = lex;
  // Side effect: make the language pack tokenize with this same segmenter.
  ChineseTreebankLanguagePack.setTokenizerFactory(WordSegmentingTokenizer.factory(seg));
}
示例4: WordSegmentingTokenizer
import edu.stanford.nlp.process.WordSegmenter; //导入依赖的package包/类
/**
 * Builds a tokenizer that whitespace-tokenizes the input from {@code r}
 * and then re-segments the tokens with {@code wordSegmenter}.
 */
public WordSegmentingTokenizer(WordSegmenter wordSegmenter, Reader r) {
  tok = new WhitespaceTokenizer(r);
  this.wordSegmenter = wordSegmenter;
}
示例5: factory
import edu.stanford.nlp.process.WordSegmenter; //导入依赖的package包/类
/**
 * Creates a tokenizer factory whose tokenizers re-segment text using the
 * given {@link WordSegmenter}.
 *
 * @param wordSegmenter the segmenter used to split raw text into words
 * @return a {@code TokenizerFactory} backed by {@code wordSegmenter}
 */
public static TokenizerFactory<Word> factory(WordSegmenter wordSegmenter) {
  TokenizerFactory<Word> result = new WordSegmentingTokenizerFactory(wordSegmenter);
  return result;
}
示例6: WordSegmentingTokenizerFactory
import edu.stanford.nlp.process.WordSegmenter; //导入依赖的package包/类
/**
 * Creates a factory whose tokenizers are backed by the given segmenter.
 */
public WordSegmentingTokenizerFactory(WordSegmenter segmenter) {
  this.wordSegmenter = segmenter;
}
示例7: ChineseLexiconAndWordSegmenter
import edu.stanford.nlp.process.WordSegmenter; //导入依赖的package包/类
/**
 * Pairs a word-based Chinese lexicon with a word segmenter.
 */
public ChineseLexiconAndWordSegmenter(ChineseLexicon lexicon, WordSegmenter segmenter) {
  chineseLexicon = lexicon;
  wordSegmenter = segmenter;
}
示例8: WordSegmentingTokenizer
import edu.stanford.nlp.process.WordSegmenter; //导入依赖的package包/类
/**
 * Convenience constructor: wraps {@code r} in a core-label whitespace
 * tokenizer and delegates to the primary constructor.
 */
public WordSegmentingTokenizer(WordSegmenter wordSegmenter, Reader r) {
  this(wordSegmenter, WhitespaceTokenizer.newCoreLabelWhitespaceTokenizer(r));
}
示例9: WordSegmentingTokenizerFactory
import edu.stanford.nlp.process.WordSegmenter; //导入依赖的package包/类
/**
 * Creates a factory whose tokenizers are backed by the given segmenter.
 */
public WordSegmentingTokenizerFactory(WordSegmenter seg) {
  segmenter = seg;
}