This article collects typical usage examples of the Java method edu.stanford.nlp.stats.Distribution.getDistribution. If you are wondering what Distribution.getDistribution does, how to call it, or where to find working examples, the curated code samples below may help. You can also read more about its containing class, edu.stanford.nlp.stats.Distribution.
The following shows 3 code examples of Distribution.getDistribution, ordered by popularity by default.
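Before the examples, here is a minimal, self-contained sketch of the basic call pattern (assuming Stanford CoreNLP is on the classpath; the class name DistributionDemo and the toy counts are purely illustrative). The idea is to accumulate raw counts in a Counter and let Distribution.getDistribution normalize them, so that getCount on the result returns a probability rather than a raw count.

import edu.stanford.nlp.stats.ClassicCounter;
import edu.stanford.nlp.stats.Distribution;

public class DistributionDemo {
  public static void main(String[] args) {
    // Accumulate raw occurrence counts (the tokens and counts are made up for illustration).
    ClassicCounter<String> counts = new ClassicCounter<String>();
    counts.incrementCount("cat", 3.0);
    counts.incrementCount("dog", 1.0);

    // Normalize the counts into a probability distribution (0.75 and 0.25 here).
    Distribution<String> dist = Distribution.getDistribution(counts);
    for (String key : dist.keySet()) {
      // On a Distribution, getCount returns the normalized probability.
      System.out.printf("%s -> %.2f%n", key, dist.getCount(key));
    }
  }
}

The three examples below follow the same pattern: examples 1 and 2 build a distribution over segmented word lengths, and example 3 normalizes two argument counters so their entries can be compared as probabilities.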
Example 1: getSegmentedWordLengthDistribution
import edu.stanford.nlp.stats.Distribution; // import the package/class this method depends on
private Distribution<Integer> getSegmentedWordLengthDistribution(Treebank tb) {
  // CharacterLevelTagExtender ext = new CharacterLevelTagExtender();
  ClassicCounter<Integer> c = new ClassicCounter<Integer>();
  for (Iterator iterator = tb.iterator(); iterator.hasNext();) {
    Tree gold = (Tree) iterator.next();
    // Concatenate the gold-standard yield into one unsegmented character string.
    StringBuilder goldChars = new StringBuilder();
    Sentence goldYield = gold.yield();
    for (Iterator wordIter = goldYield.iterator(); wordIter.hasNext();) {
      Word word = (Word) wordIter.next();
      goldChars.append(word);
    }
    // Re-segment the characters and count the length of each predicted word.
    Sentence ourWords = segmentWords(goldChars.toString());
    for (int i = 0; i < ourWords.size(); i++) {
      c.incrementCount(Integer.valueOf(ourWords.get(i).toString().length()));
    }
  }
  // Normalize the word-length counts into a probability distribution.
  return Distribution.getDistribution(c);
}
Example 2: getSegmentedWordLengthDistribution
import edu.stanford.nlp.stats.Distribution; // import the package/class this method depends on
private Distribution<Integer> getSegmentedWordLengthDistribution(Treebank tb) {
  // CharacterLevelTagExtender ext = new CharacterLevelTagExtender();
  ClassicCounter<Integer> c = new ClassicCounter<Integer>();
  for (Iterator iterator = tb.iterator(); iterator.hasNext();) {
    Tree gold = (Tree) iterator.next();
    // Concatenate the gold-standard yield into one unsegmented character string.
    StringBuilder goldChars = new StringBuilder();
    ArrayList goldYield = gold.yield();
    for (Iterator wordIter = goldYield.iterator(); wordIter.hasNext();) {
      Word word = (Word) wordIter.next();
      goldChars.append(word);
    }
    // Re-segment the characters and count the length of each predicted word.
    List<HasWord> ourWords = segment(goldChars.toString());
    for (int i = 0; i < ourWords.size(); i++) {
      c.incrementCount(Integer.valueOf(ourWords.get(i).word().length()));
    }
  }
  // Normalize the word-length counts into a probability distribution.
  return Distribution.getDistribution(c);
}
Example 3: argVectorsDiffer
import edu.stanford.nlp.stats.Distribution; // import the package/class this method depends on
private boolean argVectorsDiffer(Counter<String> args1, Counter<String> args2) {
  System.out.println("argVectorsDiffer top!");
  // Normalize both argument counters into probability distributions.
  Distribution<String> dist1 = Distribution.getDistribution(args1);
  Distribution<String> dist2 = Distribution.getDistribution(args2);
  Set<String> argdiffs = new HashSet<String>();
  for (String token : dist1.keySet()) {
    double prob1 = dist1.getCount(token);
    // Only compare tokens that carry non-trivial probability mass in the first distribution.
    if (prob1 > 0.02) {
      double prob2 = dist2.getCount(token);
      double ratio = (prob1 < prob2 ? prob1 / prob2 : prob2 / prob1);
      System.out.printf("- %s\t%.4f\t%.4f\tratio=%.4f\n", token, prob1, prob2, ratio);
      if (ratio < 0.2) {
        argdiffs.add(token);
        System.out.println(" arg differs: " + token);
      }
    }
  }
  // Declare the argument vectors different if at least two tokens disagree strongly.
  if (argdiffs.size() >= 2) {
    System.out.println("Arg vectors differ!!");
    return true;
  }
  return false;
}