This article collects typical usage examples of the Java method edu.stanford.nlp.stats.Distribution.laplaceSmoothedDistribution. If you are unsure what Distribution.laplaceSmoothedDistribution does or how to use it, the selected examples below may help. You can also read more about the enclosing class, edu.stanford.nlp.stats.Distribution.
The following shows 4 code examples of the Distribution.laplaceSmoothedDistribution method, sorted by popularity by default.
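Before the real-world examples, here is a minimal sketch of the call itself. The counter contents, the outcome count of 10, and the smoothing weight 0.5 are illustrative values, not taken from any example below:

import edu.stanford.nlp.stats.ClassicCounter;
import edu.stanford.nlp.stats.Distribution;

ClassicCounter<String> counts = new ClassicCounter<String>();
counts.incrementCount("NN", 3.0);
counts.incrementCount("VB", 1.0);
// Spread the observed counts over 10 possible outcomes, adding 0.5 pseudo-count to each
Distribution<String> smoothed = Distribution.laplaceSmoothedDistribution(counts, 10, 0.5);
double p = smoothed.probabilityOf("NN"); // probabilityOf is nonzero even for keys the counter never saw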
Example 1: finishTraining
import edu.stanford.nlp.stats.Distribution; // import the class the method depends on
@Override
public void finishTraining() {
  lex.finishTraining();
  int numTags = tagIndex.size();
  POSes = new HashSet<String>(tagIndex.objectsList());
  // Smooth the initial-tag counts into a distribution over the full tag set
  initialPOSDist = Distribution.laplaceSmoothedDistribution(initial, numTags, 0.5);
  markovPOSDists = new HashMap<String, Distribution>();
  // ruleCounter holds tag-bigram counts; build one smoothed distribution of
  // following tags for each preceding tag
  Set entries = ruleCounter.lowestLevelCounterEntrySet();
  for (Iterator iter = entries.iterator(); iter.hasNext();) {
    Map.Entry entry = (Map.Entry) iter.next();
    // Map.Entry<List<String>, Counter> entry = (Map.Entry<List<String>, Counter>) iter.next();
    Distribution d = Distribution.laplaceSmoothedDistribution((ClassicCounter) entry.getValue(), numTags, 0.5);
    markovPOSDists.put(((List<String>) entry.getKey()).get(0), d);
  }
}
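For reference, laplaceSmoothedDistribution performs add-lambda smoothing; this reading of the parameters is an assumption based on the method name and the calls above. Each of the numTags possible outcomes receives a pseudo-count of 0.5, so a tag seen c times out of N total observations gets probability (c + 0.5) / (N + 0.5 * numTags). With N = 100 and numTags = 40, an unseen tag still gets 0.5 / 120 ≈ 0.0042 instead of zero.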
Example 2: train
import edu.stanford.nlp.stats.Distribution; // import the class the method depends on
public void train(Collection<Tree> trees) {
  Numberer tagNumberer = Numberer.getGlobalNumberer("tags");
  lex.train(trees);
  ClassicCounter<String> initial = new ClassicCounter<String>();
  GeneralizedCounter ruleCounter = new GeneralizedCounter(2);
  // Count sentence-initial tags and tag bigrams over the pre-terminal yields
  for (Tree tree : trees) {
    List<Label> tags = tree.preTerminalYield();
    String last = null;
    for (Label tagLabel : tags) {
      String tag = tagLabel.value();
      tagNumberer.number(tag);
      if (last == null) {
        initial.incrementCount(tag);
      } else {
        ruleCounter.incrementCount2D(last, tag);
      }
      last = tag;
    }
  }
  int numTags = tagNumberer.total();
  POSes = new HashSet<String>(ErasureUtils.<Collection<String>>uncheckedCast(tagNumberer.objects()));
  // Smooth the raw counts exactly as in finishTraining() above
  initialPOSDist = Distribution.laplaceSmoothedDistribution(initial, numTags, 0.5);
  markovPOSDists = new HashMap<String, Distribution>();
  Set entries = ruleCounter.lowestLevelCounterEntrySet();
  for (Iterator iter = entries.iterator(); iter.hasNext();) {
    Map.Entry entry = (Map.Entry) iter.next();
    // Map.Entry<List<String>, Counter> entry = (Map.Entry<List<String>, Counter>) iter.next();
    Distribution d = Distribution.laplaceSmoothedDistribution((ClassicCounter) entry.getValue(), numTags, 0.5);
    markovPOSDists.put(((List<String>) entry.getKey()).get(0), d);
  }
}
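Once train has run, the resulting distributions can be queried directly. This is a hedged sketch; the fields initialPOSDist and markovPOSDists come from the example above, and the tag strings are illustrative:

double pStart = initialPOSDist.probabilityOf("DT"); // P(sentence starts with DT)
Distribution next = markovPOSDists.get("DT"); // distribution over tags that follow DT
double pTrans = (next == null) ? 0.0 : next.probabilityOf("NN"); // P(NN | previous tag DT)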
Example 3: computeInputPrior
import edu.stanford.nlp.stats.Distribution; // import the class the method depends on
protected Distribution<String> computeInputPrior(Map<String, List<List<String>>> allTrainPaths) {
  ClassicCounter<String> result = new ClassicCounter<String>();
  // Count every input symbol occurring on any training path, across all categories
  for (Iterator<List<List<String>>> catI = allTrainPaths.values().iterator(); catI.hasNext();) {
    List<List<String>> pathList = catI.next();
    for (List<String> path : pathList) {
      for (String input : path) {
        result.incrementCount(input);
      }
    }
  }
  // Smooth over twice the number of observed symbol types, reserving mass for unseen inputs
  return Distribution.laplaceSmoothedDistribution(result, result.size() * 2, 0.5);
}
Example 4: computeInputPrior
import edu.stanford.nlp.stats.Distribution; // import the class the method depends on
protected static Distribution<String> computeInputPrior(Map<String, List<List<String>>> allTrainPaths) {
  ClassicCounter<String> result = new ClassicCounter<String>();
  // Count every input symbol occurring on any training path, across all categories
  for (List<List<String>> pathList : allTrainPaths.values()) {
    for (List<String> path : pathList) {
      for (String input : path) {
        result.incrementCount(input);
      }
    }
  }
  // Same smoothing as Example 3: twice the observed vocabulary size, lambda = 0.5
  return Distribution.laplaceSmoothedDistribution(result, result.size() * 2, 0.5);
}
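The result.size() * 2 argument in Examples 3 and 4 is worth a note: ClassicCounter.size() is the number of distinct keys, so passing twice that value makes the smoother treat the vocabulary as twice as large as what was actually observed. Half of the smoothing mass is thereby reserved for input symbols never seen in training, which is what lets the returned distribution serve as a prior. Under the add-lambda reading sketched after Example 1, with 50 observed symbol types and 200 total tokens, an unseen symbol would get 0.5 / (200 + 0.5 * 100) = 0.002.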