This article collects typical usage examples of the Java method edu.stanford.nlp.math.ArrayMath.logSum. If you are wondering what ArrayMath.logSum does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, edu.stanford.nlp.math.ArrayMath.
Below are 13 code examples of the ArrayMath.logSum method, ordered by popularity.
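Before diving into the examples, here is a minimal orientation sketch. The class name LogSumDemo and its sample values are made up for illustration; the point is only what ArrayMath.logSum computes: log(exp(x[0]) + exp(x[1]) + ...), evaluated in the log domain so it does not underflow the way a naive sum of exponentials does.

import edu.stanford.nlp.math.ArrayMath;

public class LogSumDemo {
  public static void main(String[] args) {
    // Unnormalized log scores, e.g. the log potentials stored in a factor table.
    double[] logScores = { -1001.0, -1002.0, -1003.0 };

    // logSum returns log(sum_i exp(logScores[i])); it works in the log domain,
    // so the result stays finite even when Math.exp would underflow.
    double logZ = ArrayMath.logSum(logScores);

    // The naive computation underflows to 0, so its log is -Infinity.
    double naive = 0.0;
    for (double s : logScores) {
      naive += Math.exp(s);
    }

    System.out.println("logSum    = " + logZ);            // approximately -1000.59
    System.out.println("naive log = " + Math.log(naive)); // -Infinity
  }
}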
Example 1: unnormalizedLogProbFront
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
public double unnormalizedLogProbFront(int[] labels) {
int startIndex = indicesFront(labels);
int numCellsToSum = SloppyMath.intPow(numClasses, windowSize - labels.length);
// double[] masses = new double[labels.length];
// for (int i = 0; i < masses.length; i++) {
// masses[i] = table[labels[i]];
// }
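// Cells whose leading (front) labels match 'labels' are contiguous in 'table',
// so a single log-sum over that block sums out the remaining positions.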
return ArrayMath.logSum(table, startIndex, startIndex + numCellsToSum);
}
Example 2: getApproximateScores
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
private double[] getApproximateScores(History h) {
String[] tags = stringTagsAt(h.current - h.start + leftWindow());
double[] scores = getHistories(tags, h); // log score for each active tag, unnormalized
// Number of tags that get assigned a default score:
int nDefault = maxentTagger.ySize - tags.length;
double logScore = ArrayMath.logSum(scores);
double logScoreInactiveTags = maxentTagger.getInactiveTagDefaultScore(nDefault);
double logTotal = SloppyMath.logAdd(logScore, logScoreInactiveTags);
ArrayMath.addInPlace(scores, -logTotal);
return scores;
}
Example 3: unnormalizedLogProbEnd
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
public double unnormalizedLogProbEnd(int[] labels) {
labels = indicesEnd(labels);
double[] masses = new double[labels.length];
for (int i = 0; i < masses.length; i++) {
masses[i] = table[labels[i]];
}
return ArrayMath.logSum(masses);
}
Example 4: conditionalLogProbGivenNext
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
/**
 * Computes the log probability of the tag {@code of} being at the beginning of
 * the table, given that the tag sequence {@code given} occupies the remaining
 * (trailing) positions of the table.
 *
 * @return the conditional log probability of {@code of} at the beginning of the table
 */
public double conditionalLogProbGivenNext(int[] given, int of) {
if (given.length != windowSize - 1) {
throw new IllegalArgumentException("conditionalLogProbGivenNext requires given one less than clique size (" +
windowSize + ") but was " + Arrays.toString(given));
}
int[] label = indicesEnd(given);
double[] masses = new double[label.length];
for (int i = 0; i < masses.length; i++) {
masses[i] = table[label[i]];
}
double z = ArrayMath.logSum(masses);
return table[indexOf(of, given)] - z;
}
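The key step above is subtracting z = ArrayMath.logSum(masses) from a joint log score, which turns it into a conditional log probability. The small check below (ConditionalCheck is a made-up class, not part of FactorTable) illustrates that the conditionals produced this way exponentiate to a distribution summing to 1:

import edu.stanford.nlp.math.ArrayMath;

public class ConditionalCheck {
  public static void main(String[] args) {
    // Joint log scores for each candidate tag in the first position.
    double[] jointLogScores = { 1.2, -0.3, 0.7 };
    double z = ArrayMath.logSum(jointLogScores); // log normalizer

    double total = 0.0;
    for (double score : jointLogScores) {
      total += Math.exp(score - z); // exp(conditional log probability)
    }
    System.out.println(total); // prints 1.0 up to floating-point error
  }
}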
Example 5: unnormalizedLogProbFront
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
public float unnormalizedLogProbFront(int[] label) {
label = indicesFront(label);
float[] masses = new float[label.length];
for (int i = 0; i < masses.length; i++) {
masses[i] = table[label[i]];
}
return ArrayMath.logSum(masses);
}
Example 6: sumOutEnd
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
public FactorTable sumOutEnd() {
FactorTable ft = new FactorTable(numClasses, windowSize - 1);
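// The last label position varies fastest, so each block of numClasses consecutive
// cells shares the same leading labels; log-summing a block sums out the final position.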
for (int i = 0, sz = ft.size(); i < sz; i++) {
ft.table[i] = ArrayMath.logSum(table, i * numClasses, (i+1) * numClasses);
}
/*
for (int i = 0; i < table.length; i++) {
ft.logIncrementValue(i / numClasses, table[i]);
}
*/
return ft;
}
Example 7: conditionalLogProb
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
public float conditionalLogProb(int[] given, int of) {
if (given.length != windowSize - 1) {
  throw new IllegalArgumentException("conditionalLogProb requires given.length == windowSize - 1, but got " + given.length);
}
int[] label = indicesFront(given);
float[] masses = new float[label.length];
for (int i = 0; i < masses.length; i++) {
masses[i] = table[label[i]];
}
float z = ArrayMath.logSum(masses);
return table[indexOf(given, of)] - z;
}
Example 8: unnormalizedLogProbEnd
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
public float unnormalizedLogProbEnd(int[] label) {
label = indicesEnd(label);
float[] masses = new float[label.length];
for (int i = 0; i < masses.length; i++) {
masses[i] = table[label[i]];
}
return ArrayMath.logSum(masses);
}
Example 9: recenter
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
static double[] recenter(double[] x) {
double[] r = new double[x.length];
// double logTotal = Double.NEGATIVE_INFINITY;
// for (int i = 0; i < x.length; i++)
// logTotal = SloppyMath.logAdd(logTotal, x[i]);
double logTotal = ArrayMath.logSum(x);
for (int i = 0; i < x.length; i++) {
r[i] = x[i] - logTotal;
}
return r;
}
Example 10: totalMass
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
public double totalMass() {
return ArrayMath.logSum(table);
}
Example 11: logLikelihoodNeg
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
/**
 * Calculates the log-likelihood from scratch, caching the conditional
 * probabilities in probConds for later use. This is for a different model, in
 * which all features effectively get negative weights; that model is easier to
 * use for heuristic search:
 * p(t_i|s) = exp(sum_j -(e^lambda_j) * f_j(t_i))
 *
 * @return The negative log-likelihood of the data
 */
public double logLikelihoodNeg() {
// zero all the variables
double s = 0;
for (int i = 0; i < probConds.length; i++) {
for (int j = 0; j < probConds[i].length; j++) {
probConds[i][j] = 0;
}
zlambda[i] = 0;
}
// accumulate the unnormalized scores for pcond(y|x)
for (int fNo = 0, fSize = p.fSize; fNo < fSize; fNo++) {
// add for all occurrences of the function the values to probConds
Feature f = p.functions.get(fNo);
double fLambda = -Math.exp(lambda[fNo]);
double sum = ftildeArr[fNo];
//if(sum==0){continue;}
sum *= p.data.getNumber();
s -= sum * fLambda;
if (Math.abs(fLambda) > 200) { // was 50
System.out.println("lambda " + fNo + " too big: " + fLambda);
}
for (int i = 0, length = f.len(); i < length; i++) {
int x = f.getX(i);
int y = f.getY(i);
if (ASSUME_BINARY) {
probConds[x][y] += fLambda;
} else {
double val = f.getVal(i);
probConds[x][y] += (val * fLambda);
}
} //for
} //for fNo
for (int x = 0; x < probConds.length; x++) {
// for each context x: compute the log normalizer and normalize the conditional scores
zlambda[x] = ArrayMath.logSum(probConds[x]); // cpu samples #4,#15: 4.5%
//System.out.println("zlambda "+x+" "+zlambda[x]);
s += zlambda[x] * p.data.ptildeX(x) * p.data.getNumber();
for (int y = 0; y < probConds[x].length; y++) {
probConds[x][y] = divide(probConds[x][y], zlambda[x]); // cpu samples #13: 1.6%
//System.out.println("prob "+x+" "+y+" "+probConds[x][y]);
} //y
}//x
if (s < 0) {
System.out.println("neg log lik smaller than 0 " + s);
//System.exit(0);
}
return s;
}
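To make the role of zlambda[x] = ArrayMath.logSum(probConds[x]) concrete, here is a toy negative log-likelihood computed the same way on plain arrays. NegLogLikDemo, its scores, and its counts are purely illustrative and do not use the Feature or Experiments classes referenced above.

import edu.stanford.nlp.math.ArrayMath;

public class NegLogLikDemo {
  public static void main(String[] args) {
    // Unnormalized log scores score[x][y] for 2 contexts and 3 labels.
    double[][] score = { { 0.5, -1.0, 0.2 }, { -0.4, 0.9, 0.0 } };
    // Observed label for each context, and how often each context occurs.
    int[] observedY = { 0, 1 };
    double[] count = { 3.0, 1.0 };

    double negLogLik = 0.0;
    for (int x = 0; x < score.length; x++) {
      double logZ = ArrayMath.logSum(score[x]); // plays the role of zlambda[x]
      negLogLik += count[x] * (logZ - score[x][observedY[x]]);
    }
    System.out.println("negative log-likelihood = " + negLogLik);
  }
}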
Example 12: expectedValue
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
/**
 * Each pair (x, y) has a value stored in p.data.values[x][y].
 *
 * @return The negated expected value over the corpus: -sum_{x,y} ptilde(x) * pcond(y|x) * value(x,y), scaled by the number of training instances
 */
public double expectedValue() {
// zero all the variables
double s = 0;
aux = new double[probConds.length];
for (int i = 0; i < probConds.length; i++) {
for (int j = 0; j < probConds[i].length; j++) {
probConds[i][j] = 0;
}
zlambda[i] = 0;
}
// accumulate the unnormalized scores for pcond(y|x)
for (int fNo = 0, fSize = p.fSize; fNo < fSize; fNo++) {
// add for all occurrences of the function the values to probConds
Feature f = p.functions.get(fNo);
double fLambda = lambda[fNo];
if (Math.abs(fLambda) > 200) { // was 50
System.err.println("lambda " + fNo + " too big: " + fLambda);
}
for (int i = 0, length = f.len(); i < length; i++) {
int x = f.getX(i);
int y = f.getY(i);
if (ASSUME_BINARY) {
probConds[x][y] += fLambda;
} else {
double val = f.getVal(i);
probConds[x][y] += (val * fLambda);
}
} //for
} //for fNo
Experiments exp = p.data;
for (int x = 0; x < probConds.length; x++) {
// for each context x: compute the log normalizer and accumulate the expected value
zlambda[x] = ArrayMath.logSum(probConds[x]); // cpu samples #4,#15: 4.5%
//System.err.println("zlambda "+x+" "+zlambda[x]);
for (int y = 0; y < probConds[x].length; y++) {
probConds[x][y] = divide(probConds[x][y], zlambda[x]); // cpu samples #13: 1.6%
//System.err.println("prob "+x+" "+y+" "+probConds[x][y]);
s -= exp.values[x][y] * probConds[x][y] * exp.ptildeX(x) * exp.getNumber();
aux[x] += exp.values[x][y] * probConds[x][y];
}
}//x
return s;
}
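The quantity being accumulated is easier to see without the Experiments and Feature machinery. The sketch below (ExpectedValueDemo, with made-up arrays) computes the same negated expectation, -sum over (x, y) of ptilde(x) * pcond(y|x) * value(x, y), using ArrayMath.logSum for the per-context normalizer:

import edu.stanford.nlp.math.ArrayMath;

public class ExpectedValueDemo {
  public static void main(String[] args) {
    double[][] logScore = { { 0.1, -0.5 }, { 0.3, 0.0 } }; // unnormalized log scores
    double[][] value    = { { 1.0,  2.0 }, { 0.5, 1.5 } }; // value(x, y)
    double[] ptildeX    = { 0.75, 0.25 };                  // empirical distribution over contexts

    double s = 0.0;
    for (int x = 0; x < logScore.length; x++) {
      double logZ = ArrayMath.logSum(logScore[x]); // log normalizer for context x
      for (int y = 0; y < logScore[x].length; y++) {
        double pcond = Math.exp(logScore[x][y] - logZ); // pcond(y|x)
        s -= ptildeX[x] * pcond * value[x][y];
      }
    }
    System.out.println("negated expected value = " + s);
  }
}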
Example 13: logSum
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
/**
 * Returns ArrayMath.logSum of the values in the given counter.
 *
 * @param c Argument counter (which is not modified)
 * @return ArrayMath.logSum of the values in the counter
 */
public static <E> double logSum(Counter<E> c) {
return ArrayMath.logSum(ArrayMath.unbox(c.values()));
}
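A possible way to call this helper, assuming it is the logSum defined in edu.stanford.nlp.stats.Counters (CounterLogSumDemo and its values are only an illustration; adjust the class name if your copy of the method lives elsewhere):

import edu.stanford.nlp.stats.ClassicCounter;
import edu.stanford.nlp.stats.Counter;
import edu.stanford.nlp.stats.Counters;

public class CounterLogSumDemo {
  public static void main(String[] args) {
    Counter<String> logWeights = new ClassicCounter<>();
    logWeights.setCount("NN", Math.log(0.5));
    logWeights.setCount("VB", Math.log(0.3));
    logWeights.setCount("JJ", Math.log(0.2));

    // The stored values are log probabilities that sum to 1 in probability
    // space, so logSum should return approximately log(1.0) = 0.0.
    System.out.println(Counters.logSum(logWeights));
  }
}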