This article collects typical usage examples of the Java method edu.stanford.nlp.math.ArrayMath.sum. If you are wondering what ArrayMath.sum does, how to call it, or what real uses of it look like, the hand-picked code examples below may help. You can also explore further usage examples of its enclosing class, edu.stanford.nlp.math.ArrayMath.
Four code examples of the ArrayMath.sum method are shown below, sorted by popularity by default.
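Before the examples, here is a minimal, self-contained sketch of the two ArrayMath.sum overloads the examples rely on, one for double[] and one for int[]. It is not taken from the examples below, and the class name SumSketch is purely illustrative:

import edu.stanford.nlp.math.ArrayMath;

public class SumSketch {
  public static void main(String[] args) {
    double[] scores = {0.2, 0.3, 0.5};
    int[] counts = {1, 2, 3};
    double total = ArrayMath.sum(scores);   // sum of a double[] -> 1.0
    int countTotal = ArrayMath.sum(counts); // sum of an int[]   -> 6
    System.out.println(total + " " + countTotal);
  }
}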
Example 1: greedyDecode
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
/**
 * THIS CLOBBERS THE LABELS, stores its decoding into them.
 * Does progressive rolling edge feature extraction
 **/
public void greedyDecode(ModelSentence sentence, boolean storeConfidences) {
  int T = sentence.T;
  sentence.labels = new int[T];
  sentence.edgeFeatures[0] = startMarker();

  if (storeConfidences) sentence.confidences = new double[T];

  double[] labelScores = new double[numLabels];
  for (int t = 0; t < T; t++) {
    computeLabelScores(t, sentence, labelScores);
    sentence.labels[t] = ArrayMath.argmax(labelScores);
    if (t < T-1)
      sentence.edgeFeatures[t+1] = sentence.labels[t];

    if (storeConfidences) {
      ArrayMath.expInPlace(labelScores);
      double Z = ArrayMath.sum(labelScores);
      ArrayMath.multiplyInPlace(labelScores, 1.0/Z);
      sentence.confidences[t] = labelScores[ sentence.labels[t] ];
    }
  }
}
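The confidence block at the end of Example 1 is simply a softmax over the per-token label scores: exponentiate them, total them with ArrayMath.sum, then divide each by that total. Below is a stand-alone sketch of just that normalization step using the same ArrayMath calls; the class and method names are illustrative and not part of the tagger:

import edu.stanford.nlp.math.ArrayMath;

public class SoftmaxSketch {
  /** Turns raw label scores into probabilities in place, as Example 1 does for each token. */
  static void softmaxInPlace(double[] scores) {
    ArrayMath.expInPlace(scores);               // e^score for each label
    double Z = ArrayMath.sum(scores);           // partition function (normalizing constant)
    ArrayMath.multiplyInPlace(scores, 1.0 / Z); // entries now sum to 1
  }

  public static void main(String[] args) {
    double[] scores = {1.0, 2.0, 0.5};
    softmaxInPlace(scores);
    System.out.println(ArrayMath.sum(scores)); // ~1.0
  }
}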
Example 2: wbSmoothing
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
private static float[] wbSmoothing(int[] counts, int[] totalCounts) {
  float[] p = new float[counts.length];
  double cN = ArrayMath.sum(counts);
  double cNT = ArrayMath.sum(totalCounts);
  double lambda = cN / (cN + W);
  for (int i = 0; i < counts.length; ++i) {
    double p_mle = counts[i] / cN;
    double p_backoff = totalCounts[i] / cNT;
    p[i] = (float) (lambda * p_mle + (1.0 - lambda) * p_backoff);
  }
  return p;
}
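W in Example 2 is a class constant that does not appear in the excerpt; given the method name it is presumably the Witten-Bell smoothing parameter that controls how much weight the maximum-likelihood estimate gets. Here is a hedged, self-contained variant that passes the constant in explicitly, just to show where the two ArrayMath.sum calls fit; class and parameter names are illustrative:

import edu.stanford.nlp.math.ArrayMath;

public class WbSketch {
  /** Same interpolation as Example 2, but with the constant W supplied as an argument. */
  static float[] wbSmoothing(int[] counts, int[] totalCounts, double W) {
    float[] p = new float[counts.length];
    double cN = ArrayMath.sum(counts);        // total count in this context
    double cNT = ArrayMath.sum(totalCounts);  // total count in the backoff distribution
    double lambda = cN / (cN + W);            // weight on the maximum-likelihood estimate
    for (int i = 0; i < counts.length; ++i) {
      p[i] = (float) (lambda * counts[i] / cN + (1.0 - lambda) * totalCounts[i] / cNT);
    }
    return p;
  }

  public static void main(String[] args) {
    // counts {3, 1, 0}, totalCounts {10, 5, 5}, W = 2 -> lambda = 4/6, result sums to 1
    float[] p = wbSmoothing(new int[]{3, 1, 0}, new int[]{10, 5, 5}, 2.0);
    System.out.println(java.util.Arrays.toString(p));
  }
}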
Example 3: addOneSmoothing
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
private static float[] addOneSmoothing(int[] counts) {
  float[] p = new float[counts.length];
  float n = ArrayMath.sum(counts) + counts.length * DELTA;
  for (int binI = 0; binI < counts.length; ++binI) {
    p[binI] = (counts[binI] * 1.0f + DELTA) / n;
  }
  return p;
}
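DELTA in Example 3 is likewise a class constant not shown in the excerpt (the add-δ pseudo-count). For instance, with counts {2, 0, 1} and DELTA = 1, n = 3 + 3·1 = 6 and the smoothed probabilities are {3/6, 1/6, 2/6}. A self-contained variant with the pseudo-count passed in; names are illustrative:

import edu.stanford.nlp.math.ArrayMath;

public class AddDeltaSketch {
  /** Same computation as Example 3, with the pseudo-count DELTA supplied as an argument. */
  static float[] addDeltaSmoothing(int[] counts, float delta) {
    float[] p = new float[counts.length];
    float n = ArrayMath.sum(counts) + counts.length * delta; // total mass incl. pseudo-counts
    for (int i = 0; i < counts.length; ++i) {
      p[i] = (counts[i] + delta) / n;
    }
    return p;
  }

  public static void main(String[] args) {
    // counts {2, 0, 1}, delta 1 -> n = 6 -> probabilities {0.5, 0.1667, 0.3333}
    System.out.println(java.util.Arrays.toString(addDeltaSmoothing(new int[]{2, 0, 1}, 1f)));
  }
}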
Example 4: getHighPrecisionFeatures
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
/**
 * Returns a list of features thresholded by minPrecision and sorted by their frequency of occurrence.
 * Precision, in this case, is defined as the frequency of the majority label over the total frequency for that feature.
 * @return list of high precision features.
 */
private List<F> getHighPrecisionFeatures(GeneralDataset<L,F> dataset, double minPrecision, int maxNumFeatures) {
  int[][] feature2label = new int[dataset.numFeatures()][dataset.numClasses()];
  for (int f = 0; f < dataset.numFeatures(); f++)
    Arrays.fill(feature2label[f], 0);

  int[][] data = dataset.data;
  int[] labels = dataset.labels;
  for (int d = 0; d < data.length; d++) {
    int label = labels[d];
    //System.out.println("datum id:"+d+" label id: "+label);
    if (data[d] != null) {
      //System.out.println(" number of features:"+data[d].length);
      for (int n = 0; n < data[d].length; n++) {
        feature2label[data[d][n]][label]++;
      }
    }
  }

  Counter<F> feature2freq = new ClassicCounter<F>();
  for (int f = 0; f < dataset.numFeatures(); f++) {
    int maxF = ArrayMath.max(feature2label[f]);
    int total = ArrayMath.sum(feature2label[f]);
    double precision = ((double) maxF) / total;
    F feature = dataset.featureIndex.get(f);
    if (precision >= minPrecision) {
      feature2freq.incrementCount(feature, total);
    }
  }

  if (feature2freq.size() > maxNumFeatures) {
    Counters.retainTop(feature2freq, maxNumFeatures);
  }

  //for(F feature : feature2freq.keySet())
  //  System.out.println(feature+" "+feature2freq.getCount(feature));
  //System.exit(0);
  return Counters.toSortedList(feature2freq);
}
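Example 4 pairs ArrayMath.sum with ArrayMath.max to compute a feature's precision: the count of its most frequent label divided by its total count. A stripped-down sketch of just that calculation; the class and method names are illustrative:

import edu.stanford.nlp.math.ArrayMath;

public class PrecisionSketch {
  /** Precision of one feature, as Example 4 defines it: majority-label count over total count. */
  static double featurePrecision(int[] labelCounts) {
    int maxF = ArrayMath.max(labelCounts);  // count of the most frequent label
    int total = ArrayMath.sum(labelCounts); // total occurrences of the feature
    return ((double) maxF) / total;
  }

  public static void main(String[] args) {
    // a feature seen 9 times with label 0 and once with label 1 has precision 0.9
    System.out.println(featurePrecision(new int[]{9, 1}));
  }
}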