本文整理汇总了Java中cc.mallet.types.MatrixOps.max方法的典型用法代码示例。如果您正苦于以下问题:Java MatrixOps.max方法的具体用法?Java MatrixOps.max怎么用?Java MatrixOps.max使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cc.mallet.types.MatrixOps的用法示例。
在下文中一共展示了MatrixOps.max方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: diag
import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Creates a sparse matrix of the given shape whose main-diagonal entries
 * all equal {@code v} and whose off-diagonal entries are zero.
 *
 * @param sizes the extent of the matrix along each dimension
 * @param v     the value to place at every diagonal entry
 * @return a {@link SparseMatrixn} with {@code v} on the diagonal
 */
public static Matrix diag (int[] sizes, double v)
{
  // The diagonal only extends as far as the *smallest* dimension; the
  // previous MatrixOps.max (sizes) produced coordinates (i, i, ..., i)
  // with i beyond the smaller dimensions for non-square shapes, yielding
  // out-of-range single indices. Identical behavior for square shapes.
  int diagLength = sizes[0];
  for (int d = 1; d < sizes.length; d++) {
    diagLength = Math.min (diagLength, sizes[d]);
  }
  double[] vals = new double[diagLength];
  Arrays.fill (vals, v);
  /* Compute the single-index form of each diagonal coordinate (i, i, ..., i). */
  int[] idxs = new int[diagLength];
  for (int i = 0; i < diagLength; i++) {
    int[] oneIdx = new int[sizes.length];
    Arrays.fill (oneIdx, i);
    idxs[i] = Matrixn.singleIndex (sizes, oneIdx);
  }
  return new SparseMatrixn (sizes, idxs, vals);
}
示例2: getClassificationScoresWithTemperature
import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Computes temperature-scaled label probabilities for the given instance,
 * writing the normalized distribution into {@code scores} in place.
 * Each raw score (a sum of weighted features) is divided by
 * {@code temperature} before the softmax, so higher temperatures yield
 * flatter distributions.
 *
 * @param instance    the instance to classify
 * @param temperature softmax temperature; raw scores are divided by it
 * @param scores      output array; must hold one slot per label
 */
public void getClassificationScoresWithTemperature (Instance instance, double temperature, double[] scores)
{
  getUnnormalizedClassificationScores (instance, scores);
  // Temperature scaling: multiply every raw score by 1/T.
  double invTemperature = 1 / temperature;
  for (int i = 0; i < scores.length; i++) {
    scores[i] *= invTemperature;
  }
  // Softmax with the usual max-subtraction trick so exp() stays accurate.
  int numLabels = getLabelAlphabet().size();
  double max = MatrixOps.max (scores);
  double normalizer = 0;
  for (int i = 0; i < numLabels; i++) {
    scores[i] = Math.exp (scores[i] - max);
    normalizer += scores[i];
  }
  for (int i = 0; i < numLabels; i++) {
    scores[i] /= normalizer;
    // xxxNaN assert (!Double.isNaN(scores[i]));
  }
}
示例3: getClassificationScores
import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Computes the posterior probability of each label for the given instance,
 * writing the normalized distribution into {@code scores}.
 * <p>
 * Each label's score is its bias weight plus the dot product of the
 * instance's feature vector with that label's weight row; scores are then
 * converted to probabilities with a numerically stable softmax.
 *
 * @param instance the instance to classify; its data must be a FeatureVector
 * @param scores   output array; must hold exactly one slot per label
 */
public void getClassificationScores (Instance instance, double[] scores)
{
int numLabels = getLabelAlphabet().size();
assert (scores.length == numLabels);
FeatureVector fv = (FeatureVector) instance.getData ();
// Make sure the feature vector's feature dictionary matches
// what we are expecting from our data pipe (and thus our notion
// of feature probabilities).
assert (instancePipe == null || fv.getAlphabet () == this.instancePipe.getDataAlphabet ());
// arrayOutOfBounds if pipe has grown since training
// int numFeatures = getAlphabet().size() + 1;
// NOTE(review): parameters appears to be laid out as one row of numFeatures
// weights per label, with the slot at defaultFeatureIndex holding the bias
// ("default feature") weight -- confirm against the declaring class.
int numFeatures = this.defaultFeatureIndex + 1;
// Include the feature weights according to each label
for (int li = 0; li < numLabels; li++) {
scores[li] = parameters[li*numFeatures + defaultFeatureIndex]
+ MatrixOps.rowDotProduct (parameters, numFeatures,
li, fv,
defaultFeatureIndex,
(perClassFeatureSelection == null
? featureSelection
: perClassFeatureSelection[li]));
// xxxNaN assert (!Double.isNaN(scores[li])) : "li="+li;
}
// Move scores to a range where exp() is accurate, and normalize
// (standard max-subtraction softmax trick).
double max = MatrixOps.max (scores);
double sum = 0;
for (int li = 0; li < numLabels; li++)
sum += (scores[li] = Math.exp (scores[li] - max));
for (int li = 0; li < numLabels; li++) {
scores[li] /= sum;
// xxxNaN assert (!Double.isNaN(scores[li]));
}
}
示例4: getClassificationScores
import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Scores each candidate in the instance's FeatureVectorSequence and writes
 * a normalized distribution over the candidates into {@code scores}.
 * <p>
 * NOTE(review): every candidate is scored against weight row 0, which looks
 * like a ranking model where one shared weight vector scores all candidates;
 * confirm against the declaring class.
 *
 * @param instance the instance; its data must be a FeatureVectorSequence
 * @param scores   output array; must hold one slot per candidate
 */
public void getClassificationScores (Instance instance, double[] scores)
{
FeatureVectorSequence fvs = (FeatureVectorSequence)instance.getData();
int numFeatures = instance.getDataAlphabet().size()+1;
// Here the "labels" are the candidates in the sequence: one score each.
int numLabels = fvs.size();
assert (scores.length == fvs.size());
for (int instanceNumber=0; instanceNumber < fvs.size(); instanceNumber++) {
FeatureVector fv = (FeatureVector)fvs.get(instanceNumber);
// Make sure the feature vector's feature dictionary matches
// what we are expecting from our data pipe (and thus our notion
// of feature probabilities).
assert (fv.getAlphabet ()
== this.instancePipe.getDataAlphabet ());
// Include the feature weights according to each label:
// bias weight at defaultFeatureIndex plus the weighted feature sum.
scores[instanceNumber] = parameters[0*numFeatures + defaultFeatureIndex]
+ MatrixOps.rowDotProduct (parameters, numFeatures,
0, fv,
defaultFeatureIndex,
(perClassFeatureSelection == null
? featureSelection
: perClassFeatureSelection[0]));
}
// Move scores to a range where exp() is accurate, and normalize
// (standard max-subtraction softmax trick).
double max = MatrixOps.max (scores);
double sum = 0;
for (int li = 0; li < numLabels; li++)
sum += (scores[li] = Math.exp (scores[li] - max));
for (int li = 0; li < numLabels; li++) {
scores[li] /= sum;
// xxxNaN assert (!Double.isNaN(scores[li]));
}
}
示例5: getClassificationScores
import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Computes the posterior label distribution for the given instance,
 * writing normalized probabilities into {@code scores} in place.
 *
 * @param instance the instance to classify
 * @param scores   output array; must hold one slot per label
 */
public void getClassificationScores (Instance instance, double[] scores)
{
  getUnnormalizedClassificationScores (instance, scores);
  // Softmax: shift by the max so exp() is numerically accurate, then normalize.
  int numLabels = getLabelAlphabet().size();
  double max = MatrixOps.max (scores);
  double normalizer = 0;
  for (int i = 0; i < numLabels; i++) {
    scores[i] = Math.exp (scores[i] - max);
    normalizer += scores[i];
  }
  for (int i = 0; i < numLabels; i++) {
    scores[i] /= normalizer;
    // xxxNaN assert (!Double.isNaN(scores[i]));
  }
}
示例6: expNormalize
import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Replaces each entry of {@code a} with exp(a[i] - max(a)) and rescales the
 * result in place so the entries sum to one (a numerically stable softmax).
 *
 * @param a log-domain values to normalize; modified in place
 */
private static void expNormalize (double[] a)
{
  double max = MatrixOps.max (a);
  double total = 0;
  for (int j = 0; j < a.length; j++) {
    assert (!Double.isNaN (a[j]));
    // Subtracting the max keeps exp() within a well-conditioned range.
    a[j] = Math.exp (a[j] - max);
    total += a[j];
  }
  for (int j = 0; j < a.length; j++) {
    a[j] /= total;
  }
}
示例7: maxL1MarginalDistance
import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Returns the largest L1 distance between corresponding marginals computed
 * by the two inferencers over the given model.
 *
 * @param mdl  the factor graph whose marginals are compared
 * @param inf1 the first inferencer
 * @param inf2 the second inferencer
 * @return the maximum per-marginal L1 discrepancy
 */
public static double maxL1MarginalDistance (FactorGraph mdl, Inferencer inf1, Inferencer inf2)
{
  double[] distances = allL1MarginalDistance (mdl, inf1, inf2);
  return MatrixOps.max (distances);
}
示例8: combine
import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Combines an array of scores by returning the single largest one.
 *
 * @param scores the candidate scores to combine
 * @return the maximum value in {@code scores}
 */
public double combine (double[] scores) {
return MatrixOps.max(scores);
}
示例9: toString
import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Renders the confusion matrix as a human-readable table: one row per true
 * label, one column per predicted label, plus per-row totals, the trial's
 * accuracy, and the most-frequent-class baseline accuracy.
 *
 * @return the formatted confusion-matrix table
 */
public String toString () {
StringBuffer sb = new StringBuffer ();
// Find the longest label name so the label column can be padded to width.
int maxLabelNameLength = 0;
LabelAlphabet labelAlphabet = trial.getClassifier().getLabelAlphabet();
for (int i = 0; i < numClasses; i++) {
int len = labelAlphabet.lookupLabel(i).toString().length();
if (maxLabelNameLength < len) {
maxLabelNameLength = len;
}
}
// These counts will be integers, but we'll keep them as doubles so we can divide later
double[] correctLabelCounts = new double[values.length];
for (int i = 0; i < correctLabelCounts.length; i++){
// This sum is the number of instances whose correct class is i
correctLabelCounts[i] = MatrixOps.sum(values[i]);
}
// Find the count of the most frequent class and divide that by
// the total number of instances.
double baselineAccuracy = MatrixOps.max(correctLabelCounts) / MatrixOps.sum(correctLabelCounts);
sb.append ("Confusion Matrix, row=true, column=predicted accuracy="+trial.getAccuracy()+" most-frequent-tag baseline="+baselineAccuracy+"\n");
// Header row: pad past the label column, then one column index per class
// (indices below 10 get an extra leading space so the columns line up).
for (int i = 0; i < maxLabelNameLength-5+4; i++) { sb.append (' '); }
sb.append ("label");
for (int c2 = 0; c2 < Math.min(10,numClasses); c2++) { sb.append (" "+c2); }
for (int c2 = 10; c2 < numClasses; c2++) { sb.append (" " + c2); }
sb.append (" |total\n");
// One row per true class: index, padded label name, counts, row total.
for (int c = 0; c < numClasses; c++) {
appendJustifiedInt (sb, c, false);
String labelName = labelAlphabet.lookupLabel(c).toString();
for (int i = 0; i < maxLabelNameLength-labelName.length(); i++) { sb.append (' '); }
sb.append (" "+labelName+" ");
for (int c2 = 0; c2 < numClasses; c2++) {
appendJustifiedInt (sb, values[c][c2], true);
sb.append (' ');
}
sb.append (" |"+ MatrixOps.sum(values[c]));
sb.append ('\n');
}
return sb.toString();
}