This article collects and summarizes typical usage examples of the Java method cc.mallet.types.Labeling.getBestIndex. If you are wondering what Labeling.getBestIndex does, how to call it, or what real-world usage looks like, the curated method examples below should help. You can also browse further usage examples of its containing class, cc.mallet.types.Labeling.
Three code examples of Labeling.getBestIndex are shown below, sorted by popularity by default.
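Before the examples, here is a minimal sketch of the most common getBestIndex pattern: classify a single instance and read off the highest-scoring label. The classifier and instance variables are illustrative placeholders, not taken from the examples below; the MALLET calls themselves (classify, getLabeling, getBestIndex, getBestLabel, getBestValue) are part of the standard API.

import cc.mallet.classify.Classification;
import cc.mallet.classify.Classifier;
import cc.mallet.types.Instance;
import cc.mallet.types.Label;
import cc.mallet.types.Labeling;

public class BestIndexDemo {
    // Classify one instance and read off the winning label via getBestIndex.
    static void printBestLabel (Classifier classifier, Instance instance) {
        Classification classification = classifier.classify(instance);
        Labeling labeling = classification.getLabeling();
        int bestIndex = labeling.getBestIndex();    // index of the highest-scoring label
        Label bestLabel = labeling.getBestLabel();  // the Label object at that index
        double bestScore = labeling.getBestValue(); // its score (a probability for, e.g., MaxEnt)
        System.out.println(bestLabel + " (index " + bestIndex + ", score " + bestScore + ")");
    }
}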
Example 1: getValue
import cc.mallet.types.Labeling; // import the package/class the method depends on
public double getValue ()
{
  if (cachedValueStale) {
    numGetValueCalls++;
    cachedValue = 0;
    // We'll store the expectation values in "cachedGradient" for now
    cachedGradientStale = true;
    MatrixOps.setAll (cachedGradient, 0.0);
    // Incorporate likelihood of data
    double[] scores = new double[trainingList.getTargetAlphabet().size()];
    double value = 0.0;
    Iterator<Instance> iter = trainingList.iterator();
    int ii = 0;
    while (iter.hasNext()) {
      ii++;
      Instance instance = iter.next();
      double instanceWeight = trainingList.getInstanceWeight(instance);
      Labeling labeling = instance.getLabeling ();
      if (labeling == null)
        continue;
      //System.out.println("L Now "+inputAlphabet.size()+" regular features.");
      this.theClassifier.getClassificationScores (instance, scores);
      FeatureVector fv = (FeatureVector) instance.getData ();
      int li = labeling.getBestIndex();
      value = - (instanceWeight * Math.log (scores[li]));
      if (Double.isNaN(value)) {
        logger.fine ("MaxEntTrainer: Instance " + instance.getName() +
            " has NaN value. log(scores)= " + Math.log(scores[li]) +
            " scores = " + scores[li] +
            " has instance weight = " + instanceWeight);
      }
      if (Double.isInfinite(value)) {
        logger.warning ("Instance " + instance.getSource() + " has infinite value; skipping value and gradient");
        cachedValue -= value;
        cachedValueStale = false;
        return -value;
        // continue;
      }
      cachedValue += value;
      for (int si = 0; si < scores.length; si++) {
        if (scores[si] == 0) continue;
        assert (!Double.isInfinite(scores[si]));
        MatrixOps.rowPlusEquals (cachedGradient, numFeatures,
            si, fv, -instanceWeight * scores[si]);
        cachedGradient[numFeatures*si + defaultFeatureIndex] += (-instanceWeight * scores[si]);
      }
    }
    //logger.info ("-Expectations:"); cachedGradient.print();
    // Incorporate prior on parameters
    double prior = 0;
    if (usingHyperbolicPrior) {
      for (int li = 0; li < numLabels; li++)
        for (int fi = 0; fi < numFeatures; fi++)
          prior += (hyperbolicPriorSlope / hyperbolicPriorSharpness
              * Math.log (Maths.cosh (hyperbolicPriorSharpness * parameters[li*numFeatures + fi])));
    }
    else if (usingGaussianPrior) {
      for (int li = 0; li < numLabels; li++)
        for (int fi = 0; fi < numFeatures; fi++) {
          double param = parameters[li*numFeatures + fi];
          prior += param * param / (2 * gaussianPriorVariance);
        }
    }
    double oValue = cachedValue;
    cachedValue += prior;
    cachedValue *= -1.0; // MAXIMIZE, NOT MINIMIZE
    cachedValueStale = false;
    progressLogger.info ("Value (labelProb=" + oValue + " prior=" + prior + ") loglikelihood = " + cachedValue);
  }
  return cachedValue;
}
Author: kostagiolasn | Project: NucleosomePatternClassifier | Lines: 76 | Source: MaxEntOptimizableByLabelLikelihood.java
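In this example, getBestIndex is called on the gold labeling to pick out the probability the classifier assigned to the correct class, so each instance contributes -instanceWeight * log(scores[li]) to the negative log-likelihood. A stripped-down sketch of just that pattern (method and class names here are illustrative, not from the original source):

import cc.mallet.types.Instance;
import cc.mallet.types.Labeling;

public class NllSketch {
    // Per-instance negative log-likelihood contribution, isolated from Example 1.
    // scores holds the classifier's per-label probabilities for this instance.
    static double instanceNll (double[] scores, Instance instance, double instanceWeight) {
        Labeling labeling = instance.getLabeling(); // gold label(s) for this instance
        int li = labeling.getBestIndex();           // index of the gold label
        return -(instanceWeight * Math.log(scores[li]));
    }
}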
Example 2: train
import cc.mallet.types.Labeling; // import the package/class the method depends on
/**
 * Trains Winnow on the instance list, updating
 * {@link #weights weights} according to errors.
 * @param trainingList Instance list to be trained on
 * @return Classifier object containing learned weights
 */
public Winnow train (InstanceList trainingList)
{
  FeatureSelection selectedFeatures = trainingList.getFeatureSelection();
  if (selectedFeatures != null)
    // xxx Attend to FeatureSelection!!!
    throw new UnsupportedOperationException ("FeatureSelection not yet implemented.");
  // if "train" is run more than once,
  // we will be reinitializing the weights
  // TODO: provide method to save weights
  trainingList.getDataAlphabet().stopGrowth();
  trainingList.getTargetAlphabet().stopGrowth();
  Pipe dataPipe = trainingList.getPipe ();
  Alphabet dict = (Alphabet) trainingList.getDataAlphabet ();
  int numLabels = trainingList.getTargetAlphabet().size();
  int numFeats = dict.size();
  this.theta = numFeats * this.nfactor;
  this.weights = new double [numLabels][numFeats];
  // init weights to 1
  for (int i = 0; i < numLabels; i++)
    for (int j = 0; j < numFeats; j++)
      this.weights[i][j] = 1.0;
  //System.out.println("Init weights to 1. Theta= "+theta);
  // loop through all instances
  for (int ii = 0; ii < trainingList.size(); ii++) {
    Instance inst = (Instance) trainingList.get(ii);
    Labeling labeling = inst.getLabeling ();
    FeatureVector fv = (FeatureVector) inst.getData ();
    double[] results = new double [numLabels];
    int fvisize = fv.numLocations();
    int correctIndex = labeling.getBestIndex();
    for (int rpos = 0; rpos < numLabels; rpos++)
      results[rpos] = 0;
    // sum up xi*wi for each class
    for (int fvi = 0; fvi < fvisize; fvi++) {
      int fi = fv.indexAtLocation(fvi);
      //System.out.println("feature index "+fi);
      for (int lpos = 0; lpos < numLabels; lpos++)
        results[lpos] += this.weights[lpos][fi];
    }
    //System.out.println("In instance " + ii);
    // make guess for each label using threshold;
    // update weights according to alpha and beta
    // upon incorrect guess
    for (int ri = 0; ri < numLabels; ri++) {
      if (results[ri] > this.theta) { // guess 1
        if (correctIndex != ri) // correct is 0
          demote(ri, fv);
      }
      else { // guess 0
        if (correctIndex == ri) // correct is 1
          promote(ri, fv);
      }
    }
    // System.out.println("Results guessed:")
    // for(int x=0; x<numLabels; x++)
    //   System.out.println(results[x]);
    // System.out.println("Correct label: "+correctIndex );
    // System.out.println("Weights are");
    // for(int h=0; h<numLabels; h++){
    //   for(int g=0; g<numFeats; g++)
    //     System.out.println(weights[h][g]);
    //   System.out.println("");
    // }
  }
  classifier = new Winnow (dataPipe, weights, theta, numLabels, numFeats);
  return classifier;
}
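For context, a minimal sketch of how a trainer like this is typically driven, assuming MALLET's stock WinnowTrainer (whose train method matches the one above) and an InstanceList built elsewhere; the class and variable names here are illustrative:

import cc.mallet.classify.Trial;
import cc.mallet.classify.Winnow;
import cc.mallet.classify.WinnowTrainer;
import cc.mallet.types.InstanceList;

public class WinnowDemo {
    // Train Winnow on an InstanceList, then sanity-check accuracy on the same data.
    static Winnow trainAndEvaluate (InstanceList training) {
        WinnowTrainer trainer = new WinnowTrainer(); // default alpha/beta/nfactor
        Winnow classifier = trainer.train(training);
        Trial trial = new Trial(classifier, training); // classify the training set
        System.out.println("training accuracy = " + trial.getAccuracy());
        return classifier;
    }
}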
Example 3: valueOfCorrectLabel
import cc.mallet.types.Labeling; // import the package/class the method depends on
public double valueOfCorrectLabel ()
{
  Labeling correctLabeling = instance.getLabeling();
  int correctLabelIndex = correctLabeling.getBestIndex();
  return labeling.value (correctLabelIndex);
}
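A common companion idiom compares the predicted best index against the gold one to decide whether a classification is correct. The sketch below assumes a Classification produced for an instance that carries a gold labeling; the class and method names are illustrative. Note that MALLET's Classification class already provides bestLabelIsCorrect() for exactly this check.

import cc.mallet.classify.Classification;
import cc.mallet.types.Labeling;

public class CorrectnessCheck {
    // True when the predicted top label matches the instance's gold label.
    static boolean isCorrect (Classification classification) {
        Labeling predicted = classification.getLabeling();
        Labeling gold = classification.getInstance().getLabeling();
        return predicted.getBestIndex() == gold.getBestIndex();
    }
}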