本文整理汇总了Java中de.bwaldvogel.liblinear.Linear.predictValues方法的典型用法代码示例。如果您正苦于以下问题:Java Linear.predictValues方法的具体用法?Java Linear.predictValues怎么用?Java Linear.predictValues使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类de.bwaldvogel.liblinear.Linear的用法示例。
在下文中一共展示了Linear.predictValues方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: score
import de.bwaldvogel.liblinear.Linear; //导入方法依赖的package包/类
/**
 * Scores every outcome known to the wrapped LIBLINEAR model for the given features.
 *
 * @param features the ClearTK features to encode and classify
 * @return a map from each decoded outcome to its score (probability for
 *         probability models, raw decision value otherwise)
 * @throws CleartkProcessingException if encoding or decoding fails
 */
@Override
public Map<OUTCOME_TYPE, Double> score(List<Feature> features) throws CleartkProcessingException {
    // Encode the ClearTK features into LIBLINEAR's sparse representation.
    FeatureNode[] nodes = this.featuresEncoder.encodeAll(features);
    int[] labels = this.model.getLabels();
    double[] values = new double[labels.length];

    // Probability models yield calibrated probabilities; all others yield raw decision values.
    if (this.model.isProbabilityModel()) {
        Linear.predictProbability(this.model, nodes, values);
    } else {
        Linear.predictValues(this.model, nodes, values);
    }

    // LIBLINEAR special-cases binary models and fills in only the first value;
    // mirror it so both outcomes receive a score.
    if (this.model.getNrClass() == 2 && values[1] == 0.0) {
        values[1] = -values[0];
    }

    // Decode each encoded label back to its outcome and pair it with its score.
    Map<OUTCOME_TYPE, Double> scoredOutcomes = Maps.newHashMap();
    for (int index = 0; index < labels.length; index++) {
        scoredOutcomes.put(this.outcomeEncoder.decode(labels[index]), values[index]);
    }
    return scoredOutcomes;
}
示例2: testLinearModel
import de.bwaldvogel.liblinear.Linear; //导入方法依赖的package包/类
/**
 * Runs a trained LibLINEAR model over a set of sparse feature vectors and
 * collects one {@code Prediction} per row.
 *
 * @param model   the wrapper around the trained LIBLINEAR model
 * @param problem one sparse feature vector per instance
 * @return one prediction per instance, carrying decision values and labels
 */
private static Prediction[] testLinearModel(LibLINEARModel model, Feature[][] problem) {
    Prediction[] predictions = new Prediction[problem.length];
    int nrClass = model.getModel().getNrClass();
    // Binary models produce a single decision value; multiclass produces one per class.
    int valueCount = (nrClass <= 2) ? 1 : nrClass;
    boolean withProbabilities = model.hasProbabilities();
    for (int row = 0; row < problem.length; row++) {
        double[] decisionValues = new double[valueCount];
        double predicted = withProbabilities
                ? Linear.predictProbability(model.getModel(), problem[row], decisionValues)
                : Linear.predictValues(model.getModel(), problem[row], decisionValues);
        predictions[row] = new Prediction(predicted, row);
        predictions[row].setProbabilities(withProbabilities);
        predictions[row].setDecisionValue(decisionValues);
        predictions[row].setClassLabels(model.getModel().getLabels());
        // LibLINEAR does one-vs-all multiclass prediction, never pairwise voting.
        predictions[row].setPairWise(false);
        predictions[row].setUsedKernel(model.getKernelSetting());
    }
    return predictions;
}
示例3: predict2
import de.bwaldvogel.liblinear.Linear; //导入方法依赖的package包/类
/**
 * Predicts a class label for every row of {@code data} using raw decision
 * values, and optionally prints accuracy against gold labels.
 *
 * @param data   one sparse feature vector per instance
 * @param labels gold labels for accuracy reporting, or {@code null} to skip it
 * @return the predicted label for each instance
 * @deprecated retained for backward compatibility
 */
@Deprecated
public int[] predict2(Feature[][] data, int[] labels) {
    int sampleCount = data.length;
    int[] predicted = new int[sampleCount];
    // Decision values are overwritten per instance; only the returned label is kept.
    double[] decisionValues = new double[nClass];
    for (int idx = 0; idx < sampleCount; idx++) {
        predicted[idx] = (int) Linear.predictValues(model, data[idx], decisionValues);
    }
    // When gold labels are supplied, report classification accuracy.
    if (labels != null) {
        int correct = 0;
        for (int idx = 0; idx < sampleCount; idx++) {
            if (predicted[idx] == labels[idx]) {
                correct++;
            }
        }
        double accuracy = (double) correct / (double) sampleCount;
        System.out.println(String.format("Accuracy: %.2f%%\n", accuracy * 100));
    }
    return predicted;
}
示例4: predict
import de.bwaldvogel.liblinear.Linear; //导入方法依赖的package包/类
/**
 * Converts the model's raw per-class decision values into a normalized
 * probability-like distribution by passing each value through a logistic
 * (sigmoid) transform and dividing by the sum.
 *
 * @param model         the trained LIBLINEAR model
 * @param featuresNodes the instance encoded as sparse feature nodes
 * @return an array of normalized scores indexed by {@code label - 1}
 */
@Override
protected double[] predict(Model model, FeatureNode[] featuresNodes) {
double[] temp,probs;
int classes,label;
double sum = 0;
classes = model.getNrClass();
temp = new double[classes];
// Raw one-vs-rest decision values, one per class, in the model's label order.
Linear.predictValues(model, featuresNodes, temp);
// Squash each decision value through a sigmoid; accumulate the total for normalization.
for (int i = 0; i < classes; i++) {
temp[i] = 1 / (1 + Math.exp(-temp[i]));
sum += temp[i];
}
probs=new double[classes];
// NOTE(review): assumes class labels are 1-based (so label-1 is a valid index into
// probs) and that non-positive labels should carry no probability mass — entries for
// such labels are left at 0.0. Confirm this matches the label encoding used in training.
for(int i=0;i<classes;i++){
label = model.getLabels()[i];
if (label > 0)
probs[label-1]=temp[i]/sum;
}
return probs;
}
示例5: performPrediction
import de.bwaldvogel.liblinear.Linear; //导入方法依赖的package包/类
/**
 * Applies the linear model to every example in the set, writing the predicted
 * class and, for binary nominal labels, sigmoid-based confidence values.
 *
 * @param exampleSet     the examples to classify (modified in place)
 * @param predictedLabel the attribute receiving the predicted class
 * @return the same example set with predictions (and confidences) filled in
 * @throws OperatorException if the sparse transform fails
 */
@Override
public ExampleSet performPrediction(ExampleSet exampleSet, Attribute predictedLabel) throws OperatorException {
    FastExample2SparseTransform sparseTransform = new FastExample2SparseTransform(exampleSet);
    Attribute label = getLabel();
    // Resolve the pre-existing confidence attribute for each model label, if the label is nominal.
    Attribute[] confidenceAttributes = null;
    if (label.isNominal() && label.getMapping().size() >= 2) {
        int labelCount = linearModel.label.length;
        confidenceAttributes = new Attribute[labelCount];
        for (int idx = 0; idx < labelCount; idx++) {
            String labelName = label.getMapping().mapIndex(linearModel.label[idx]);
            confidenceAttributes[idx] = exampleSet.getAttributes()
                    .getSpecial(Attributes.CONFIDENCE_NAME + "_" + labelName);
        }
    }
    Iterator<Example> exampleIterator = exampleSet.iterator();
    while (exampleIterator.hasNext()) {
        Example example = exampleIterator.next();
        // Encode the example and store the predicted class.
        FeatureNode[] nodes = FastLargeMargin.makeNodes(example, sparseTransform, this.useBias);
        example.setValue(predictedLabel, Linear.predict(linearModel, nodes));
        // For binary problems, derive both confidences from the single decision value via a sigmoid.
        if (label.getMapping().size() == 2) {
            double[] decisionValues = new double[linearModel.nr_class];
            Linear.predictValues(linearModel, nodes, decisionValues);
            double margin = decisionValues[0];
            if (confidenceAttributes != null && confidenceAttributes.length > 0) {
                example.setValue(confidenceAttributes[0], 1.0d / (1.0d + java.lang.Math.exp(-margin)));
                if (confidenceAttributes.length > 1) {
                    example.setValue(confidenceAttributes[1], 1.0d / (1.0d + java.lang.Math.exp(margin)));
                }
            }
        }
    }
    return exampleSet;
}
示例6: distributionForInstance
import de.bwaldvogel.liblinear.Linear; //导入方法依赖的package包/类
/**
 * Computes the class distribution for a given instance by querying each
 * one-vs-all binary model and either picking the argmax (hard prediction)
 * or converting the decision values to probabilities.
 * <br>
 * Portions of the code were taken from the LibLINEAR class. Original author: Benedikt Waldvogel (mail at bwaldvogel.de)
 *
 * @param instance The instance for which distribution is computed
 * @return The distribution (a 1-hot vector unless probability estimates are enabled)
 * @throws Exception If the distribution can't be computed successfully
 */
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
///////////////////////////// Copied from LibLINEAR class /////////////////////////////////
// Apply the same preprocessing pipeline used at training time, in the same order:
// missing-value replacement, then nominal-to-binary expansion, then the optional filter.
m_ReplaceMissingValues.input(instance);
m_ReplaceMissingValues.batchFinished();
instance = m_ReplaceMissingValues.output();
m_NominalToBinary.input(instance);
m_NominalToBinary.batchFinished();
instance = m_NominalToBinary.output();
if (m_Filter != null) {
m_Filter.input(instance);
m_Filter.batchFinished();
instance = m_Filter.output();
}
double[] result = new double[instance.numClasses()];
///////////////////////////////////////////////////////////////////////////////////////////
// Probability estimates are only meaningful for the logistic-regression solvers.
if (instance.classAttribute().isNominal() && (m_ProbabilityEstimates))
if (m_SolverType != SolverType.L2R_LR && m_SolverType != SolverType.L2R_LR_DUAL && m_SolverType != SolverType.L1R_LR)
throw new WekaException("Probability estimation is currently only " + "supported for logistic regression");
// Query each one-vs-all binary model for its decision value on this instance.
for (int modelInd = 0; modelInd < models.length; modelInd++) {
FeatureNode[] x = instanceToArray(instance, modelInd);
double[] dec_values = new double[1];
Linear.predictValues(models[modelInd], x, dec_values);
// The result value is the distance from the separating hyperplane for the class that is being considered
// If the distance is positive - the instance belongs to the class that is being considered; if it is negative - it does not
// We do not remap the labels here since LibLINEAR always puts the +1 class at index 0, and we assigned the +1 value in training to the class whose binary one-vs-all classifier this is
result[modelInd] = dec_values[0];
}
if (!m_ProbabilityEstimates) {
// In the multiclass setting, the chosen class is the one with the largest distance from the separating hyperplane
// In a binary setting there is only one value - if it is greater than 0 (i.e. instance does belong to class[0]) then maxInd remains = 0, else it is changed to 1
int maxInd = 0;
for (int i = 1; i < result.length; i++)
if (result[i] > result[maxInd])
maxInd = i;
// Return a 1-hot distribution for the winning class.
result = new double[instance.numClasses()];
result[maxInd] = 1;
return result;
}
else {
// Calculates the probabilities in the same way as in the LibLINEAR and Linear classes:
// per-class sigmoid, with the binary case handled as a complement and the
// multiclass case normalized to sum to 1.
double [] prob_estimates = new double[instance.numClasses()];
for (int i = 0; i < prob_estimates.length; i++)
prob_estimates[i] = 1 / (1 + Math.exp(-result[i]));
if (instance.numClasses() == 2) // for binary classification
prob_estimates[1] = 1. - prob_estimates[0];
else {
double sum = 0;
for (int i = 0; i < instance.numClasses(); i++)
sum += prob_estimates[i];
for (int i = 0; i < instance.numClasses(); i++)
prob_estimates[i] = prob_estimates[i] / sum;
}
return prob_estimates;
}
}
示例7: performPrediction
import de.bwaldvogel.liblinear.Linear; //导入方法依赖的package包/类
/**
 * Applies the linear model to every example in the set, writing the predicted
 * class and, for binary nominal labels, sigmoid-based confidence values.
 * Reports progress through the attached operator when one is available.
 *
 * @param exampleSet     the examples to classify (modified in place)
 * @param predictedLabel the attribute receiving the predicted class
 * @return the same example set with predictions (and confidences) filled in
 * @throws OperatorException if the sparse transform fails
 */
@Override
public ExampleSet performPrediction(ExampleSet exampleSet, Attribute predictedLabel) throws OperatorException {
    FastExample2SparseTransform sparseTransform = new FastExample2SparseTransform(exampleSet);
    Attribute label = getLabel();
    // Resolve the pre-existing confidence attribute for each model label, if the label is nominal.
    Attribute[] confidenceAttributes = null;
    if (label.isNominal() && label.getMapping().size() >= 2) {
        int labelCount = linearModel.label.length;
        confidenceAttributes = new Attribute[labelCount];
        for (int idx = 0; idx < labelCount; idx++) {
            String labelName = label.getMapping().mapIndex(linearModel.label[idx]);
            confidenceAttributes[idx] = exampleSet.getAttributes()
                    .getSpecial(Attributes.CONFIDENCE_NAME + "_" + labelName);
        }
    }
    // Only report progress when it is both enabled and an operator progress sink exists.
    OperatorProgress progress = null;
    if (getShowProgress() && getOperator() != null && getOperator().getProgress() != null) {
        progress = getOperator().getProgress();
        progress.setTotal(exampleSet.size());
    }
    int examplesDone = 0;
    Iterator<Example> exampleIterator = exampleSet.iterator();
    while (exampleIterator.hasNext()) {
        Example example = exampleIterator.next();
        // Encode the example and store the predicted class.
        FeatureNode[] nodes = FastLargeMargin.makeNodes(example, sparseTransform, this.useBias);
        example.setValue(predictedLabel, Linear.predict(linearModel, nodes));
        // For binary problems, derive both confidences from the single decision value via a sigmoid.
        if (label.getMapping().size() == 2) {
            double[] decisionValues = new double[linearModel.nr_class];
            Linear.predictValues(linearModel, nodes, decisionValues);
            double margin = decisionValues[0];
            if (confidenceAttributes != null && confidenceAttributes.length > 0) {
                example.setValue(confidenceAttributes[0], 1.0d / (1.0d + java.lang.Math.exp(-margin)));
                if (confidenceAttributes.length > 1) {
                    example.setValue(confidenceAttributes[1], 1.0d / (1.0d + java.lang.Math.exp(margin)));
                }
            }
        }
        // Update progress periodically rather than per example to limit overhead.
        if (progress != null) {
            examplesDone++;
            if (examplesDone % OPERATOR_PROGRESS_STEPS == 0) {
                progress.setCompleted(examplesDone);
            }
        }
    }
    return exampleSet;
}
示例8: liblinear_predict_with_kbestlist
import de.bwaldvogel.liblinear.Linear; //导入方法依赖的package包/类
/**
 * Fills the supplied k-best list with class predictions ranked by decreasing
 * decision value. When the list's k is -1, all classes are added.
 *
 * @param model     the trained LIBLINEAR model
 * @param x         the instance encoded as sparse feature nodes
 * @param kBestList the list to populate; scored lists also receive the decision values
 * @throws MaltChainedException if adding to the k-best list fails
 */
public void liblinear_predict_with_kbestlist(Model model, FeatureNode[] x, KBestList kBestList) throws MaltChainedException {
    final int nrClass = model.getNrClass();
    final double[] decisionValues = new double[nrClass];
    Linear.predictValues(model, x, decisionValues);
    // Copy the model's label ids; they are permuted in step with the decision values below.
    final int[] labels = model.getLabels();
    final int[] rankedLabels = new int[nrClass];
    for (int c = 0; c < nrClass; c++) {
        rankedLabels[c] = labels[c];
    }
    // Selection sort both arrays into descending order of decision value.
    for (int i = 0; i < nrClass - 1; i++) {
        int best = i;
        for (int j = i; j < nrClass; j++) {
            if (decisionValues[j] > decisionValues[best]) {
                best = j;
            }
        }
        double tmpValue = decisionValues[best];
        decisionValues[best] = decisionValues[i];
        decisionValues[i] = tmpValue;
        int tmpLabel = rankedLabels[best];
        rankedLabels[best] = rankedLabels[i];
        rankedLabels[i] = tmpLabel;
    }
    // Emit at most k entries (all classes when k == -1).
    int remaining = (kBestList.getK() != -1) ? kBestList.getK() - 1 : nrClass - 1;
    for (int i = 0; i < nrClass && remaining >= 0; i++, remaining--) {
        if (kBestList instanceof ScoredKBestList) {
            ((ScoredKBestList) kBestList).add(rankedLabels[i], (float) decisionValues[i]);
        } else {
            kBestList.add(rankedLabels[i]);
        }
    }
}