This article collects typical usage examples of the Java class de.bwaldvogel.liblinear.Linear. If you are wondering what the Linear class is for or how to use it, the examples selected here should help.
The Linear class belongs to the de.bwaldvogel.liblinear package. The following 15 code examples show how it is used, sorted by popularity by default.
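Before the individual examples, here is a minimal end-to-end sketch of the basic train/predict flow. The class name and the toy data are invented for illustration; only the liblinear-java types and calls (Problem, FeatureNode, Parameter, SolverType, Linear.train, Linear.predict) come from the library.
import de.bwaldvogel.liblinear.*;

public class LinearQuickStart {
    public static void main(String[] args) {
        // Build a tiny two-class problem. Feature indices are 1-based and must be ascending.
        Problem problem = new Problem();
        problem.l = 4;   // number of training instances
        problem.n = 2;   // number of features
        problem.x = new Feature[][] {
            { new FeatureNode(1, 0.0), new FeatureNode(2, 0.1) },
            { new FeatureNode(1, 0.1), new FeatureNode(2, 0.0) },
            { new FeatureNode(1, 0.9), new FeatureNode(2, 1.0) },
            { new FeatureNode(1, 1.0), new FeatureNode(2, 0.9) }
        };
        problem.y = new double[] { 0, 0, 1, 1 };   // class labels

        Parameter parameter = new Parameter(SolverType.L2R_L2LOSS_SVC, 1.0, 0.01);
        Model model = Linear.train(problem, parameter);

        Feature[] instance = { new FeatureNode(1, 0.95), new FeatureNode(2, 0.95) };
        System.out.println("Predicted class: " + Linear.predict(model, instance));
    }
}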
Example 1: learn
import de.bwaldvogel.liblinear.Linear; // import the required package/class
/** Learns a new linear SVM model with the LIBLINEAR package. */
@Override
public Model learn(ExampleSet exampleSet) throws OperatorException {
Parameter params = getParameters(exampleSet);
if (exampleSet.size() < 2) {
throw new UserError(this, 110, 2);
}
Linear.resetRandom();
Linear.disableDebugOutput();
Problem problem = getProblem(exampleSet);
de.bwaldvogel.liblinear.Model model = Linear.train(problem, params);
return new FastMarginModel(exampleSet, model, getParameterAsBoolean(PARAMETER_USE_BIAS));
}
Example 2: getFeatureImportance
import de.bwaldvogel.liblinear.Linear; // import the required package/class
/**
* @param gatherer
* @param features
* @return an array of feature IDs (>=1), ordered by feature importance, without zero-importance features.
*/
private static <T extends Serializable, G extends Serializable> int[] getFeatureImportance(ExampleGatherer<T, G> gatherer,
int[] features) {
ZScoreFeatureNormalizer scaleFn = ZScoreFeatureNormalizer.fromGatherer(gatherer);
Parameter param = new Parameter(SolverType.L2R_L2LOSS_SVR, 0.01, 0.001);
Problem problem = gatherer.generateLibLinearProblem(features, scaleFn);
Model m = Linear.train(problem, param);
double[] weights = m.getFeatureWeights();
int[] ftrImportance = Arrays.stream(features).boxed().sorted(new Comparator<Integer>() {
@Override
public int compare(Integer fId0, Integer fId1) {
return Double.compare(Math.abs(weights[ArrayUtils.indexOf(features, fId0)]), Math.abs(weights[ArrayUtils.indexOf(features, fId1)]));
}
}).filter(fId -> weights[ArrayUtils.indexOf(features, fId)] != 0.0).mapToInt(fId -> fId.intValue()).toArray();
return ftrImportance;
}
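The ranking above relies on the fact that, for a single-weight-vector solver such as L2R_L2LOSS_SVR, Model.getFeatureWeights() returns one weight per feature, so the absolute weight can be used as an importance score. Below is a standalone sketch of the same idea, sorted with the largest weights first; it assumes a trained model m without a bias term and is independent of the project-specific ExampleGatherer (requires java.util.List and java.util.ArrayList).
double[] w = m.getFeatureWeights();  // one weight per feature for a single weight-vector model
List<Integer> ranked = new ArrayList<>();
for (int fId = 1; fId <= w.length; fId++) {  // feature IDs are 1-based
    if (w[fId - 1] != 0.0) {
        ranked.add(fId);
    }
}
// sort so the largest |weight| (most important feature) comes first
ranked.sort((a, b) -> Double.compare(Math.abs(w[b - 1]), Math.abs(w[a - 1])));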
Example 3: serialize
import de.bwaldvogel.liblinear.Linear; // import the required package/class
public void serialize(OutputStream out) throws IOException {
DataOutputStream ds = new DataOutputStream(out);
ByteArrayOutputStream modelBytes = new ByteArrayOutputStream();
Linear.saveModel(new OutputStreamWriter(modelBytes, LIBLINEAR_MODEL_ENCODING), model);
ds.writeInt(modelBytes.size());
ds.write(modelBytes.toByteArray());
// write string array
// write label count
ds.writeInt(outcomeLabels.length);
// write each label
for (String outcomeLabel : outcomeLabels) {
ds.writeUTF(outcomeLabel);
}
// write entry count
ds.writeInt(predMap.size());
for (Map.Entry<String, Integer> entry : predMap.entrySet()) {
ds.writeUTF(entry.getKey());
ds.writeInt(entry.getValue());
}
}
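The byte layout written here — model length, the serialized LIBLINEAR model bytes, the outcome-label count and labels, then the predictor-map entries — is read back in the same order by the LiblinearModel(InputStream) constructor in Example 15 below.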
Example 4: trainSvm
import de.bwaldvogel.liblinear.Linear; // import the required package/class
/**
* Train SVM model. Return alpha and w matrix.
*
* */
public StoreAlphaWeight trainSvm(File saveModel) throws Exception{
StoreAlphaWeight saww=new StoreAlphaWeight();
this.modelFile=saveModel;
Problem problem=new Problem();
problem.l=train;
problem.n=dimensions;
problem.x=vectrain;
problem.y=trainattr;
SolverType s=SolverType.MCSVM_CS;
Parameter parameter = new Parameter(s, C, eps);
Model modelg = Linear.train(problem, parameter, saww);
try {
modelg.save(saveModel);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return saww;
}
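Note that StoreAlphaWeight and the three-argument Linear.train(problem, parameter, saww) call used here are not part of the stock liblinear-java API (which exposes Linear.train(Problem, Parameter)); they appear to come from a project-local extension of the library.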
Example 5: evaluateSvm
import de.bwaldvogel.liblinear.Linear; // import the required package/class
public double[] evaluateSvm() throws Exception{
int right=0;
Model model = Model.load(modelFile);
for(int t=0;t<test;t++){
double prediction = Linear.predict(model, vectest[t]);
if(prediction==testattr[t]){
right++;
}
}
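// note: the value computed below is overall accuracy (correct / total test instances), even though it is printed as "Precision"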
double precision=(double)right/test;
System.err.println("*************Precision = "+precision*100+"%*************");
double storeResult[]=new double[3];
storeResult[0]=right;
storeResult[1]=test;
storeResult[2]=precision;
return storeResult;
}
Example 6: train
import de.bwaldvogel.liblinear.Linear; // import the required package/class
public void train(List<Pair<CounterInterface<Integer>,Integer>> trainSet) {
Problem problem = new Problem();
FeatureNode[][] x = new FeatureNode[trainSet.size()][];
double[] y = new double[trainSet.size()];
int maxFeature = 0;
for (int i=0; i<x.length; ++i) {
CounterInterface<Integer> features = trainSet.get(i).getFirst();
for (Map.Entry<Integer, Double> feat : features.entries()) {
maxFeature = Math.max(feat.getKey()+1, maxFeature);
}
x[i] = convertToFeatureNodes(features);
y[i] = trainSet.get(i).getSecond();
}
problem.l = trainSet.size();
problem.n = maxFeature;
problem.x = x;
problem.y = y;
problem.bias = 0.0;
Parameter parameter = new Parameter(solverType, C, eps);
model = Linear.train(problem, parameter);
}
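The convertToFeatureNodes helper referenced above is not shown in this example. A plausible implementation is sketched below; it assumes the counter keys are 0-based feature indices (as suggested by the maxFeature = feat.getKey() + 1 bookkeeping) and needs java.util.ArrayList, java.util.Comparator, java.util.List and java.util.Map. The project's actual helper may differ.
private static FeatureNode[] convertToFeatureNodes(CounterInterface<Integer> features) {
    List<FeatureNode> nodes = new ArrayList<>();
    for (Map.Entry<Integer, Double> feat : features.entries()) {
        // shift to LIBLINEAR's 1-based feature indices
        nodes.add(new FeatureNode(feat.getKey() + 1, feat.getValue()));
    }
    // LIBLINEAR requires feature indices in ascending order within an instance
    nodes.sort(Comparator.comparingInt(FeatureNode::getIndex));
    return nodes.toArray(new FeatureNode[0]);
}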
Example 7: predictOne
import de.bwaldvogel.liblinear.Linear; // import the required package/class
public Matrix predictOne(Feature[] x) {
Matrix result = null;
if (model.isProbabilityModel()) {
double[] probabilities = new double[model.getNrClass()];
Linear.predictProbability(model, x, probabilities);
result = Matrix.Factory.zeros(1, model.getNrClass());
for (int i = 0; i < probabilities.length; i++) {
int label = model.getLabels()[i];
result.setAsDouble(probabilities[i], 0, label);
}
} else {
double classId = Linear.predict(model, x);
result = Matrix.Factory.zeros(1, Math.max(model.getNrClass(), (int) (classId + 1)));
result.setAsDouble(1.0, 0, (int) classId);
}
return result;
}
Example 8: score
import de.bwaldvogel.liblinear.Linear; // import the required package/class
@Override
public Map<OUTCOME_TYPE, Double> score(List<Feature> features) throws CleartkProcessingException {
FeatureNode[] encodedFeatures = this.featuresEncoder.encodeAll(features);
// get score for each outcome
int[] encodedOutcomes = this.model.getLabels();
double[] scores = new double[encodedOutcomes.length];
if (this.model.isProbabilityModel()) {
Linear.predictProbability(this.model, encodedFeatures, scores);
} else {
Linear.predictValues(this.model, encodedFeatures, scores);
}
// handle 2-class model, which is special-cased by LIBLINEAR to only return one score
if (this.model.getNrClass() == 2 && scores[1] == 0.0) {
scores[1] = -scores[0];
}
// create scored outcome objects
Map<OUTCOME_TYPE, Double> scoredOutcomes = Maps.newHashMap();
for (int i = 0; i < encodedOutcomes.length; ++i) {
OUTCOME_TYPE outcome = this.outcomeEncoder.decode(encodedOutcomes[i]);
scoredOutcomes.put(outcome, scores[i]);
}
return scoredOutcomes;
}
Example 9: testLinearModel
import de.bwaldvogel.liblinear.Linear; // import the required package/class
private static Prediction[] testLinearModel(LibLINEARModel model, Feature[][] problem) {
Prediction[] pred = new Prediction[problem.length];
for (int i = 0; i < problem.length; i++) {
double[] decVal = new double[(model.getModel().getNrClass() <= 2) ? 1 : model.getModel().getNrClass()];
if (!model.hasProbabilities()) {
pred[i] = new Prediction(Linear.predictValues(model.getModel(), problem[i], decVal), i);
pred[i].setProbabilities(false);
} else {
pred[i] = new Prediction(Linear.predictProbability(model.getModel(), problem[i], decVal), i);
pred[i].setProbabilities(true);
}
pred[i].setDecisionValue(decVal);
pred[i].setClassLabels(model.getModel().getLabels());
pred[i].setPairWise(false); // LibLINEAR does not do pairwise multiclass prediction, but 1 vs all
pred[i].setUsedKernel(model.getKernelSetting());
}
return pred;
}
Example 10: train
import de.bwaldvogel.liblinear.Linear; // import the required package/class
public static void train() throws IOException, InvalidInputDataException{
String file = "output\\svm/book_svm.svm";
Problem problem = Problem.readFromFile(new File(file),-1);
SolverType solver = SolverType.L2R_LR; // -s 0
double C = 1.0; // cost of constraints violation
double eps = 0.01; // stopping criteria
Parameter parameter = new Parameter(solver, C, eps);
Model model = Linear.train(problem, parameter);
File modelFile = new File("output/model");
model.save(modelFile);
System.out.println(modelFile.getAbsolutePath());
// load model or use it directly
model = Model.load(modelFile);
Feature[] instance = { new FeatureNode(1, 4), new FeatureNode(2, 2) };
double prediction = Linear.predict(model, instance);
System.out.println(prediction);
int nr_fold = 10;
double[] target = new double[problem.l];
Linear.crossValidation(problem, parameter, nr_fold, target);
}
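The target array filled by Linear.crossValidation is not used in the example above. A possible follow-up, not in the original code, that turns it into a cross-validation accuracy:
int correct = 0;
for (int i = 0; i < problem.l; i++) {
    if (target[i] == problem.y[i]) {  // predicted fold label vs. true label
        correct++;
    }
}
System.out.printf("10-fold cross-validation accuracy: %.2f%%%n", 100.0 * correct / problem.l);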
Example 11: predict2
import de.bwaldvogel.liblinear.Linear; // import the required package/class
@Deprecated
public static int[] predict2(Model model, Feature[][] data, int[] labels) {
int N = data.length;
int[] pre_label = new int[N];
for ( int i = 0; i < N; i ++ ) {
pre_label[i] = (int) Linear.predict(model, data[i]); // Linear.predict returns double
}
if (labels != null) {
int cnt_correct = 0;
for ( int i = 0; i < N; i ++ ) {
if ( pre_label[i] == labels[i] )
cnt_correct ++;
}
double accuracy = (double)cnt_correct / (double)N;
System.out.println(String.format("Accuracy: %.2f%%\n", accuracy * 100));
}
return pre_label;
}
Example 12: analyze
import de.bwaldvogel.liblinear.Linear; // import the required package/class
@Override
public Analysis analyze(Analyzable a) throws AnalyzerFailureException {
if (a == null) return null;
if (!(a instanceof IdentifiableTextContent)) {
throw new AnalyzerFailureException("Analyzable not identifiable. This analyzer requires an IdentifiableTextContent.");
}
IdentifiableTextContent tc = (IdentifiableTextContent) a;
try {
Feature[] vector = LibLinearUtils.toLibLinear(representer.represent(tc.getText()).toSvmNodes());
double[] probs = new double[labels.size()];
Linear.predictProbability(model, vector, probs);
ClassificationAnalysis analysis = new ClassificationAnalysis(tc.getId());
for (int i = 0; i < labelIndeces.length; ++i) {
analysis.addClassification(labels.get(labelIndeces[i]), probs[i] >= threshold ? probs[i] : 0d);
}
return analysis;
} catch (Exception e) {
throw new AnalyzerFailureException("Classifier failed on record " + tc.getId(), e);
}
}
Example 13: getLabel
import de.bwaldvogel.liblinear.Linear; // import the required package/class
@Override
public String getLabel(JCas cas) {
Vector<Feature[]> instanceFeatures = applyFeatures(cas, features);
Feature[] instance = combineInstanceFeatures(instanceFeatures);
probEstimates = new double[model.getNrClass()];
Double prediction;
if (model.getSolverType().isLogisticRegressionSolver()) {
prediction = Linear.predictProbability(model, instance, probEstimates);
score = probEstimates[prediction.intValue()];
} else {
prediction = Linear.predict(model, instance);
}
label = labelMappings.get(prediction);
return label;
}
Example 14: getLabel
import de.bwaldvogel.liblinear.Linear; // import the required package/class
@Override
public String getLabel(JCas cas) {
Vector<Feature[]> instanceFeatures = applyFeatures(cas, features);
Feature[] instance = combineInstanceFeatures(instanceFeatures);
probEstimates = new double[model.getNrClass()];
Double prediction = Linear.predictProbability(model, instance, probEstimates);
label = labelMappings.get(prediction);
score = probEstimates[prediction.intValue()];
return label;
}
Example 15: LiblinearModel
import de.bwaldvogel.liblinear.Linear; // import the required package/class
public LiblinearModel(InputStream in) throws IOException {
DataInputStream di = new DataInputStream(in);
int modelByteLength = di.readInt();
// TODO: We should have a fixed memory limit here ...
byte modelBytes[] = new byte[modelByteLength];
di.readFully(modelBytes); // readFully guarantees the whole model is read
int outcomeLabelLength = di.readInt();
outcomeLabels = new String[outcomeLabelLength];
for (int i = 0; i < outcomeLabelLength; i++) {
outcomeLabels[i] = di.readUTF();
}
predMap = new HashMap<String, Integer>();
int predMapSize = di.readInt();
for (int i = 0; i < predMapSize; i++) {
String key = di.readUTF();
int value = di.readInt();
predMap.put(key, value);
}
model = Linear.loadModel(new InputStreamReader(new ByteArrayInputStream(modelBytes), LIBLINEAR_MODEL_ENCODING));
}