本文整理汇总了Java中org.jpmml.converter.Schema.getLabel方法的典型用法代码示例。如果您正苦于以下问题:Java Schema.getLabel方法的具体用法?Java Schema.getLabel怎么用?Java Schema.getLabel使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.jpmml.converter.Schema
的用法示例。
在下文中一共展示了Schema.getLabel方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: encodeMiningModel
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Encodes a multiclass LightGBM booster as per-class regression ensembles
// that are combined into a single classification via softmax normalization.
@Override
public MiningModel encodeMiningModel(List<Tree> trees, Integer numIteration, Schema schema){
	CategoricalLabel label = (CategoricalLabel)schema.getLabel();

	// Member models predict a raw per-class value against an anonymous continuous target
	Schema memberSchema = new Schema(new ContinuousLabel(null, DataType.DOUBLE), schema.getFeatures());

	int classCount = label.size();
	int treesPerClass = (trees.size() / classCount);

	List<MiningModel> memberModels = new ArrayList<>();

	for(int classIndex = 0; classIndex < classCount; classIndex++){
		// Trees are laid out row-major: one row of trees per class
		List<Tree> classTrees = FortranMatrixUtil.getRow(trees, classCount, treesPerClass, classIndex);

		MiningModel memberModel = createMiningModel(classTrees, numIteration, memberSchema)
			.setOutput(ModelUtil.createPredictedOutput(FieldName.create("lgbmValue(" + label.getValue(classIndex) + ")"), OpType.CONTINUOUS, DataType.DOUBLE));

		memberModels.add(memberModel);
	}

	return MiningModelUtil.createClassification(memberModels, RegressionModel.NormalizationMethod.SOFTMAX, true, schema);
}
示例2: encodeMiningModel
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Encodes a multiclass XGBoost booster as per-class regression ensembles
// that are combined into a single classification via softmax normalization.
@Override
public MiningModel encodeMiningModel(List<RegTree> regTrees, float base_score, Integer ntreeLimit, Schema schema){
	CategoricalLabel label = (CategoricalLabel)schema.getLabel();

	// Member models predict a raw per-class value against an anonymous continuous target
	Schema memberSchema = new Schema(new ContinuousLabel(null, DataType.FLOAT), schema.getFeatures());

	int classCount = label.size();
	int treesPerClass = (regTrees.size() / classCount);

	List<MiningModel> memberModels = new ArrayList<>();

	for(int classIndex = 0; classIndex < classCount; classIndex++){
		// Trees are laid out column-major: one column of trees per class
		List<RegTree> classTrees = CMatrixUtil.getColumn(regTrees, treesPerClass, classCount, classIndex);

		MiningModel memberModel = createMiningModel(classTrees, base_score, ntreeLimit, memberSchema)
			.setOutput(ModelUtil.createPredictedOutput(FieldName.create("xgbValue(" + label.getValue(classIndex) + ")"), OpType.CONTINUOUS, DataType.FLOAT));

		memberModels.add(memberModel);
	}

	return MiningModelUtil.createClassification(memberModels, RegressionModel.NormalizationMethod.SOFTMAX, true, schema);
}
示例3: encodeModel
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Encodes a (Spark ML) generalized linear regression model as a PMML
// GeneralRegressionModel, carrying over family, link function and coefficients.
@Override
public GeneralRegressionModel encodeModel(Schema schema){
	GeneralizedLinearRegressionModel model = getTransformer();

	MiningFunction miningFunction = getMiningFunction();

	// In classification mode the GLM must be binomial; the regression table
	// then targets the second (ie. "event") class label.
	String targetCategory = null;

	switch(miningFunction){
		case CLASSIFICATION:
		{
			CategoricalLabel label = (CategoricalLabel)schema.getLabel();
			if(label.size() != 2){
				throw new IllegalArgumentException();
			}

			targetCategory = label.getValue(1);
		}
		break;
		default:
			break;
	}

	GeneralRegressionModel generalRegressionModel = new GeneralRegressionModel(GeneralRegressionModel.ModelType.GENERALIZED_LINEAR, miningFunction, ModelUtil.createMiningSchema(schema.getLabel()), null, null, null)
		.setDistribution(parseFamily(model.getFamily()))
		.setLinkFunction(parseLinkFunction(model.getLink()))
		.setLinkParameter(parseLinkParameter(model.getLink()));

	GeneralRegressionModelUtil.encodeRegressionTable(generalRegressionModel, schema.getFeatures(), model.intercept(), VectorUtil.toList(model.coefficients()), targetCategory);

	return generalRegressionModel;
}
示例4: createMiningModel
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Builds a regression ensemble that sums the outputs of the given XGBoost
// trees and rescales the sum by the booster's base_score.
static
protected MiningModel createMiningModel(List<RegTree> regTrees, float base_score, Integer ntreeLimit, Schema schema){
	ContinuousLabel continuousLabel = (ContinuousLabel)schema.getLabel();

	// Optionally truncate the ensemble to the first ntreeLimit trees
	List<RegTree> activeTrees = regTrees;
	if(ntreeLimit != null){
		if(ntreeLimit > regTrees.size()){
			throw new IllegalArgumentException("Tree limit " + ntreeLimit + " is greater than the number of trees");
		}

		activeTrees = regTrees.subList(0, ntreeLimit);
	}

	Schema segmentSchema = schema.toAnonymousSchema();

	List<TreeModel> treeModels = new ArrayList<>();

	for(RegTree activeTree : activeTrees){
		treeModels.add(activeTree.encodeTreeModel(segmentSchema));
	}

	return new MiningModel(MiningFunction.REGRESSION, ModelUtil.createMiningSchema(continuousLabel))
		.setMathContext(MathContext.FLOAT)
		.setSegmentation(MiningModelUtil.createSegmentation(Segmentation.MultipleModelMethod.SUM, treeModels))
		.setTargets(ModelUtil.createRescaleTargets(null, ValueUtil.floatToDouble(base_score), continuousLabel));
}
示例5: createRegression
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Builds a single-table PMML RegressionModel for a continuous target.
// Only normalization methods that make sense for regression are accepted.
static
public RegressionModel createRegression(MathContext mathContext, List<? extends Feature> features, List<Double> coefficients, Double intercept, RegressionModel.NormalizationMethod normalizationMethod, Schema schema){
	ContinuousLabel continuousLabel = (ContinuousLabel)schema.getLabel();

	if(normalizationMethod != null){
		switch(normalizationMethod){
			// Whitelist of regression-compatible normalization methods
			case NONE:
			case SOFTMAX:
			case LOGIT:
			case PROBIT:
			case CLOGLOG:
			case EXP:
			case LOGLOG:
			case CAUCHIT:
				break;
			default:
				throw new IllegalArgumentException();
		}
	}

	RegressionTable regressionTable = createRegressionTable(features, coefficients, intercept);

	return new RegressionModel(MiningFunction.REGRESSION, ModelUtil.createMiningSchema(continuousLabel), null)
		.setNormalizationMethod(normalizationMethod)
		.setMathContext(ModelUtil.simplifyMathContext(mathContext))
		.addRegressionTables(regressionTable);
}
示例6: createBinaryLogisticClassification
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Builds a two-class PMML RegressionModel: an "active" regression table for
// the second class label, and an empty "passive" table for the first one.
static
public RegressionModel createBinaryLogisticClassification(MathContext mathContext, List<? extends Feature> features, List<Double> coefficients, Double intercept, RegressionModel.NormalizationMethod normalizationMethod, boolean hasProbabilityDistribution, Schema schema){
	CategoricalLabel categoricalLabel = (CategoricalLabel)schema.getLabel();

	// Strictly binary classification only
	if(categoricalLabel.size() != 2){
		throw new IllegalArgumentException();
	} // End if

	if(normalizationMethod != null){
		switch(normalizationMethod){
			// Whitelist of binary classification-compatible normalization methods
			case NONE:
			case LOGIT:
			case PROBIT:
			case CLOGLOG:
			case LOGLOG:
			case CAUCHIT:
				break;
			default:
				throw new IllegalArgumentException();
		}
	}

	// The second class label carries the coefficients
	RegressionTable activeTable = RegressionModelUtil.createRegressionTable(features, coefficients, intercept)
		.setTargetCategory(categoricalLabel.getValue(1));

	// The first class label gets an empty (reference) table
	RegressionTable passiveTable = RegressionModelUtil.createRegressionTable(Collections.<Feature>emptyList(), Collections.<Double>emptyList(), null)
		.setTargetCategory(categoricalLabel.getValue(0));

	return new RegressionModel(MiningFunction.CLASSIFICATION, ModelUtil.createMiningSchema(categoricalLabel), null)
		.setNormalizationMethod(normalizationMethod)
		.setMathContext(ModelUtil.simplifyMathContext(mathContext))
		.addRegressionTables(activeTable, passiveTable)
		.setOutput(hasProbabilityDistribution ? ModelUtil.createProbabilityOutput(mathContext, categoricalLabel) : null);
}
示例7: encodeGradientBoosting
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Encodes a scikit-learn gradient boosting ensemble as a summing MiningModel
// whose output is rescaled by the learning rate and initial prediction.
static
public <E extends Estimator & HasEstimatorEnsemble<DecisionTreeRegressor> & HasTreeOptions> MiningModel encodeGradientBoosting(E estimator, Number initialPrediction, Number learningRate, Schema schema){
	ContinuousLabel continuousLabel = (ContinuousLabel)schema.getLabel();

	List<TreeModel> treeModels = TreeModelUtil.encodeTreeModelSegmentation(estimator, MiningFunction.REGRESSION, schema);

	MiningModel result = new MiningModel(MiningFunction.REGRESSION, ModelUtil.createMiningSchema(continuousLabel))
		.setSegmentation(MiningModelUtil.createSegmentation(Segmentation.MultipleModelMethod.SUM, treeModels))
		.setTargets(ModelUtil.createRescaleTargets(learningRate, initialPrediction, continuousLabel));

	// Apply estimator-specific post-processing (eg. compaction options)
	return TreeModelUtil.transform(estimator, result);
}
示例8: encodeClassificationScore
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Attaches a per-class score distribution to the node, and sets the node
// score to the first class with the highest probability.
static
private Node encodeClassificationScore(Node node, RDoubleVector probabilities, Schema schema){
	CategoricalLabel label = (CategoricalLabel)schema.getLabel();

	// One probability per class label is required
	if(label.size() != probabilities.size()){
		throw new IllegalArgumentException();
	}

	Double bestProbability = null;

	for(int i = 0; i < label.size(); i++){
		String category = label.getValue(i);
		Double probability = probabilities.getValue(i);

		// Strictly-greater comparison keeps the first class on ties
		if(bestProbability == null || bestProbability.compareTo(probability) < 0){
			node.setScore(category);

			bestProbability = probability;
		}

		node.addScoreDistributions(new ScoreDistribution(category, probability));
	}

	return node;
}
示例9: createMiningModel
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Builds a regression ensemble that sums the given tree outputs and shifts
// the total by the initial prediction initF.
static
private MiningModel createMiningModel(List<TreeModel> treeModels, Double initF, Schema schema){
	ContinuousLabel label = (ContinuousLabel)schema.getLabel();

	return new MiningModel(MiningFunction.REGRESSION, ModelUtil.createMiningSchema(label))
		.setSegmentation(MiningModelUtil.createSegmentation(Segmentation.MultipleModelMethod.SUM, treeModels))
		.setTargets(ModelUtil.createRescaleTargets(null, initF, label));
}
示例10: createClassification
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Combines per-class member models into a PMML model chain: each member model
// contributes one pseudo-probability feature, and a final RegressionModel
// normalizes those features into a class probability distribution.
static
public MiningModel createClassification(List<? extends Model> models, RegressionModel.NormalizationMethod normalizationMethod, boolean hasProbabilityDistribution, Schema schema){
CategoricalLabel categoricalLabel = (CategoricalLabel)schema.getLabel();
// Requires one member model per class, and three or more classes
// (the binary case is handled by a different construction elsewhere)
if(categoricalLabel.size() < 3 || categoricalLabel.size() != models.size()){
throw new IllegalArgumentException();
} // End if
if(normalizationMethod != null){
switch(normalizationMethod){
// Whitelist of multiclass-compatible normalization methods
case NONE:
case SIMPLEMAX:
case SOFTMAX:
break;
default:
throw new IllegalArgumentException();
}
}
MathContext mathContext = null;
List<RegressionTable> regressionTables = new ArrayList<>();
for(int i = 0; i < categoricalLabel.size(); i++){
Model model = models.get(i);
// A null math context is equivalent to the PMML default (DOUBLE)
MathContext modelMathContext = model.getMathContext();
if(modelMathContext == null){
modelMathContext = MathContext.DOUBLE;
} // End if
// All member models must agree on a single math context
if(mathContext == null){
mathContext = modelMathContext;
} else
{
if(!Objects.equals(mathContext, modelMathContext)){
throw new IllegalArgumentException();
}
}
// Reference the member model's prediction as a feature, and pass it
// through with a unit coefficient for the corresponding class label
Feature feature = MiningModelUtil.MODEL_PREDICTION.apply(model);
RegressionTable regressionTable = RegressionModelUtil.createRegressionTable(Collections.singletonList(feature), Collections.singletonList(1d), null)
.setTargetCategory(categoricalLabel.getValue(i));
regressionTables.add(regressionTable);
}
RegressionModel regressionModel = new RegressionModel(MiningFunction.CLASSIFICATION, ModelUtil.createMiningSchema(categoricalLabel), regressionTables)
.setNormalizationMethod(normalizationMethod)
.setMathContext(ModelUtil.simplifyMathContext(mathContext))
.setOutput(hasProbabilityDistribution ? ModelUtil.createProbabilityOutput(mathContext, categoricalLabel) : null);
// The normalizing RegressionModel becomes the last segment of the chain
List<Model> segmentationModels = new ArrayList<>(models);
segmentationModels.add(regressionModel);
return createModelChain(segmentationModels, schema);
}
示例11: encodeModel
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Encodes a (scikit-learn) voting classifier: every member classifier is
// encoded against the same schema, and their outputs are combined by the
// configured voting method.
@Override
public Model encodeModel(Schema schema){
	List<? extends Classifier> estimators = getEstimators();
	List<? extends Number> weights = getWeights();

	CategoricalLabel label = (CategoricalLabel)schema.getLabel();

	List<Model> memberModels = new ArrayList<>();

	for(Classifier estimator : estimators){
		memberModels.add(estimator.encodeModel(schema));
	}

	// Weighted vs. unweighted voting changes the combination method
	boolean weighted = (weights != null && weights.size() > 0);

	Segmentation.MultipleModelMethod multipleModelMethod = parseVoting(getVoting(), weighted);

	return new MiningModel(MiningFunction.CLASSIFICATION, ModelUtil.createMiningSchema(label))
		.setSegmentation(MiningModelUtil.createSegmentation(multipleModelMethod, memberModels, weights))
		.setOutput(ModelUtil.createProbabilityOutput(DataType.DOUBLE, label));
}
示例12: encodeModel
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Encodes a (scikit-learn) linear classifier as a logit-normalized PMML
// RegressionModel. The coefficient matrix has shape
// (numberOfClasses, numberOfFeatures); a single coefficient row denotes a
// binary problem, three or more rows a multinomial one.
@Override
public RegressionModel encodeModel(Schema schema){
int[] shape = getCoefShape();
int numberOfClasses = shape[0];
int numberOfFeatures = shape[1];
boolean hasProbabilityDistribution = hasProbabilityDistribution();
List<? extends Number> coef = getCoef();
List<? extends Number> intercepts = getIntercept();
CategoricalLabel categoricalLabel = (CategoricalLabel)schema.getLabel();
List<Feature> features = schema.getFeatures();
// A single coefficient row encodes a two-class problem
if(numberOfClasses == 1){
ClassifierUtil.checkSize(2, categoricalLabel);
return RegressionModelUtil.createBinaryLogisticClassification(features, ValueUtil.asDoubles(CMatrixUtil.getRow(coef, numberOfClasses, numberOfFeatures, 0)), ValueUtil.asDouble(intercepts.get(0)), RegressionModel.NormalizationMethod.LOGIT, hasProbabilityDistribution, schema);
} else
// One coefficient row (plus intercept) per class
if(numberOfClasses >= 3){
ClassifierUtil.checkSize(numberOfClasses, categoricalLabel);
List<RegressionTable> regressionTables = new ArrayList<>();
for(int i = 0, rows = categoricalLabel.size(); i < rows; i++){
// Row i of the C-order coefficient matrix holds the weights for class i
RegressionTable regressionTable = RegressionModelUtil.createRegressionTable(features, ValueUtil.asDoubles(CMatrixUtil.getRow(coef, numberOfClasses, numberOfFeatures, i)), ValueUtil.asDouble(intercepts.get(i)))
.setTargetCategory(categoricalLabel.getValue(i));
regressionTables.add(regressionTable);
}
RegressionModel regressionModel = new RegressionModel(MiningFunction.CLASSIFICATION, ModelUtil.createMiningSchema(categoricalLabel), regressionTables)
.setNormalizationMethod(RegressionModel.NormalizationMethod.LOGIT)
.setOutput(hasProbabilityDistribution ? ModelUtil.createProbabilityOutput(DataType.DOUBLE, categoricalLabel) : null);
return regressionModel;
} else
{
// numberOfClasses == 2 (or 0) is not a valid coefficient matrix layout here
throw new IllegalArgumentException();
}
}
示例13: encodeModel
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Encodes a (scikit-learn) DummyClassifier as a single-node TreeModel whose
// root always fires: the score and the score distribution are fixed by the
// chosen strategy ("constant", "most_frequent" or "prior").
@Override
public TreeModel encodeModel(Schema schema){
List<?> classes = getClasses();
List<? extends Number> classPrior = getClassPrior();
Object constant = getConstant();
String strategy = getStrategy();
// One prior probability per class is required
ClassDictUtil.checkSize(classes, classPrior);
CategoricalLabel categoricalLabel = (CategoricalLabel)schema.getLabel();
// index: position of the class to predict; probabilities: score distribution
int index;
double[] probabilities;
switch(strategy){
case "constant":
{
// Always predict the user-supplied constant class with probability 1
index = classes.indexOf(constant);
probabilities = new double[classes.size()];
probabilities[index] = 1d;
}
break;
case "most_frequent":
{
// Always predict the class with the highest prior, with probability 1
index = classPrior.indexOf(Collections.max((List)classPrior));
probabilities = new double[classes.size()];
probabilities[index] = 1d;
}
break;
case "prior":
{
// Predict the class with the highest prior, but keep the full
// prior distribution as the score distribution
index = classPrior.indexOf(Collections.max((List)classPrior));
probabilities = Doubles.toArray(classPrior);
}
break;
default:
throw new IllegalArgumentException(strategy);
}
// A single always-true root node carries the constant prediction
Node root = new Node()
.setPredicate(new True())
.setScore(ValueUtil.formatValue(classes.get(index)));
for(int i = 0; i < classes.size(); i++){
ScoreDistribution scoreDistribution = new ScoreDistribution(ValueUtil.formatValue(classes.get(i)), probabilities[i]);
root.addScoreDistributions(scoreDistribution);
}
TreeModel treeModel = new TreeModel(MiningFunction.CLASSIFICATION, ModelUtil.createMiningSchema(categoricalLabel), root)
.setOutput(ModelUtil.createProbabilityOutput(DataType.DOUBLE, categoricalLabel));
return treeModel;
}
示例14: encodeProbabilityForest
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Encodes an R "ranger" probability forest as an averaging ensemble of
// classification trees. Terminal nodes carry per-class probabilities, which
// the ScoreEncoder turns into PMML score distributions.
private MiningModel encodeProbabilityForest(RGenericVector ranger, Schema schema){
	RGenericVector forest = (RGenericVector)ranger.getValue("forest");

	final
	RStringVector levels = (RStringVector)forest.getValue("levels");

	CategoricalLabel categoricalLabel = (CategoricalLabel)schema.getLabel();

	ScoreEncoder scoreEncoder = new ScoreEncoder(){

		@Override
		public void encode(Node node, Number splitValue, RNumberVector<?> terminalClassCount){
			// A terminal node must not carry a split value, and must provide
			// exactly one class count per class level
			if(splitValue.doubleValue() != 0d || (terminalClassCount == null || terminalClassCount.size() != levels.size())){
				throw new IllegalArgumentException();
			}

			Double maxProbability = null;

			for(int i = 0; i < terminalClassCount.size(); i++){
				String value = levels.getValue(i);
				Double probability = ValueUtil.asDouble(terminalClassCount.getValue(i));

				// The node score is the first class with the highest probability
				if(maxProbability == null || (maxProbability).compareTo(probability) < 0){
					node.setScore(value);

					maxProbability = probability;
				}

				// Local was previously misspelled as "scoreDisctibution"
				ScoreDistribution scoreDistribution = new ScoreDistribution(value, probability);

				node.addScoreDistributions(scoreDistribution);
			}
		}
	};

	List<TreeModel> treeModels = encodeForest(forest, MiningFunction.CLASSIFICATION, scoreEncoder, schema);

	MiningModel miningModel = new MiningModel(MiningFunction.CLASSIFICATION, ModelUtil.createMiningSchema(categoricalLabel))
		.setSegmentation(MiningModelUtil.createSegmentation(Segmentation.MultipleModelMethod.AVERAGE, treeModels))
		.setOutput(ModelUtil.createProbabilityOutput(DataType.DOUBLE, categoricalLabel));

	return miningModel;
}
示例15: encodeClassification
import org.jpmml.converter.Schema; //导入方法依赖的package包/类
// Encodes an R randomForest classification forest as a majority-vote ensemble.
// The forest components are flattened Fortran (column-major) matrices with
// nrnodes rows and ntree columns; column i holds the data of the i-th tree.
private MiningModel encodeClassification(RGenericVector forest, final Schema schema){
RNumberVector<?> bestvar = (RNumberVector<?>)forest.getValue("bestvar");
RNumberVector<?> treemap = (RNumberVector<?>)forest.getValue("treemap");
RIntegerVector nodepred = (RIntegerVector)forest.getValue("nodepred");
RDoubleVector xbestsplit = (RDoubleVector)forest.getValue("xbestsplit");
RIntegerVector nrnodes = (RIntegerVector)forest.getValue("nrnodes");
RDoubleVector ntree = (RDoubleVector)forest.getValue("ntree");
// rows = max nodes per tree, columns = number of trees
int rows = nrnodes.asScalar();
int columns = ValueUtil.asInt(ntree.asScalar());
final
CategoricalLabel categoricalLabel = (CategoricalLabel)schema.getLabel();
// Node predictions are 1-based class indices; map them to class label values
ScoreEncoder<Integer> scoreEncoder = new ScoreEncoder<Integer>(){
@Override
public String encode(Integer value){
return categoricalLabel.getValue(value - 1);
}
};
Schema segmentSchema = schema.toAnonymousSchema();
List<TreeModel> treeModels = new ArrayList<>();
for(int i = 0; i < columns; i++){
// Each treemap column is (2 * rows) long: it stacks the left- and
// right-daughter node index vectors of tree i
List<? extends Number> daughters = FortranMatrixUtil.getColumn(treemap.getValues(), 2 * rows, columns, i);
TreeModel treeModel = encodeTreeModel(
MiningFunction.CLASSIFICATION,
scoreEncoder,
FortranMatrixUtil.getColumn(daughters, rows, 2, 0),
FortranMatrixUtil.getColumn(daughters, rows, 2, 1),
FortranMatrixUtil.getColumn(nodepred.getValues(), rows, columns, i),
FortranMatrixUtil.getColumn(bestvar.getValues(), rows, columns, i),
FortranMatrixUtil.getColumn(xbestsplit.getValues(), rows, columns, i),
segmentSchema
);
treeModels.add(treeModel);
}
MiningModel miningModel = new MiningModel(MiningFunction.CLASSIFICATION, ModelUtil.createMiningSchema(categoricalLabel))
.setSegmentation(MiningModelUtil.createSegmentation(Segmentation.MultipleModelMethod.MAJORITY_VOTE, treeModels))
.setOutput(ModelUtil.createProbabilityOutput(DataType.DOUBLE, categoricalLabel));
return miningModel;
}