本文整理汇总了Java中weka.classifiers.bayes.NaiveBayesUpdateable.buildClassifier方法的典型用法代码示例。如果您正苦于以下问题:Java NaiveBayesUpdateable.buildClassifier方法的具体用法?Java NaiveBayesUpdateable.buildClassifier怎么用?Java NaiveBayesUpdateable.buildClassifier使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类weka.classifiers.bayes.NaiveBayesUpdateable
的用法示例。
在下文中一共展示了NaiveBayesUpdateable.buildClassifier方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: buildClassifier
import weka.classifiers.bayes.NaiveBayesUpdateable; // import the package/class this method depends on
/**
 * Build the no-split node: discretize the data, fit an updateable naive
 * Bayes model on it, and (given enough data) estimate the leaf error by
 * cross-validation.
 *
 * @param instances an <code>Instances</code> value to learn from
 * @exception Exception if an error occurs
 */
public final void buildClassifier(Instances instances) throws Exception {
  // Discretize all numeric attributes first; the fitted filter is kept so
  // the same discretization can be applied at prediction time.
  m_disc = new Discretize();
  m_disc.setInputFormat(instances);
  Instances discretized = Filter.useFilter(instances, m_disc);

  // Train the naive Bayes model on the discretized data.
  m_nb = new NaiveBayesUpdateable();
  m_nb.buildClassifier(discretized);

  // Only cross-validate the error estimate when there is enough data.
  if (discretized.numInstances() >= 5) {
    m_errors = crossValidate(m_nb, discretized, new Random(1));
  }
  m_numSubsets = 1;
}
示例2: handleNumericAttribute
import weka.classifiers.bayes.NaiveBayesUpdateable; // import the package/class this method depends on
/**
 * Creates split on numeric attribute.
 *
 * Builds a binary C4.5-style split on the numeric attribute, distributes
 * the training instances into the branch subsets (fractionally, for
 * instances whose split value is missing), and accumulates the split's
 * error estimate from 5-fold cross-validated naive Bayes models built on
 * each sufficiently large branch.
 *
 * @param trainInstances the instances to split
 * @exception Exception if something goes wrong
 */
private void handleNumericAttribute(Instances trainInstances)
  throws Exception {
  // Find the binary split point using the C4.5 splitting criterion.
  m_c45S = new C45Split(m_attIndex, 2, m_sumOfWeights, true);
  m_c45S.buildClassifier(trainInstances);
  if (m_c45S.numSubsets() == 0) {
    // No usable split was found; leave m_numSubsets unchanged.
    return;
  }
  m_errors = 0;
  // NOTE(review): the array is sized m_complexityIndex but only the first
  // two entries are initialized -- presumably m_complexityIndex == 2 for a
  // binary numeric split; confirm against the caller.
  Instances[] trainingSets = new Instances[m_complexityIndex];
  trainingSets[0] = new Instances(trainInstances, 0);
  trainingSets[1] = new Instances(trainInstances, 0);
  int subset = -1;
  // populate the subsets
  for (int i = 0; i < trainInstances.numInstances(); i++) {
    Instance instance = trainInstances.instance(i);
    subset = m_c45S.whichSubset(instance);
    if (subset != -1) {
      trainingSets[subset].add((Instance) instance.copy());
    } else {
      // Split value is missing: add a fractional copy to every branch,
      // scaled by the branch weights when they line up, or split evenly
      // otherwise.
      double[] weights = m_c45S.weights(instance);
      for (int j = 0; j < m_complexityIndex; j++) {
        Instance temp = (Instance) instance.copy();
        if (weights.length == m_complexityIndex) {
          temp.setWeight(temp.weight() * weights[j]);
        } else {
          temp.setWeight(temp.weight() / m_complexityIndex);
        }
        trainingSets[j].add(temp);
      }
    }
  }
  /*
   * // compute weights (weights of instances per subset m_weights = new
   * double [m_complexityIndex]; for (int i = 0; i < m_complexityIndex; i++) {
   * m_weights[i] = trainingSets[i].sumOfWeights(); }
   * Utils.normalize(m_weights);
   */
  // This RNG is consumed by both randomize() and crossValidate() inside the
  // loop, so the statement order below matters for reproducibility.
  Random r = new Random(1);
  int minNumCount = 0;
  for (int i = 0; i < m_complexityIndex; i++) {
    if (trainingSets[i].numInstances() > 5) {
      minNumCount++;
      // Discretize the sets
      Discretize disc = new Discretize();
      disc.setInputFormat(trainingSets[i]);
      trainingSets[i] = Filter.useFilter(trainingSets[i], disc);
      trainingSets[i].randomize(r);
      trainingSets[i].stratify(5);
      NaiveBayesUpdateable fullModel = new NaiveBayesUpdateable();
      fullModel.buildClassifier(trainingSets[i]);
      // add the errors for this branch of the split
      m_errors += NBTreeNoSplit.crossValidate(fullModel, trainingSets[i], r);
    } else {
      // Too few instances to cross-validate: count every instance weight
      // in this branch as an error.
      for (int j = 0; j < trainingSets[i].numInstances(); j++) {
        m_errors += trainingSets[i].instance(j).weight();
      }
    }
  }
  // Check if minimum number of Instances in at least two
  // subsets.
  if (minNumCount > 1) {
    m_numSubsets = m_complexityIndex;
  }
}
示例3: handleNumericAttribute
import weka.classifiers.bayes.NaiveBayesUpdateable; // import the package/class this method depends on
/**
 * Creates split on numeric attribute.
 *
 * Builds a binary C4.5-style split on the numeric attribute, distributes
 * the training instances into the branch subsets (fractionally, for
 * instances whose split value is missing), and accumulates the split's
 * error estimate from 5-fold cross-validated naive Bayes models built on
 * each sufficiently large branch.
 *
 * @param trainInstances the instances to split
 * @exception Exception if something goes wrong
 */
private void handleNumericAttribute(Instances trainInstances)
  throws Exception {
  // Find the binary split point using the C4.5 splitting criterion.
  m_c45S = new C45Split(m_attIndex, 2, m_sumOfWeights, true);
  m_c45S.buildClassifier(trainInstances);
  if (m_c45S.numSubsets() == 0) {
    // No usable split was found; leave m_numSubsets unchanged.
    return;
  }
  m_errors = 0;
  // NOTE(review): the array is sized m_complexityIndex but only the first
  // two entries are initialized -- presumably m_complexityIndex == 2 for a
  // binary numeric split; confirm against the caller.
  Instances [] trainingSets = new Instances [m_complexityIndex];
  trainingSets[0] = new Instances(trainInstances, 0);
  trainingSets[1] = new Instances(trainInstances, 0);
  int subset = -1;
  // populate the subsets
  for (int i = 0; i < trainInstances.numInstances(); i++) {
    Instance instance = trainInstances.instance(i);
    subset = m_c45S.whichSubset(instance);
    if (subset != -1) {
      trainingSets[subset].add((Instance)instance.copy());
    } else {
      // Split value is missing: add a fractional copy to every branch,
      // scaled by the branch weights when they line up, or split evenly
      // otherwise.
      double [] weights = m_c45S.weights(instance);
      for (int j = 0; j < m_complexityIndex; j++) {
        Instance temp = (Instance)instance.copy();
        if (weights.length == m_complexityIndex) {
          temp.setWeight(temp.weight() * weights[j]);
        } else {
          temp.setWeight(temp.weight() / m_complexityIndex);
        }
        trainingSets[j].add(temp);
      }
    }
  }
  /* // compute weights (weights of instances per subset
  m_weights = new double [m_complexityIndex];
  for (int i = 0; i < m_complexityIndex; i++) {
    m_weights[i] = trainingSets[i].sumOfWeights();
  }
  Utils.normalize(m_weights); */
  // This RNG is consumed by both randomize() and crossValidate() inside the
  // loop, so the statement order below matters for reproducibility.
  Random r = new Random(1);
  int minNumCount = 0;
  for (int i = 0; i < m_complexityIndex; i++) {
    if (trainingSets[i].numInstances() > 5) {
      minNumCount++;
      // Discretize the sets
      Discretize disc = new Discretize();
      disc.setInputFormat(trainingSets[i]);
      trainingSets[i] = Filter.useFilter(trainingSets[i], disc);
      trainingSets[i].randomize(r);
      trainingSets[i].stratify(5);
      NaiveBayesUpdateable fullModel = new NaiveBayesUpdateable();
      fullModel.buildClassifier(trainingSets[i]);
      // add the errors for this branch of the split
      m_errors += NBTreeNoSplit.crossValidate(fullModel, trainingSets[i], r);
    } else {
      // Too few instances to cross-validate: count every instance weight
      // in this branch as an error.
      for (int j = 0; j < trainingSets[i].numInstances(); j++) {
        m_errors += trainingSets[i].instance(j).weight();
      }
    }
  }
  // Check if minimum number of Instances in at least two
  // subsets.
  if (minNumCount > 1) {
    m_numSubsets = m_complexityIndex;
  }
}
示例4: handleNumericAttribute
import weka.classifiers.bayes.NaiveBayesUpdateable; // import the package/class this method depends on
/**
 * Creates split on numeric attribute.
 *
 * Builds a binary C4.5-style split on the numeric attribute, distributes
 * the training instances into the branch subsets (fractionally, for
 * instances whose split value is missing), and accumulates the split's
 * error estimate from 5-fold cross-validated naive Bayes models built on
 * each sufficiently large branch.
 *
 * @param trainInstances the instances to split
 * @exception Exception if something goes wrong
 */
private void handleNumericAttribute(Instances trainInstances)
  throws Exception {
  // Find the binary split point using the C4.5 splitting criterion.
  // NOTE(review): unlike the other variants, this one uses the 3-argument
  // C45Split constructor.
  m_c45S = new C45Split(m_attIndex, 2, m_sumOfWeights);
  m_c45S.buildClassifier(trainInstances);
  if (m_c45S.numSubsets() == 0) {
    // No usable split was found; leave m_numSubsets unchanged.
    return;
  }
  m_errors = 0;
  // NOTE(review): the array is sized m_complexityIndex but only the first
  // two entries are initialized -- presumably m_complexityIndex == 2 for a
  // binary numeric split; confirm against the caller.
  Instances [] trainingSets = new Instances [m_complexityIndex];
  trainingSets[0] = new Instances(trainInstances, 0);
  trainingSets[1] = new Instances(trainInstances, 0);
  int subset = -1;
  // populate the subsets
  for (int i = 0; i < trainInstances.numInstances(); i++) {
    Instance instance = trainInstances.instance(i);
    subset = m_c45S.whichSubset(instance);
    if (subset != -1) {
      trainingSets[subset].add((Instance)instance.copy());
    } else {
      // Split value is missing: add a fractional copy to every branch,
      // scaled by the branch weights when they line up, or split evenly
      // otherwise.
      double [] weights = m_c45S.weights(instance);
      for (int j = 0; j < m_complexityIndex; j++) {
        Instance temp = (Instance)instance.copy();
        if (weights.length == m_complexityIndex) {
          temp.setWeight(temp.weight() * weights[j]);
        } else {
          temp.setWeight(temp.weight() / m_complexityIndex);
        }
        trainingSets[j].add(temp);
      }
    }
  }
  /* // compute weights (weights of instances per subset
  m_weights = new double [m_complexityIndex];
  for (int i = 0; i < m_complexityIndex; i++) {
    m_weights[i] = trainingSets[i].sumOfWeights();
  }
  Utils.normalize(m_weights); */
  // This RNG is consumed by both randomize() and crossValidate() inside the
  // loop, so the statement order below matters for reproducibility.
  Random r = new Random(1);
  int minNumCount = 0;
  for (int i = 0; i < m_complexityIndex; i++) {
    if (trainingSets[i].numInstances() > 5) {
      minNumCount++;
      // Discretize the sets
      Discretize disc = new Discretize();
      disc.setInputFormat(trainingSets[i]);
      trainingSets[i] = Filter.useFilter(trainingSets[i], disc);
      trainingSets[i].randomize(r);
      trainingSets[i].stratify(5);
      NaiveBayesUpdateable fullModel = new NaiveBayesUpdateable();
      fullModel.buildClassifier(trainingSets[i]);
      // add the errors for this branch of the split
      m_errors += NBTreeNoSplit.crossValidate(fullModel, trainingSets[i], r);
    } else {
      // Too few instances to cross-validate: count every instance weight
      // in this branch as an error.
      for (int j = 0; j < trainingSets[i].numInstances(); j++) {
        m_errors += trainingSets[i].instance(j).weight();
      }
    }
  }
  // Check if minimum number of Instances in at least two
  // subsets.
  if (minNumCount > 1) {
    m_numSubsets = m_complexityIndex;
  }
}
示例5: NBNode
import weka.classifiers.bayes.NaiveBayesUpdateable; // import the package/class this method depends on
/**
 * Construct a new NBNode, initialising its naive Bayes model from the
 * supplied instance structure.
 *
 * @param header the instances structure of the data we're learning from
 * @param nbWeightThreshold the weight mass to see before allowing naive Bayes
 *          to predict
 * @throws Exception if a problem occurs
 */
public NBNode(Instances header, double nbWeightThreshold) throws Exception {
  // Build the model on the header up front so the updateable classifier
  // knows the attribute structure before incremental updates arrive.
  m_bayes = new NaiveBayesUpdateable();
  m_bayes.buildClassifier(header);
  m_nbWeightThreshold = nbWeightThreshold;
}