This article collects typical usage examples of the Java method weka.core.Instances.setClass. If you have been struggling with questions such as: How exactly is Instances.setClass used in Java? How do I call Instances.setClass? Where can I find examples of Instances.setClass? then the curated method code examples here may help. You can also explore further usage examples of weka.core.Instances, the class this method belongs to.
The following presents 3 code examples of the Instances.setClass method, sorted by popularity by default.
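Before the examples, here is a minimal, self-contained sketch of what Instances.setClass does: it marks one Attribute of a dataset as the class (target) attribute, with the same effect as calling setClassIndex with that attribute's index. The attribute and label names below are illustrative only.

import java.util.ArrayList;

import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instances;

public class SetClassSketch {
    public static void main(String[] args) {
        // Two numeric features plus a nominal class attribute (names are made up).
        ArrayList<Attribute> attrs = new ArrayList<>();
        attrs.add(new Attribute("feature1"));
        attrs.add(new Attribute("feature2"));
        ArrayList<String> labels = new ArrayList<>();
        labels.add("yes");
        labels.add("no");
        Attribute label = new Attribute("label", labels);
        attrs.add(label);

        Instances data = new Instances("demo", attrs, 0);
        // Mark "label" as the class attribute; same effect as data.setClassIndex(2).
        data.setClass(label);

        DenseInstance inst = new DenseInstance(3);
        inst.setDataset(data);
        inst.setValue(0, 1.0);
        inst.setValue(1, 2.5);
        inst.setValue(label, "yes");
        data.add(inst);

        System.out.println(data.classAttribute().name()); // prints "label"
    }
}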
Example 1: getUserSpecifiedDataset
import weka.core.Instances; // import the package/class the method depends on
/**
 * Create a new, smaller Instances object that contains only the attributes the user selected.
 *
 * It does this by collecting the indices the user wanted and removing every attribute that doesn't match.
 * If it were elegant, it wouldn't be Java. :/
 *
 * @return Instances newData
 */
public Instances getUserSpecifiedDataset() {
    Instances newData = new Instances(this.master);
    newData.setClass(newData.attribute(this.classIndex));
    // Box the selected indices so Arrays.asList(...).contains(...) can be used below.
    Integer[] keepAttributes = new Integer[this.userAttributeSelectionIndices.length];
    for (int i = 0; i < this.userAttributeSelectionIndices.length; i++) {
        keepAttributes[i] = Integer.valueOf(this.userAttributeSelectionIndices[i]);
    }
    Arrays.sort(keepAttributes, Collections.reverseOrder());
    // Walk the attribute indices from high to low so deletions don't shift
    // the indices that still have to be visited.
    for (int i = newData.numAttributes() - 1; i >= 0; --i) {
        if (!Arrays.asList(keepAttributes).contains(i)) {
            newData.deleteAttributeAt(i);
        }
    }
    return newData;
}
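As a side note, a more idiomatic way to keep only a subset of attributes in Weka is the unsupervised Remove filter with inverted selection. The sketch below is an illustration, not part of the original project; it assumes keepIndices holds the 0-based attribute indices to retain (including the class attribute, here assumed to be the last remaining one).

import weka.core.Instances;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Remove;

public class KeepAttributesSketch {
    /** Return a copy of data containing only the attributes whose indices are in keepIndices. */
    public static Instances keepOnly(Instances data, int[] keepIndices) throws Exception {
        Remove remove = new Remove();
        remove.setAttributeIndicesArray(keepIndices); // attributes to operate on (0-based)
        remove.setInvertSelection(true);              // invert: keep these, remove the rest
        remove.setInputFormat(data);
        Instances reduced = Filter.useFilter(data, remove);
        // Re-mark the class attribute on the reduced copy (assumed here to be the last one).
        reduced.setClassIndex(reduced.numAttributes() - 1);
        return reduced;
    }
}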
Example 2: convertToArff
import weka.core.Instances; // import the package/class the method depends on
public static Instances convertToArff(List<Document> dataSet, List<String> vocabulary, String fileName) {
    int dataSetSize = dataSet.size();
    /* Create features */
    ArrayList<Attribute> attributes = new ArrayList<>();
    for (int i = 0; i < vocabulary.size(); i++) {
        attributes.add(new Attribute("word_" + i));
    }
    Attribute classAttribute = new Attribute("Class");
    attributes.add(classAttribute);
    /* Add examples */
    System.out.println("Building instances...");
    Instances trainingDataSet = new Instances(fileName, attributes, 0);
    for (int k = 0; k < dataSetSize; k++) {
        Document document = dataSet.get(k);
        Instance example = new DenseInstance(attributes.size());
        for (int i = 0; i < vocabulary.size(); i++) {
            String word = vocabulary.get(i);
            example.setValue(i, Collections.frequency(document.getTerms(), word));
        }
        example.setValue(classAttribute, document.getDocumentClass());
        trainingDataSet.add(example);
        int progress = (int) ((k * 100.0) / dataSetSize);
        System.out.printf("\rPercent completed: %3d%%", progress);
    }
    trainingDataSet.setClass(classAttribute);
    System.out.println();
    System.out.println("Writing to file ...");
    try {
        ArffSaver saver = new ArffSaver();
        saver.setInstances(trainingDataSet);
        saver.setFile(new File(fileName));
        saver.writeBatch();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return trainingDataSet;
}
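To reuse the file written by convertToArff later, the saved ARFF can be loaded back and the class attribute re-marked before training. The snippet below is a generic sketch (not part of the original project) using Weka's ConverterUtils.DataSource; the file name is illustrative, and it assumes the numeric "Class" attribute should be converted to a nominal one before training a classifier such as J48.

import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.NumericToNominal;

public class LoadArffSketch {
    public static void main(String[] args) throws Exception {
        // Load the ARFF produced by convertToArff (path is illustrative).
        Instances data = new DataSource("dataset.arff").getDataSet();

        // convertToArff stores "Class" as a numeric attribute; most classifiers need a
        // nominal class, so convert the last attribute before marking it as the class.
        NumericToNominal toNominal = new NumericToNominal();
        toNominal.setAttributeIndices("last");
        toNominal.setInputFormat(data);
        data = Filter.useFilter(data, toNominal);

        // Mark the converted last attribute as the class attribute.
        data.setClass(data.attribute(data.numAttributes() - 1));

        J48 tree = new J48();
        tree.buildClassifier(data);
        System.out.println(tree);
    }
}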
Example 3: generateDecisionTree
import weka.core.Instances; // import the package/class the method depends on
protected Classifier generateDecisionTree(AbstractClusterer clusterer, MarkovAttributeSet aset, Instances data) throws Exception {
    // We need to create a new Attribute that has the ClusterId
    Instances newData = data; // new Instances(data);
    newData.insertAttributeAt(new Attribute("ClusterId"), newData.numAttributes());
    Attribute cluster_attr = newData.attribute(newData.numAttributes() - 1);
    assert(cluster_attr != null);
    assert(cluster_attr.index() > 0);
    newData.setClass(cluster_attr);

    // We will then tell the Classifier to predict that ClusterId based on the MarkovAttributeSet
    ObjectHistogram<Integer> cluster_h = new ObjectHistogram<Integer>();
    for (int i = 0, cnt = newData.numInstances(); i < cnt; i++) {
        // Grab the Instance and throw it at the clusterer to get the target cluster
        Instance inst = newData.instance(i);
        int c = (int) clusterer.clusterInstance(inst);
        inst.setClassValue(c);
        cluster_h.put(c);
    } // FOR
    System.err.println("Number of Elements: " + cluster_h.getValueCount());
    System.err.println(cluster_h);

    NumericToNominal filter = new NumericToNominal();
    filter.setInputFormat(newData);
    newData = Filter.useFilter(newData, filter);

    String output = this.catalog_proc.getName() + "-labeled.arff";
    FileUtil.writeStringToFile(output, newData.toString());
    LOG.info("Wrote labeled data set to " + output);

    // Decision Tree
    J48 j48 = new J48();
    String options[] = {
        "-S", Integer.toString(this.rand.nextInt()),
    };
    j48.setOptions(options);

    // Make sure we add the ClusterId attribute to a new MarkovAttributeSet so that
    // we can tell the Classifier to classify that!
    FilteredClassifier fc = new FilteredClassifier();
    MarkovAttributeSet classifier_aset = new MarkovAttributeSet(aset);
    classifier_aset.add(cluster_attr);
    fc.setFilter(classifier_aset.createFilter(newData));
    fc.setClassifier(j48);

    // Bombs away!
    fc.buildClassifier(newData);
    return (fc);
}
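A natural follow-up to generateDecisionTree is to check how well the returned classifier reproduces the cluster labels. The helper below is a generic sketch using Weka's Evaluation class with 10-fold cross-validation; the class name, method name, and parameters are illustrative and not part of the original project.

import java.util.Random;

import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.core.Instances;

public class EvaluateSketch {
    /** Run 10-fold cross-validation of the given classifier on the labeled data set. */
    public static void report(Classifier classifier, Instances labeledData) throws Exception {
        Evaluation eval = new Evaluation(labeledData);
        eval.crossValidateModel(classifier, labeledData, 10, new Random(1));
        System.out.println(eval.toSummaryString("\n=== 10-fold cross-validation ===\n", false));
    }
}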