This article collects typical usage examples of the Java method weka.core.Instances.setClass. If you are wondering what Instances.setClass does, how to call it, or what its use looks like in practice, the curated code samples below may help. You can also explore further usage examples of its enclosing class, weka.core.Instances.
Three code examples of Instances.setClass are shown below, sorted by popularity by default.
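Before diving into the collected examples, a quick refresher on what the method does: Instances.setClass(Attribute) marks an existing attribute of the dataset as the class (target) attribute, with the same effect as calling setClassIndex with that attribute's index. The short, self-contained sketch below is not one of the three collected examples; the relation and attribute names are made up for illustration.

import java.util.ArrayList;
import weka.core.Attribute;
import weka.core.Instances;

// Minimal illustration: build an empty dataset and mark its last attribute as the class.
public class SetClassDemo {
    public static void main(String[] args) {
        ArrayList<Attribute> attrs = new ArrayList<>();
        attrs.add(new Attribute("x1"));                  // numeric feature
        attrs.add(new Attribute("x2"));                  // numeric feature
        ArrayList<String> labels = new ArrayList<>();
        labels.add("yes");
        labels.add("no");
        attrs.add(new Attribute("label", labels));       // nominal class attribute

        Instances data = new Instances("demo", attrs, 0);
        data.setClass(data.attribute("label"));          // same effect as data.setClassIndex(data.numAttributes() - 1)
        System.out.println(data.classAttribute().name()); // prints "label"
    }
}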
Example 1: getUserSpecifiedDataset
import weka.core.Instances; // import the package/class the method depends on
/**
* Create a new, smaller Instances dataset that contains only the attributes the user selected.
*
* It does this by counting out the indices that the user wanted and removing anything that doesn't match.
* If it were elegant, it wouldn't be java. :/
*
* @return Instances newData
*/
public Instances getUserSpecifiedDataset() {
    Instances newData = new Instances(this.master);
    newData.setClass(newData.attribute(this.classIndex));
    // Box the selected indices so List.contains() can be used below.
    Integer[] keepAttributes = new Integer[this.userAttributeSelectionIndices.length];
    for (int i = 0; i < this.userAttributeSelectionIndices.length; i++) {
        keepAttributes[i] = Integer.valueOf(this.userAttributeSelectionIndices[i]);
    }
    // Walk every attribute in reverse so that deleting one doesn't shift the
    // indices of the attributes still waiting to be checked.
    for (int i = newData.numAttributes() - 1; i >= 0; --i) {
        if (!Arrays.asList(keepAttributes).contains(i)) {
            newData.deleteAttributeAt(i);
        }
    }
    return newData;
}
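As an aside, the same attribute selection can also be expressed with Weka's built-in Remove filter, which does the index bookkeeping itself. The following is only a hedged sketch, not the original author's code; the fields master, userAttributeSelectionIndices (assumed to hold 0-based attribute indices that include the class attribute) and classIndex are borrowed from the example above.

import weka.core.Instances;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Remove;

// Hypothetical alternative: keep only the user-selected attributes via an inverted Remove filter.
public Instances getUserSpecifiedDatasetWithFilter() throws Exception {
    Remove remove = new Remove();
    remove.setAttributeIndicesArray(this.userAttributeSelectionIndices); // 0-based indices to keep
    remove.setInvertSelection(true);              // keep the listed attributes, drop the rest
    remove.setInputFormat(this.master);
    Instances newData = Filter.useFilter(this.master, remove);
    // Look the class attribute up by name, since its position may have shifted after filtering.
    String className = this.master.attribute(this.classIndex).name();
    newData.setClass(newData.attribute(className));
    return newData;
}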
Example 2: convertToArff
import weka.core.Instances; // import the package/class the method depends on
public static Instances convertToArff(List<Document> dataSet, List<String> vocabulary, String fileName) {
    int dataSetSize = dataSet.size();
    /* Create features */
    ArrayList<Attribute> attributes = new ArrayList<>();
    for (int i = 0; i < vocabulary.size(); i++) {
        attributes.add(new Attribute("word_" + i));
    }
    Attribute classAttribute = new Attribute("Class");
    attributes.add(classAttribute);
    /* Add examples */
    System.out.println("Building instances...");
    Instances trainingDataSet = new Instances(fileName, attributes, 0);
    for (int k = 0; k < dataSetSize; k++) {
        Document document = dataSet.get(k);
        Instance example = new DenseInstance(attributes.size());
        for (int i = 0; i < vocabulary.size(); i++) {
            String word = vocabulary.get(i);
            example.setValue(i, Collections.frequency(document.getTerms(), word));
        }
        example.setValue(classAttribute, document.getDocumentClass());
        trainingDataSet.add(example);
        int progress = (int) ((k * 100.0) / dataSetSize);
        System.out.printf("\rPercent completed: %3d%%", progress);
    }
    trainingDataSet.setClass(classAttribute);
    System.out.println();
    System.out.println("Writing to file ...");
    try {
        ArffSaver saver = new ArffSaver();
        saver.setInstances(trainingDataSet);
        saver.setFile(new File(fileName));
        saver.writeBatch();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return trainingDataSet;
}
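One way the returned dataset might be consumed (a hedged sketch, not part of the original example): the "Class" attribute above is created as a numeric attribute, so it has to be converted to a nominal label before a classifier such as NaiveBayes will accept it. The helper name trainOnBagOfWords and the file name are made up for illustration, and 'documents' and 'vocabulary' are assumed to come from the caller's own preprocessing.

import java.util.List;
import weka.classifiers.bayes.NaiveBayes;
import weka.core.Instances;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.NumericToNominal;

// Hypothetical usage of convertToArff: build the bag-of-words dataset, make the class nominal, train.
static void trainOnBagOfWords(List<Document> documents, List<String> vocabulary) throws Exception {
    Instances train = convertToArff(documents, vocabulary, "bag-of-words.arff");

    // The "Class" attribute was declared numeric; convert it to a nominal label.
    train.setClassIndex(-1);                      // temporarily clear the class so the filter may change it
    NumericToNominal toNominal = new NumericToNominal();
    toNominal.setAttributeIndices("last");        // only touch the class column
    toNominal.setInputFormat(train);
    train = Filter.useFilter(train, toNominal);
    train.setClassIndex(train.numAttributes() - 1);

    NaiveBayes nb = new NaiveBayes();             // placeholder learner; any nominal-class classifier works
    nb.buildClassifier(train);
    System.out.println(nb);
}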
Example 3: generateDecisionTree
import weka.core.Instances; // import the package/class the method depends on
protected Classifier generateDecisionTree(AbstractClusterer clusterer, MarkovAttributeSet aset, Instances data) throws Exception {
    // We need to create a new Attribute that has the ClusterId
    Instances newData = data; // new Instances(data);
    newData.insertAttributeAt(new Attribute("ClusterId"), newData.numAttributes());
    Attribute cluster_attr = newData.attribute(newData.numAttributes() - 1);
    assert(cluster_attr != null);
    assert(cluster_attr.index() > 0);
    newData.setClass(cluster_attr);

    // We will then tell the Classifier to predict that ClusterId based on the MarkovAttributeSet
    ObjectHistogram<Integer> cluster_h = new ObjectHistogram<Integer>();
    for (int i = 0, cnt = newData.numInstances(); i < cnt; i++) {
        // Grab the Instance and throw it at the clusterer to get the target cluster
        Instance inst = newData.instance(i);
        int c = (int) clusterer.clusterInstance(inst);
        inst.setClassValue(c);
        cluster_h.put(c);
    } // FOR
    System.err.println("Number of Elements: " + cluster_h.getValueCount());
    System.err.println(cluster_h);

    NumericToNominal filter = new NumericToNominal();
    filter.setInputFormat(newData);
    newData = Filter.useFilter(newData, filter);

    String output = this.catalog_proc.getName() + "-labeled.arff";
    FileUtil.writeStringToFile(output, newData.toString());
    LOG.info("Wrote labeled data set to " + output);

    // Decision Tree
    J48 j48 = new J48();
    String options[] = {
        "-S", Integer.toString(this.rand.nextInt()),
    };
    j48.setOptions(options);

    // Make sure we add the ClusterId attribute to a new MarkovAttributeSet so that
    // we can tell the Classifier to classify that!
    FilteredClassifier fc = new FilteredClassifier();
    MarkovAttributeSet classifier_aset = new MarkovAttributeSet(aset);
    classifier_aset.add(cluster_attr);
    fc.setFilter(classifier_aset.createFilter(newData));
    fc.setClassifier(j48);

    // Bombs away!
    fc.buildClassifier(newData);
    return (fc);
}
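If you want to sanity-check the classifier returned by generateDecisionTree, one option (again a hedged sketch, not part of the original code) is Weka's Evaluation class; labeledData stands in for the same cluster-labeled Instances that buildClassifier was given above, and the helper name is made up.

import java.util.Random;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.core.Instances;

// Hypothetical check: cross-validate the FilteredClassifier on the cluster-labeled data.
static void evaluateClusterClassifier(Classifier fc, Instances labeledData) throws Exception {
    Evaluation eval = new Evaluation(labeledData);
    eval.crossValidateModel(fc, labeledData, 10, new Random(1));  // 10-fold CV with a fixed seed
    System.out.println(eval.toSummaryString("\nCluster-label prediction accuracy:\n", false));
}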