

Java J48.setOptions Method Code Examples

This article collects typical usage examples of the Java method weka.classifiers.trees.J48.setOptions. If you are wondering how J48.setOptions works, how to call it, or what real-world usage looks like, the hand-picked code examples below may help. You can also browse further usage examples of the enclosing class, weka.classifiers.trees.J48.


The following presents 9 code examples of J48.setOptions, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
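
Before the collected examples, here is a minimal, self-contained sketch of the usual call pattern: build an options array (or split an option string with weka.core.Utils.splitOptions), pass it to setOptions before buildClassifier, and then use the trained tree. The data path "data/iris.arff" and the class name J48SetOptionsSketch are placeholders for illustration and are not taken from any of the projects below.

import java.io.BufferedReader;
import java.io.FileReader;
import java.util.Random;

import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;

public class J48SetOptionsSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder ARFF file; replace with your own data set
        Instances data = new Instances(new BufferedReader(new FileReader("data/iris.arff")));
        data.setClassIndex(data.numAttributes() - 1);

        J48 tree = new J48();
        // -C 0.25: pruning confidence factor, -M 2: minimum instances per leaf
        tree.setOptions(weka.core.Utils.splitOptions("-C 0.25 -M 2"));
        tree.buildClassifier(data);

        // Estimate performance with 10-fold cross-validation
        Evaluation eval = new Evaluation(data);
        eval.crossValidateModel(tree, data, 10, new Random(1));
        System.out.println(eval.toSummaryString());
    }
}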

Example 1: performTraining

import weka.classifiers.trees.J48; // import the package/class the method depends on
private J48 performTraining() {
    J48 j48 = new J48();
    // Use an unpruned tree (-U)
    String[] options = {"-U"};
    try {
        j48.setOptions(options);
        j48.buildClassifier(trainingData);
    } catch (Exception ex) {
        ex.printStackTrace();
    }
    return j48;
}
 
Developer ID: PacktPublishing, Project: Java-Data-Science-Made-Easy, Lines of code: 13, Source: BookDecisionTree.java

Example 2: main

import weka.classifiers.trees.J48; // import the package/class the method depends on
public static void main(String[] args) throws Exception{
	
	String databasePath = "data/features.arff";
	
	// Load the data in arff format
	Instances data = new Instances(new BufferedReader(new FileReader(databasePath)));
	
	// Set the last attribute as the class
	data.setClassIndex(data.numAttributes() - 1);

	// Build a basic decision tree model
	String[] options = new String[]{};
	J48 model = new J48();
	model.setOptions(options);
	model.buildClassifier(data);
	
	// Output decision tree
	System.out.println("Decision tree model:\n"+model);
	
	// Output source code implementing the decision tree
	System.out.println("Source code:\n"+model.toSource("ActivityRecognitionEngine"));
	
	// Check accuracy of model using 10-fold cross-validation
	Evaluation eval = new Evaluation(data);
	eval.crossValidateModel(model, data, 10, new Random(1), new String[] {});
	System.out.println("Model performance:\n"+eval.toSummaryString());
	
	String[] activities = new String[]{"Walk", "Walk", "Walk", "Run", "Walk", "Run", "Run", "Sit", "Sit", "Sit"};
	DiscreteLowPass dlpFilter = new DiscreteLowPass(3);
	for(String str : activities){
		System.out.println(str +" -> "+ dlpFilter.filter(str));
	}
	
}
 
Developer ID: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines of code: 35, Source: ActivityRecognition.java

Example 3: executeClassifier

import weka.classifiers.trees.J48; // import the package/class the method depends on
/**
 * Executes the classifier.
 * 
 * @param prepfeatures the prepared features in arff format
 * @param modelfile the path to the serialized model
 * @param clusters the clusters to classify
 * @return a map of the classified clusters, the keys are the classes
 * 		and the values are lists of cluster id's belonging to those classes
 */
private Map<ClusterClass, List<StoredDomainCluster>> executeClassifier(String prepfeatures, String modelfile, 
		List<StoredDomainCluster> clusters){
	Map<ClusterClass, List<StoredDomainCluster>> retval = 
			new HashMap<ClusterClass, List<StoredDomainCluster>>();
	try{
		DataSource source = new DataSource(new ByteArrayInputStream(prepfeatures.getBytes()));
		Instances data = source.getDataSet();
		if (data.classIndex() == -1){
			data.setClassIndex(data.numAttributes() - 1);
		}
		String[] options = weka.core.Utils.splitOptions("-p 0");
		J48 cls = (J48)weka.core.SerializationHelper.read(modelfile);
		cls.setOptions(options);
		for(int i = 0; i < data.numInstances(); i++){
			double pred = cls.classifyInstance(data.instance(i));
			ClusterClass clusClass = ClusterClass.valueOf(
					data.classAttribute().value((int)pred).toUpperCase());
			if(!retval.containsKey(clusClass)){
				retval.put(clusClass, new ArrayList<StoredDomainCluster>());
			}
			retval.get(clusClass).add(clusters.get(i));
		}
	} catch (Exception e) {
		if(log.isErrorEnabled()){
			log.error("Error executing classifier.", e);
		}
	}
	return retval;
}
 
Developer ID: perdisci, Project: fluxbuster, Lines of code: 39, Source: Classifier.java

Example 4: crossValidation

import weka.classifiers.trees.J48; // import the package/class the method depends on
public void crossValidation(String file, ArrayList<String> keys) {
	try {
		// load the file
		loadTrainFile(file);

		// build the classifier
		String[] options = { "-C", "0.1", "-M", "30" };

		J48 tree = new J48(); // new instance of tree
		tree.setOptions(options); // set the options
		tree.buildClassifier(train); // build classifier

		// evaluate the classifier
		Evaluation eval = new Evaluation(train);
		eval.crossValidateModel(tree, train, 10, new Random(1));

		// print the results
		Logger.log(LogLevel.Classification, eval.toSummaryString());
		double[][] matrix = eval.confusionMatrix();

		for (int i = 0; i < matrix.length; i++) {
			double[] line = matrix[i];
			for (int j = 0; j < line.length; j++) {
				System.out.print((int) line[j] + "\t");
			}
			System.out.println();
		}

		printLatex(keys, matrix);
	} catch (Exception e) {
		Logger.log(LogLevel.Error, e.toString());
	}
}
 
Developer ID: mbraeunlein, Project: ExtendedHodoku, Lines of code: 34, Source: Analyzer.java

Example 5: generateDecisionTree

import weka.classifiers.trees.J48; // import the package/class the method depends on
protected Classifier generateDecisionTree(AbstractClusterer clusterer, MarkovAttributeSet aset, Instances data) throws Exception {
    // We need to create a new Attribute that has the ClusterId
    Instances newData = data; // new Instances(data);
    newData.insertAttributeAt(new Attribute("ClusterId"), newData.numAttributes());
    Attribute cluster_attr = newData.attribute(newData.numAttributes()-1);
    assert(cluster_attr != null);
    assert(cluster_attr.index() > 0);
    newData.setClass(cluster_attr);
    
    // We will then tell the Classifier to predict that ClusterId based on the MarkovAttributeSet
    ObjectHistogram<Integer> cluster_h = new ObjectHistogram<Integer>();
    for (int i = 0, cnt = newData.numInstances(); i < cnt; i++) {
        // Grab the Instance and throw it at the the clusterer to get the target cluster
        Instance inst = newData.instance(i);
        int c = (int)clusterer.clusterInstance(inst);
        inst.setClassValue(c);
        cluster_h.put(c);
    } // FOR
    System.err.println("Number of Elements: " + cluster_h.getValueCount());
    System.err.println(cluster_h);

    NumericToNominal filter = new NumericToNominal();
    filter.setInputFormat(newData);
    newData = Filter.useFilter(newData, filter);
    
    String output = this.catalog_proc.getName() + "-labeled.arff";
    FileUtil.writeStringToFile(output, newData.toString());
    LOG.info("Wrote labeled data set to " + output);
    
    // Decision Tree
    J48 j48 = new J48();
    String[] options = { "-S", Integer.toString(this.rand.nextInt()) };
    j48.setOptions(options);

    // Make sure we add the ClusterId attribute to a new MarkovAttributeSet so that
    // we can tell the Classifier to classify that!
    FilteredClassifier fc = new FilteredClassifier();
    MarkovAttributeSet classifier_aset = new MarkovAttributeSet(aset);
    classifier_aset.add(cluster_attr);
    fc.setFilter(classifier_aset.createFilter(newData));
    fc.setClassifier(j48);
    
    // Bombs away!
    fc.buildClassifier(newData);
    
    return (fc);
}
 
Developer ID: s-store, Project: sstore-soft, Lines of code: 51, Source: FeatureClusterer.java

Example 6: classifyJ48

import weka.classifiers.trees.J48; // import the package/class the method depends on
public J48 classifyJ48(Instances data, String[] options) throws Exception {
    J48 tree = new J48();
    tree.setOptions(options);
    tree.buildClassifier(data);
    return tree;
}
 
Developer ID: andrzejtrzaska, Project: VoiceStressAnalysis, Lines of code: 7, Source: Classification.java

Example 7: main

import weka.classifiers.trees.J48; // import the package/class the method depends on
public static void main(String[] args) {
        Instances instances = LoadUtils.loadDataFile("paw02a-600-5-0-BI-ver-3-zmieszane.arff");
        Random rand = new Random(1);
        instances.randomize(rand);
        RandomSplitResultProducer  x = new RandomSplitResultProducer();

        if (instances.classIndex() == -1) {
            instances.setClassIndex(instances.numAttributes() - 1);

        }
        double percent = 66;
        int trainSize = (int) Math.round(instances.numInstances() * percent
                / 100);
        int testSize = instances.numInstances() - trainSize;
        Instances train = new Instances(instances, 0, trainSize);
        Instances test = new Instances(instances, trainSize, testSize);
        train.randomize(rand);
        test.randomize(rand);
        try {
            J48 classifier = new J48();
            classifier.setOptions(weka.core.Utils.splitOptions("-U -M 2"));


            FilteredClassifier fc = new FilteredClassifier();

//                SMOTE smote = new SMOTE();
//                smote.setOptions(weka.core.Utils.splitOptions("-C 0 -K 5 -P " + getSMOTEPercentage(train) + " -S 1"));
//                smote.setInputFormat(train);
//                fc.setFilter(smote);

            fc.setClassifier(classifier);
            fc.buildClassifier(train);
            Evaluation eval = new Evaluation(train);

            eval.evaluateModel(fc, test);

            System.out.println(eval.toSummaryString());
            System.out.println(eval.toMatrixString());

        } catch (Exception e) {
            e.printStackTrace();
        }

    }
 
Developer ID: kokojumbo, Project: master-thesis, Lines of code: 45, Source: Main.java

Example 8: runnerSimpleClasssifierCV

import weka.classifiers.trees.J48; // import the package/class the method depends on
public static void runnerSimpleClasssifierCV(String[] fileNames, boolean smoteEnable) {
    for (String filename : fileNames) {
        System.out.println("======================================================================");
        System.out.println("======================================================================");
        System.out.println("======================================================================");
        System.out.println("======================      " + filename + "       =======================");
        System.out.println("======================================================================");
        System.out.println("======================================================================");
        System.out.println("======================================================================");


        Instances instances = LoadUtils.loadDataFile(filename);

        if (instances.classIndex() == -1) {
            instances.setClassIndex(instances.numAttributes() - 1);

        }


        try {
            J48 classifier = new J48();
            classifier.setOptions(weka.core.Utils.splitOptions("-U -M 2"));


            FilteredClassifier fc = new FilteredClassifier();
            if (smoteEnable) {
                SMOTE smote = new SMOTE();
                smote.setOptions(weka.core.Utils.splitOptions("-C 0 -K 5 -P " + getSMOTEPercentage(instances) + " -S 1"));
                smote.setInputFormat(instances);
                fc.setFilter(smote);
            }
            fc.setClassifier(classifier);
            fc.buildClassifier(instances);
            Evaluation eval = new Evaluation(instances);
            eval.crossValidateModel(fc, instances, 10, new Random(1));
            printResults(eval);

        } catch (Exception e) {
            e.printStackTrace();
        }


    }

}
 
Developer ID: kokojumbo, Project: master-thesis, Lines of code: 46, Source: MainClusterClassifier.java

Example 9: runnerSimpleClasssifierTrainTest

import weka.classifiers.trees.J48; // import the package/class the method depends on
public static void runnerSimpleClasssifierTrainTest(String[] fileNames, boolean smoteEnable) {
    for (String filename : fileNames) {
        System.out.println("======================================================================");
        System.out.println("======================================================================");
        System.out.println("======================================================================");
        System.out.println("======================      " + filename + "       =======================");
        System.out.println("======================================================================");
        System.out.println("======================================================================");
        System.out.println("======================================================================");


        Instances instances = LoadUtils.loadDataFile(filename);
        instances.randomize(new Random(0));
        if (instances.classIndex() == -1) {
            instances.setClassIndex(instances.numAttributes() - 1);

        }
        double percent = 66.6;
        int trainSize = (int) Math.round(instances.numInstances() * percent
                / 100);
        int testSize = instances.numInstances() - trainSize;
        Instances train = new Instances(instances, 0, trainSize);
        Instances test = new Instances(instances, trainSize, testSize);

        try {
            J48 classifier = new J48();
            classifier.setOptions(weka.core.Utils.splitOptions("-U -M 2"));


            FilteredClassifier fc = new FilteredClassifier();
            if (smoteEnable) {
                SMOTE smote = new SMOTE();
                smote.setOptions(weka.core.Utils.splitOptions("-C 0 -K 5 -P " + getSMOTEPercentage(train) + " -S 1"));
                smote.setInputFormat(train);
                fc.setFilter(smote);
            }
            fc.setClassifier(classifier);
            fc.buildClassifier(train);
            Evaluation eval = new Evaluation(train);

            eval.evaluateModel(fc, test);

            printResults(eval);


        } catch (Exception e) {
            e.printStackTrace();
        }


    }

}
 
Developer ID: kokojumbo, Project: master-thesis, Lines of code: 54, Source: MainClusterClassifier.java


Note: The weka.classifiers.trees.J48.setOptions method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce without permission.