Java Instances.setClassIndex Method Code Examples

This article collects typical usage examples of the Java method weka.core.Instances.setClassIndex. If you are wondering how Instances.setClassIndex is used in practice and what real code that calls it looks like, the hand-picked method examples below should help. You can also explore further usage examples of the enclosing class, weka.core.Instances.


The following shows 15 code examples of the Instances.setClassIndex method, sorted by popularity by default.
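
Before the project examples, here is a minimal sketch of the usual pattern, assuming a placeholder ARFF file at "data/example.arff" (not taken from any of the projects below): a Weka dataset does not know which attribute is the class until you tell it, so code typically loads the data and then designates one attribute, by convention the last one, as the class.

import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class SetClassIndexDemo {
    public static void main(String[] args) throws Exception {
        // load a dataset (placeholder path)
        Instances data = DataSource.read("data/example.arff");
        // designate the last attribute as the class; classIndex() stays -1 until this call
        data.setClassIndex(data.numAttributes() - 1);
        System.out.println("Class attribute: " + data.classAttribute().name());
    }
}

If you already hold an Attribute reference, Instances.setClass(Attribute) is an equivalent alternative to setClassIndex(int).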

Example 1: buildAssociate

import weka.core.Instances; // import the package/class that the method depends on
public static String buildAssociate() throws Exception {
    InstanceQuery query = new InstanceQuery();
    query.setUsername("root");
    query.setPassword("cs6310");
    query.setDatabaseURL("jdbc:mysql://localhost/system?#characterEncoding=UTF-8");
    query.setQuery("select * from courses_sessions;");
    // You can declare that your data set is sparse
    // query.setSparseData(true);
    Instances data = query.retrieveInstances();
    data.setClassIndex(data.numAttributes() - 1);

    final NumericToNominal filter = new NumericToNominal();

    filter.setInputFormat(data);
    data = Filter.useFilter(data, filter);
    if (data.size() > 0) {
        // build associator
        Apriori apriori = new Apriori();
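        // point Apriori at the same class attribute (used when mining class association rules)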
        apriori.setClassIndex(data.classIndex());
        apriori.buildAssociations(data);
        return String.valueOf(apriori);
    } else {
        return "Not enough data provided";
    }
}
 
Developer ID: ejesposito, Project: CS6310O01, Lines of code: 26, Source file: WekaDataMiner.java

Example 2: getDist

import weka.core.Instances; // import the package/class that the method depends on
/**
 * <p>Gets the distribution of inTrace and outTrace instances in the dataset located at <b>path</b>.</p>
 * @param path path of the project's ARFF dataset
 * @throws Exception 
 */
public static void getDist(String path) throws Exception{
	
	Instances ins = DataSource.read(path);
	int numAttr = ins.numAttributes();
	ins.setClassIndex(numAttr-1);
	
	int numIns = ins.numInstances();
	int intrace = 0;
	int outtrace = 0;
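	// count instances whose class value is "InTrace"; everything else is counted as outTrace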
	for(int i=0; i<numIns; i++){
		if(ins.get(i).stringValue(ins.attribute(ins.classIndex())).equals("InTrace")){
			intrace++;
		}else{	
			outtrace++;
		}
	}
	
	System.out.printf("[ %-30s ] inTrace:%4d, outTrace:%4d.\n", path, intrace, outtrace);
}
 
Developer ID: Gu-Youngfeng, Project: CraTer, Lines of code: 25, Source file: StatisticalProject.java

Example 3: getEvalResultbySMOTE

import weka.core.Instances; // import the package/class that the method depends on
/***
	 * <p>Runs 10-fold cross-validation on the single ARFF dataset at <b>path</b>.</p>
	 * <p>Uses C4.5 together with <b>SMOTE</b> to classify the dataset.</p>
	 * @param path dataset path
	 * @param index row of the results array in which to store the evaluation metrics
	 * @throws Exception
	 */
	public static void getEvalResultbySMOTE(String path, int index) throws Exception{
		
		Instances ins = DataSource.read(path);
		int numAttr = ins.numAttributes();
		ins.setClassIndex(numAttr - 1);
		
		SMOTE smote = new SMOTE();
		smote.setInputFormat(ins);
		
		/** classifiers setting*/
		J48 j48 = new J48();
//		j48.setConfidenceFactor(0.4f);
		j48.buildClassifier(ins);

		FilteredClassifier fc = new FilteredClassifier();
		fc.setClassifier(j48);
		fc.setFilter(smote);
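		// wrapping SMOTE in a FilteredClassifier ensures the oversampling is learned from (and applied to) the training folds only during cross-validation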
			
		Evaluation eval = new Evaluation(ins);	
		eval.crossValidateModel(fc, ins, 10, new Random(1));
		
//		System.out.printf(" %4.3f %4.3f %4.3f", eval.precision(0), eval.recall(0), eval.fMeasure(0));
//		System.out.printf(" %4.3f %4.3f %4.3f", eval.precision(1), eval.recall(1), eval.fMeasure(1));
//		System.out.printf(" %4.3f \n\n", (1-eval.errorRate()));
		results[index][0] = eval.precision(0);
		results[index][1] = eval.recall(0);
		results[index][2] = eval.fMeasure(0);
		results[index][3] = eval.precision(1);
		results[index][4] = eval.recall(1);
		results[index][5] = eval.fMeasure(1);
		results[index][6] = 1-eval.errorRate();
				
	}
 
Developer ID: Gu-Youngfeng, Project: CraTer, Lines of code: 40, Source file: ImbalanceProcessingAve.java

Example 4: main

import weka.core.Instances; // import the package/class that the method depends on
/**
 * Runs a test case of the backend.
 * Instantiates the command pattern, loads data into an Instances object, and runs some tests.
 *
 * @param args command-line arguments (unused)
 */
public static void main(String[] args) {
    int maxThreads = 8;
    //Dispatcher dispatch = new Dispatcher(maxThreads);
    NoUserParameterDispatcherBuilder handler = new NoUserParameterDispatcherBuilder();

    //load some data
    String path = "src/test/resources/data/cpu.arff";
    Instances data = null;
    try {
        data = read_data(path);
    } catch (IOException e) {
        System.out.println("Error opening file: " + path +", exiting.");
        System.out.println("Error: " + e);
        System.out.println(System.getProperty("user.dir"));
        System.exit(0);
    }

    //designate the last column as the data's 'class'
    data.setClassIndex(data.numAttributes() - 1);

    //give data to the WekaTaskManager
    handler.dispatcher.setData(data);
    handler.dispatcher.setTimeLimit(1);
    handler.dispatcher.launch();
}
 
Developer ID: optimusmoose, Project: miniML, Lines of code: 32, Source file: backendTest.java

Example 5: defineDataset

import weka.core.Instances; // import the package/class that the method depends on
/**
 * Defines the structure of a Weka table.
 * 
 * @param features
 *            Holds all features, including a label if training data is
 *            being created.
 * @param datasetName
 *            Holds the dataset's name
 * @return the newly created, empty dataset
 */

private Instances defineDataset(FeatureVectorDataSet features, String datasetName) {
	ArrayList<weka.core.Attribute> attributes = new ArrayList<weka.core.Attribute>();
	// create features per attributes of the FeatureVectorDataSet
	for (Iterator<Attribute> attrIterator = features.getSchema().get().iterator(); attrIterator.hasNext();) {
		Attribute attr = attrIterator.next();
		if (!attr.equals(FeatureVectorDataSet.ATTRIBUTE_LABEL)) {
			weka.core.Attribute attribute = new weka.core.Attribute(attr.getIdentifier());
			attributes.add(attribute);
		}
	}

	// Treat the label as a special case, which is always at the last
	// position of the dataset.
	ArrayList<String> labels = new ArrayList<String>();
	labels.add("0");
	labels.add("1");
	weka.core.Attribute cls = new weka.core.Attribute("class", labels);
	attributes.add(cls);

	Instances dataset = new Instances(datasetName, attributes, 0);
	dataset.setClassIndex(attributes.size() - 1);
	return dataset;
}
 
Developer ID: olehmberg, Project: winter, Lines of code: 35, Source file: WekaMatchingRule.java

Example 6: main

import weka.core.Instances; // import the package/class that the method depends on
public static void main(String[] args) throws Exception{
	
	String databasePath = "data/features.arff";
	
	// Load the data in arff format
	Instances data = new Instances(new BufferedReader(new FileReader(databasePath)));
	
	// Set the last attribute as the class
	data.setClassIndex(data.numAttributes() - 1);

	// Build a basic decision tree model
	String[] options = new String[]{};
	J48 model = new J48();
	model.setOptions(options);
	model.buildClassifier(data);
	
	// Output decision tree
	System.out.println("Decision tree model:\n"+model);
	
	// Output source code implementing the decision tree
	System.out.println("Source code:\n"+model.toSource("ActivityRecognitionEngine"));
	
	// Check accuracy of model using 10-fold cross-validation
	Evaluation eval = new Evaluation(data);
	eval.crossValidateModel(model, data, 10, new Random(1), new String[] {});
	System.out.println("Model performance:\n"+eval.toSummaryString());
	
	String[] activities = new String[]{"Walk", "Walk", "Walk", "Run", "Walk", "Run", "Run", "Sit", "Sit", "Sit"};
	DiscreteLowPass dlpFilter = new DiscreteLowPass(3);
	for(String str : activities){
		System.out.println(str +" -> "+ dlpFilter.filter(str));
	}
	
}
 
Developer ID: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines of code: 35, Source file: ActivityRecognition.java

Example 7: testCOMT2

import weka.core.Instances; // import the package/class that the method depends on
public static void testCOMT2() throws Exception{
	BestConf bestconf = new BestConf();
	Instances trainingSet = DataIOFile.loadDataFromArffFile("data/trainingBestConf0.arff");
	trainingSet.setClassIndex(trainingSet.numAttributes()-1);
	
	Instances samplePoints = LHSInitializer.getMultiDimContinuous(bestconf.getAttributes(), InitialSampleSetSize, false);
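	// append the training set's class attribute to the sampled points, then mark it as the class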
	samplePoints.insertAttributeAt(trainingSet.classAttribute(), samplePoints.numAttributes());
	samplePoints.setClassIndex(samplePoints.numAttributes()-1);
	
	COMT2 comt = new COMT2(samplePoints, COMT2Iteration);
	
	comt.buildClassifier(trainingSet);
	
	Evaluation eval = new Evaluation(trainingSet);
	eval.evaluateModel(comt, trainingSet);
	System.err.println(eval.toSummaryString());
	
	Instance best = comt.getInstanceWithPossibleMaxY(samplePoints.firstInstance());
	Instances bestInstances = new Instances(trainingSet,2);
	bestInstances.add(best);
	DataIOFile.saveDataToXrffFile("data/trainingBestConf_COMT2.arff", bestInstances);
	
	//now we output the training set with the class value updated as the predicted value
	Instances output = new Instances(trainingSet, trainingSet.numInstances());
	Enumeration<Instance> enu = trainingSet.enumerateInstances();
	while(enu.hasMoreElements()){
		Instance ins = enu.nextElement();
		double[] values = ins.toDoubleArray();
		values[values.length-1] = comt.classifyInstance(ins);
		output.add(ins.copy(values));
	}
	DataIOFile.saveDataToXrffFile("data/trainingBestConf0_predict.xrff", output);
}
 
Developer ID: zhuyuqing, Project: BestConfig, Lines of code: 34, Source file: BestConf.java

Example 8: Main

import weka.core.Instances; // import the package/class that the method depends on
public Main() {
    try {
        BufferedReader datafile;
        datafile = readDataFile("camping.txt");
        Instances data = new Instances(datafile);
        data.setClassIndex(data.numAttributes() - 1);

        Instances trainingData = new Instances(data, 0, 14);
        Instances testingData = new Instances(data, 14, 5);
        Evaluation evaluation = new Evaluation(trainingData);

        SMO smo = new SMO();
        smo.buildClassifier(data);

        evaluation.evaluateModel(smo, testingData);
        System.out.println(evaluation.toSummaryString());

        // Test instance 
        Instance instance = new DenseInstance(3);
        instance.setValue(data.attribute("age"), 78);
        instance.setValue(data.attribute("income"), 125700);
        instance.setValue(data.attribute("camps"), 1);            
        instance.setDataset(data);
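        // associate the new instance with the dataset header so the classifier knows its attribute and class structure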
        System.out.println("The instance: " + instance);
        System.out.println(smo.classifyInstance(instance));
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
 
Developer ID: PacktPublishing, Project: Java-Data-Science-Made-Easy, Lines of code: 30, Source file: Main-SVG.java

Example 9: BookDecisionTree

import weka.core.Instances; // import the package/class that the method depends on
public BookDecisionTree(String fileName) {
    try {
        BufferedReader reader = new BufferedReader(new FileReader(fileName));
        trainingData = new Instances(reader);
        trainingData.setClassIndex(trainingData.numAttributes() - 1);
    } catch (IOException ex) {
        ex.printStackTrace();
    }
}
 
Developer ID: PacktPublishing, Project: Java-Data-Science-Made-Easy, Lines of code: 10, Source file: BookDecisionTree.java

Example 10: getIns

import weka.core.Instances; // import the package/class that the method depends on
/***
	 * <p>Merges the datasets listed in the <b>path</b> array and saves the combined dataset under <b>dirpath</b>.
	 * </p>
	 * @param path String array of ARFF file paths
	 * @param dirpath directory in which to save the merged ARFF file
	 * @throws Exception
	 */
	public static void getIns(String[] path, String dirpath) throws Exception{
		
		/** Create an empty dataset named total */
		Instances total = new Instances("total3500", getStandAttrs(), 1);
		
		total.setClassIndex(total.numAttributes() - 1);
		
		int len = path.length;
		Instances[] temp = new Instances[len];
		
		for(int i=0; i<path.length; i++){
			
			temp[i] = DataSource.read(path[i]);
			temp[i].setClassIndex(temp[i].numAttributes() - 1);
			
			total.addAll(temp[i]);
			System.out.println("adding " + path[i] + " " + temp[i].numInstances());
//			System.out.println("data" + total.numInstances() + "\n");
		}
		
		String totalName = dirpath+"total3500" + String.valueOf(System.currentTimeMillis()) + ".arff";
		
		DataSink.write(totalName, total);
		System.out.println("Writing the data into [" + totalName + "] successfully.\n");
	}
 
Developer ID: Gu-Youngfeng, Project: CraTer, Lines of code: 33, Source file: InsMerge.java

Example 11: getEvalResultbyNo

import weka.core.Instances; // import the package/class that the method depends on
/***
	 * <p>Runs 10-fold cross-validation on the single ARFF dataset at <b>path</b>.</p>
	 * <p>Uses only plain C4.5 to classify the dataset.</p>
	 * @param path dataset path
	 * @param index row of the results array in which to store the evaluation metrics
	 * @throws Exception
	 */
	public static void getEvalResultbyNo(String path, int index) throws Exception{
		
		Instances ins = DataSource.read(path);
		int numAttr = ins.numAttributes();
		ins.setClassIndex(numAttr - 1);
		
		/** classifiers setting*/
		J48 j48 = new J48();
//		j48.setConfidenceFactor(0.4f);
		j48.buildClassifier(ins);
		
		Evaluation eval = new Evaluation(ins);	
		eval.crossValidateModel(j48, ins, 10, new Random(1));
		
//		System.out.printf(" %4.3f %4.3f %4.3f", eval.precision(0), eval.recall(0), eval.fMeasure(0));
//		System.out.printf(" %4.3f %4.3f %4.3f", eval.precision(1), eval.recall(1), eval.fMeasure(1));
//		System.out.printf(" %4.3f \n\n", (1-eval.errorRate()));
		results[index][0] = eval.precision(0);
		results[index][1] = eval.recall(0);
		results[index][2] = eval.fMeasure(0);
		results[index][3] = eval.precision(1);
		results[index][4] = eval.recall(1);
		results[index][5] = eval.fMeasure(1);
		results[index][6] = 1-eval.errorRate();
			
	}
 
Developer ID: Gu-Youngfeng, Project: CraTer, Lines of code: 33, Source file: ImbalanceProcessingAve.java

Example 12: constructInstances

import weka.core.Instances; // import the package/class that the method depends on
public static Instances constructInstances(List<Pair<String, Attribute>> attributesList, List<Observation> observations) {
    List<Pair<String, Attribute>> attributes = attributesList
            .stream()
            .filter(pair -> !pair.getValue().isString())
            .collect(Collectors.toList());

    Instances trainingSet = new Instances(
            "Standard set",
            (ArrayList<Attribute>) attributes
                    .stream()
                    .map(pair -> pair.getValue())
                    .collect(Collectors.toList()),
            observations.size());

    for(int i = 0; i < observations.size(); i ++) {
        Instance instance = new DenseInstance(attributes.size());

        for(Pair<String, Attribute> attributePair : attributes) {
            updateInstance(attributePair, instance, observations, i);
        }

        trainingSet.add(instance);
    }

    trainingSet.setClassIndex(attributes.stream().map(pair -> pair.getKey()).collect(Collectors.toList()).indexOf(Fields.GENDER.name()));
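    // the class attribute is GENDER: its position in the filtered attribute list becomes the class index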

    return trainingSet;
}
 
Developer ID: GeorgiMateev, Project: twitter-user-gender-classification, Lines of code: 29, Source file: FeatureVectorsFactory.java

Example 13: getEvalResultbyCost

import weka.core.Instances; // import the package/class that the method depends on
/***
	 * <p>Runs 10-fold cross-validation on the single ARFF dataset at <b>path</b>.</p>
	 * <p>Uses C4.5 together with <b>cost-sensitive learning</b> to classify the dataset.</p>
	 * @param path dataset path
	 * @param index row of the results array in which to store the evaluation metrics
	 * @throws Exception
	 */
	public static void getEvalResultbyCost(String path, int index) throws Exception{
		
		Instances ins = DataSource.read(path);
		int numAttr = ins.numAttributes();
		ins.setClassIndex(numAttr - 1);
		
		/**Classifier setting*/
		J48 j48 = new J48();
//		j48.setConfidenceFactor(0.4f);
		j48.buildClassifier(ins);
		
		CostSensitiveClassifier csc = new CostSensitiveClassifier();
		csc.setClassifier(j48);
		csc.setCostMatrix(new CostMatrix(new BufferedReader(new FileReader("files/costm"))));
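		// the misclassification cost matrix is read from the plain-text file "files/costm"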
		
		Evaluation eval = new Evaluation(ins);
		
		eval.crossValidateModel(csc, ins, 10, new Random(1));
		
//		System.out.printf(" %4.3f %4.3f %4.3f", eval.precision(0), eval.recall(0), eval.fMeasure(0));
//		System.out.printf(" %4.3f %4.3f %4.3f", eval.precision(1), eval.recall(1), eval.fMeasure(1));
//		System.out.printf(" %4.3f \n\n", (1-eval.errorRate()));
		results[index][0] = eval.precision(0);
		results[index][1] = eval.recall(0);
		results[index][2] = eval.fMeasure(0);
		results[index][3] = eval.precision(1);
		results[index][4] = eval.recall(1);
		results[index][5] = eval.fMeasure(1);
		results[index][6] = 1-eval.errorRate();
			
	}
 
Developer ID: Gu-Youngfeng, Project: CraTer, Lines of code: 38, Source file: ImbalanceProcessingAve.java

Example 14: train

import weka.core.Instances; // import the package/class that the method depends on
/**
 * This function only trains the model on the trainSet as-is.
 * In other words, no feature removal is done here.
 * 
 * @param trainSet the training instances
 * @throws Exception 
 */
public void train(Instances trainSet) throws Exception {
    trainSet.setClassIndex(trainSet.numAttributes() - 1);
    // set classifier: use linear SVM only
    String[] options = weka.core.Utils.splitOptions("-K 0");
    String classifierName = "weka.classifiers.functions.LibSVM";
    this.m_Classifier = Classifier.forName(classifierName, options);
    // get probability instead of explicit prediction
    LibSVM libsvm = (LibSVM) this.m_Classifier;
    libsvm.setProbabilityEstimates(true);
    // build model
    this.m_Classifier.buildClassifier(trainSet);
}
 
Developer ID: NLPReViz, Project: emr-nlp-server, Lines of code: 20, Source file: SVMPredictor.java

Example 15: createInstances

import weka.core.Instances; // import the package/class that the method depends on
/**
 * Creates a basic model.
 *
 * @return the model
 */
private Instances createInstances() {
	ArrayList<Attribute> attributes = new ArrayList<>();
	attributes.add(new Attribute("text", (ArrayList<String>) null));
	attributes.add(new Attribute("@@[email protected]@", CLASSES));
	Instances instances = new Instances("instances", attributes, 0);
	instances.setClassIndex(instances.numAttributes() - 1);
	return instances;
}
 
Developer ID: venilnoronha, Project: movie-rating-prediction, Lines of code: 14, Source file: SVMTrainerImpl.java


Note: The weka.core.Instances.setClassIndex examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution or use should follow the License of the corresponding project. Do not republish without permission.