当前位置: 首页>>代码示例>>Java>>正文


Java Instances.setClassIndex方法代码示例

本文整理汇总了Java中weka.core.Instances.setClassIndex方法的典型用法代码示例。如果您正苦于以下问题:Java Instances.setClassIndex方法的具体用法?Java Instances.setClassIndex怎么用?Java Instances.setClassIndex使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在weka.core.Instances的用法示例。


在下文中一共展示了Instances.setClassIndex方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: buildAssociate

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * Loads the courses_sessions table from the local MySQL database, converts
 * its numeric attributes to nominal ones and mines association rules with
 * the Apriori algorithm.
 *
 * @return the textual representation of the learned association rules, or
 *         an error message when the table is empty
 * @throws Exception if the database query, filtering or mining fails
 */
public static String buildAssociate() throws Exception {
    InstanceQuery query = new InstanceQuery();
    query.setUsername("root");
    query.setPassword("cs6310");
    // FIX: the original URL was "...?#characterEncoding=UTF-8" — everything
    // after '#' is a URI fragment, so the driver never saw the parameter.
    query.setDatabaseURL("jdbc:mysql://localhost/system?characterEncoding=UTF-8");
    query.setQuery("select * from courses_sessions;");
    // You can declare that your data set is sparse
    // query.setSparseData(true);
    Instances data = query.retrieveInstances();
    // Convention throughout this file: the last attribute is the class.
    data.setClassIndex(data.numAttributes() - 1);

    // Apriori requires nominal attributes, so convert numeric ones first.
    final NumericToNominal filter = new NumericToNominal();
    filter.setInputFormat(data);
    data = Filter.useFilter(data, filter);

    if (data.size() > 0) {
        // build associator
        Apriori apriori = new Apriori();
        apriori.setClassIndex(data.classIndex());
        apriori.buildAssociations(data);
        return String.valueOf(apriori);
    } else {
        return "Not enough data provided";
    }
}
 
开发者ID:ejesposito,项目名称:CS6310O01,代码行数:26,代码来源:WekaDataMiner.java

示例2: getDist

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * <p>Prints the distribution of "InTrace" vs. other ("outTrace") instances
 * in the dataset stored at the given <b>path</b>.</p>
 *
 * @param path path of the ARFF dataset file to read
 * @throws Exception if the dataset cannot be read
 */
public static void getDist(String path) throws Exception{
	
	Instances ins = DataSource.read(path);
	int numAttr = ins.numAttributes();
	// Convention in this project: the class attribute is the last column.
	ins.setClassIndex(numAttr-1);
	
	int numIns = ins.numInstances();
	int intrace = 0;
	int outtrace = 0;
	for(int i=0; i<numIns; i++){
		// Count instances whose class value is the nominal label "InTrace";
		// everything else is counted as outTrace.
		if(ins.get(i).stringValue(ins.attribute(ins.classIndex())).equals("InTrace")){
			intrace++;
		}else{	
			outtrace++;
		}
	}
	
	System.out.printf("[ %-30s ] inTrace:%4d, outTrace:%4d.\n", path, intrace, outtrace);
}
 
开发者ID:Gu-Youngfeng,项目名称:CraTer,代码行数:25,代码来源:StatisticalProject.java

示例3: getEvalResultbySMOTE

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * <p>Runs a 10-fold cross validation on the single ARFF dataset in <b>path</b>.</p>
 * <p>Uses C4.5 (J48) with <b>SMOTE</b> oversampling applied inside each
 * training fold, then stores per-class precision/recall/F-measure and the
 * overall accuracy into row {@code index} of the shared {@code results} matrix.</p>
 *
 * @param path  dataset path
 * @param index row of the {@code results} matrix to fill
 * @throws Exception if the dataset cannot be read or evaluation fails
 */
	public static void getEvalResultbySMOTE(String path, int index) throws Exception{
		
		Instances ins = DataSource.read(path);
		// Convention: the class attribute is the last column.
		ins.setClassIndex(ins.numAttributes() - 1);
		
		// SMOTE is wrapped in a FilteredClassifier below so oversampling is
		// applied only to the training folds, never to the test fold.
		SMOTE smote = new SMOTE();
		smote.setInputFormat(ins);
		
		/** classifiers setting*/
		J48 j48 = new J48();
//		j48.setConfidenceFactor(0.4f);
		// FIX: removed the original j48.buildClassifier(ins) call — that model
		// was discarded, because crossValidateModel trains a fresh copy of the
		// classifier on each fold; building on the full data was wasted work.

		FilteredClassifier fc = new FilteredClassifier();
		fc.setClassifier(j48);
		fc.setFilter(smote);
			
		Evaluation eval = new Evaluation(ins);
		eval.crossValidateModel(fc, ins, 10, new Random(1));
		
		// Row layout: P/R/F for class 0, P/R/F for class 1, overall accuracy.
		results[index][0] = eval.precision(0);
		results[index][1] = eval.recall(0);
		results[index][2] = eval.fMeasure(0);
		results[index][3] = eval.precision(1);
		results[index][4] = eval.recall(1);
		results[index][5] = eval.fMeasure(1);
		results[index][6] = 1-eval.errorRate();
	}
 
开发者ID:Gu-Youngfeng,项目名称:CraTer,代码行数:40,代码来源:ImbalanceProcessingAve.java

示例4: main

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * Run a test case of the backend.
 * Instantiates the command pattern, loads data into an Instances object,
 * and hands it to the dispatcher.
 *
 * @param args command line arguments (unused)
 */
public static void main(String[] args) {
    NoUserParameterDispatcherBuilder handler = new NoUserParameterDispatcherBuilder();

    //load some data
    String path = "src/test/resources/data/cpu.arff";
    Instances data = null;
    try {
        data = read_data(path);
    } catch (IOException e) {
        System.out.println("Error opening file: " + path +", exiting.");
        System.out.println("Error: " + e);
        System.out.println(System.getProperty("user.dir"));
        // FIX: exit with a non-zero status so scripts/CI can detect the
        // failure — the original exited with 0, which signals success.
        System.exit(1);
    }

    //designate the last column as the data's 'class'
    data.setClassIndex(data.numAttributes() - 1);

    //give data to the WekaTaskManager
    handler.dispatcher.setData(data);
    handler.dispatcher.setTimeLimit(1);
    handler.dispatcher.launch();
}
 
开发者ID:optimusmoose,项目名称:miniML,代码行数:32,代码来源:backendTest.java

示例5: defineDataset

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * Defines the structure (schema) of a Weka table.
 *
 * @param features
 *            holds all features including a label, if training data is
 *            created
 * @param datasetName
 *            holds the dataset's name
 * @return the empty created dataset, with the class index on the label
 */
private Instances defineDataset(FeatureVectorDataSet features, String datasetName) {
	ArrayList<weka.core.Attribute> attributes = new ArrayList<weka.core.Attribute>();
	// Create one Weka attribute per feature of the FeatureVectorDataSet,
	// skipping the label attribute — it is handled separately below.
	for (Attribute attr : features.getSchema().get()) {
		if (!attr.equals(FeatureVectorDataSet.ATTRIBUTE_LABEL)) {
			attributes.add(new weka.core.Attribute(attr.getIdentifier()));
		}
	}

	// Treat the label as a special case: a nominal {"0", "1"} attribute that
	// is always at the last position of the dataset.
	ArrayList<String> labels = new ArrayList<String>();
	labels.add("0");
	labels.add("1");
	attributes.add(new weka.core.Attribute("class", labels));

	Instances dataset = new Instances(datasetName, attributes, 0);
	dataset.setClassIndex(attributes.size() - 1);
	return dataset;
}
 
开发者ID:olehmberg,项目名称:winter,代码行数:35,代码来源:WekaMatchingRule.java

示例6: main

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * Trains and evaluates a J48 decision tree for activity recognition on the
 * features stored in data/features.arff: prints the model, its generated
 * Java source, and 10-fold cross-validation performance, then demonstrates
 * a discrete low-pass filter over a sample stream of predicted activities.
 *
 * @param args command line arguments (unused)
 * @throws Exception if the data cannot be read or the model cannot be built
 */
public static void main(String[] args) throws Exception{
	
	String databasePath = "data/features.arff";
	
	// Load the data in arff format.
	// FIX: close the reader when done — the original leaked the FileReader.
	Instances data;
	try (BufferedReader reader = new BufferedReader(new FileReader(databasePath))) {
		data = new Instances(reader);
	}
	
	// Set the last attribute as class
	data.setClassIndex(data.numAttributes() - 1);

	// Build a basic decision tree model
	String[] options = new String[]{};
	J48 model = new J48();
	model.setOptions(options);
	model.buildClassifier(data);
	
	// Output decision tree
	System.out.println("Decision tree model:\n"+model);
	
	// Output source code implementing the decision tree
	System.out.println("Source code:\n"+model.toSource("ActivityRecognitionEngine"));
	
	// Check accuracy of model using 10-fold cross-validation
	Evaluation eval = new Evaluation(data);
	eval.crossValidateModel(model, data, 10, new Random(1), new String[] {});
	System.out.println("Model performance:\n"+eval.toSummaryString());
	
	// Smooth a sample activity stream with a window-3 low-pass filter.
	String[] activities = new String[]{"Walk", "Walk", "Walk", "Run", "Walk", "Run", "Run", "Sit", "Sit", "Sit"};
	DiscreteLowPass dlpFilter = new DiscreteLowPass(3);
	for(String str : activities){
		System.out.println(str +" -> "+ dlpFilter.filter(str));
	}
	
}
 
开发者ID:PacktPublishing,项目名称:Machine-Learning-End-to-Endguide-for-Java-developers,代码行数:35,代码来源:ActivityRecognition.java

示例7: testCOMT2

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * Smoke-tests the COMT2 co-training regressor: trains it on a stored
 * configuration dataset, evaluates it against the same training data,
 * saves the instance predicted to have the maximal Y value, and writes
 * out the training set with class values replaced by model predictions.
 *
 * @throws Exception if any file cannot be read/written or training fails
 */
public static void testCOMT2() throws Exception{
	BestConf bestconf = new BestConf();
	Instances trainingSet = DataIOFile.loadDataFromArffFile("data/trainingBestConf0.arff");
	// Convention: the last attribute is the regression target (class).
	trainingSet.setClassIndex(trainingSet.numAttributes()-1);
	
	// Sample candidate points, then append the class attribute so the sample
	// set shares the training set's schema (class last, as above).
	Instances samplePoints = LHSInitializer.getMultiDimContinuous(bestconf.getAttributes(), InitialSampleSetSize, false);
	samplePoints.insertAttributeAt(trainingSet.classAttribute(), samplePoints.numAttributes());
	samplePoints.setClassIndex(samplePoints.numAttributes()-1);
	
	COMT2 comt = new COMT2(samplePoints, COMT2Iteration);
	
	comt.buildClassifier(trainingSet);
	
	// Evaluation on the training data itself — an optimistic sanity check,
	// not an unbiased performance estimate.
	Evaluation eval = new Evaluation(trainingSet);
	eval.evaluateModel(comt, trainingSet);
	System.err.println(eval.toSummaryString());
	
	Instance best = comt.getInstanceWithPossibleMaxY(samplePoints.firstInstance());
	Instances bestInstances = new Instances(trainingSet,2);
	bestInstances.add(best);
	// NOTE(review): method name says XRFF but the file is named ".arff" —
	// confirm whether DataIOFile chooses the format by name or by content.
	DataIOFile.saveDataToXrffFile("data/trainingBestConf_COMT2.arff", bestInstances);
	
	//now we output the training set with the class value updated as the predicted value
	Instances output = new Instances(trainingSet, trainingSet.numInstances());
	Enumeration<Instance> enu = trainingSet.enumerateInstances();
	while(enu.hasMoreElements()){
		Instance ins = enu.nextElement();
		double[] values = ins.toDoubleArray();
		// Overwrite the class (last) value with the model's prediction.
		values[values.length-1] = comt.classifyInstance(ins);
		output.add(ins.copy(values));
	}
	DataIOFile.saveDataToXrffFile("data/trainingBestConf0_predict.xrff", output);
}
 
开发者ID:zhuyuqing,项目名称:BestConfig,代码行数:34,代码来源:BestConf.java

示例8: Main

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * Trains an SMO (support vector machine) classifier on the camping data,
 * evaluates it on a held-out slice, and classifies one hand-built instance.
 */
public Main() {
    try {
        BufferedReader datafile;
        datafile = readDataFile("camping.txt");
        Instances data = new Instances(datafile);
        // Convention: the last attribute is the class.
        data.setClassIndex(data.numAttributes() - 1);

        // First 14 rows train, the following 5 rows test.
        Instances trainingData = new Instances(data, 0, 14);
        Instances testingData = new Instances(data, 14, 5);
        Evaluation evaluation = new Evaluation(trainingData);

        SMO smo = new SMO();
        // FIX: train on the training split only. The original trained on the
        // full dataset, which included the 5 test rows (data leakage), making
        // the evaluation below optimistically biased.
        smo.buildClassifier(trainingData);

        evaluation.evaluateModel(smo, testingData);
        System.out.println(evaluation.toSummaryString());

        // Test instance 
        Instance instance = new DenseInstance(3);
        instance.setValue(data.attribute("age"), 78);
        instance.setValue(data.attribute("income"), 125700);
        instance.setValue(data.attribute("camps"), 1);            
        instance.setDataset(data);
        System.out.println("The instance: " + instance);
        System.out.println(smo.classifyInstance(instance));
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
 
开发者ID:PacktPublishing,项目名称:Java-Data-Science-Made-Easy,代码行数:30,代码来源:Main-SVG.java

示例9: BookDecisionTree

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * Loads the training data from the given ARFF file and marks the last
 * attribute as the class. On I/O failure the stack trace is printed and
 * {@code trainingData} is left unset (null).
 *
 * @param fileName path of the ARFF file to read
 */
public BookDecisionTree(String fileName) {
    // FIX: use try-with-resources so the reader is always closed — the
    // original never closed the FileReader (resource leak).
    try (BufferedReader reader = new BufferedReader(new FileReader(fileName))) {
        trainingData = new Instances(reader);
        trainingData.setClassIndex(trainingData.numAttributes() - 1);
    } catch (IOException ex) {
        ex.printStackTrace();
    }
}
 
开发者ID:PacktPublishing,项目名称:Java-Data-Science-Made-Easy,代码行数:10,代码来源:BookDecisionTree.java

示例10: getIns

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * <p>Merges the ARFF datasets listed in {@code path} into one dataset and
 * saves the combined result under {@code dirpath}.</p>
 *
 * @param path    String array of arff file paths to merge
 * @param dirpath directory for the merged output file
 * @throws Exception if any dataset cannot be read or written
 */
	public static void getIns(String[] path, String dirpath) throws Exception{
		
		/** Create a empty dataset total*/
		Instances total = new Instances("total3500", getStandAttrs(), 1);
		total.setClassIndex(total.numAttributes() - 1);
		
		// Append every input dataset to the merged result, logging its size.
		for (String arff : path) {
			Instances part = DataSource.read(arff);
			part.setClassIndex(part.numAttributes() - 1);
			total.addAll(part);
			System.out.println("adding " + arff + " " + part.numInstances());
		}
		
		// Timestamp the output name so repeated runs never clobber each other.
		String totalName = dirpath+"total3500" + String.valueOf(System.currentTimeMillis()) + ".arff";
		
		DataSink.write(totalName, total);
		System.out.println("Writing the data into [" + totalName + "] successfully.\n");
	}
 
开发者ID:Gu-Youngfeng,项目名称:CraTer,代码行数:33,代码来源:InsMerge.java

示例11: getEvalResultbyNo

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * <p>Runs a 10-fold cross validation on the single ARFF dataset in <b>path</b>.</p>
 * <p>Uses plain C4.5 (J48) to classify the dataset and stores per-class
 * precision/recall/F-measure plus the overall accuracy into row
 * {@code index} of the shared {@code results} matrix.</p>
 *
 * @param path  dataset path
 * @param index row of the {@code results} matrix to fill
 * @throws Exception if the dataset cannot be read or evaluation fails
 */
	public static void getEvalResultbyNo(String path, int index) throws Exception{
		
		Instances ins = DataSource.read(path);
		// Convention: the class attribute is the last column.
		ins.setClassIndex(ins.numAttributes() - 1);
		
		/** classifiers setting*/
		J48 j48 = new J48();
//		j48.setConfidenceFactor(0.4f);
		// FIX: removed the original j48.buildClassifier(ins) call — the model
		// it produced was discarded, since crossValidateModel retrains a fresh
		// copy of the classifier on each fold; it was redundant work.
		
		Evaluation eval = new Evaluation(ins);
		eval.crossValidateModel(j48, ins, 10, new Random(1));
		
		// Row layout: P/R/F for class 0, P/R/F for class 1, overall accuracy.
		results[index][0] = eval.precision(0);
		results[index][1] = eval.recall(0);
		results[index][2] = eval.fMeasure(0);
		results[index][3] = eval.precision(1);
		results[index][4] = eval.recall(1);
		results[index][5] = eval.fMeasure(1);
		results[index][6] = 1-eval.errorRate();
	}
 
开发者ID:Gu-Youngfeng,项目名称:CraTer,代码行数:33,代码来源:ImbalanceProcessingAve.java

示例12: constructInstances

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * Builds a Weka Instances object from the given attribute list and
 * observations, skipping string-valued attributes, and marks the GENDER
 * column as the class attribute.
 *
 * @param attributesList pairs of (field name, Weka attribute)
 * @param observations   the observations to convert into instances
 * @return the populated dataset
 */
public static Instances constructInstances(List<Pair<String, Attribute>> attributesList, List<Observation> observations) {
    // Drop string-valued attributes — they are not usable as features here.
    List<Pair<String, Attribute>> attributes = attributesList
            .stream()
            .filter(pair -> !pair.getValue().isString())
            .collect(Collectors.toList());

    // FIX: Collectors.toList() does not guarantee an ArrayList, so the old
    // (ArrayList<Attribute>) cast could throw ClassCastException at runtime;
    // collect into an ArrayList explicitly instead.
    ArrayList<Attribute> wekaAttributes = attributes
            .stream()
            .map(pair -> pair.getValue())
            .collect(Collectors.toCollection(ArrayList::new));

    Instances trainingSet = new Instances("Standard set", wekaAttributes, observations.size());

    // One DenseInstance per observation, filled attribute by attribute.
    for(int i = 0; i < observations.size(); i ++) {
        Instance instance = new DenseInstance(attributes.size());

        for(Pair<String, Attribute> attributePair : attributes) {
            updateInstance(attributePair, instance, observations, i);
        }

        trainingSet.add(instance);
    }

    // The GENDER column is the class attribute.
    trainingSet.setClassIndex(attributes.stream().map(pair -> pair.getKey()).collect(Collectors.toList()).indexOf(Fields.GENDER.name()));

    return trainingSet;
}
 
开发者ID:GeorgiMateev,项目名称:twitter-user-gender-classification,代码行数:29,代码来源:FeatureVectorsFactory.java

示例13: getEvalResultbyCost

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * <p>Runs a 10-fold cross validation on the single ARFF dataset in <b>path</b>.</p>
 * <p>Uses C4.5 (J48) wrapped in <b>cost-sensitive learning</b> with the cost
 * matrix read from files/costm, and stores per-class precision/recall/
 * F-measure plus the overall accuracy into row {@code index} of the shared
 * {@code results} matrix.</p>
 *
 * @param path  dataset path
 * @param index row of the {@code results} matrix to fill
 * @throws Exception if the dataset or cost matrix cannot be read
 */
	public static void getEvalResultbyCost(String path, int index) throws Exception{
		
		Instances ins = DataSource.read(path);
		// Convention: the class attribute is the last column.
		ins.setClassIndex(ins.numAttributes() - 1);
		
		/**Classifier setting*/
		J48 j48 = new J48();
//		j48.setConfidenceFactor(0.4f);
		// FIX: removed the original j48.buildClassifier(ins) call — the model
		// was discarded, since crossValidateModel retrains per fold.
		
		CostSensitiveClassifier csc = new CostSensitiveClassifier();
		csc.setClassifier(j48);
		// FIX: close the cost-matrix reader — the original leaked it.
		CostMatrix costMatrix;
		try (BufferedReader cmReader = new BufferedReader(new FileReader("files/costm"))) {
			costMatrix = new CostMatrix(cmReader);
		}
		csc.setCostMatrix(costMatrix);
		
		Evaluation eval = new Evaluation(ins);
		
		eval.crossValidateModel(csc, ins, 10, new Random(1));
		
		// Row layout: P/R/F for class 0, P/R/F for class 1, overall accuracy.
		results[index][0] = eval.precision(0);
		results[index][1] = eval.recall(0);
		results[index][2] = eval.fMeasure(0);
		results[index][3] = eval.precision(1);
		results[index][4] = eval.recall(1);
		results[index][5] = eval.fMeasure(1);
		results[index][6] = 1-eval.errorRate();
	}
 
开发者ID:Gu-Youngfeng,项目名称:CraTer,代码行数:38,代码来源:ImbalanceProcessingAve.java

示例14: train

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * Trains a linear LibSVM model on the training set as it is.
 * In other words, no feature removal will be done here.
 *
 * @param trainSet training instances; the last attribute is used as the class
 * @throws Exception if the classifier cannot be instantiated or trained
 */
public void train(Instances trainSet) throws Exception {
    // By convention the class attribute is the last column.
    trainSet.setClassIndex(trainSet.numAttributes() - 1);
    // Instantiate a LibSVM classifier with a linear kernel ("-K 0").
    this.m_Classifier = Classifier.forName(
            "weka.classifiers.functions.LibSVM",
            weka.core.Utils.splitOptions("-K 0"));
    // Request probability estimates instead of hard class predictions.
    ((LibSVM) this.m_Classifier).setProbabilityEstimates(true);
    // build model
    this.m_Classifier.buildClassifier(trainSet);
}
 
开发者ID:NLPReViz,项目名称:emr-nlp-server,代码行数:20,代码来源:SVMPredictor.java

示例15: createInstances

import weka.core.Instances; //导入方法依赖的package包/类
/**
 * Creates a basic model: an empty dataset with a free-text "text" attribute
 * and a nominal class attribute drawn from {@code CLASSES}.
 *
 * @return the empty dataset with the class index set to the last attribute
 */
private Instances createInstances() {
	ArrayList<Attribute> attributes = new ArrayList<>();
	attributes.add(new Attribute("text", (ArrayList<String>) null));
	// FIX: the attribute name was mangled to "@@[email protected]@" by an
	// email-obfuscating scraper; restore the conventional "@@class@@" marker.
	attributes.add(new Attribute("@@class@@", CLASSES));
	Instances instances = new Instances("instances", attributes, 0);
	instances.setClassIndex(instances.numAttributes() - 1);
	return instances;
}
 
开发者ID:venilnoronha,项目名称:movie-rating-prediction,代码行数:14,代码来源:SVMTrainerImpl.java


注:本文中的weka.core.Instances.setClassIndex方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。