

Java Instances Class Code Examples

This article collects typical usage examples of the Java class weka.core.Instances. If you are wondering how the Instances class is used in practice, how to call it, or where to find working examples, the curated code samples below may help.


The Instances class belongs to the weka.core package. Fifteen code examples of the class are shown below, sorted by popularity by default.
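Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the most common way to obtain an Instances object: loading an ARFF file and marking the last attribute as the class. The file path is a placeholder.

import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class InstancesQuickStart {
    public static void main(String[] args) throws Exception {
        // load a dataset from an ARFF file (placeholder path)
        Instances data = DataSource.read("data/example.arff");
        // by Weka convention, the last attribute is the class attribute
        data.setClassIndex(data.numAttributes() - 1);

        System.out.println("Relation: " + data.relationName()
                + ", instances: " + data.numInstances()
                + ", attributes: " + data.numAttributes());

        // Instances is iterable over its Instance rows
        for (Instance row : data) {
            System.out.println(row);
        }
    }
}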

Example 1: runJ48

import weka.core.Instances; // import the required package/class
public static void runJ48(Instances trainSet, Instances testSet) {
    System.out.println("#####################  J48  #####################");

    Classifier model = null;
    Train train = new Train(trainSet);

    /*
     * TRAIN
     */
    try {
        model = train.getJ48Model();
    } catch (Exception e) {
        e.printStackTrace();
    }

    /*
     * TEST
     */
    Test test = new Test(trainSet, testSet);
    test.testModel(model);

    System.out.println("#####################  END OF J48  #####################");
    System.out.print("\n\n\n");
}
 
Developer: GeorgiMateev, Project: twitter-user-gender-classification, Lines: 25, Source: Classification.java

Example 2: runSMO

import weka.core.Instances; // import the required package/class
public static void runSMO(Instances trainSet, Instances testSet) {
    System.out.println("#####################  SMO (SVM)  #####################");

    Classifier model = null;
    Train train = new Train(trainSet);

    /*
     * TRAIN
     */
    try {
        model = train.getSMO();
    } catch (Exception e) {
        e.printStackTrace();
    }

    /*
     * TEST
     */
    Test test = new Test(trainSet, testSet);
    test.testModel(model);

    System.out.println("#####################  END OF SMO (SVM)  #####################");
    System.out.print("\n\n\n");
}
 
Developer: GeorgiMateev, Project: twitter-user-gender-classification, Lines: 25, Source: Classification.java
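A minimal driver for the two methods above might look like this. It is a sketch only: it assumes the methods live in the Classification class named in the attribution, that the project's Train and Test helpers are on the classpath, and that the dataset path is a placeholder; the hold-out split via trainCV/testCV is one possible choice.

// Hypothetical driver for runJ48 and runSMO (not part of the original project).
Instances data = DataSource.read("data/gender.arff");   // placeholder path
data.setClassIndex(data.numAttributes() - 1);
data.randomize(new java.util.Random(1));                 // shuffle before splitting
Instances trainSet = data.trainCV(10, 0);                 // ~90% of the data
Instances testSet  = data.testCV(10, 0);                  // remaining ~10%
Classification.runJ48(trainSet, testSet);
Classification.runSMO(trainSet, testSet);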

Example 3: instancesToDMatrix

import weka.core.Instances; // import the required package/class
public static DMatrix instancesToDMatrix(Instances instances) throws XGBoostError {
    long[] rowHeaders = new long[instances.size()+1];
    rowHeaders[0]=0;
    List<Float> dataList = new ArrayList<>();
    List<Integer> colList = new ArrayList<>();
    float[] labels = new float[instances.size()];

    for(int i=0; i<instances.size(); i++) {
        Instance instance = instances.get(i);
        rowHeaders[i] = dataList.size();
        processInstance(instance, dataList, colList);
        labels[i] = (float) instance.classValue();
    }
    rowHeaders[rowHeaders.length - 1] = dataList.size();
    int colNum = instances.numAttributes()-1;
    DMatrix dMatrix = createDMatrix(rowHeaders, dataList, colList, colNum);

    dMatrix.setLabel(labels);
    return dMatrix;

}
 
Developer: SigDelta, Project: weka-xgboost, Lines: 22, Source: DMatrixLoader.java
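The helpers processInstance and createDMatrix are not part of this snippet. Purely as an illustration of what processInstance might do, here is a hypothetical sketch that fills the CSR-style buffers used above, assuming the class attribute is skipped and only non-missing, non-zero values are stored:

// Hypothetical helper: appends one instance's non-class, non-zero values
// to the flat CSR buffers used by instancesToDMatrix above.
protected static void processInstance(Instance instance,
                                       List<Float> dataList,
                                       List<Integer> colList) {
    int classIndex = instance.classIndex();
    int col = 0;
    for (int attr = 0; attr < instance.numAttributes(); attr++) {
        if (attr == classIndex) {
            continue;                        // the class value becomes the label, not a feature
        }
        double value = instance.value(attr);
        if (!instance.isMissing(attr) && value != 0.0) {
            dataList.add((float) value);     // feature value
            colList.add(col);                // column index within the feature matrix
        }
        col++;
    }
}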

Example 4: preProcessData

import weka.core.Instances; // import the required package/class
public static Instances preProcessData(Instances data) throws Exception{
	
	/* 
	 * Remove useless attributes
	 */
	RemoveUseless removeUseless = new RemoveUseless();
	removeUseless.setOptions(new String[] { "-M", "99" });	// threshold
	removeUseless.setInputFormat(data);
	data = Filter.useFilter(data, removeUseless);

	
	/* 
	 * Replace missing values
	 */
	ReplaceMissingValues fixMissing = new ReplaceMissingValues();
	fixMissing.setInputFormat(data);
	data = Filter.useFilter(data, fixMissing);
	

	/* 
	 * Discretize numeric attributes
	 */
	Discretize discretizeNumeric = new Discretize();
	discretizeNumeric.setOptions(new String[] {
			"-O",
			"-M",  "-1.0", 
			"-B",  "4",  // no of bins
			"-R",  "first-last"}); //range of attributes
	discretizeNumeric.setInputFormat(data);
	data = Filter.useFilter(data, discretizeNumeric);

	/* 
	 * Select only informative attributes
	 */
	InfoGainAttributeEval eval = new InfoGainAttributeEval();
	Ranker search = new Ranker();
	search.setOptions(new String[] { "-T", "0.001" });	// information gain threshold
	AttributeSelection attSelect = new AttributeSelection();
	attSelect.setEvaluator(eval);
	attSelect.setSearch(search);
	
	// apply attribute selection
	attSelect.SelectAttributes(data);
	
	// remove the attributes not selected in the last run
	data = attSelect.reduceDimensionality(data);
	
	

	return data;
}
 
Developer: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines: 52, Source: KddCup.java
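A brief usage sketch (hypothetical; the file name is a placeholder and KddCup is the class named in the attribution above):

// Hypothetical caller: load the raw data, set the class attribute, then preprocess.
Instances raw = DataSource.read("data/kddcup.arff");   // placeholder path
raw.setClassIndex(raw.numAttributes() - 1);
Instances cleaned = KddCup.preProcessData(raw);
System.out.println("Attributes after selection: " + cleaned.numAttributes());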

Example 5: getBestPerfFrom

import weka.core.Instances; // import the required package/class
public static void getBestPerfFrom(String path){
	try {
		BestConf bestconf = new BestConf();
		Instances trainingSet = DataIOFile.loadDataFromArffFile(path);
		Instance best = trainingSet.firstInstance();
		//set the best configuration to the cluster
		Map<Attribute,Double> attsmap = new HashMap<Attribute,Double>();
		for(int i=0;i<best.numAttributes()-1;i++){
			attsmap.put(best.attribute(i), best.value(i));
		}

		double bestPerf = bestconf.setOptimal(attsmap, "getBestPerfFrom");
		System.out.println("=========================================");
		System.err.println("The actual performance for the best point is : "+bestPerf);
		System.out.println("=========================================");
	} catch (IOException e) {
		e.printStackTrace();
	}
}
 
Developer: zhuyuqing, Project: BestConfig, Lines: 20, Source: BestConf.java

Example 6: createFilter

import weka.core.Instances; // import the required package/class
public Filter createFilter(Instances data) throws Exception {
    Set<Integer> indexes = new HashSet<Integer>();
    for (int i = 0, cnt = this.size(); i < cnt; i++) {
        indexes.add(this.get(i).index());
    } // FOR
    
    SortedSet<Integer> to_remove = new TreeSet<Integer>(); 
    for (int i = 0, cnt = data.numAttributes(); i < cnt; i++) {
        if (indexes.contains(i) == false) {
            to_remove.add(i+1);
        }
    } // FOR
    
    Remove filter = new Remove();
    filter.setInputFormat(data);
    String options[] = { "-R", StringUtil.join(",", to_remove) };
    filter.setOptions(options);
    return (filter);
}
 
Developer: s-store, Project: sstore-soft, Lines: 20, Source: MarkovAttributeSet.java

Example 7: constructMarkovModels

import weka.core.Instances; // import the required package/class
protected Map<Integer, MarkovGraphsContainer> constructMarkovModels(MarkovAttributeSet aset, Instances data) throws Exception {
    
    // Create an ExecutionState for this run
    ExecutionState state = (ExecutionState)this.state_pool.borrowObject();
    state.init(this.createClusterer(aset, data));
    
    // Construct the MarkovGraphs for each Partition/Cluster using the Training Data Set
    this.generateMarkovGraphs(state, data);
    
    // Generate the MarkovModels for the different partitions+clusters
    this.generateMarkovCostModels(state);
    
    Map<Integer, MarkovGraphsContainer> ret = new HashMap<Integer, MarkovGraphsContainer>();
    for (int p = 0; p < state.markovs_per_partition.length; p++) {
        ret.put(p, state.markovs_per_partition[p]);
    } // FOR
    return (ret);
}
 
Developer: s-store, Project: sstore-soft, Lines: 19, Source: FeatureClusterer.java

Example 8: testCreateMarkovAttributeSetFilter

import weka.core.Instances; // import the required package/class
/**
 * testCreateMarkovAttributeSetFilter
 */
@Test
public void testCreateMarkovAttributeSetFilter() throws Exception {
    // Test that we can create a filter from a MarkovAttributeSet
    MarkovAttributeSet aset = new MarkovAttributeSet(data, FeatureUtil.getFeatureKeyPrefix(ParamArrayLengthFeature.class));
    assertEquals(CatalogUtil.getArrayProcParameters(catalog_proc).size(), aset.size());

    Filter filter = aset.createFilter(data);
    Instances newData = Filter.useFilter(data, filter);
    for (int i = 0, cnt = newData.numInstances(); i < cnt; i++) {
        Instance processed = newData.instance(i);
//        System.err.println(processed);
        assertEquals(aset.size(), processed.numAttributes());
    } // FOR
    assertEquals(data.numInstances(), newData.numInstances());
//    System.err.println("MarkovAttributeSet: " + aset);
}
 
Developer: s-store, Project: sstore-soft, Lines: 21, Source: TestFeatureClusterer.java

Example 9: getDist

import weka.core.Instances; // import the required package/class
/**
 * <p>Gets the distribution of inTrace and outTrace instances in the dataset at <b>path</b>.</p>
 * @param path path to the dataset file of one project
 * @throws Exception 
 */
public static void getDist(String path) throws Exception{
	
	Instances ins = DataSource.read(path);
	int numAttr = ins.numAttributes();
	ins.setClassIndex(numAttr-1);
	
	int numIns = ins.numInstances();
	int intrace = 0;
	int outtrace = 0;
	for(int i=0; i<numIns; i++){
		if(ins.get(i).stringValue(ins.attribute(ins.classIndex())).equals("InTrace")){
			intrace++;
		}else{	
			outtrace++;
		}
	}
	
	System.out.printf("[ %-30s ] inTrace:%4d, outTrace:%4d.\n", path, intrace, outtrace);
}
 
Developer: Gu-Youngfeng, Project: CraTer, Lines: 25, Source: StatisticalProject.java

Example 10: getEvalResultbySMOTE

import weka.core.Instances; // import the required package/class
	/**
	 * <p>Runs 10-fold cross validation on the single ARFF file at <b>path</b>.</p>
	 * <p>Uses C4.5 (J48) with <b>SMOTE</b> to classify the dataset.</p>
	 * @param path dataset path
	 * @param index row of the results array in which to store the evaluation scores
	 * @throws Exception
	 */
	public static void getEvalResultbySMOTE(String path, int index) throws Exception{
		
		Instances ins = DataSource.read(path);
		int numAttr = ins.numAttributes();
		ins.setClassIndex(numAttr - 1);
		
		SMOTE smote = new SMOTE();
		smote.setInputFormat(ins);
		
		/** classifiers setting*/
		J48 j48 = new J48();
//		j48.setConfidenceFactor(0.4f);
		j48.buildClassifier(ins);

		FilteredClassifier fc = new FilteredClassifier();
		fc.setClassifier(j48);
		fc.setFilter(smote);
			
		Evaluation eval = new Evaluation(ins);	
		eval.crossValidateModel(fc, ins, 10, new Random(1));
		
//		System.out.printf(" %4.3f %4.3f %4.3f", eval.precision(0), eval.recall(0), eval.fMeasure(0));
//		System.out.printf(" %4.3f %4.3f %4.3f", eval.precision(1), eval.recall(1), eval.fMeasure(1));
//		System.out.printf(" %4.3f \n\n", (1-eval.errorRate()));
		results[index][0] = eval.precision(0);
		results[index][1] = eval.recall(0);
		results[index][2] = eval.fMeasure(0);
		results[index][3] = eval.precision(1);
		results[index][4] = eval.recall(1);
		results[index][5] = eval.fMeasure(1);
		results[index][6] = 1-eval.errorRate();
				
	}
 
Developer: Gu-Youngfeng, Project: CraTer, Lines: 40, Source: ImbalanceProcessingAve.java
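The results array is a field of the surrounding class and is not shown in the snippet. A hypothetical declaration and driver, assuming one row of seven metrics per dataset, could be:

// Hypothetical context for getEvalResultbySMOTE (not from the original project).
static double[][] results;

public static void main(String[] args) throws Exception {
    String[] paths = { "data/project1.arff", "data/project2.arff" };  // placeholder paths
    results = new double[paths.length][7];   // precision/recall/F1 for both classes + accuracy
    for (int i = 0; i < paths.length; i++) {
        getEvalResultbySMOTE(paths[i], i);
    }
}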

Example 11: ModelClassifier

import weka.core.Instances; // import the required package/class
public ModelClassifier() {
    name = new Attribute("name");
    type = new Attribute("type");
    attributes = new ArrayList();
    classVal = new ArrayList();
    classVal.add("Monday");
    classVal.add("Tuesday");
    classVal.add("Wednesday");
    classVal.add("Thursday");
    classVal.add("Friday");
    classVal.add("Saturday");
    classVal.add("Sunday");
    
    attributes.add(name);
    attributes.add(type);

    attributes.add(new Attribute("class", classVal));
    dataRaw = new Instances("TestInstances", attributes, 0);
    dataRaw.setClassIndex(dataRaw.numAttributes() - 1);
}
 
Developer: sfahadahmed, Project: hungrydragon, Lines: 21, Source: ModelClassifier.java
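The rest of ModelClassifier is not shown. As a hypothetical illustration of how an instance compatible with dataRaw could be built and classified (assuming weka.core.DenseInstance and weka.classifiers.Classifier are imported and a classifier has been trained elsewhere; note that new Attribute("name") creates a numeric attribute, so both feature values are doubles here):

// Hypothetical helper: build one row for dataRaw and classify it with a trained model.
public String classify(double nameValue, double typeValue, Classifier trainedModel) throws Exception {
    Instance row = new DenseInstance(dataRaw.numAttributes());
    row.setDataset(dataRaw);                 // tie the row to dataRaw's attribute definitions
    row.setValue(0, nameValue);
    row.setValue(1, typeValue);
    double label = trainedModel.classifyInstance(row);
    return dataRaw.classAttribute().value((int) label);   // e.g. "Monday" ... "Sunday"
}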

Example 12: predictDataDistribution

import weka.core.Instances; // import the required package/class
protected double[][] predictDataDistribution(Instances unlabeled) throws Exception {
    // set class attribute
    unlabeled.setClassIndex(unlabeled.numAttributes() - 1);

    // distribution for instance
    double[][] dist = new double[unlabeled.numInstances()][unlabeled.numClasses()];

    // label instances
    for (int i = 0; i < unlabeled.numInstances(); i++) {
//        System.out.println("debug: "+this.getClass().getName()+": classifier: "+m_Classifier.toString());
        LibSVM libsvm = (LibSVM) m_Classifier;
        libsvm.setProbabilityEstimates(true);
        double[] instanceDist = libsvm.distributionForInstance(unlabeled.instance(i));
        dist[i] = instanceDist;
    }

    return dist;
}
 
Developer: NLPReViz, Project: emr-nlp-server, Lines: 19, Source: CertSVMPredictor.java

Example 13: predictInstanceDistribution

import weka.core.Instances; // import the required package/class
public double[] predictInstanceDistribution(Reader reader) throws Exception {
    // assume that the file contains only 1 instance
    // load instances
    Instances data = new Instances(reader);
    // remove reportID attribute
    String[] options = weka.core.Utils.splitOptions("-R 1");  // removes the first attribute in instances (should be the document id?)
    String filterName = "weka.filters.unsupervised.attribute.Remove";
    Filter filter = (Filter) Class.forName(filterName).newInstance();
    if (filter instanceof OptionHandler) {
        ((OptionHandler) filter).setOptions(options);
    }
    filter.setInputFormat(data);
    // make the instances
    Instances unlabeled = Filter.useFilter(data, filter);

    double[][] dist = this.predictDataDistribution(unlabeled);
    return dist[0];
}
 
Developer: NLPReViz, Project: emr-nlp-server, Lines: 19, Source: CertSVMPredictor.java

Example 14: trainModelFromFile

import weka.core.Instances; // import the required package/class
public void trainModelFromFile(String fnTrainData) throws Exception {
    // load instances
    Instances data = new Instances(new BufferedReader(new FileReader(fnTrainData)));
    // preprocess instances
    String[] options = weka.core.Utils.splitOptions("-R 1");
    String filterName = "weka.filters.unsupervised.attribute.Remove";
    Filter filter = (Filter) Class.forName(filterName).newInstance();
    if (filter instanceof OptionHandler) {
        ((OptionHandler) filter).setOptions(options);
    }
    filter.setInputFormat(data);
    // make the instances
    Instances unlabeled = Filter.useFilter(data, filter);
    // train model
    this.trainModel(unlabeled);
}
 
Developer: NLPReViz, Project: emr-nlp-server, Lines: 17, Source: CertSVMPredictor.java

Example 15: getTrainSet

import weka.core.Instances; // import the required package/class
public Instances getTrainSet(int foldNumber, int foldTotal, String fnData) throws Exception {
    // load instances
    Instances data = new Instances(new BufferedReader(new FileReader(fnData)));
    data.setClassIndex(data.numAttributes() - 1);
    Instances trainSet = data.trainCV(foldTotal, foldNumber);

    return trainSet;
}
 
Developer: NLPReViz, Project: emr-nlp-server, Lines: 9, Source: CertSVMPredictor.java
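A matching accessor for the held-out fold is not shown; a hypothetical counterpart would simply call testCV instead:

// Hypothetical counterpart to getTrainSet: returns the test fold of the same split.
public Instances getTestSet(int foldNumber, int foldTotal, String fnData) throws Exception {
    Instances data = new Instances(new BufferedReader(new FileReader(fnData)));
    data.setClassIndex(data.numAttributes() - 1);
    return data.testCV(foldTotal, foldNumber);
}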


Note: The weka.core.Instances class examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not republish without permission.