

Java Instances.instance Method Code Examples

This article collects typical usage examples of the Java method weka.core.Instances.instance. If you are asking yourself how Instances.instance works, what its exact usage is, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of its enclosing class, weka.core.Instances.


Nine code examples of Instances.instance are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
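
Before the examples, here is a minimal, self-contained sketch of the pattern they all share: load a dataset, then access individual rows with Instances.instance(int). This sketch is illustrative only and is not taken from any of the projects below; the file name data.arff is a placeholder.

import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class InstanceAccessSketch {
    public static void main(String[] args) throws Exception {
        // Load a dataset from an ARFF file (the path is a placeholder)
        Instances data = DataSource.read("data.arff");
        // Instances.instance(i) returns the i-th row as an Instance
        for (int i = 0; i < data.numInstances(); i++) {
            Instance row = data.instance(i);
            System.out.println(i + ": " + row);
        }
    }
}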

Example 1: testCreateMarkovAttributeSetFilter

import weka.core.Instances; // import the package/class the method depends on
/**
 * testCreateMarkovAttributeSetFilter
 */
@Test
public void testCreateMarkovAttributeSetFilter() throws Exception {
    // Test that we can create a filter from a MarkovAttributeSet
    MarkovAttributeSet aset = new MarkovAttributeSet(data, FeatureUtil.getFeatureKeyPrefix(ParamArrayLengthFeature.class));
    assertEquals(CatalogUtil.getArrayProcParameters(catalog_proc).size(), aset.size());

    Filter filter = aset.createFilter(data);
    Instances newData = Filter.useFilter(data, filter);
    for (int i = 0, cnt = newData.numInstances(); i < cnt; i++) {
        Instance processed = newData.instance(i);
        // Every filtered instance should contain exactly the attributes in the set
        assertEquals(aset.size(), processed.numAttributes());
    } // FOR
    assertEquals(data.numInstances(), newData.numInstances());
}
 
Author: s-store, Project: sstore-soft, Lines: 21, Source: TestFeatureClusterer.java
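
Example 1 relies on the standard Weka filtering convention: bind a Filter to the input format with setInputFormat, run Filter.useFilter to get a new Instances object, and access the filtered rows with instance(int). The sketch below shows the same convention with the stock Remove filter standing in for the project-specific MarkovAttributeSet filter; the ARFF path is a placeholder.

import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Remove;

public class FilterSketch {
    public static void main(String[] args) throws Exception {
        Instances data = DataSource.read("data.arff"); // placeholder path
        Remove remove = new Remove();
        remove.setAttributeIndices("1");   // drop the first attribute
        remove.setInputFormat(data);       // bind the filter to the input format
        Instances newData = Filter.useFilter(data, remove);
        for (int i = 0; i < newData.numInstances(); i++) {
            // each filtered row has one attribute fewer than the original
            System.out.println(newData.instance(i).numAttributes());
        }
    }
}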

Example 2: distributionsForInstances

import weka.core.Instances; // import the package/class the method depends on
@Override
public double[][] distributionsForInstances(Instances batch) {

	double[][] dists = new double[batch.numInstances()][2];
	for (int i = 0; i < batch.numInstances(); i++) {
		Instance ins = batch.instance(i);
		// slot 1 holds the ranking score; slot 0 is left at 0.0
		dists[i][1] = this.scoreInstance(ins);
	}

	return dists;
}
 
Author: UKPLab, Project: ijcnlp2017-cmaps, Lines: 13, Source: RankingSVM.java
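
The double[2] returned for each row in Example 2 is a two-slot distribution whose second slot carries the ranking score (the first slot stays 0.0). A caller would consume it roughly as follows; this is a sketch that assumes a scorer object svm and a batch with the distributionsForInstances contract shown above.

// Sketch: consume the per-instance distributions produced above.
// 'svm' and 'batch' are assumed to exist with the types used in Example 2.
double[][] dists = svm.distributionsForInstances(batch);
for (int i = 0; i < batch.numInstances(); i++) {
    double score = dists[i][1];                      // ranking score for row i
    int label = dists[i][1] > dists[i][0] ? 1 : 0;   // hard label via argmax
    System.out.println(batch.instance(i) + " -> " + score + " (label " + label + ")");
}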

Example 3: generateDecisionTree

import weka.core.Instances; // import the package/class the method depends on
protected Classifier generateDecisionTree(AbstractClusterer clusterer, MarkovAttributeSet aset, Instances data) throws Exception {
    // We need to create a new Attribute that has the ClusterId
    Instances newData = data; // new Instances(data);
    newData.insertAttributeAt(new Attribute("ClusterId"), newData.numAttributes());
    Attribute cluster_attr = newData.attribute(newData.numAttributes()-1);
    assert(cluster_attr != null);
    assert(cluster_attr.index() > 0);
    newData.setClass(cluster_attr);
    
    // We will then tell the Classifier to predict that ClusterId based on the MarkovAttributeSet
    ObjectHistogram<Integer> cluster_h = new ObjectHistogram<Integer>();
    for (int i = 0, cnt = newData.numInstances(); i < cnt; i++) {
        // Grab the Instance and throw it at the clusterer to get the target cluster
        Instance inst = newData.instance(i);
        int c = (int)clusterer.clusterInstance(inst);
        inst.setClassValue(c);
        cluster_h.put(c);
    } // FOR
    System.err.println("Number of Elements: " + cluster_h.getValueCount());
    System.err.println(cluster_h);

    NumericToNominal filter = new NumericToNominal();
    filter.setInputFormat(newData);
    newData = Filter.useFilter(newData, filter);
    
    String output = this.catalog_proc.getName() + "-labeled.arff";
    FileUtil.writeStringToFile(output, newData.toString());
    LOG.info("Wrote labeled data set to " + output);
    
    // Decision Tree
    J48 j48 = new J48();
    String[] options = {
        "-S", Integer.toString(this.rand.nextInt()),
    };
    j48.setOptions(options);

    // Make sure we add the ClusterId attribute to a new MarkovAttributeSet so that
    // we can tell the Classifier to predict it!
    FilteredClassifier fc = new FilteredClassifier();
    MarkovAttributeSet classifier_aset = new MarkovAttributeSet(aset);
    classifier_aset.add(cluster_attr);
    fc.setFilter(classifier_aset.createFilter(newData));
    fc.setClassifier(j48);
    
    // Bombs away!
    fc.buildClassifier(newData);
    
    return (fc);
}
 
Author: s-store, Project: sstore-soft, Lines: 51, Source: FeatureClusterer.java

Example 4: updateModels

import weka.core.Instances; // import the package/class the method depends on
/**
     * Train a model and create the feature weights.
     * This implementation treats each highlight-span feedback item as a "normal"
     * feedback instance, so the instanceDS and feedbackDS sets are merged into a
     * single training set for the new model.
     * 
     * @param sessionID
     * @param userID
     * @param varID
     * @throws Exception 
     */
    public void updateModels(String sessionID, String userID, String varID,
            SVMPredictor model) throws Exception {
        // if the model exists, do nothing
        String fn_model = getModelFileName(sessionID, userID, varID);
//        HashMap<String, Double> predictorFeatureWeightMap;
        
        if(! Util.fileExists(fn_model)) {
//            attrNameToIndexMap.put(varID, attrNameToIndexMap.size());
            
            // merge the 2 instance sets
            String fn_instanceDS = getInstanceDSFileName(sessionID, userID, varID);
            String fn_feedbackDS = getFeedbackDSFileName(sessionID, userID, varID);
            if (!Util.fileExists(fn_instanceDS)) {
                throw new UnsupportedOperationException("Training data set does not exist. "
                        + "Create the (instance) variable value data set for "
                        + fn_model + " before retraining it");
            }
            if (!Util.fileExists(fn_feedbackDS)) {
                throw new UnsupportedOperationException("Training data set does not exist. "
                        + "Create the (feedback) highlight span data set for "
                        + fn_model + " before retraining it");
            }
            
            Instances instanceDS = loadInstancesFromFile(fn_instanceDS);
            Instances feedbackDS = loadInstancesFromFile(fn_feedbackDS);
            for(int i = 0; i < feedbackDS.numInstances(); i++) {
                Instance feedbackInstance = feedbackDS.instance(i);
                instanceDS.add(feedbackInstance);
            }
            
            // train the model
            model.train((Object)instanceDS);
            
            // save model
            model.saveModel(fn_model);
//            predictors.add(model);
            
            // save feature weight + keyword weight
            String fn_featureWeight = getFeatureWeightFileName(sessionID, userID, varID);
            String[] globalFeatureName = Util.loadList(fn_globalFeatureName);
            model.saveFeatureWeights(globalFeatureName, fn_featureWeight);
//            // create a hash map for this variable's feature weight
//            predictorFeatureWeightMap = new HashMap<>();
//            List<Map.Entry<String, Double>> predictorsSortedTermWeightList = new ArrayList<>();
//            for(int i = 0; i < globalFeatureName.length; i++) {
//                predictorFeatureWeightMap.put(globalFeatureName[i], featureWeights[i]);
//                predictorsSortedTermWeightList.add(
//                        new AbstractMap.SimpleEntry<>(globalFeatureName[i], 
//                        featureWeights[i]));
//            }
//            predictorsFeatureWeightMap.add(predictorFeatureWeightMap);
//            predictorsSortedTermWeightMap.add(predictorsSortedTermWeightList);
            
            // create tuple for keyword weight list
//            String fn_keywordWeight = getKeywordFeatureWeightFileName(featureWeightFolder,
//                    varID, sessionID, userID);
//            String[][] keywordWeightTable = Util.loadTable(fn_keywordWeight);
//            List<Map.Entry<String, Double>> predictorsKeywordWeightList = new ArrayList<>();
//            for(int i = 0; i < keywordWeightTable.length; i++) {
//                predictorsKeywordWeightList.add(
//                        new AbstractMap.SimpleEntry<>(keywordWeightTable[i][0], 
//                        Double.parseDouble(keywordWeightTable[i][1])));
//            }
//            predictorsKeywordWeightMap.add(predictorsKeywordWeightList);
        }
    }
 
Author: NLPReViz, Project: emr-nlp-server, Lines: 78, Source: TextFileFeedbackManager.java

Example 5: buildClassifier

import weka.core.Instances; // import the package/class the method depends on
@Override
public void buildClassifier(Instances data) throws Exception {
	// Initialise training dataset
	Attribute classAttribute = data.classAttribute();
	
	classedData = new HashMap<>();
	classedDataIndices = new HashMap<>();
	for (int c = 0; c < data.numClasses(); c++) {
		classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>());
		classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>());
	}

	train = new SymbolicSequence[data.numInstances()];
	classMap = new String[train.length];
	maxLength = 0;
	for (int i = 0; i < train.length; i++) {
		Instance sample = data.instance(i);
		MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1];
		maxLength = Math.max(maxLength, sequence.length);
		int shift = (sample.classIndex() == 0) ? 1 : 0;
		for (int t = 0; t < sequence.length; t++) {
			sequence[t] = new MonoDoubleItemSet(sample.value(t + shift));
		}
		train[i] = new SymbolicSequence(sequence);
		String clas = sample.stringValue(classAttribute);
		classMap[i] = clas;
		classedData.get(clas).add(train[i]);
		classedDataIndices.get(clas).add(i);
	}
	warpingMatrix = new double[maxLength][maxLength];	
	U = new double[maxLength];
	L = new double[maxLength];
	
	maxWindow = Math.round(1 * maxLength); // consider every warping-window size up to the full sequence length
	searchResults = new String[maxWindow+1];
	nns = new int[maxWindow+1][train.length];
	dist = new double[train.length][train.length];
	
	// Start searching for the best window
	searchBestWarpingWindow();
	
	// Saving best windows found
	System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1-bestScore));
}
 
Author: ChangWeiTan, Project: FastWWSearch, Lines: 45, Source: LbKeoghPrunedDTW.java
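
Examples 5 through 8 share the same preprocessing idiom: each Instance returned by data.instance(i) is treated as a time series, and the class attribute (which Weka may place at index 0 or at the last index) is skipped via a shift. In isolation, that extraction looks like the sketch below; SymbolicSequence and MonoDoubleItemSet are project-specific types, so plain doubles stand in for them, and data is assumed to be a loaded, class-labelled Instances object.

for (int i = 0; i < data.numInstances(); i++) {
    Instance sample = data.instance(i);
    // one series value per non-class attribute
    double[] series = new double[sample.numAttributes() - 1];
    // if the class attribute comes first, start reading from index 1
    int shift = (sample.classIndex() == 0) ? 1 : 0;
    for (int t = 0; t < series.length; t++) {
        series[t] = sample.value(t + shift);
    }
    String label = sample.stringValue(data.classAttribute());
    System.out.println(label + ": " + series.length + " points");
}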

Example 6: buildClassifier

import weka.core.Instances; // import the package/class the method depends on
@Override
public void buildClassifier(Instances data) throws Exception {
	// Initialise training dataset
	Attribute classAttribute = data.classAttribute();
	
	classedData = new HashMap<>();
	classedDataIndices = new HashMap<>();
	for (int c = 0; c < data.numClasses(); c++) {
		classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>());
		classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>());
	}

	train = new SymbolicSequence[data.numInstances()];
	classMap = new String[train.length];
	maxLength = 0;
	for (int i = 0; i < train.length; i++) {
		Instance sample = data.instance(i);
		MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1];
		maxLength = Math.max(maxLength, sequence.length);
		int shift = (sample.classIndex() == 0) ? 1 : 0;
		for (int t = 0; t < sequence.length; t++) {
			sequence[t] = new MonoDoubleItemSet(sample.value(t + shift));
		}
		train[i] = new SymbolicSequence(sequence);
		String clas = sample.stringValue(classAttribute);
		classMap[i] = clas;
		classedData.get(clas).add(train[i]);
		classedDataIndices.get(clas).add(i);
	}
			
	warpingMatrix = new double[maxLength][maxLength];
	U = new double[maxLength];
	L = new double[maxLength];
	U1 = new double[maxLength];
	L1 = new double[maxLength];
	
	maxWindow = Math.round(1 * maxLength); // consider every warping-window size up to the full sequence length
	searchResults = new String[maxWindow+1];
	nns = new int[maxWindow+1][train.length];
	dist = new double[maxWindow+1][train.length];

	cache = new SequenceStatsCache(train, maxWindow);
	
	lazyUCR = new LazyAssessNNEarlyAbandon[train.length][train.length];
	
	for (int i = 0; i < train.length; i++) {
		for (int j = 0; j < train.length; j++) {
			lazyUCR[i][j] = new LazyAssessNNEarlyAbandon(cache);
		}
	}
	
	// Start searching for the best window
	searchBestWarpingWindow();
	
	// Saving best windows found
	System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1-bestScore));
}
 
Author: ChangWeiTan, Project: FastWWSearch, Lines: 58, Source: UCRSuite.java

Example 7: buildClassifier

import weka.core.Instances; // import the package/class the method depends on
@Override
public void buildClassifier(Instances data) throws Exception {
	// Initialise training dataset
	Attribute classAttribute = data.classAttribute();
	
	classedData = new HashMap<>();
	classedDataIndices = new HashMap<>();
	for (int c = 0; c < data.numClasses(); c++) {
		classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>());
		classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>());
	}

	train = new SymbolicSequence[data.numInstances()];
	classMap = new String[train.length];
	maxLength = 0;
	for (int i = 0; i < train.length; i++) {
		Instance sample = data.instance(i);
		MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1];
		maxLength = Math.max(maxLength, sequence.length);
		int shift = (sample.classIndex() == 0) ? 1 : 0;
		for (int t = 0; t < sequence.length; t++) {
			sequence[t] = new MonoDoubleItemSet(sample.value(t + shift));
		}
		train[i] = new SymbolicSequence(sequence);
		String clas = sample.stringValue(classAttribute);
		classMap[i] = clas;
		classedData.get(clas).add(train[i]);
		classedDataIndices.get(clas).add(i);
	}
			
	warpingMatrix = new double[maxLength][maxLength];
	U = new double[maxLength];
	L = new double[maxLength];
	U1 = new double[maxLength];
	L1 = new double[maxLength];
	
	maxWindow = Math.round(1 * maxLength); // consider every warping-window size up to the full sequence length
	searchResults = new String[maxWindow+1];
	nns = new int[maxWindow+1][train.length];
	dist = new double[train.length][train.length];

	cache = new SequenceStatsCache(train, maxWindow);
	
	lazyUCR = new LazyAssessNNEarlyAbandon[train.length][train.length];
	
	for (int i = 0; i < train.length; i++) {
		for (int j = 0; j < train.length; j++) {
			lazyUCR[i][j] = new LazyAssessNNEarlyAbandon(cache);
		}
	}
	
	// Start searching for the best window
	searchBestWarpingWindow();

	// Saving best windows found
	System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1-bestScore));
}
 
Author: ChangWeiTan, Project: FastWWSearch, Lines: 58, Source: UCRSuitePrunedDTW.java

Example 8: buildClassifier

import weka.core.Instances; // import the package/class the method depends on
@Override
public void buildClassifier(Instances data) throws Exception {
	// Initialise training dataset
	Attribute classAttribute = data.classAttribute();
	
	classedData = new HashMap<>();
	classedDataIndices = new HashMap<>();
	for (int c = 0; c < data.numClasses(); c++) {
		classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>());
		classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>());
	}

	train = new SymbolicSequence[data.numInstances()];
	classMap = new String[train.length];
	maxLength = 0;
	for (int i = 0; i < train.length; i++) {
		Instance sample = data.instance(i);
		MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1];
		maxLength = Math.max(maxLength, sequence.length);
		int shift = (sample.classIndex() == 0) ? 1 : 0;
		for (int t = 0; t < sequence.length; t++) {
			sequence[t] = new MonoDoubleItemSet(sample.value(t + shift));
		}
		train[i] = new SymbolicSequence(sequence);
		String clas = sample.stringValue(classAttribute);
		classMap[i] = clas;
		classedData.get(clas).add(train[i]);
		classedDataIndices.get(clas).add(i);
	}
	
	warpingMatrix = new double[maxLength][maxLength];
	U = new double[maxLength];
	L = new double[maxLength];
	
	maxWindow = Math.round(1 * maxLength); // consider every warping-window size up to the full sequence length
	searchResults = new String[maxWindow+1];
	nns = new int[maxWindow+1][train.length];
	dist = new double[maxWindow+1][train.length];
	
	// Start searching for the best window
	searchBestWarpingWindow();
	
	// Saving best windows found
	System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1-bestScore));
}
 
Author: ChangWeiTan, Project: FastWWSearch, Lines: 46, Source: WindowSearcher.java

Example 9: processCollection

import weka.core.Instances; // import the package/class the method depends on
@Override
public void processCollection() {

	String topic = this.parent.getTargetLocation().substring(this.parent.getTargetLocation().lastIndexOf("/") + 1);

	// get extracted concepts and propositions
	Extractor ex = this.parent.getPrevExtractor(this);
	this.concepts = ex.getConcepts();
	this.propositions = ex.getPropositions();
	for (Concept c : this.concepts)
		this.fixLemmas(c);

	// group by same label
	Map<Concept, ConceptGroup> groups = LemmaGrouper.group(this.concepts);
	List<Concept> repConcepts = new ArrayList<Concept>(groups.keySet());
	this.parent.log(this, "unique concepts: " + groups.size());

	// build all pairs for classifier
	List<CPair> pairs = this.buildPairs(repConcepts);
	this.parent.log(this, "concept pairs: " + pairs.size());

	// compute similarity features
	Instances features = this.computeFeatures(pairs, topic);

	// apply classifier
	ObjectDoubleMap<CPair> predictions = new ObjectDoubleHashMap<CPair>(pairs.size());
	try {
		Classifier clf = (Classifier) SerializationHelper.read(modelName);
		for (int i = 0; i < pairs.size(); i++) {
			CPair pair = pairs.get(i);
			Instance feat = features.instance(i);
			double[] pred = clf.distributionForInstance(feat);
			predictions.put(pair, pred[1]);
		}
	} catch (Exception e) {
		e.printStackTrace();
	}

	// clustering
	Set<List<Concept>> clusters = clusterer.createClusters(new HashSet<Concept>(repConcepts), predictions);

	// create final cluster and update relations
	this.updateDataStructures(clusters, groups);
	this.clusters = clusters;

	this.parent.log(this, "grouped concepts: " + concepts.size());
	this.parent.log(this, "relations: " + propositions.size());
}
 
Author: UKPLab, Project: ijcnlp2017-cmaps, Lines: 49, Source: ConceptGrouperSimLog.java
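
Example 9 combines two stock Weka calls around instance(int): SerializationHelper.read to load a serialized classifier from disk, and Classifier.distributionForInstance to score one row at a time. A minimal standalone sketch of that pattern follows; the model and data file names are placeholders, and a binary class attribute is assumed.

import weka.classifiers.Classifier;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.SerializationHelper;
import weka.core.converters.ConverterUtils.DataSource;

public class ScoreWithSavedModel {
    public static void main(String[] args) throws Exception {
        // Load a previously trained, serialized classifier (placeholder path)
        Classifier clf = (Classifier) SerializationHelper.read("model.bin");
        Instances features = DataSource.read("features.arff"); // placeholder path
        features.setClassIndex(features.numAttributes() - 1);
        for (int i = 0; i < features.numInstances(); i++) {
            Instance feat = features.instance(i);
            double[] pred = clf.distributionForInstance(feat);
            // pred[1] is the probability of the second class (binary class assumed)
            System.out.println(i + " -> " + pred[1]);
        }
    }
}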


Note: The weka.core.Instances.instance method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors. Please follow each project's License when redistributing or using the code, and do not republish without permission.