This article collects typical usage examples of the Java method weka.core.Instances.instance: what the method does, how to call it, and how it appears in real projects. The curated examples below should help; see the enclosing class weka.core.Instances for further details.
The following presents 9 code examples of Instances.instance, sorted by popularity by default.
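Before the examples, here is a minimal, self-contained sketch of the method itself. Instances.instance(int) returns the row at the given index as a weka.core.Instance (a reference, not a copy), so a dataset is typically traversed with an index loop. The file name data.arff below is a placeholder:

import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class InstanceIterationSketch {
    public static void main(String[] args) throws Exception {
        Instances data = DataSource.read("data.arff");   // placeholder path
        data.setClassIndex(data.numAttributes() - 1);    // common convention: class attribute last
        for (int i = 0; i < data.numInstances(); i++) {
            Instance row = data.instance(i);             // i-th row, returned by reference
            System.out.println(row);
        }
    }
}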
Example 1: testCreateMarkovAttributeSetFilter
import weka.core.Instances; // import the package/class the method depends on
/**
 * testCreateMarkovAttributeSetFilter
 */
@Test
public void testCreateMarkovAttributeSetFilter() throws Exception {
    // Test that we can create a filter from a MarkovAttributeSet
    MarkovAttributeSet aset = new MarkovAttributeSet(data, FeatureUtil.getFeatureKeyPrefix(ParamArrayLengthFeature.class));
    assertEquals(CatalogUtil.getArrayProcParameters(catalog_proc).size(), aset.size());
    Filter filter = aset.createFilter(data);
    Instances newData = Filter.useFilter(data, filter);
    for (int i = 0, cnt = newData.numInstances(); i < cnt; i++) {
        Instance processed = newData.instance(i);
        // System.err.println(processed);
        assertEquals(aset.size(), processed.numAttributes());
    } // FOR
    assertEquals(data.numInstances(), newData.numInstances());
    // System.err.println("MarkovAttributeSet: " + aset);
}
Example 2: distributionsForInstances
import weka.core.Instances; // import the package/class the method depends on
@Override
public double[][] distributionsForInstances(Instances batch) {
    double[][] dists = new double[batch.numInstances()][2];
    for (int i = 0; i < batch.numInstances(); i++) {
        Instance ins = batch.instance(i);
        dists[i] = new double[2];
        dists[i][1] = this.scoreInstance(ins);  // column 1 holds the score; column 0 stays 0.0
    }
    return dists;
}
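For context, a caller consumes the returned matrix by index. A minimal sketch, where scorer stands in for whatever object implements the method above:

double[][] dists = scorer.distributionsForInstances(batch);
for (int i = 0; i < dists.length; i++) {
    // column 1 carries the value from scoreInstance(); column 0 is unused here
    System.out.println("instance " + i + " score=" + dists[i][1]);
}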
Example 3: generateDecisionTree
import weka.core.Instances; // import the package/class the method depends on
protected Classifier generateDecisionTree(AbstractClusterer clusterer, MarkovAttributeSet aset, Instances data) throws Exception {
    // We need to create a new Attribute that has the ClusterId
    Instances newData = data; // new Instances(data);
    newData.insertAttributeAt(new Attribute("ClusterId"), newData.numAttributes());
    Attribute cluster_attr = newData.attribute(newData.numAttributes() - 1);
    assert(cluster_attr != null);
    assert(cluster_attr.index() > 0);
    newData.setClass(cluster_attr);

    // We will then tell the Classifier to predict that ClusterId based on the MarkovAttributeSet
    ObjectHistogram<Integer> cluster_h = new ObjectHistogram<Integer>();
    for (int i = 0, cnt = newData.numInstances(); i < cnt; i++) {
        // Grab the Instance and throw it at the clusterer to get the target cluster
        Instance inst = newData.instance(i);
        int c = (int) clusterer.clusterInstance(inst);
        inst.setClassValue(c);
        cluster_h.put(c);
    } // FOR
    System.err.println("Number of Elements: " + cluster_h.getValueCount());
    System.err.println(cluster_h);

    NumericToNominal filter = new NumericToNominal();
    filter.setInputFormat(newData);
    newData = Filter.useFilter(newData, filter);

    String output = this.catalog_proc.getName() + "-labeled.arff";
    FileUtil.writeStringToFile(output, newData.toString());
    LOG.info("Wrote labeled data set to " + output);

    // Decision Tree
    J48 j48 = new J48();
    String options[] = {
        "-S", Integer.toString(this.rand.nextInt()),
    };
    j48.setOptions(options);

    // Make sure we add the ClusterId attribute to a new MarkovAttributeSet so that
    // we can tell the Classifier to classify that!
    FilteredClassifier fc = new FilteredClassifier();
    MarkovAttributeSet classifier_aset = new MarkovAttributeSet(aset);
    classifier_aset.add(cluster_attr);
    fc.setFilter(classifier_aset.createFilter(newData));
    fc.setClassifier(j48);

    // Bombs away!
    fc.buildClassifier(newData);
    return (fc);
}
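The FilteredClassifier pattern at the end of Example 3 is worth isolating. A minimal sketch using a stock Weka filter (Remove stands in for the project-specific filter produced by MarkovAttributeSet.createFilter):

import weka.classifiers.meta.FilteredClassifier;
import weka.classifiers.trees.J48;
import weka.filters.unsupervised.attribute.Remove;

Remove remove = new Remove();
remove.setAttributeIndices("1");           // illustrative: drop the first attribute
FilteredClassifier fc = new FilteredClassifier();
fc.setFilter(remove);                      // applied before training and before every prediction
fc.setClassifier(new J48());
fc.buildClassifier(data);                  // data: an Instances whose class attribute is set

The advantage of wrapping the filter this way is that the same transformation is re-applied automatically to every instance passed in at prediction time.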
Example 4: updateModels
import weka.core.Instances; // import the package/class the method depends on
/**
 * Train a model and create the feature weights.
 * This implementation treats each highlight-span feedback item as a "normal"
 * feedback instance. Hence, we merge the instanceDS and the feedbackDS into
 * one training set for the new model.
 *
 * @param sessionID
 * @param userID
 * @param varID
 * @throws Exception
 */
public void updateModels(String sessionID, String userID, String varID,
        SVMPredictor model) throws Exception {
    // if the model exists, do nothing
    String fn_model = getModelFileName(sessionID, userID, varID);
    // HashMap<String, Double> predictorFeatureWeightMap;
    if (!Util.fileExists(fn_model)) {
        // attrNameToIndexMap.put(varID, attrNameToIndexMap.size());
        // merge the 2 instance sets
        String fn_instanceDS = getInstanceDSFileName(sessionID, userID, varID);
        String fn_feedbackDS = getFeedbackDSFileName(sessionID, userID, varID);
        if (!Util.fileExists(fn_instanceDS)) {
            throw new UnsupportedOperationException("Training data set does not exist. "
                    + "Create the (instance) variable value data set for " +
                    fn_model + " before re-training it");
        }
        if (!Util.fileExists(fn_feedbackDS)) {
            throw new UnsupportedOperationException("Training data set does not exist. "
                    + "Create the (feedback) highlight span data set for " +
                    fn_model + " before re-training it");
        }
        Instances instanceDS = loadInstancesFromFile(fn_instanceDS);
        Instances feedbackDS = loadInstancesFromFile(fn_feedbackDS);
        for (int i = 0; i < feedbackDS.numInstances(); i++) {
            Instance feedbackInstance = feedbackDS.instance(i);
            instanceDS.add(feedbackInstance);
        }
        // train the model
        model.train((Object) instanceDS);
        // save model
        model.saveModel(fn_model);
        // predictors.add(model);
        // save feature weight + keyword weight
        String fn_featureWeight = getFeatureWeightFileName(sessionID, userID, varID);
        String[] globalFeatureName = Util.loadList(fn_globalFeatureName);
        model.saveFeatureWeights(globalFeatureName, fn_featureWeight);
        // // create a hash map for this variable's feature weight
        // predictorFeatureWeightMap = new HashMap<>();
        // List<Map.Entry<String, Double>> predictorsSortedTermWeightList = new ArrayList<>();
        // for(int i = 0; i < globalFeatureName.length; i++) {
        //     predictorFeatureWeightMap.put(globalFeatureName[i], featureWeights[i]);
        //     predictorsSortedTermWeightList.add(
        //         new AbstractMap.SimpleEntry<>(globalFeatureName[i],
        //             featureWeights[i]));
        // }
        // predictorsFeatureWeightMap.add(predictorFeatureWeightMap);
        // predictorsSortedTermWeightMap.add(predictorsSortedTermWeightList);
        // create tuple for keyword weight list
        // String fn_keywordWeight = getKeywordFeatureWeightFileName(featureWeightFolder,
        //     varID, sessionID, userID);
        // String[][] keywordWeightTable = Util.loadTable(fn_keywordWeight);
        // List<Map.Entry<String, Double>> predictorsKeywordWeightList = new ArrayList<>();
        // for(int i = 0; i < keywordWeightTable.length; i++) {
        //     predictorsKeywordWeightList.add(
        //         new AbstractMap.SimpleEntry<>(keywordWeightTable[i][0],
        //             Double.parseDouble(keywordWeightTable[i][1])));
        // }
        // predictorsKeywordWeightMap.add(predictorsKeywordWeightList);
    }
}
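The merge step in Example 4 relies on the two datasets having identical headers (same attributes in the same order). A minimal sketch of that step in isolation, under that assumption:

// copy the first set so the original is not mutated, then append the second row by row
Instances merged = new Instances(instanceDS);
for (int i = 0; i < feedbackDS.numInstances(); i++) {
    merged.add(feedbackDS.instance(i));   // add() stores a shallow copy of the instance
}

Note that Example 4 adds directly into instanceDS instead of copying first; that is fine when the loaded dataset is not reused elsewhere.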
Example 5: buildClassifier
import weka.core.Instances; // import the package/class the method depends on
@Override
public void buildClassifier(Instances data) throws Exception {
    // Initialise training dataset
    Attribute classAttribute = data.classAttribute();
    classedData = new HashMap<>();
    classedDataIndices = new HashMap<>();
    for (int c = 0; c < data.numClasses(); c++) {
        classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>());
        classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>());
    }

    train = new SymbolicSequence[data.numInstances()];
    classMap = new String[train.length];
    maxLength = 0;
    for (int i = 0; i < train.length; i++) {
        Instance sample = data.instance(i);
        MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1];
        maxLength = Math.max(maxLength, sequence.length);
        // skip the class attribute; this assumes it is either the first or the last attribute
        int shift = (sample.classIndex() == 0) ? 1 : 0;
        for (int t = 0; t < sequence.length; t++) {
            sequence[t] = new MonoDoubleItemSet(sample.value(t + shift));
        }
        train[i] = new SymbolicSequence(sequence);
        String clas = sample.stringValue(classAttribute);
        classMap[i] = clas;
        classedData.get(clas).add(train[i]);
        classedDataIndices.get(clas).add(i);
    }

    warpingMatrix = new double[maxLength][maxLength];
    U = new double[maxLength];
    L = new double[maxLength];

    maxWindow = Math.round(1 * maxLength);
    searchResults = new String[maxWindow + 1];
    nns = new int[maxWindow + 1][train.length];
    dist = new double[train.length][train.length];

    // Start searching for the best window
    searchBestWarpingWindow();

    // Saving best windows found
    System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1 - bestScore));
}
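The shift logic in this loop is easy to miss: it drops the class attribute from the extracted series, assuming the class is either the first or the last attribute. A minimal sketch of just that extraction:

// pull the numeric series out of a row, skipping the class column
Instance sample = data.instance(0);
int shift = (sample.classIndex() == 0) ? 1 : 0;   // class first -> values start at index 1
double[] series = new double[sample.numAttributes() - 1];
for (int t = 0; t < series.length; t++) {
    series[t] = sample.value(t + shift);          // value(j) returns the j-th attribute as a double
}

If the class attribute sat anywhere in the middle, this shift would silently include it in the series, so the assumption matters. The same pattern recurs in Examples 6 through 8.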
Example 6: buildClassifier
import weka.core.Instances; // import the package/class the method depends on
@Override
public void buildClassifier(Instances data) throws Exception {
    // Initialise training dataset
    Attribute classAttribute = data.classAttribute();
    classedData = new HashMap<>();
    classedDataIndices = new HashMap<>();
    for (int c = 0; c < data.numClasses(); c++) {
        classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>());
        classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>());
    }

    train = new SymbolicSequence[data.numInstances()];
    classMap = new String[train.length];
    maxLength = 0;
    for (int i = 0; i < train.length; i++) {
        Instance sample = data.instance(i);
        MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1];
        maxLength = Math.max(maxLength, sequence.length);
        int shift = (sample.classIndex() == 0) ? 1 : 0;
        for (int t = 0; t < sequence.length; t++) {
            sequence[t] = new MonoDoubleItemSet(sample.value(t + shift));
        }
        train[i] = new SymbolicSequence(sequence);
        String clas = sample.stringValue(classAttribute);
        classMap[i] = clas;
        classedData.get(clas).add(train[i]);
        classedDataIndices.get(clas).add(i);
    }

    warpingMatrix = new double[maxLength][maxLength];
    U = new double[maxLength];
    L = new double[maxLength];
    U1 = new double[maxLength];
    L1 = new double[maxLength];

    maxWindow = Math.round(1 * maxLength);
    searchResults = new String[maxWindow + 1];
    nns = new int[maxWindow + 1][train.length];
    dist = new double[maxWindow + 1][train.length];
    cache = new SequenceStatsCache(train, maxWindow);
    lazyUCR = new LazyAssessNNEarlyAbandon[train.length][train.length];
    for (int i = 0; i < train.length; i++) {
        for (int j = 0; j < train.length; j++) {
            lazyUCR[i][j] = new LazyAssessNNEarlyAbandon(cache);
        }
    }

    // Start searching for the best window
    searchBestWarpingWindow();

    // Saving best windows found
    System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1 - bestScore));
}
Example 7: buildClassifier
import weka.core.Instances; // import the package/class the method depends on
@Override
public void buildClassifier(Instances data) throws Exception {
    // Initialise training dataset
    Attribute classAttribute = data.classAttribute();
    classedData = new HashMap<>();
    classedDataIndices = new HashMap<>();
    for (int c = 0; c < data.numClasses(); c++) {
        classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>());
        classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>());
    }

    train = new SymbolicSequence[data.numInstances()];
    classMap = new String[train.length];
    maxLength = 0;
    for (int i = 0; i < train.length; i++) {
        Instance sample = data.instance(i);
        MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1];
        maxLength = Math.max(maxLength, sequence.length);
        int shift = (sample.classIndex() == 0) ? 1 : 0;
        for (int t = 0; t < sequence.length; t++) {
            sequence[t] = new MonoDoubleItemSet(sample.value(t + shift));
        }
        train[i] = new SymbolicSequence(sequence);
        String clas = sample.stringValue(classAttribute);
        classMap[i] = clas;
        classedData.get(clas).add(train[i]);
        classedDataIndices.get(clas).add(i);
    }

    warpingMatrix = new double[maxLength][maxLength];
    U = new double[maxLength];
    L = new double[maxLength];
    U1 = new double[maxLength];
    L1 = new double[maxLength];

    maxWindow = Math.round(1 * maxLength);
    searchResults = new String[maxWindow + 1];
    nns = new int[maxWindow + 1][train.length];
    dist = new double[train.length][train.length];
    cache = new SequenceStatsCache(train, maxWindow);
    lazyUCR = new LazyAssessNNEarlyAbandon[train.length][train.length];
    for (int i = 0; i < train.length; i++) {
        for (int j = 0; j < train.length; j++) {
            lazyUCR[i][j] = new LazyAssessNNEarlyAbandon(cache);
        }
    }

    // Start searching for the best window
    searchBestWarpingWindow();

    // Saving best windows found
    System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1 - bestScore));
}
Example 8: buildClassifier
import weka.core.Instances; // import the package/class the method depends on
@Override
public void buildClassifier(Instances data) throws Exception {
    // Initialise training dataset
    Attribute classAttribute = data.classAttribute();
    classedData = new HashMap<>();
    classedDataIndices = new HashMap<>();
    for (int c = 0; c < data.numClasses(); c++) {
        classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>());
        classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>());
    }

    train = new SymbolicSequence[data.numInstances()];
    classMap = new String[train.length];
    maxLength = 0;
    for (int i = 0; i < train.length; i++) {
        Instance sample = data.instance(i);
        MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1];
        maxLength = Math.max(maxLength, sequence.length);
        int shift = (sample.classIndex() == 0) ? 1 : 0;
        for (int t = 0; t < sequence.length; t++) {
            sequence[t] = new MonoDoubleItemSet(sample.value(t + shift));
        }
        train[i] = new SymbolicSequence(sequence);
        String clas = sample.stringValue(classAttribute);
        classMap[i] = clas;
        classedData.get(clas).add(train[i]);
        classedDataIndices.get(clas).add(i);
    }

    warpingMatrix = new double[maxLength][maxLength];
    U = new double[maxLength];
    L = new double[maxLength];

    maxWindow = Math.round(1 * maxLength);
    searchResults = new String[maxWindow + 1];
    nns = new int[maxWindow + 1][train.length];
    dist = new double[maxWindow + 1][train.length];

    // Start searching for the best window
    searchBestWarpingWindow();

    // Saving best windows found
    System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1 - bestScore));
}
Example 9: processCollection
import weka.core.Instances; // import the package/class the method depends on
@Override
public void processCollection() {
    String topic = this.parent.getTargetLocation().substring(this.parent.getTargetLocation().lastIndexOf("/") + 1);

    // get extracted concepts and propositions
    Extractor ex = this.parent.getPrevExtractor(this);
    this.concepts = ex.getConcepts();
    this.propositions = ex.getPropositions();
    for (Concept c : this.concepts)
        this.fixLemmas(c);

    // group by same label
    Map<Concept, ConceptGroup> groups = LemmaGrouper.group(this.concepts);
    List<Concept> repConcepts = new ArrayList<Concept>(groups.keySet());
    this.parent.log(this, "unique concepts: " + groups.size());

    // build all pairs for classifier
    List<CPair> pairs = this.buildPairs(repConcepts);
    this.parent.log(this, "concept pairs: " + pairs.size());

    // compute similarity features
    Instances features = this.computeFeatures(pairs, topic);

    // apply classifier
    ObjectDoubleMap<CPair> predictions = new ObjectDoubleHashMap<CPair>(pairs.size());
    try {
        Classifier clf = (Classifier) SerializationHelper.read(modelName);
        for (int i = 0; i < pairs.size(); i++) {
            CPair pair = pairs.get(i);
            Instance feat = features.instance(i);
            double[] pred = clf.distributionForInstance(feat);
            predictions.put(pair, pred[1]);
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

    // clustering
    Set<List<Concept>> clusters = clusterer.createClusters(new HashSet<Concept>(repConcepts), predictions);

    // create final cluster and update relations
    this.updateDataStructures(clusters, groups);
    this.clusters = clusters;
    this.parent.log(this, "grouped concepts: " + concepts.size());
    this.parent.log(this, "relations: " + propositions.size());
}
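Finally, the deserialize-and-predict pattern from Example 9 in isolation; a minimal sketch where model.ser is a placeholder path and features is an Instances aligned row-for-row with whatever you are scoring:

import weka.classifiers.Classifier;
import weka.core.SerializationHelper;

Classifier clf = (Classifier) SerializationHelper.read("model.ser");
// distributionForInstance returns one probability per class value;
// index 1 is the "positive" class in Example 9's two-class setup
double[] dist = clf.distributionForInstance(features.instance(0));
double positiveProb = dist[1];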