This article collects typical usage examples of the Java method weka.core.Instances.numInstances. If you are unsure what Instances.numInstances does, how to use it, or want to see it in context, the curated code samples below may help. You can also read further about the enclosing class weka.core.Instances.
The following 15 code examples of Instances.numInstances are sorted by popularity by default. You can vote up the examples you like or find useful; your ratings help the system recommend better Java code samples.
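Before the examples, here is a minimal, self-contained sketch of the method itself: numInstances() returns the number of rows (instances) in an Instances dataset. The ARFF path below is a placeholder assumption.

import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class NumInstancesDemo {
    public static void main(String[] args) throws Exception {
        // Load a dataset from an ARFF file (placeholder path).
        Instances data = DataSource.read("data/iris.arff");
        // numInstances() returns the number of rows in the dataset.
        System.out.println("Instances: " + data.numInstances());
    }
}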
Example 1: splitWorkload
import weka.core.Instances; // import the package/class the method depends on
/**
 * Split the workload into consecutive chunks, one per SplitType, sized by each
 * type's percentage of the full dataset, converting numeric attributes to
 * nominal in each chunk.
 * @param data the full workload dataset
 * @return the array of per-SplitType Instances
 */
protected Instances[] splitWorkload(Instances data) {
int offset = 0;
int all_cnt = data.numInstances();
for (SplitType stype : SplitType.values()) {
int idx = stype.ordinal();
this.split_counts[idx] = (int)Math.round(all_cnt * stype.percentage);
try {
this.splits[idx] = new Instances(data, offset, this.split_counts[idx]);
// Apply NumericToNominal filter!
NumericToNominal filter = new NumericToNominal();
filter.setInputFormat(this.splits[idx]);
this.splits[idx] = Filter.useFilter(this.splits[idx], filter);
} catch (Exception ex) {
throw new RuntimeException("Failed to split " + stype + " workload", ex);
}
offset += this.split_counts[idx];
if (debug.val) LOG.debug(String.format("%-12s%d", stype.toString()+":", this.split_counts[idx]));
} // FOR
return (this.splits);
}
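This snippet depends on a project-local SplitType enum that carries a percentage per split; the splits are taken as consecutive slices of the data. A hypothetical sketch of such an enum follows (names and percentages are illustrative assumptions, not the original project's values):

// Hypothetical sketch of the SplitType enum the snippet relies on.
public enum SplitType {
    TRAINING(0.60),   // illustrative percentages; real values are project-specific
    VALIDATION(0.20),
    TESTING(0.20);

    public final double percentage;

    SplitType(double percentage) {
        this.percentage = percentage;
    }
}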
Example 2: getDist
import weka.core.Instances; // import the package/class the method depends on
/**
 * <p>Print the distribution of inTrace and outTrace instances in the dataset at <b>path</b>.</p>
 * @param path path of the dataset (ARFF file) to read
 * @throws Exception if the file cannot be read
 */
public static void getDist(String path) throws Exception{
Instances ins = DataSource.read(path);
int numAttr = ins.numAttributes();
ins.setClassIndex(numAttr-1);
int numIns = ins.numInstances();
int intrace = 0;
int outtrace = 0;
for(int i=0; i<numIns; i++){
if(ins.get(i).stringValue(ins.attribute(ins.classIndex())).equals("InTrace")){
intrace++;
}else{
outtrace++;
}
}
System.out.printf("[ %-30s ] inTrace:%4d, outTrace:%4d.\n", path, intrace, outtrace);
}
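A possible call site for the method above (the ARFF path is a placeholder assumption):

// Hypothetical usage; point this at a real ARFF file.
getDist("data/project.arff");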
Example 3: predictDataDistribution
import weka.core.Instances; // import the package/class the method depends on
protected double[][] predictDataDistribution(Instances unlabeled) throws Exception {
	// set class attribute
	unlabeled.setClassIndex(unlabeled.numAttributes() - 1);
	// enable probability estimates once, before labeling
	LibSVM libsvm = (LibSVM) m_Classifier;
	libsvm.setProbabilityEstimates(true);
	// distribution for each instance
	double[][] dist = new double[unlabeled.numInstances()][unlabeled.numClasses()];
	// label instances
	for (int i = 0; i < unlabeled.numInstances(); i++) {
		dist[i] = libsvm.distributionForInstance(unlabeled.instance(i));
	}
	return dist;
}
Example 4: findBestPerf
import weka.core.Instances; // import the package/class the method depends on
public static Instance findBestPerf(Instances data) {
	int idx = data.numAttributes() - 1;
	double bestPerf = data.attributeStats(idx).numericStats.max;
	for (int i = 0; i < data.numInstances(); i++) {
		// exact comparison is safe: bestPerf was taken from this same column
		if (data.get(i).value(idx) == bestPerf)
			return data.get(i);
	}
	return null; // unreachable: the maximum always belongs to some instance
}
Example 5: testCOMT2
import weka.core.Instances; // import the package/class the method depends on
public static void testCOMT2() throws Exception{
BestConf bestconf = new BestConf();
Instances trainingSet = DataIOFile.loadDataFromArffFile("data/trainingBestConf0.arff");
trainingSet.setClassIndex(trainingSet.numAttributes()-1);
Instances samplePoints = LHSInitializer.getMultiDimContinuous(bestconf.getAttributes(), InitialSampleSetSize, false);
samplePoints.insertAttributeAt(trainingSet.classAttribute(), samplePoints.numAttributes());
samplePoints.setClassIndex(samplePoints.numAttributes()-1);
COMT2 comt = new COMT2(samplePoints, COMT2Iteration);
comt.buildClassifier(trainingSet);
Evaluation eval = new Evaluation(trainingSet);
eval.evaluateModel(comt, trainingSet);
System.err.println(eval.toSummaryString());
Instance best = comt.getInstanceWithPossibleMaxY(samplePoints.firstInstance());
Instances bestInstances = new Instances(trainingSet,2);
bestInstances.add(best);
DataIOFile.saveDataToXrffFile("data/trainingBestConf_COMT2.arff", bestInstances);
//now we output the training set with the class value updated as the predicted value
Instances output = new Instances(trainingSet, trainingSet.numInstances());
Enumeration<Instance> enu = trainingSet.enumerateInstances();
while(enu.hasMoreElements()){
Instance ins = enu.nextElement();
double[] values = ins.toDoubleArray();
values[values.length-1] = comt.classifyInstance(ins);
output.add(ins.copy(values));
}
DataIOFile.saveDataToXrffFile("data/trainingBestConf0_predict.xrff", output);
}
Example 6: distributionsForInstances
import weka.core.Instances; // import the package/class the method depends on
@Override
public double[][] distributionsForInstances(Instances batch) {
	// one two-element row per instance; the outer allocation already creates the rows
	double[][] dists = new double[batch.numInstances()][2];
	for (int i = 0; i < batch.numInstances(); i++) {
		Instance ins = batch.instance(i);
		// only the positive-class score is filled in; index 0 stays 0.0
		dists[i][1] = this.scoreInstance(ins);
	}
	return dists;
}
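Note that index 0 of each row is left at 0.0. If scoreInstance returns the probability of the positive class, filling index 0 with the complement would turn each row into a proper two-class distribution. A sketch of that variant, under that assumption:

// Sketch, assuming scoreInstance(ins) returns P(positive class).
public double[][] distributionsForInstancesNormalized(Instances batch) {
	double[][] dists = new double[batch.numInstances()][2];
	for (int i = 0; i < batch.numInstances(); i++) {
		double p = this.scoreInstance(batch.instance(i));
		dists[i][1] = p;
		dists[i][0] = 1.0 - p; // complement, so each row sums to 1
	}
	return dists;
}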
Example 7: main
import weka.core.Instances; // import the package/class the method depends on
public static void main(String[] args){
GRules GRulesLearner = new GRules();
try {
ConverterUtils.DataSource source = new ConverterUtils.DataSource("data/car.arff");
Instances originalDataset = source.getDataSet();
// split data 80/20 (the original "* 0.8 / 100" would have kept only 0.8% for training)
int trainSize = (int) Math.round(originalDataset.numInstances() * 0.8);
int testSize = originalDataset.numInstances() - trainSize;
Instances train = new Instances(originalDataset, 0, trainSize);
Instances test = new Instances(originalDataset, trainSize, testSize);
train.randomize(new java.util.Random(0));
// train the rules learner
List<List<Term>> rightSideRules = GRulesLearner.learnRightSide(train);
List<Rule> completedRules = GRulesLearner.induceCompleteRules(rightSideRules, originalDataset);
// try to predict an instance
System.out.println("A testing instance: ");
System.out.println(test.get(10));
System.out.println("A rule covered the instance: ");
System.out.println(GRulesLearner.predict(test.get(10), completedRules).nicePrint());
} catch (Exception ex) {
System.err.println(ex.toString());
}
}
Example 8: findBestPerfIndex
import weka.core.Instances; // import the package/class the method depends on
public static int findBestPerfIndex(Instances data) {
	int idx = data.numAttributes() - 1;
	double bestPerf = data.attributeStats(idx).numericStats.max;
	for (int i = 0; i < data.numInstances(); i++) {
		// exact comparison is safe: bestPerf was taken from this same column
		if (data.get(i).value(idx) == bestPerf)
			return i;
	}
	return -1; // unreachable: the maximum always belongs to some instance
}
Example 9: keoghPrunedDTW
import weka.core.Instances; // import the package/class the method depends on
/**
 * Run the LBKeogh-PrunedDTW method on a sample of the given size
 * @param data the full training dataset
 * @param sampleSize the number of instances to sample for this run
 * @return the (share-normalized) search time in seconds
 */
public static double keoghPrunedDTW(Instances data, int sampleSize) {
double share = 1, searchTime = 0;
long start, stop;
LbKeoghPrunedDTW classifier = new LbKeoghPrunedDTW(datasetName);
classifier.setResDir(resDir);
classifier.setType(method);
try{
Instances newTrain = Sampling.sample(data, sampleSize);
System.out.println("Size: " + sampleSize + ", Launching SDM16");
if (sampleSize < estimate+1) {
start = System.nanoTime();
classifier.buildClassifier(newTrain);
stop = System.nanoTime();
}
else {
start = System.nanoTime();
classifier.buildClassifierEstimate(newTrain, estimate);
stop = System.nanoTime();
share = 1.0 * (estimate+1) /newTrain.numInstances();
}
searchTime = 1.0 * ((stop - start)/1e9);
searchTime = searchTime/share;
saveSearchTime(sampleSize, searchTime);
System.out.println("Size: " + sampleSize + ", " + searchTime + " s");
} catch (Exception e) {
e.printStackTrace();
}
return searchTime;
}
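The call above relies on a project-local Sampling.sample(data, sampleSize) helper. A rough, hypothetical sketch follows; the original may well sample in a stratified way, so treat this simple random version as an assumption only:

// Hypothetical sketch of a Sampling.sample(data, sampleSize) helper.
public static Instances sample(Instances data, int sampleSize) {
	Instances shuffled = new Instances(data);      // copy so the original is untouched
	shuffled.randomize(new java.util.Random(42));  // fixed seed for reproducibility
	int n = Math.min(sampleSize, shuffled.numInstances());
	return new Instances(shuffled, 0, n);          // keep the first n instances
}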
Example 10: orderByLargestClass
import weka.core.Instances; // import the package/class the method depends on
/**
 * Reorder the dataset so that the largest class comes first
 * @param data the dataset to reorder
 * @return a new Instances object sorted by class size, largest class first
 */
public static Instances orderByLargestClass(Instances data) {
Instances newData = new Instances(data, data.numInstances());
// get the number of class in the data
int nbClass = data.numClasses();
int[] instancePerClass = new int[nbClass];
int[] labels = new int[nbClass];
int[] classIndex = new int[nbClass];
// sort the data based on its class
data.sort(data.classAttribute());
// get the number of instances per class in the data
for (int i = 0; i < nbClass; i++) {
instancePerClass[i] = data.attributeStats(data.classIndex()).nominalCounts[i];
labels[i] = i;
if (i > 0)
classIndex[i] = classIndex[i-1] + instancePerClass[i-1];
}
QuickSort.sort(instancePerClass, labels);
for (int i = nbClass-1; i >=0 ; i--) {
for (int j = 0; j < instancePerClass[i]; j++) {
newData.add(data.instance(classIndex[labels[i]] + j));
}
}
return newData;
}
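The snippet depends on a project-local QuickSort.sort(int[], int[]) that sorts the first array ascending while permuting the second array in step. A minimal hypothetical sketch of such a co-sorting helper (signature inferred from the usage above, not taken from the original project; a plain selection sort is used for clarity):

// Hypothetical co-sorting helper matching the QuickSort.sort(values, labels) usage above.
public final class QuickSort {
	public static void sort(int[] values, int[] labels) {
		// sorts values ascending and applies the same swaps to labels
		for (int i = 0; i < values.length - 1; i++) {
			int min = i;
			for (int j = i + 1; j < values.length; j++) {
				if (values[j] < values[min]) min = j;
			}
			int tv = values[i]; values[i] = values[min]; values[min] = tv;
			int tl = labels[i]; labels[i] = labels[min]; labels[min] = tl;
		}
	}
}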
Example 11: generateARFF
import weka.core.Instances; // import the package/class the method depends on
/**<p>Generate a random sample, driven by the given random seed, with the same
 * InTrace/OutTrace distribution as the original dataset and <b>num</b> instances in total.
 * </p>
 * @param path path of the original arff file to sample
 * @param rand random seed
 * @param num the number of instances to select
 * */
public static void generateARFF(String path, int rand, int num) throws Exception{
/*** original dataset reading */
Instances data = DataSource.read(path);
data.setClassIndex(data.numAttributes()-1);
/*** randomize the dataset */
data.randomize(new Random(rand));
/*** dataIn to save instances of InTrace class */
Instances dataIn = new Instances("dataIn", InsMerge.getStandAttrs(), 1);
dataIn.setClassIndex(dataIn.numAttributes() - 1);
/*** dataOut to save instances of OutTrace class */
Instances dataOut = new Instances("dataOut", InsMerge.getStandAttrs(), 1);
dataOut.setClassIndex(dataOut.numAttributes() - 1);
/*** add OutTrace instances into dataOut */
for(int i=0; i<data.numInstances(); i++){
if(data.get(i).stringValue(data.get(i).classAttribute()).equals("OutTrace")){
dataOut.add(data.get(i));
}
}
/** add InTrace instances into dataIn */
for(int i=0; i<data.numInstances(); i++){
if(data.get(i).stringValue(data.get(i).classAttribute()).equals("InTrace")){
dataIn.add(data.get(i));
}
}
/*** get the In/Out ratio in original dataset */
int inTrace = dataIn.numInstances();
int outTrace = dataOut.numInstances();
double ratioI = inTrace*1.0/(outTrace + inTrace);
/*** expected number to select from original dataset*/
int intrace = (int) (num * ratioI);
int outtrace = num - intrace;
/*** create the new generated dataset, train */
Instances train = new Instances("train", InsMerge.getStandAttrs(), 1);
train.setClassIndex(train.numAttributes() - 1);
/** train get X instances from dataIn*/
for(int i=0; i<intrace; i++){
train.add(dataIn.get(i));
}
/** train get Y instances from dataOut*/
for(int j=0; j<outtrace; j++){
train.add(dataOut.get(j));
}
/*** save the generated dataset as an arff file under files/generated/ */
String filename = "files/generated/" + filterName(path) + rand + ".arff";
DataSink.write(filename, train);
}
Example 12: runExp
import weka.core.Instances; // import the package/class the method depends on
public Instances runExp(Instances samplePoints, String perfAttName) {
	Instances retVal = null;
	if (samplePoints.attribute(perfAttName) == null) {
		Attribute performance = new Attribute(perfAttName);
		samplePoints.insertAttributeAt(performance, samplePoints.numAttributes());
	}
	int pos = samplePoints.numInstances();
	int count = 0;
	for (int i = 0; i < pos; i++) {
		Instance ins = samplePoints.get(i);
		// skip points whose performance value has already been measured
		if (!Double.isNaN(ins.value(ins.attribute(ins.numAttributes() - 1)))) {
			continue;
		}
		// collect the attribute values of this sample point
		HashMap<String, Double> hm = new HashMap<>();
		for (int j = 0; j < ins.numAttributes(); j++) {
			hm.put(ins.attribute(j).name(), ins.value(ins.attribute(j)));
		}
		boolean testRet = this.startTest(hm, i, isInterrupt);
		double y = 0;
		if (!testRet) { // the setting does not work, we skip it
			y = -1;
			count++;
			if (count >= targetTestErrorNum) {
				System.out.println("There must be something wrong with the system. Please check and restart.....");
				System.exit(1);
			}
		} else {
			y = getPerformanceByType(performanceType);
			count = 0;
		}
		ins.setValue(samplePoints.numAttributes() - 1, y);
		writePerfstoFile(ins);
	}
	retVal = samplePoints;
	retVal.setClassIndex(retVal.numAttributes() - 1);
	return retVal;
}
Example 13: buildClassifier
import weka.core.Instances; // import the package/class the method depends on
@Override
public void buildClassifier(Instances data) throws Exception {
// Initialise training dataset
Attribute classAttribute = data.classAttribute();
classedData = new HashMap<>();
classedDataIndices = new HashMap<>();
for (int c = 0; c < data.numClasses(); c++) {
classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>());
classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>());
}
train = new SymbolicSequence[data.numInstances()];
classMap = new String[train.length];
maxLength = 0;
for (int i = 0; i < train.length; i++) {
Instance sample = data.instance(i);
MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1];
maxLength = Math.max(maxLength, sequence.length);
int shift = (sample.classIndex() == 0) ? 1 : 0;
for (int t = 0; t < sequence.length; t++) {
sequence[t] = new MonoDoubleItemSet(sample.value(t + shift));
}
train[i] = new SymbolicSequence(sequence);
String clas = sample.stringValue(classAttribute);
classMap[i] = clas;
classedData.get(clas).add(train[i]);
classedDataIndices.get(clas).add(i);
}
warpingMatrix = new double[maxLength][maxLength];
U = new double[maxLength];
L = new double[maxLength];
U1 = new double[maxLength];
L1 = new double[maxLength];
maxWindow = Math.round(1 * maxLength);
searchResults = new String[maxWindow+1];
nns = new int[maxWindow+1][train.length];
dist = new double[maxWindow+1][train.length];
cache = new SequenceStatsCache(train, maxWindow);
lazyUCR = new LazyAssessNNEarlyAbandon[train.length][train.length];
for (int i = 0; i < train.length; i++) {
for (int j = 0; j < train.length; j++) {
lazyUCR[i][j] = new LazyAssessNNEarlyAbandon(cache);
}
}
// Start searching for the best window
searchBestWarpingWindow();
// Saving best windows found
System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1-bestScore));
}
Example 14: buildClassifier
import weka.core.Instances; // import the package/class the method depends on
@Override
public void buildClassifier(Instances data) throws Exception {
// Initialise training dataset
Attribute classAttribute = data.classAttribute();
classedData = new HashMap<>();
classedDataIndices = new HashMap<>();
for (int c = 0; c < data.numClasses(); c++) {
classedData.put(data.classAttribute().value(c), new ArrayList<SymbolicSequence>());
classedDataIndices.put(data.classAttribute().value(c), new ArrayList<Integer>());
}
train = new SymbolicSequence[data.numInstances()];
classMap = new String[train.length];
maxLength = 0;
for (int i = 0; i < train.length; i++) {
Instance sample = data.instance(i);
MonoDoubleItemSet[] sequence = new MonoDoubleItemSet[sample.numAttributes() - 1];
maxLength = Math.max(maxLength, sequence.length);
int shift = (sample.classIndex() == 0) ? 1 : 0;
for (int t = 0; t < sequence.length; t++) {
sequence[t] = new MonoDoubleItemSet(sample.value(t + shift));
}
train[i] = new SymbolicSequence(sequence);
String clas = sample.stringValue(classAttribute);
classMap[i] = clas;
classedData.get(clas).add(train[i]);
classedDataIndices.get(clas).add(i);
}
warpingMatrix = new double[maxLength][maxLength];
U = new double[maxLength];
L = new double[maxLength];
maxWindow = Math.round(1 * maxLength);
searchResults = new String[maxWindow+1];
nns = new int[maxWindow+1][train.length];
dist = new double[maxWindow+1][train.length];
// Start searching for the best window
searchBestWarpingWindow();
// Saving best windows found
System.out.println("Windows found=" + bestWarpingWindow + " Best Acc=" + (1-bestScore));
}
Example 15: orderByCompactClass
import weka.core.Instances; // import the package/class the method depends on
/**
 * Reorder the data by compactness of each class using Euclidean distance
 * @param data the dataset to reorder
 * @return a new Instances object with classes ordered by their compactness
 */
public static Instances orderByCompactClass(Instances data) {
Instances newData = new Instances(data, data.numInstances());
// get the number of class in the data
int nbClass = data.numClasses();
int[] instancePerClass = new int[nbClass];
int[] labels = new int[nbClass];
int[] classIndex = new int[nbClass];
double[] compactness = new double[nbClass];
// sort the data based on its class
data.sort(data.classAttribute());
int start = 0;
// get the number of instances per class in the data
for (int i = 0; i < nbClass; i++) {
instancePerClass[i] = data.attributeStats(data.classIndex()).nominalCounts[i];
labels[i] = i;
if (i > 0)
classIndex[i] = classIndex[i-1] + instancePerClass[i-1];
int end = start + instancePerClass[i];
int counter = 0;
double[][] dataPerClass = new double[instancePerClass[i]][data.numAttributes()-1];
for (int j = start; j < end; j++) {
dataPerClass[counter++] = data.instance(j).toDoubleArray();
}
double[] mean = arithmeticMean(dataPerClass);
double d = 0;
for (int j = 0; j < instancePerClass[i]; j++) {
double temp = euclideanDistance(mean, dataPerClass[j]);
temp *= temp;
temp -= (mean[0] - dataPerClass[j][0]) * (mean[0] - dataPerClass[j][0]);
d += temp;
}
compactness[i] = d / instancePerClass[i];
start = end;
}
QuickSort.sort(compactness, labels);
for (int i = nbClass-1; i >=0 ; i--) {
for (int j = 0; j < instancePerClass[labels[i]]; j++) {
newData.add(data.instance(classIndex[labels[i]] + j));
}
}
return newData;
}
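This snippet assumes project-local helpers arithmeticMean and euclideanDistance. A minimal sketch of what such helpers could look like, with signatures inferred from the usage above (treat these as assumptions, not the original implementations):

// Hypothetical helpers matching the usage in orderByCompactClass.
// Per-dimension arithmetic mean of a set of equal-length vectors.
public static double[] arithmeticMean(double[][] data) {
	double[] mean = new double[data[0].length];
	for (double[] row : data) {
		for (int d = 0; d < mean.length; d++) {
			mean[d] += row[d];
		}
	}
	for (int d = 0; d < mean.length; d++) {
		mean[d] /= data.length;
	}
	return mean;
}

// Euclidean distance between two vectors of the same length.
public static double euclideanDistance(double[] a, double[] b) {
	double sum = 0;
	for (int d = 0; d < a.length; d++) {
		double diff = a[d] - b[d];
		sum += diff * diff;
	}
	return Math.sqrt(sum);
}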