本文整理汇总了Java中weka.filters.unsupervised.attribute.Remove.setInvertSelection方法的典型用法代码示例。如果您正苦于以下问题:Java Remove.setInvertSelection方法的具体用法?Java Remove.setInvertSelection怎么用?Java Remove.setInvertSelection使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类weka.filters.unsupervised.attribute.Remove
的用法示例。
在下文中一共展示了Remove.setInvertSelection方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: removeClass
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Returns a copy of the supplied instances with the class attribute
 * removed, or the instances unchanged when no class index is set.
 *
 * @param inst the instances to process
 * @return the filtered instances, or null if filtering failed
 */
private Instances removeClass(Instances inst) {
  Instances result = null;
  try {
    if (inst.classIndex() >= 0) {
      Remove classRemover = new Remove();
      // Remove's index strings are 1-based, hence classIndex() + 1
      classRemover.setAttributeIndices("" + (inst.classIndex() + 1));
      classRemover.setInvertSelection(false);
      classRemover.setInputFormat(inst);
      result = Filter.useFilter(inst, classRemover);
    } else {
      // no class attribute set -- nothing to remove
      result = inst;
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
  return result;
}
示例2: removeIgnoreCols
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Removes the attributes currently selected in the ignore list from the
 * supplied instances.
 *
 * @param inst the instances to filter
 * @return the instances with the ignored columns removed, or null on failure
 */
private Instances removeIgnoreCols(Instances inst) {
  // When doing classes-to-clusters evaluation with the class ignored,
  // deselect the class attribute in the ignore list so it is not
  // removed here.
  if (m_ClassesToClustersBut.isSelected()) {
    int classIndex = m_ClassCombo.getSelectedIndex();
    if (m_ignoreKeyList.isSelectedIndex(classIndex)) {
      m_ignoreKeyList.removeSelectionInterval(classIndex, classIndex);
    }
  }
  int[] ignored = m_ignoreKeyList.getSelectedIndices();
  Instances result = null;
  try {
    Remove removeFilter = new Remove();
    removeFilter.setAttributeIndicesArray(ignored);
    removeFilter.setInvertSelection(false);
    removeFilter.setInputFormat(inst);
    result = Filter.useFilter(inst, removeFilter);
  } catch (Exception e) {
    e.printStackTrace();
  }
  return result;
}
示例3: setAttributesToIgnore
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Sets up a Remove filter to remove attributes that
 * are to be ignored by the clusterer. setHeader must
 * be called before this method.
 *
 * @param attsToIgnore any attributes to ignore during the scoring process
 * @throws Exception if the filter's input format cannot be determined
 */
public void setAttributesToIgnore(int[] attsToIgnore) throws Exception {
  Instances headerI = getHeader();
  m_ignoredAtts = new Remove();
  m_ignoredAtts.setAttributeIndicesArray(attsToIgnore);
  m_ignoredAtts.setInvertSelection(false);
  m_ignoredAtts.setInputFormat(headerI);
  // StringBuilder: local, single-threaded buffer, no need for the
  // synchronized StringBuffer
  StringBuilder temp = new StringBuilder();
  temp.append("Attributes ignored by clusterer:\n\n");
  for (int attIndex : attsToIgnore) {
    temp.append(headerI.attribute(attIndex).name()).append('\n');
  }
  temp.append("\n\n");
  m_ignoredString = temp.toString();
}
示例4: buildLinearModel
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Build a linear model for this node using those attributes specified in
 * indices.
 *
 * @param indices an array of attribute indices to include in the linear model
 * @throws Exception if something goes wrong
 */
private void buildLinearModel(int[] indices) throws Exception {
  // copy the training instances and remove all but the tested
  // attributes (invertSelection(true) makes the filter KEEP the
  // listed indices and drop everything else)
  Instances reducedInst = new Instances(m_instances);
  Remove attributeFilter = new Remove();
  attributeFilter.setInvertSelection(true);
  attributeFilter.setAttributeIndicesArray(indices);
  attributeFilter.setInputFormat(reducedInst);
  reducedInst = Filter.useFilter(reducedInst, attributeFilter);
  // build a linear regression for the training data using the
  // tested attributes
  LinearRegression temp = new LinearRegression();
  temp.buildClassifier(reducedInst);
  // Map the regression coefficients (indexed by position in the
  // reduced attribute set) back onto the full attribute space of
  // m_instances.  The last element of lmCoeffs is handled separately
  // below (passed as the model's constant term), so the loop stops at
  // length - 1; the class attribute itself gets no coefficient.
  double[] lmCoeffs = temp.coefficients();
  double[] coeffs = new double[m_instances.numAttributes()];
  for (int i = 0; i < lmCoeffs.length - 1; i++) {
    if (indices[i] != m_classIndex) {
      coeffs[indices[i]] = lmCoeffs[i];
    }
  }
  m_nodeModel = new PreConstructedLinearModel(coeffs,
    lmCoeffs[lmCoeffs.length - 1]);
  m_nodeModel.buildClassifier(m_instances);
}
示例5: buildClusterer
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Builds the clusters.  When a class attribute is set, it is stripped
 * from the training data first so that class-based evaluation remains
 * possible afterwards.
 */
private void buildClusterer() throws Exception {
  int classIndex = m_trainingSet.classIndex();
  if (classIndex >= 0) {
    // class based evaluation if class attribute is set:
    // train on a copy with the (1-based) class column removed
    Remove classFilter = new Remove();
    classFilter.setAttributeIndices("" + (classIndex + 1));
    classFilter.setInvertSelection(false);
    classFilter.setInputFormat(m_trainingSet);
    Instances clusterTrain = Filter.useFilter(m_trainingSet, classFilter);
    m_Clusterer.buildClusterer(clusterTrain);
  } else {
    m_Clusterer.buildClusterer(m_trainingSet);
  }
}
示例6: applyRemoveFilter
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Remove desired attribute from Weka data
 * @param data weka data from which attribute is to be removed
 * @param attriId ID in Weka of attribute to be removed
 * @return data after desired attribute is removed
 * @throws Exception if the filter cannot be configured or applied
 */
private static Instances applyRemoveFilter(Instances data, String attriId) throws Exception {
  Remove keepAttributes = new Remove();
  // invertSelection(true): the listed indices are KEPT, everything
  // else is removed -- so despite the method name this keeps attriId,
  // (numAttributes()-1) and "last".
  // NOTE(review): Remove index strings are 1-based, so
  // (numAttributes()-1) addresses the second-to-last attribute here;
  // confirm that is intended and not an off-by-one from a 0-based
  // index calculation.
  keepAttributes.setAttributeIndices(attriId+ ","+ Integer.toString(data.numAttributes() - 1) + ",last");
  keepAttributes.setInvertSelection(true);
  keepAttributes.setInputFormat(data);
  data = Filter.useFilter(data, keepAttributes);
  System.out.println("RemoveFilter applied.");
  return data;
}
示例7: buildLinearModel
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Build a linear model for this node using those attributes
 * specified in indices.
 *
 * @param indices an array of attribute indices to include in the linear
 * model
 * @throws Exception if something goes wrong
 */
private void buildLinearModel(int [] indices) throws Exception {
  // copy the training instances and remove all but the tested
  // attributes (invertSelection(true) makes the filter KEEP the
  // listed indices and drop everything else)
  Instances reducedInst = new Instances(m_instances);
  Remove attributeFilter = new Remove();
  attributeFilter.setInvertSelection(true);
  attributeFilter.setAttributeIndicesArray(indices);
  attributeFilter.setInputFormat(reducedInst);
  reducedInst = Filter.useFilter(reducedInst, attributeFilter);
  // build a linear regression for the training data using the
  // tested attributes
  LinearRegression temp = new LinearRegression();
  temp.buildClassifier(reducedInst);
  // Map the regression coefficients (indexed by position in the
  // reduced attribute set) back onto the full attribute space of
  // m_instances.  The last element of lmCoeffs is handled separately
  // below (passed as the model's constant term), so the loop stops at
  // length - 1; the class attribute itself gets no coefficient.
  double [] lmCoeffs = temp.coefficients();
  double [] coeffs = new double [m_instances.numAttributes()];
  for (int i = 0; i < lmCoeffs.length - 1; i++) {
    if (indices[i] != m_classIndex) {
      coeffs[indices[i]] = lmCoeffs[i];
    }
  }
  m_nodeModel = new PreConstructedLinearModel(coeffs, lmCoeffs[lmCoeffs.length - 1]);
  m_nodeModel.buildClassifier(m_instances);
}
示例8: buildClusterer
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Builds the clusters.  When a class attribute is set it is removed
 * from the training data first so that class-based evaluation remains
 * possible afterwards.
 */
private void buildClusterer() throws Exception {
  if(m_trainingSet.classIndex() < 0)
    m_Clusterer.buildClusterer(m_trainingSet);
  else{ //class based evaluation if class attribute is set
    // Remove's index strings are 1-based, hence classIndex() + 1
    Remove removeClass = new Remove();
    removeClass.setAttributeIndices(""+(m_trainingSet.classIndex()+1));
    removeClass.setInvertSelection(false);
    removeClass.setInputFormat(m_trainingSet);
    Instances clusterTrain = Filter.useFilter(m_trainingSet, removeClass);
    m_Clusterer.buildClusterer(clusterTrain);
  }
}
示例9: removeMissingColumns
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Removes columns that are all missing from the data
 *
 * @param instances the instances
 * @return a new set of instances with all missing columns removed
 * @throws Exception if something goes wrong
 */
protected Instances removeMissingColumns(Instances instances)
  throws Exception {
  int numInstances = instances.numInstances();
  // comma-separated, 1-based indices of the all-missing columns
  StringBuilder deleteString = new StringBuilder();
  int removeCount = 0;
  int maxCount = 0;
  for (int i = 0; i < instances.numAttributes(); i++) {
    AttributeStats as = instances.attributeStats(i);
    if (m_upperBoundMinSupport == 1.0 && maxCount != numInstances) {
      // see if we can decrease this by looking for the most frequent value
      int[] counts = as.nominalCounts;
      // nominalCounts is null for non-nominal attributes; guard
      // against an NPE rather than assuming all columns are nominal
      if (counts != null && counts[Utils.maxIndex(counts)] > maxCount) {
        maxCount = counts[Utils.maxIndex(counts)];
      }
    }
    if (as.missingCount == numInstances) {
      if (deleteString.length() > 0) {
        deleteString.append(',');
      }
      deleteString.append(i + 1);
      removeCount++;
    }
  }
  if (m_verbose) {
    System.err.println("Removed : " + removeCount
      + " columns with all missing " + "values.");
  }
  if (m_upperBoundMinSupport == 1.0 && maxCount != numInstances) {
    // tighten the upper bound to the support of the most frequent value
    m_upperBoundMinSupport = (double) maxCount / (double) numInstances;
    if (m_verbose) {
      System.err.println("Setting upper bound min support to : "
        + m_upperBoundMinSupport);
    }
  }
  if (deleteString.length() > 0) {
    Remove af = new Remove();
    af.setAttributeIndices(deleteString.toString());
    af.setInvertSelection(false);
    af.setInputFormat(instances);
    return Filter.useFilter(instances, af);
  }
  return instances;
}
示例10: removeMissingColumns
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Removes columns that are all missing from the data
 * @param instances the instances
 * @return a new set of instances with all missing columns removed
 * @throws Exception if something goes wrong
 */
protected Instances removeMissingColumns(Instances instances)
  throws Exception {
  int numInstances = instances.numInstances();
  // comma-separated, 1-based indices of the all-missing columns
  StringBuffer deleteString = new StringBuffer();
  int removeCount = 0;
  boolean first = true;
  // largest observed count of any single attribute value
  int maxCount = 0;
  for (int i=0;i<instances.numAttributes();i++) {
    AttributeStats as = instances.attributeStats(i);
    if (m_upperBoundMinSupport == 1.0 && maxCount != numInstances) {
      // see if we can decrease this by looking for the most frequent value
      // NOTE(review): nominalCounts is null for non-nominal attributes;
      // this assumes all columns are nominal -- confirm for this caller
      int [] counts = as.nominalCounts;
      if (counts[Utils.maxIndex(counts)] > maxCount) {
        maxCount = counts[Utils.maxIndex(counts)];
      }
    }
    if (as.missingCount == numInstances) {
      if (first) {
        deleteString.append((i+1));
        first = false;
      } else {
        deleteString.append(","+(i+1));
      }
      removeCount++;
    }
  }
  if (m_verbose) {
    System.err.println("Removed : "+removeCount+" columns with all missing "
      +"values.");
  }
  if (m_upperBoundMinSupport == 1.0 && maxCount != numInstances) {
    // tighten the upper bound to the support of the most frequent value
    m_upperBoundMinSupport = (double)maxCount / (double)numInstances;
    if (m_verbose) {
      System.err.println("Setting upper bound min support to : "
        +m_upperBoundMinSupport);
    }
  }
  if (deleteString.toString().length() > 0) {
    Remove af = new Remove();
    af.setAttributeIndices(deleteString.toString());
    af.setInvertSelection(false);
    af.setInputFormat(instances);
    Instances newInst = Filter.useFilter(instances, af);
    return newInst;
  }
  return instances;
}
示例11: buildClassifier
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Builds one base classifier per class attribute.  For each class
 * attribute i, all other class attributes are filtered out, the data
 * is optionally downsampled, and a copy of m_Classifier is trained.
 *
 * @param data the training instances (classIndex() gives the number of
 *          class attributes)
 * @throws Exception if a filter or base classifier fails
 */
@Override
public void buildClassifier(Instances data) throws Exception {
  testCapabilities(data);
  int c = data.classIndex();
  if(getDebug()) System.out.print("-: Creating "+c+" models ("+m_Classifier.getClass().getName()+"): ");
  m_MultiClassifiers = AbstractClassifier.makeCopies(m_Classifier,c);
  Instances sub_data = null;
  for(int i = 0; i < c; i++) {
    // Indices of every class attribute except 'i'.  (The original code
    // allocated a fresh c x (c-1) matrix on every iteration but only
    // ever used row i; a single 1-D array avoids that O(c^2)
    // allocation per class.)
    int[] removeIndices = new int[c - 1];
    for(int j = 0, k = 0; j < c; j++) {
      if(j != i) {
        removeIndices[k++] = j;
      }
    }
    //Select only class attribute 'i'
    Remove filterRemove = new Remove();
    filterRemove.setAttributeIndicesArray(removeIndices);
    filterRemove.setInputFormat(data);
    filterRemove.setInvertSelection(true);
    sub_data = Filter.useFilter(data, filterRemove);
    sub_data.setClassIndex(0);
    /* BEGIN downsample for this link */
    sub_data.randomize(m_Random);
    // flag up to numToRemove negative (class value <= 0) instances as
    // class-missing, then delete them in one pass below
    int numToRemove = sub_data.numInstances() - (int)Math.round(sub_data.numInstances() * m_DownSampleRatio);
    for(int m = 0, removed = 0; m < sub_data.numInstances(); m++) {
      if (sub_data.instance(m).classValue() <= 0.0) {
        sub_data.instance(m).setClassMissing();
        if (++removed >= numToRemove)
          break;
      }
    }
    sub_data.deleteWithMissingClass();
    /* END downsample for this link */
    //Build the classifier for that class
    m_MultiClassifiers[i].buildClassifier(sub_data);
    if(getDebug()) System.out.print(" " + (i+1));
  }
  if(getDebug()) System.out.println(" :-");
  // template keeps the header of the last per-class dataset
  m_InstancesTemplate = new Instances(sub_data, 0);
}
示例12: removeMissingColumns
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Removes columns that are all missing from the data
 *
 * @param instances the instances
 * @return a new set of instances with all missing columns removed
 * @throws Exception if something goes wrong
 */
protected Instances removeMissingColumns(Instances instances)
  throws Exception {
  int numInstances = instances.numInstances();
  // comma-separated, 1-based indices of the all-missing columns
  StringBuffer deleteString = new StringBuffer();
  int removeCount = 0;
  boolean first = true;
  // largest observed count of any single attribute value
  int maxCount = 0;
  for (int i = 0; i < instances.numAttributes(); i++) {
    AttributeStats as = instances.attributeStats(i);
    if (m_upperBoundMinSupport == 1.0 && maxCount != numInstances) {
      // see if we can decrease this by looking for the most frequent value
      // NOTE(review): nominalCounts is null for non-nominal attributes;
      // this assumes all columns are nominal -- confirm for this caller
      int[] counts = as.nominalCounts;
      if (counts[Utils.maxIndex(counts)] > maxCount) {
        maxCount = counts[Utils.maxIndex(counts)];
      }
    }
    if (as.missingCount == numInstances) {
      if (first) {
        deleteString.append((i + 1));
        first = false;
      } else {
        deleteString.append("," + (i + 1));
      }
      removeCount++;
    }
  }
  if (m_verbose) {
    System.err.println("Removed : " + removeCount
      + " columns with all missing " + "values.");
  }
  if (m_upperBoundMinSupport == 1.0 && maxCount != numInstances) {
    // tighten the upper bound to the support of the most frequent value
    m_upperBoundMinSupport = (double) maxCount / (double) numInstances;
    if (m_verbose) {
      System.err.println("Setting upper bound min support to : "
        + m_upperBoundMinSupport);
    }
  }
  if (deleteString.toString().length() > 0) {
    Remove af = new Remove();
    af.setAttributeIndices(deleteString.toString());
    af.setInvertSelection(false);
    af.setInputFormat(instances);
    Instances newInst = Filter.useFilter(instances, af);
    return newInst;
  }
  return instances;
}
示例13: evaluateSubset
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Evaluates a subset of attributes
 *
 * @param subset a bitset representing the attribute subset to be
 * evaluated
 * @return the negated error rate (negated so that search methods,
 * which maximize, prefer lower error)
 * @throws Exception if the subset could not be evaluated
 */
public double evaluateSubset (BitSet subset)
  throws Exception {
  int i,j;
  double errorRate = 0;
  int numAttributes = 0;
  Instances trainCopy=null;
  Instances testCopy=null;
  // invertSelection(true): the filter KEEPS the listed attributes
  Remove delTransform = new Remove();
  delTransform.setInvertSelection(true);
  // copy the training instances
  trainCopy = new Instances(m_trainingInstances);
  if (!m_useTraining) {
    if (m_holdOutInstances == null) {
      throw new Exception("Must specify a set of hold out/test instances "
        +"with -H");
    }
    // copy the test instances
    testCopy = new Instances(m_holdOutInstances);
  }
  // count attributes set in the BitSet
  for (i = 0; i < m_numAttribs; i++) {
    if (subset.get(i)) {
      numAttributes++;
    }
  }
  // set up an array of attribute indexes for the filter (+1 for the class)
  int[] featArray = new int[numAttributes + 1];
  for (i = 0, j = 0; i < m_numAttribs; i++) {
    if (subset.get(i)) {
      featArray[j++] = i;
    }
  }
  // the class attribute is always kept
  featArray[j] = m_classIndex;
  delTransform.setAttributeIndicesArray(featArray);
  delTransform.setInputFormat(trainCopy);
  trainCopy = Filter.useFilter(trainCopy, delTransform);
  if (!m_useTraining) {
    testCopy = Filter.useFilter(testCopy, delTransform);
  }
  // build the classifier
  m_Classifier.buildClassifier(trainCopy);
  m_Evaluation = new Evaluation(trainCopy);
  // evaluate on the hold-out set if one is in use, otherwise on the
  // training data itself
  if (!m_useTraining) {
    m_Evaluation.evaluateModel(m_Classifier, testCopy);
  } else {
    m_Evaluation.evaluateModel(m_Classifier, trainCopy);
  }
  // error rate for nominal classes, mean absolute error for numeric
  if (m_trainingInstances.classAttribute().isNominal()) {
    errorRate = m_Evaluation.errorRate();
  } else {
    errorRate = m_Evaluation.meanAbsoluteError();
  }
  m_Evaluation = null;
  // return the negative of the error rate as search methods need to
  // maximize something
  return -errorRate;
}
示例14: evaluateSubset
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Evaluates a subset of attributes
 *
 * @param subset a bitset representing the attribute subset to be
 * evaluated
 * @return for numeric classes the negated mean cross-validation error
 * rate; for nominal classes the accuracy (1 - error rate), so that
 * search methods can maximize the result
 * @throws Exception if the subset could not be evaluated
 */
public double evaluateSubset (BitSet subset)
  throws Exception {
  double errorRate = 0;
  // error rate of each repetition of cross-validation (max 5)
  double[] repError = new double[5];
  int numAttributes = 0;
  int i, j;
  Random Rnd = new Random(m_seed);
  // invertSelection(true): the filter KEEPS the listed attributes
  Remove delTransform = new Remove();
  delTransform.setInvertSelection(true);
  // copy the instances
  Instances trainCopy = new Instances(m_trainInstances);
  // count attributes set in the BitSet
  for (i = 0; i < m_numAttribs; i++) {
    if (subset.get(i)) {
      numAttributes++;
    }
  }
  // set up an array of attribute indexes for the filter (+1 for the class)
  int[] featArray = new int[numAttributes + 1];
  for (i = 0, j = 0; i < m_numAttribs; i++) {
    if (subset.get(i)) {
      featArray[j++] = i;
    }
  }
  // the class attribute is always kept
  featArray[j] = m_classIndex;
  delTransform.setAttributeIndicesArray(featArray);
  delTransform.setInputFormat(trainCopy);
  trainCopy = Filter.useFilter(trainCopy, delTransform);
  // maximum of 5 repetitions of cross-validation
  for (i = 0; i < 5; i++) {
    m_Evaluation = new Evaluation(trainCopy);
    m_Evaluation.crossValidateModel(m_BaseClassifier, trainCopy, m_folds, Rnd);
    repError[i] = m_Evaluation.errorRate();
    // check on the standard deviation; stop early once stable
    if (!repeat(repError, i + 1)) {
      i++;
      break;
    }
  }
  // average the error over the repetitions actually performed
  for (j = 0; j < i; j++) {
    errorRate += repError[j];
  }
  errorRate /= (double)i;
  m_Evaluation = null;
  return m_trainInstances.classAttribute().isNumeric() ? -errorRate : 1.0 - errorRate;
}
示例15: getResult
import weka.filters.unsupervised.attribute.Remove; //导入方法依赖的package包/类
/**
 * Gets the results for the supplied train and test datasets.
 *
 * @param train the training Instances.
 * @param test the testing Instances.
 * @return the results stored in an array. The objects stored in
 * the array may be Strings, Doubles, or null (for the missing value).
 * @exception Exception if a problem occurs while getting the results
 */
public Object [] getResult(Instances train, Instances test)
  throws Exception {
  if (m_clusterer == null) {
    throw new Exception("No clusterer has been specified");
  }
  // number of extra result slots for optional additional measures
  int addm = (m_additionalMeasures != null)
    ? m_additionalMeasures.length
    : 0;
  int overall_length = RESULT_SIZE+addm;
  if (m_removeClassColumn && train.classIndex() != -1) {
    // remove the class column from the training and testing data
    Remove r = new Remove();
    r.setAttributeIndicesArray(new int [] {train.classIndex()});
    r.setInvertSelection(false);
    r.setInputFormat(train);
    train = Filter.useFilter(train, r);
    test = Filter.useFilter(test, r);
  }
  // clear any class index so the clusterer treats all columns as input
  train.setClassIndex(-1);
  test.setClassIndex(-1);
  ClusterEvaluation eval = new ClusterEvaluation();
  Object [] result = new Object[overall_length];
  // time the training phase
  long trainTimeStart = System.currentTimeMillis();
  m_clusterer.buildClusterer(train);
  double numClusters = m_clusterer.numberOfClusters();
  eval.setClusterer(m_clusterer);
  long trainTimeElapsed = System.currentTimeMillis() - trainTimeStart;
  // time the evaluation on the test data
  long testTimeStart = System.currentTimeMillis();
  eval.evaluateClusterer(test);
  long testTimeElapsed = System.currentTimeMillis() - testTimeStart;
  // m_result = eval.toSummaryString();
  // The results stored are all per instance -- can be multiplied by the
  // number of instances to get absolute numbers
  // Fixed slots: train size, test size, log likelihood, cluster count,
  // then training/testing times in seconds
  int current = 0;
  result[current++] = new Double(train.numInstances());
  result[current++] = new Double(test.numInstances());
  result[current++] = new Double(eval.getLogLikelihood());
  result[current++] = new Double(numClusters);
  // Timing stats
  result[current++] = new Double(trainTimeElapsed / 1000.0);
  result[current++] = new Double(testTimeElapsed / 1000.0);
  // optional additional measures: value when the clusterer produces
  // the measure, null placeholder otherwise
  for (int i=0;i<addm;i++) {
    if (m_doesProduce[i]) {
      try {
        double dv = ((AdditionalMeasureProducer)m_clusterer).
          getMeasure(m_additionalMeasures[i]);
        Double value = new Double(dv);
        result[current++] = value;
      } catch (Exception ex) {
        System.err.println(ex);
      }
    } else {
      result[current++] = null;
    }
  }
  // sanity check that every slot was filled
  if (current != overall_length) {
    throw new Error("Results didn't fit RESULT_SIZE");
  }
  return result;
}