本文整理汇总了Java中weka.clusterers.ClusterEvaluation.setClusterer方法的典型用法代码示例。如果您正苦于以下问题:Java ClusterEvaluation.setClusterer方法的具体用法?Java ClusterEvaluation.setClusterer怎么用?Java ClusterEvaluation.setClusterer使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类weka.clusterers.ClusterEvaluation
的用法示例。
在下文中一共展示了ClusterEvaluation.setClusterer方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: guessNumClusters
import weka.clusterers.ClusterEvaluation; //导入方法依赖的package包/类
/**
 * Searches the cluster-count range [start, end] for the number of clusters
 * that minimises the Bayesian Information Criterion (BIC).
 *
 * @param clusterer the EM clusterer, rebuilt for each candidate count
 * @param instances the data to cluster
 * @param start     smallest number of clusters to try (inclusive)
 * @param end       largest number of clusters to try (inclusive)
 * @return the candidate cluster count with the lowest BIC
 * @throws Exception if building or evaluating the clusterer fails
 */
private int guessNumClusters(EM clusterer, Instances instances, int start, int end) throws Exception {
    ClusterEvaluation evaluation = new ClusterEvaluation();
    int chosen = start;
    double lowestBic = Double.POSITIVE_INFINITY;
    for (int candidate = start; candidate <= end; candidate++) {
        // Rebuild the model with the candidate count and score it via BIC.
        clusterer.setNumClusters(candidate);
        clusterer.buildClusterer(instances);
        evaluation.setClusterer(clusterer);
        evaluation.evaluateClusterer(instances);
        double score = bic(evaluation.getLogLikelihood(), candidate, instances.numInstances());
        logger.trace("numCluster " + candidate + " -> BIC: " + score);
        if (score < lowestBic) {
            lowestBic = score;
            chosen = candidate;
            logger.trace("bestNum: " + chosen);
        }
    }
    return chosen;
}
示例2: generateClassToCluster
import weka.clusterers.ClusterEvaluation; //导入方法依赖的package包/类
/**
 * Runs a classes-to-clusters evaluation: the class attribute is removed
 * before training an EM clusterer, then the clusterer is evaluated against
 * the original data so clusters can be mapped back onto class labels.
 */
public void generateClassToCluster(){
    // Drop the class attribute so training stays unsupervised
    // (Weka's Remove filter uses 1-based attribute indices).
    Remove filter = new Remove();
    filter.setAttributeIndices("" + (weather.classIndex() + 1));
    try {
        filter.setInputFormat(weather);
        Instances dataClusterer = Filter.useFilter(weather, filter);
        clusterer = new EM();
        clusterer.buildClusterer(dataClusterer);
        // Evaluate against the ORIGINAL data: the class attribute must be
        // present for the classes-to-clusters assignment.
        ClusterEvaluation eval = new ClusterEvaluation();
        eval.setClusterer(clusterer);
        eval.evaluateClusterer(weather);
        System.out.println(eval.clusterResultsToString());
    } catch (Exception e) {
        // BUG FIX: the exception was silently swallowed, hiding any filter
        // or clusterer failure. Surface it instead.
        e.printStackTrace();
    }
}
示例3: getResult
import weka.clusterers.ClusterEvaluation; //导入方法依赖的package包/类
/**
 * Gets the results for the supplied train and test datasets.
 *
 * @param train the training Instances.
 * @param test the testing Instances.
 * @return the results stored in an array. The objects stored in
 *         the array may be Strings, Doubles, or null (for the missing value).
 * @exception Exception if a problem occurs while getting the results
 */
public Object [] getResult(Instances train, Instances test)
  throws Exception {
  if (m_clusterer == null) {
    throw new Exception("No clusterer has been specified");
  }
  int addm = (m_additionalMeasures != null)
    ? m_additionalMeasures.length
    : 0;
  int overall_length = RESULT_SIZE + addm;
  if (m_removeClassColumn && train.classIndex() != -1) {
    // remove the class column from the training and testing data
    Remove r = new Remove();
    r.setAttributeIndicesArray(new int [] {train.classIndex()});
    r.setInvertSelection(false);
    r.setInputFormat(train);
    train = Filter.useFilter(train, r);
    test = Filter.useFilter(test, r);
  }
  // Clustering is unsupervised: make sure no class index remains set.
  train.setClassIndex(-1);
  test.setClassIndex(-1);
  ClusterEvaluation eval = new ClusterEvaluation();
  Object [] result = new Object[overall_length];
  long trainTimeStart = System.currentTimeMillis();
  m_clusterer.buildClusterer(train);
  // BUG FIX: stop the training timer immediately after buildClusterer();
  // previously the elapsed time also included the numberOfClusters() and
  // setClusterer() calls below, inflating the reported training time.
  long trainTimeElapsed = System.currentTimeMillis() - trainTimeStart;
  double numClusters = m_clusterer.numberOfClusters();
  eval.setClusterer(m_clusterer);
  long testTimeStart = System.currentTimeMillis();
  eval.evaluateClusterer(test);
  long testTimeElapsed = System.currentTimeMillis() - testTimeStart;
  // The results stored are all per instance -- can be multiplied by the
  // number of instances to get absolute numbers
  int current = 0;
  result[current++] = Double.valueOf(train.numInstances());
  result[current++] = Double.valueOf(test.numInstances());
  result[current++] = Double.valueOf(eval.getLogLikelihood());
  result[current++] = Double.valueOf(numClusters);
  // Timing stats, converted from milliseconds to seconds.
  result[current++] = Double.valueOf(trainTimeElapsed / 1000.0);
  result[current++] = Double.valueOf(testTimeElapsed / 1000.0);
  for (int i = 0; i < addm; i++) {
    if (m_doesProduce[i]) {
      try {
        double dv = ((AdditionalMeasureProducer) m_clusterer)
          .getMeasure(m_additionalMeasures[i]);
        result[current++] = Double.valueOf(dv);
      } catch (Exception ex) {
        System.err.println(ex);
      }
    } else {
      // Measure not produced by this clusterer -> missing value.
      result[current++] = null;
    }
  }
  if (current != overall_length) {
    throw new Error("Results didn't fit RESULT_SIZE");
  }
  return result;
}
示例4: start
import weka.clusterers.ClusterEvaluation; //导入方法依赖的package包/类
/**
 * Clusters the minority-class examples with DBSCAN and reports how both
 * minority- and majority-class examples distribute over the resulting
 * clusters.
 *
 * @throws Exception if the data or clusterer is missing, or if any Weka
 *                   filtering/clustering step fails
 */
@Override
public void start() throws Exception {
    if (clusterer == null || data == null) {
        throw new Exception("Please provide data examples and clusterer.");
    }
    // Prepare the data -> set the decision (class) attribute if absent.
    if (data.classIndex() == -1) {
        data.setClassIndex(data.numAttributes() - 1);
    }
    // Split the examples into the minority and majority classes.
    Instances minorityInstances = new Instances(data);
    Instances majorityInstances = new Instances(data);
    separateDecisionClasses(data, minorityInstances, majorityInstances);
    // Show example statistics.
    showInitialDataStatitistics(minorityInstances, majorityInstances);
    // Remove the decision attribute -> unsupervised learning
    // (Remove uses 1-based attribute indices).
    weka.filters.unsupervised.attribute.Remove filter = new weka.filters.unsupervised.attribute.Remove();
    filter.setAttributeIndices("" + (minorityInstances.classIndex() + 1));
    filter.setInputFormat(minorityInstances);
    Instances minorityInstancesNoClass = Filter.useFilter(minorityInstances, filter);
    Instances majorityInstancesNoClass = Filter.useFilter(majorityInstances, filter);
    // Build the clusters on the minority class only.
    DBSCAN dbScan = new DBSCAN();
    dbScan.setDatabase_Type("weka.clusterers.forOPTICSAndDBScan.Databases.SequentialDatabase");
    dbScan.setDatabase_distanceType("weka.clusterers.forOPTICSAndDBScan.DataObjects.EuclideanDataObject");
    dbScan.setEpsilon(0.5);
    dbScan.setMinPoints(4);
    clusterer = dbScan;
    clusterer.buildClusterer(minorityInstancesNoClass);
    // Show the cluster-analysis results.
    ClusterEvaluation evaluation = new ClusterEvaluation();
    evaluation.setClusterer(clusterer);
    evaluation.evaluateClusterer(minorityInstancesNoClass);
    System.out.println(evaluation.clusterResultsToString());
    // Assign examples of both classes to the clusters (does not modify them).
    Map<Integer, Integer> majorityHistogram = createHistogram(clusterer, majorityInstancesNoClass);
    Map<Integer, Integer> minorityHistogram = createHistogram(clusterer, minorityInstancesNoClass);
    // Show statistics.
    showHistogram(majorityHistogram, "Majority");
    showHistogram(minorityHistogram, "Minority");
    showCombinedHistograms(majorityHistogram, minorityHistogram);
    // FIX: removed the unused locals `dataNoClass` (dead filtering work on the
    // full dataset) and `clustersAssigmentsMap` (allocated but never read).
}
示例5: start
import weka.clusterers.ClusterEvaluation; //导入方法依赖的package包/类
/**
 * Clusters the minority-class examples, reports cluster statistics for both
 * classes, then trains and cross-validates one classifier per cluster,
 * optionally re-balancing each cluster with SMOTE first.
 *
 * NOTE(review): when auto-parametrization is enabled, the options string
 * from getParametersForKMeans is applied to `clusterer` — presumably the
 * field holds a SimpleKMeans-compatible clusterer; confirm its declared type.
 *
 * @throws Exception if the data or clusterer is missing, or if any Weka
 *                   filtering/clustering/classification step fails
 */
@Override
public void start() throws Exception {
if (clusterer == null || data == null) {
throw new Exception("Please provide data examples and clusterer.");
}
// Prepare the data -> set the decision (class) attribute if absent.
if (data.classIndex() == -1) {
data.setClassIndex(data.numAttributes() - 1);
}
// Split the examples into two classes: minority and majority.
Instances minorityInstances = new Instances(data);
Instances majorityInstances = new Instances(data);
separateDecisionClasses(data, minorityInstances, majorityInstances);
// Show example statistics.
showInitialDataStatitistics(data, minorityInstances, majorityInstances);
// Remove the decision attribute -> unsupervised learning
// (Remove uses 1-based attribute indices).
weka.filters.unsupervised.attribute.Remove filter = new weka.filters.unsupervised.attribute.Remove();
filter.setAttributeIndices("" + (minorityInstances.classIndex() + 1));
filter.setInputFormat(minorityInstances);
Instances minorityInstancesNoClass = Filter.useFilter(minorityInstances, filter);
Instances majorityInstancesNoClass = Filter.useFilter(majorityInstances, filter);
Instances dataNoClass = Filter.useFilter(data, filter);
// Build the clusters (only on the minority class).
String options;
if (autoParametrizationEnable) {
options = getParametersForKMeans(minorityInstancesNoClass, filename);
clusterer.setOptions(weka.core.Utils.splitOptions(options));
}
clusterer.buildClusterer(minorityInstancesNoClass);
// Show the cluster-analysis results.
ClusterEvaluation evaluation = new ClusterEvaluation();
evaluation.setClusterer(clusterer);
evaluation.evaluateClusterer(minorityInstancesNoClass);
System.out.println(evaluation.clusterResultsToString());
// Assign majority-class examples to the clusters (does not modify the clusters).
Map<Integer, Integer> majorityHistogram = createHistogram(clusterer, majorityInstancesNoClass);
Map<Integer, Integer> minorityHistogram = createHistogram(clusterer, minorityInstancesNoClass);
// Show statistics.
showHistogram(majorityHistogram, "Majority");
showHistogram(minorityHistogram, "Minority");
showCombinedHistograms(majorityHistogram, minorityHistogram);
// Split the labelled examples according to the computed clusters.
Map<Integer, Instances> clustersAssignmentsMap = createClusterAssignmentsMap(data, dataNoClass, clusterer);
showClusterAssignmentsMap(clustersAssignmentsMap);
// SMOTE + classification: cross-validate one classifier per cluster.
System.out.println("Classifiers");
for (Map.Entry<Integer, Instances> entry : clustersAssignmentsMap.entrySet()) {
FilteredClassifier fc = new FilteredClassifier();
if (smoteEnable) {
System.out.println("SMOTE filtering");
// Oversample the minority class within this cluster before training.
SMOTE smote = new SMOTE();
smote.setOptions(weka.core.Utils.splitOptions("-C 0 -K 5 -P " + getSMOTEPercentage(entry.getValue()) + " -S 1"));
smote.setInputFormat(entry.getValue());
fc.setFilter(smote);
}
fc.setClassifier(classifier);
// 10-fold cross-validation with a fixed seed for reproducibility.
Evaluation eval = new Evaluation(entry.getValue());
eval.crossValidateModel(fc, entry.getValue(), 10, new Random(1));
System.out.println(eval.toSummaryString("\nResults\n======\n", false));
System.out.println(eval.toMatrixString());
}
}
示例6: getParametersForKMeans
import weka.clusterers.ClusterEvaluation; //导入方法依赖的package包/类
/**
 * Runs SimpleKMeans for k = 1..20 on the given instances and prints, for
 * each k, the average per-cluster sum of attribute standard deviations
 * (a rough spread measure that can guide the choice of k).
 *
 * Note: this method only prints diagnostics; it always returns an empty
 * options string.
 *
 * @param instances the data to cluster
 * @param name      label printed above the per-k results
 * @return an empty string (no options are chosen automatically)
 */
public static String getParametersForKMeans(Instances instances, String name) {
    SimpleKMeans kMeans = new SimpleKMeans();
    String options = "-init 0 -max-candidates 100 -periodic-pruning 10000 -min-density 2.0 -t1 -1.25 -t2 -1.0 -V -N 2 -A \"weka.core.EuclideanDistance -R first-last\" -I 500 -num-slots 1 -S 10";
    try {
        kMeans.setOptions(weka.core.Utils.splitOptions(options));
        final int maxClusters = 20;
        double[] avgSpread = new double[maxClusters];
        for (int clusterCount = 1; clusterCount <= maxClusters; clusterCount++) {
            kMeans.setNumClusters(clusterCount);
            kMeans.buildClusterer(instances);
            int numAttributes = kMeans.getClusterCentroids().get(0).numAttributes();
            double[][] centres = new double[clusterCount][numAttributes];
            double[][] spreads = new double[clusterCount][kMeans.getClusterStandardDevs().get(0).numAttributes()];
            for (int cluster = 0; cluster < clusterCount; cluster++) {
                for (int att = 0; att < numAttributes; att++) {
                    double centre = kMeans.getClusterCentroids().get(cluster).value(att);
                    double dev = kMeans.getClusterStandardDevs().get(cluster).value(att);
                    // Treat missing (NaN) centroid/deviation values as zero.
                    if (Double.isNaN(centre) || Double.isNaN(dev)) {
                        centres[cluster][att] = 0.0;
                        spreads[cluster][att] = 0.0;
                    } else {
                        centres[cluster][att] = centre;
                        spreads[cluster][att] = dev;
                    }
                }
            }
            // Average, over clusters, of the summed attribute deviations.
            double totalSpread = 0;
            for (int cluster = 0; cluster < centres.length; cluster++) {
                double rowSum = 0;
                for (int att = 0; att < centres[0].length; att++) {
                    rowSum += spreads[cluster][att];
                }
                totalSpread += rowSum;
            }
            avgSpread[clusterCount - 1] = totalSpread / centres.length;
            // Evaluate the clustering (diagnostics only; output suppressed).
            ClusterEvaluation evaluation = new ClusterEvaluation();
            evaluation.setClusterer(kMeans);
            evaluation.evaluateClusterer(instances);
        }
        System.out.println(name);
        for (int index = 0; index < maxClusters; index++) {
            System.out.println(String.format("%.3f", avgSpread[index]));
        }
        System.out.println();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return "";
}
示例7: main
import weka.clusterers.ClusterEvaluation; //导入方法依赖的package包/类
/**
 * Demo: trains a width-3 SelfOrganizingMap on the fertility dataset (with
 * attribute 9 removed) and prints the cluster-evaluation results.
 */
public static void main(String[] args) {
    try {
        // Load the data and drop attribute 9 before clustering.
        ConverterUtils.DataSource source = new ConverterUtils.DataSource("fertility_Diagnosis.arff");
        Instances instances = source.getDataSet();
        instances.deleteAttributeAt(9);
        // Configure and train the self-organizing map.
        SelfOrganizingMap som = new SelfOrganizingMap();
        som.setConvergenceEpochs(1000);
        som.setLearningRate(0.3);
        som.setOrderingEpochs(50);
        som.setWidth(3);
        som.setCalcStats(true);
        som.buildClusterer(instances);
        // Evaluate the trained map on the same data and print a summary.
        ClusterEvaluation evaluation = new ClusterEvaluation();
        evaluation.setClusterer(som);
        evaluation.evaluateClusterer(instances);
        System.out.println(evaluation.clusterResultsToString());
        // Query the per-cluster membership distribution for every instance
        // (result is currently unused; kept for demonstration purposes).
        for (int index = 0; index < instances.size(); index++) {
            double[] distribution = som.distributionForInstance(instances.get(index));
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}