本文整理汇总了Java中weka.clusterers.SimpleKMeans.setNumClusters方法的典型用法代码示例。如果您正苦于以下问题:Java SimpleKMeans.setNumClusters方法的具体用法?Java SimpleKMeans.setNumClusters怎么用?Java SimpleKMeans.setNumClusters使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类weka.clusterers.SimpleKMeans
的用法示例。
在下文中一共展示了SimpleKMeans.setNumClusters方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: clusterData
import weka.clusterers.SimpleKMeans; //导入方法依赖的package包/类
/**
 * Clusters the {@code cpu} dataset into 10 groups with SimpleKMeans
 * (fixed seed 10, instance order preserved) and prints each instance's
 * cluster assignment to stdout.
 */
public void clusterData(){
    kmeans = new SimpleKMeans();
    kmeans.setSeed(10); // fixed seed for reproducible clustering
    try {
        kmeans.setPreserveInstancesOrder(true); // required for getAssignments()
        kmeans.setNumClusters(10);
        kmeans.buildClusterer(cpu);
        int[] assignments = kmeans.getAssignments();
        int i = 0;
        for(int clusterNum : assignments) {
            System.out.printf("Instance %d -> Cluster %d\n", i, clusterNum);
            i++;
        }
    } catch (Exception e1) {
        // Original code swallowed the exception silently; report it so
        // clustering failures are visible to the caller.
        System.err.println("Unable to cluster data: " + e1.getMessage());
        e1.printStackTrace();
    }
}
示例2: main
import weka.clusterers.SimpleKMeans; //导入方法依赖的package包/类
/**
 * Loads a CSV dataset, runs SimpleKMeans with K clusters and a maximum
 * iteration count (both taken from the command-line params), and prints
 * each cluster's size and centroid.
 *
 * @param args command-line arguments, parsed by readParams
 * @throws Exception if the CSV file cannot be read
 */
public static void main(String[] args) throws Exception {
    // read the input params
    readParams(args);
    CSVLoader loader = new CSVLoader();
    loader.setSource(inFile);
    Instances data = loader.getDataSet();
    System.setErr(err); // hack to avoid some error messages
    // Create the KMeans object.
    SimpleKMeans kmeans = new SimpleKMeans();
    kmeans.setNumClusters(K);
    kmeans.setMaxIterations(maxIteration);
    kmeans.setPreserveInstancesOrder(true);
    // Perform K-Means clustering.
    try {
        kmeans.buildClusterer(data);
    } catch (Exception ex) {
        System.err.println("Unable to build Clusterer: " + ex.getMessage());
        ex.printStackTrace();
        // Must stop here: the original fell through and queried centroids
        // of a model that was never built, which throws/NPEs.
        return;
    }
    // print out the cluster centroids
    Instances centroids = kmeans.getClusterCentroids();
    for (int i = 0; i < K; i++) {
        System.out.print("Cluster " + i + " size: " + kmeans.getClusterSizes()[i]);
        System.out.println(" Centroid: " + centroids.instance(i));
    }
}
示例3: buildClusteredSeries
import weka.clusterers.SimpleKMeans; //导入方法依赖的package包/类
/**
 * Clusters the instance data into 3 groups (class attribute removed first)
 * and builds one chart series per cluster, each point placed in the series
 * of its assigned cluster.
 *
 * @return one XYChart series per cluster, named "0".."2"
 * @throws Exception if filtering or clustering fails
 */
private List<Series<Number, Number>> buildClusteredSeries() throws Exception {
    List<XYChart.Series<Number, Number>> clusteredSeries = new ArrayList<>();
    // Clustering must be unsupervised: strip the class attribute (index 3).
    Remove removeFilter = new Remove();
    removeFilter.setAttributeIndices("3");
    removeFilter.setInputFormat(data);
    Instances unlabeled = Filter.useFilter(data, removeFilter);
    SimpleKMeans clusterer = new SimpleKMeans();
    clusterer.setSeed(10);
    clusterer.setPreserveInstancesOrder(true); // needed for getAssignments()
    clusterer.setNumClusters(3);
    clusterer.buildClusterer(unlabeled);
    // One empty series per cluster, named by cluster index.
    for (int cluster = 0; cluster < 3; cluster++) {
        Series<Number, Number> series = new XYChart.Series<>();
        series.setName(String.valueOf(cluster));
        clusteredSeries.add(series);
    }
    // Route each original (unfiltered) instance into its cluster's series.
    int[] memberships = clusterer.getAssignments();
    for (int idx = 0; idx < memberships.length; idx++) {
        clusteredSeries.get(memberships[idx]).getData().add(instancetoChartData(data.get(idx)));
    }
    return clusteredSeries;
}
示例4: run
import weka.clusterers.SimpleKMeans; //导入方法依赖的package包/类
/**
 * Clusters the instances read from an ARFF-style input file and writes
 * both the cluster-size distribution and a rebuilt file annotated with
 * each instance's cluster number.
 *
 * @param inputFile                path of the data file to cluster
 * @param outputFileDistribution   path for the cluster-distribution output
 * @param outputFileSupervised     path for the rebuilt, cluster-annotated file
 * @param numClusters              number of clusters (k)
 * @param skipLines                header lines to skip when rebuilding the file
 * @throws Exception if reading, clustering, or writing fails
 */
public static void run(String inputFile, String outputFileDistribution, String outputFileSupervised,
        int numClusters, int skipLines) throws Exception {
    SimpleKMeans kmeans = new SimpleKMeans();
    kmeans.setPreserveInstancesOrder(true); // required for getAssignments()
    kmeans.setNumClusters(numClusters);
    Instances data;
    // try-with-resources: the original leaked the reader on every call.
    try (BufferedReader datafile = readDataFile(inputFile)) {
        data = new Instances(datafile);
    }
    kmeans.buildClusterer(data);
    int[] assignments = kmeans.getAssignments();
    calculateClusterDistribution(assignments, numClusters, outputFileDistribution);
    rebuildArffFileWithClusterNumber(inputFile, outputFileSupervised, assignments, skipLines);
}
示例5: KmeansClustererState
import weka.clusterers.SimpleKMeans; //导入方法依赖的package包/类
/**
 * Creates a clusterer state backed by a Weka SimpleKMeans model
 * configured for the given number of clusters.
 *
 * @param numClusters number of clusters (k) for SimpleKMeans
 * @param windowSize  window size forwarded to the superclass
 * @param template    field template forwarded to the superclass
 * @throws Exception if SimpleKMeans rejects the cluster count
 */
public KmeansClustererState(int numClusters, int windowSize, FieldTemplate template) throws Exception {
super(windowSize, template);
// This is where you create your own classifier and set the necessary parameters
clusterer = new SimpleKMeans();
clusterer.setNumClusters(numClusters);
this.numClusters = numClusters;
// Monitor object guarding clusterer replacement elsewhere in this class.
lock = new Object();
}
示例6: updateClustererNumClusters
import weka.clusterers.SimpleKMeans; //导入方法依赖的package包/类
/**
 * Allows parameter updates to the current model: replaces the clusterer
 * with a fresh SimpleKMeans configured for the new k, then rebuilds the
 * feature-vector attributes and the (empty) training dataset to match.
 *
 * @param k the new number of clusters
 * @throws Exception if SimpleKMeans rejects the new cluster count
 */
public final void updateClustererNumClusters(int k) throws Exception {
// NOTE(review): priority is raised for the rebuild but never restored
// afterwards -- confirm this is intentional.
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
Logger.getAnonymousLogger().log(Level.INFO, "updating k and rebuilding clusterer");
// The whole swap happens under the lock so readers never observe a
// half-reconfigured clusterer/dataset pair.
synchronized (lock) {
numClusters = k;
// A SimpleKMeans cannot be re-keyed in place; discard and recreate.
clusterer = new SimpleKMeans();
clusterer.setNumClusters(numClusters);
this.wekaAttributes = WekaUtils.makeFeatureVectorForBatchClustering(getFieldTemplate().getRuntimeFeatureCount(), numClusters);
this.wekaAttributes.trimToSize();
// Fresh, empty dataset sized to the window; old instances are dropped.
this.dataset = new Instances("training", this.wekaAttributes, this.windowSize);
}
}
示例7: kmeans
import weka.clusterers.SimpleKMeans; //导入方法依赖的package包/类
/**
 * K-Means clustering over an observation matrix using Weka's SimpleKMeans
 * (Euclidean distance by default).
 *
 * @param data        matrix of observations (numObs x numFeatures)
 * @param numObs      number of observations (rows)
 * @param numFeatures number of features (columns)
 * @param k           number of clusters
 * @return the clusters, each holding its centroid mean vector and the
 *         indices of its member observations; on any failure the process
 *         exits with status -1
 */
public Cluster[] kmeans(double[][] data, int numObs, int numFeatures, int k) {
    Instances instances = convertMatrixToWeka(data, numObs, numFeatures);
    // uses Euclidean distance by default
    SimpleKMeans model = new SimpleKMeans();
    try {
        model.setPreserveInstancesOrder(true);
        model.setNumClusters(k);
        model.buildClusterer(instances);
        // Build one Cluster per centroid, copying its attribute values
        // into a plain mean vector.
        Instances centers = model.getClusterCentroids();
        int centerCount = centers.numInstances();
        Cluster[] clusters = new Cluster[centerCount];
        for (int c = 0; c < centerCount; c++) {
            Instance centroid = centers.instance(c);
            double[] mean = new double[centroid.numAttributes()];
            for (int a = 0; a < mean.length; a++) {
                mean[a] = centroid.value(a);
            }
            clusters[c] = new Cluster(mean, c);
        }
        // Record each observation index as a member of its assigned cluster.
        int[] memberships = model.getAssignments();
        for (int obs = 0; obs < memberships.length; obs++) {
            clusters[memberships[obs]].addMember(obs);
        }
        return clusters;
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(-1);
        return null; // unreachable, satisfies the compiler
    }
}
示例8: buildClassifier
import weka.clusterers.SimpleKMeans; //导入方法依赖的package包/类
/**
 * Builds the classifier: standardizes the data, derives RBF-style basis
 * functions from a density-based wrapper around SimpleKMeans cluster
 * memberships, then fits Logistic (nominal class) or ridge
 * LinearRegression (numeric class) on the transformed data.
 *
 * @param instances the training data
 * @throws Exception if the classifier could not be built successfully
 */
@Override
public void buildClassifier(Instances instances) throws Exception {
// can classifier handle the data?
getCapabilities().testWithFail(instances);
// remove instances with missing class (work on a copy, don't mutate caller's data)
instances = new Instances(instances);
instances.deleteWithMissingClass();
// only class? -> build ZeroR model
if (instances.numAttributes() == 1) {
System.err
.println("Cannot build model (only class attribute present in data!), "
+ "using ZeroR model instead!");
m_ZeroR = new weka.classifiers.rules.ZeroR();
m_ZeroR.buildClassifier(instances);
return;
} else {
m_ZeroR = null;
}
// Standardize all attributes before clustering; the filter is kept so
// classification can apply the same transform later.
m_standardize = new Standardize();
m_standardize.setInputFormat(instances);
instances = Filter.useFilter(instances, m_standardize);
// Cluster memberships of a density-based KMeans act as the basis functions.
SimpleKMeans sk = new SimpleKMeans();
sk.setNumClusters(m_numClusters);
sk.setSeed(m_clusteringSeed);
MakeDensityBasedClusterer dc = new MakeDensityBasedClusterer();
dc.setClusterer(sk);
dc.setMinStdDev(m_minStdDev);
m_basisFilter = new ClusterMembership();
m_basisFilter.setDensityBasedClusterer(dc);
m_basisFilter.setInputFormat(instances);
Instances transformed = Filter.useFilter(instances, m_basisFilter);
// Fit the output layer: Logistic for nominal classes, ridge linear
// regression for numeric classes. Exactly one of m_logistic/m_linear is set.
if (instances.classAttribute().isNominal()) {
m_linear = null;
m_logistic = new Logistic();
m_logistic.setRidge(m_ridge);
m_logistic.setMaxIts(m_maxIts);
m_logistic.buildClassifier(transformed);
} else {
m_logistic = null;
m_linear = new LinearRegression();
// SELECTION_NONE: keep every basis function; no attribute selection.
m_linear.setAttributeSelectionMethod(new SelectedTag(
LinearRegression.SELECTION_NONE, LinearRegression.TAGS_SELECTION));
m_linear.setRidge(m_ridge);
m_linear.buildClassifier(transformed);
}
}
示例9: buildClassifier
import weka.clusterers.SimpleKMeans; //导入方法依赖的package包/类
/**
 * Builds the classifier: standardizes the data, derives RBF-style basis
 * functions from a density-based wrapper around SimpleKMeans cluster
 * memberships, then fits Logistic (nominal class) or ridge
 * LinearRegression (numeric class) on the transformed data.
 *
 * @param instances the training data
 * @throws Exception if the classifier could not be built successfully
 */
public void buildClassifier(Instances instances) throws Exception {
// can classifier handle the data?
getCapabilities().testWithFail(instances);
// remove instances with missing class (work on a copy, don't mutate caller's data)
instances = new Instances(instances);
instances.deleteWithMissingClass();
// only class? -> build ZeroR model
if (instances.numAttributes() == 1) {
System.err.println(
"Cannot build model (only class attribute present in data!), "
+ "using ZeroR model instead!");
m_ZeroR = new weka.classifiers.rules.ZeroR();
m_ZeroR.buildClassifier(instances);
return;
}
else {
m_ZeroR = null;
}
// Standardize all attributes before clustering; the filter is kept so
// classification can apply the same transform later.
m_standardize = new Standardize();
m_standardize.setInputFormat(instances);
instances = Filter.useFilter(instances, m_standardize);
// Cluster memberships of a density-based KMeans act as the basis functions.
SimpleKMeans sk = new SimpleKMeans();
sk.setNumClusters(m_numClusters);
sk.setSeed(m_clusteringSeed);
MakeDensityBasedClusterer dc = new MakeDensityBasedClusterer();
dc.setClusterer(sk);
dc.setMinStdDev(m_minStdDev);
m_basisFilter = new ClusterMembership();
m_basisFilter.setDensityBasedClusterer(dc);
m_basisFilter.setInputFormat(instances);
Instances transformed = Filter.useFilter(instances, m_basisFilter);
// Fit the output layer: Logistic for nominal classes, ridge linear
// regression for numeric classes. Exactly one of m_logistic/m_linear is set.
if (instances.classAttribute().isNominal()) {
m_linear = null;
m_logistic = new Logistic();
m_logistic.setRidge(m_ridge);
m_logistic.setMaxIts(m_maxIts);
m_logistic.buildClassifier(transformed);
} else {
m_logistic = null;
m_linear = new LinearRegression();
// SELECTION_NONE: keep every basis function; no attribute selection.
m_linear.setAttributeSelectionMethod(new SelectedTag(LinearRegression.SELECTION_NONE,
LinearRegression.TAGS_SELECTION));
m_linear.setRidge(m_ridge);
m_linear.buildClassifier(transformed);
}
}
示例10: getParametersForKMeans
import weka.clusterers.SimpleKMeans; //导入方法依赖的package包/类
/**
 * Runs SimpleKMeans on the instances for k = 1..20 and prints, per k, the
 * average (over clusters) of the summed per-attribute standard deviations
 * -- a simple compactness score for choosing k (elbow-style inspection).
 *
 * NOTE(review): this always returns "" and reports only via stdout;
 * consider returning the formatted results instead.
 *
 * @param instances the data to cluster
 * @param name      label printed above the per-k results
 * @return always the empty string (results go to System.out)
 */
public static String getParametersForKMeans(Instances instances, String name) {
SimpleKMeans kmeans = new SimpleKMeans();
// NOTE(review): the "-N 2" cluster count here is immediately overridden by
// setNumClusters(i) inside the loop; the other options (k-means++ init,
// 500 iterations, seed 10, Euclidean distance) stay in effect.
String options = "-init 0 -max-candidates 100 -periodic-pruning 10000 -min-density 2.0 -t1 -1.25 -t2 -1.0 -V -N 2 -A \"weka.core.EuclideanDistance -R first-last\" -I 500 -num-slots 1 -S 10";
try {
kmeans.setOptions(weka.core.Utils.splitOptions(options));
int numK = 20;
double[] results = new double[numK];
for (int i = 1; i <= numK; i++) {
kmeans.setNumClusters(i);
kmeans.buildClusterer(instances);
// Copy centroids and per-cluster std devs into plain arrays,
// mapping NaN (e.g. empty/degenerate clusters) to 0.0.
double[][] centroids = new double[i][kmeans.getClusterCentroids().get(0).numAttributes()];
double[][] stdDevs = new double[i][kmeans.getClusterStandardDevs().get(0).numAttributes()];
for (int j = 0; j < i; j++) {
for (int k = 0; k < kmeans.getClusterCentroids().get(0).numAttributes(); k++) {
if (!Double.isNaN(kmeans.getClusterCentroids().get(j).value(k)) && !Double.isNaN(kmeans.getClusterStandardDevs().get(j).value(k))) {
centroids[j][k] = kmeans.getClusterCentroids().get(j).value(k);
stdDevs[j][k] = kmeans.getClusterStandardDevs().get(j).value(k);
} else {
centroids[j][k] = 0.0;
stdDevs[j][k] = 0.0;
}
}
}
// Score for this k: mean over clusters of the per-cluster sum of
// attribute standard deviations (lower = tighter clusters).
double sum_clust = 0;
for (int j = 0; j < centroids.length; j++) {
double sum_att = 0;
for (int k = 0; k < centroids[0].length; k++) {
double d = stdDevs[j][k];
sum_att += d;
}
sum_clust += sum_att;
}
results[i - 1] = sum_clust / centroids.length;
// Show cluster analysis results
ClusterEvaluation evaluation = new ClusterEvaluation();
evaluation.setClusterer(kmeans);
evaluation.evaluateClusterer(instances);
//System.out.println(evaluation.clusterResultsToString());
}
// Print the compactness score for each k, one per line.
System.out.println(name);
for (int i = 0; i < numK; i++) {
System.out.println(String.format("%.3f", results[i]));
}
System.out.println();
} catch (Exception e) {
e.printStackTrace();
}
return "";
}