本文整理汇总了Java中org.apache.commons.math3.ml.distance.EuclideanDistance类的典型用法代码示例。如果您正苦于以下问题:Java EuclideanDistance类的具体用法?Java EuclideanDistance怎么用?Java EuclideanDistance使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
EuclideanDistance类属于org.apache.commons.math3.ml.distance包,在下文中一共展示了EuclideanDistance类的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testPerformClusterAnalysisToManyClusters
import org.apache.commons.math3.ml.distance.EuclideanDistance; //导入依赖的package包/类
/**
 * Verifies that asking for more clusters than data points fails:
 * 2 observations cannot be split into 3 clusters. See issue MATH-436.
 */
@Test(expected = NumberIsTooSmallException.class)
public void testPerformClusterAnalysisToManyClusters() {
    // k = 3 clusters requested, 1 iteration max, default Euclidean metric.
    final KMeansPlusPlusClusterer<DoublePoint> clusterer =
        new KMeansPlusPlusClusterer<DoublePoint>(3, 1, new EuclideanDistance(), random);
    // Only two data points — fewer than the requested number of clusters.
    final DoublePoint[] data = {
        new DoublePoint(new int[] { 1959, 325100 }),
        new DoublePoint(new int[] { 1960, 373200 })
    };
    // Expected to throw NumberIsTooSmallException (see @Test annotation).
    clusterer.cluster(Arrays.asList(data));
}
示例2: getPredictedValue
import org.apache.commons.math3.ml.distance.EuclideanDistance; //导入依赖的package包/类
/**
 * Classifies a data point as anomalous or normal by comparing its Euclidean
 * distance from its assigned cluster center against the cluster boundary.
 *
 * @param dataPointVector the data point to classify
 * @param clusterIndex    index of the cluster the point was assigned to
 * @param clusterBoundary maximum allowed distance from the cluster center
 * @return {@code anomalyLabel} if the point lies strictly outside the
 *         boundary, {@code normalLabel} otherwise
 */
private String getPredictedValue(Vector dataPointVector, int clusterIndex, double clusterBoundary) {
    final EuclideanDistance measure = new EuclideanDistance();
    final double[] center = kMeansModel.clusterCenters()[clusterIndex].toArray();
    final double distanceToCenter = measure.compute(center, dataPointVector.toArray());
    // Points farther from their center than the boundary are flagged as anomalies.
    return (distanceToCenter > clusterBoundary) ? anomalyLabel : normalLabel;
}
示例3: initializeNeighborhood
import org.apache.commons.math3.ml.distance.EuclideanDistance; //导入依赖的package包/类
/**
 * Builds the neighborhood table: for every weight vector, finds the
 * {@code neighborSize} sub-problems whose weight vectors are closest in
 * Euclidean distance and records their indices in {@code neighborhood}.
 */
private void initializeNeighborhood() {
    final EuclideanDistance measure = new EuclideanDistance();
    final double[] distances = new double[numberOfWeightVectors];
    final int[] indices = new int[numberOfWeightVectors];
    for (int i = 0; i < numberOfWeightVectors; i++) {
        // Distance from weight vector i to every weight vector (including itself).
        for (int j = 0; j < numberOfWeightVectors; j++) {
            distances[j] = measure.compute(weightVector[i], weightVector[j]);
            indices[j] = j;
        }
        // Partially sort so the first neighborSize entries of indices are the nearest sub-problems.
        minFastSort(distances, indices, numberOfWeightVectors, neighborSize);
        System.arraycopy(indices, 0, neighborhood[i], 0, neighborSize);
    }
}
示例4: tune
import org.apache.commons.math3.ml.distance.EuclideanDistance; //导入依赖的package包/类
/**
 * Auto-tunes the DBSCAN parameters from the error time-series of the
 * observed vs. expected data, then rebuilds the internal clusterer:
 * {@code eps} is set proportional to the mean pairwise distance between
 * error points, and {@code minPoints} proportional to the series length.
 *
 * @param observedSeries the actually observed data sequence
 * @param expectedSeries the model-predicted data sequence
 * @param anomalySequence known anomaly intervals (not used by this implementation)
 * @throws Exception propagated from the error computation
 */
@Override
public void tune(DataSequence observedSeries,
DataSequence expectedSeries,
IntervalSequence anomalySequence) throws Exception {
// Compute the time-series of errors.
HashMap<String, ArrayList<Float>> allErrors = aes.initAnomalyErrors(observedSeries, expectedSeries);
List<IdentifiedDoublePoint> points = new ArrayList<IdentifiedDoublePoint>();
EuclideanDistance ed = new EuclideanDistance();
int n = observedSeries.size();
// One point per time index; its coordinates are the error metrics at that
// index, one dimension per registered error type.
for (int i = 0; i < n; i++) {
double[] d = new double[(aes.getIndexToError().keySet()).size()];
for (int e = 0; e < (aes.getIndexToError().keySet()).size(); e++) {
d[e] = allErrors.get(aes.getIndexToError().get(e)).get(i);
}
points.add(new IdentifiedDoublePoint(d, i));
}
double sum = 0.0;
double count = 0.0;
// Mean Euclidean distance over all ordered pairs of points. Note this
// includes self-pairs (i == j contributes 0), which slightly lowers the mean.
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
sum += ed.compute(points.get(i).getPoint(), points.get(j).getPoint());
count++;
}
}
// Scale the neighborhood radius by the configured sensitivity factor.
eps = ((double) this.sDAutoSensitivity) * (sum / count);
// Minimum cluster size grows with the number of observations.
minPoints = ((int) Math.ceil(((double) this.amntAutoSensitivity) * ((double) n)));
dbscan = new DBSCANClusterer<IdentifiedDoublePoint>(eps, minPoints);
}
示例5: clusters
import org.apache.commons.math3.ml.distance.EuclideanDistance; //导入依赖的package包/类
/**
 * GAML operator {@code kmeans}: clusters the rows of {@code data} into
 * {@code k} groups with the Apache Commons Math k-means++ algorithm and
 * returns, for each cluster, the list of indices of the instances it contains.
 *
 * @param scope the current GAMA scope (supplies the RNG seed and casting)
 * @param data  list of numeric vectors to cluster (one row per instance)
 * @param k     the number of clusters to split the data into
 * @param maxIt maximum number of iterations (if negative, no maximum is used)
 * @return a list of clusters, each a list of instance indices into {@code data}
 * @throws GamaRuntimeException propagated from GAMA runtime operations — TODO confirm exact conditions
 */
@operator (
value = "kmeans",
can_be_const = false,
type = IType.LIST,
category = { IOperatorCategory.STATISTICAL },
concept = { IConcept.STATISTIC, IConcept.CLUSTERING })
@doc (
value = "returns the list of clusters (list of instance indices) computed with the kmeans++ algorithm from the first operand data according to the number of clusters to split the data into (k) and the maximum number of iterations to run the algorithm for (If negative, no maximum will be used) (maxIt). Usage: kmeans(data,k,maxit)",
special_cases = "if the lengths of two vectors in the right-hand aren't equal, returns 0",
examples = { @example (
value = "kmeans ([[2,4,5], [3,8,2], [1,1,3], [4,3,4]],2,10)",
isExecutable = false) })
public static GamaList<GamaList> KMeansPlusplusApache(final IScope scope, final GamaList data, final Integer k,
final Integer maxIt) throws GamaRuntimeException {
// Seed the clusterer's RNG from the simulation RNG so results are reproducible.
final MersenneTwister rand = new MersenneTwister(scope.getRandom().getSeed().longValue());
final List<DoublePoint> instances = new ArrayList<DoublePoint>();
// Convert each GAMA row into a double[] point, casting every cell to float.
for (int i = 0; i < data.size(); i++) {
final GamaList d = (GamaList) data.get(i);
final double point[] = new double[d.size()];
for (int j = 0; j < d.size(); j++) {
point[j] = Cast.asFloat(scope, d.get(j));
}
// Instance wraps the coordinates together with the row index i —
// presumably a DoublePoint subclass carrying an id; confirm against its definition.
instances.add(new Instance(i, point));
}
final KMeansPlusPlusClusterer<DoublePoint> kmeans =
new KMeansPlusPlusClusterer<DoublePoint>(k, maxIt, new EuclideanDistance(), rand);
final List<CentroidCluster<DoublePoint>> clusters = kmeans.cluster(instances);
final GamaList results = (GamaList) GamaListFactory.create();
// Collect, per cluster, the original row indices of its member points.
for (final Cluster<DoublePoint> cl : clusters) {
final GamaList clG = (GamaList) GamaListFactory.create();
for (final DoublePoint pt : cl.getPoints()) {
clG.addValue(scope, ((Instance) pt).getId());
}
results.addValue(scope, clG);
}
return results;
}
示例6: setUp
import org.apache.commons.math3.ml.distance.EuclideanDistance; //导入依赖的package包/类
/** Creates the evaluator under test, using Euclidean distance as its measure. */
@Before
public void setUp() {
evaluator = new SumOfClusterVariances<DoublePoint>(new EuclideanDistance());
}
示例7: ClusterEvaluator
import org.apache.commons.math3.ml.distance.EuclideanDistance; //导入依赖的package包/类
/**
 * Creates a new cluster evaluator with an {@link EuclideanDistance}
 * as distance measure.
 */
public ClusterEvaluator() {
// Delegate to the measure-accepting constructor with the default measure.
this(new EuclideanDistance());
}
示例8: DBSCANClusterer
import org.apache.commons.math3.ml.distance.EuclideanDistance; //导入依赖的package包/类
/**
 * Creates a new instance of a DBSCANClusterer.
 * <p>
 * The euclidean distance will be used as default distance measure.
 *
 * @param eps maximum radius of the neighborhood to be considered
 * @param minPts minimum number of points needed for a cluster
 * @throws NotPositiveException if {@code eps < 0.0} or {@code minPts < 0}
 */
public DBSCANClusterer(final double eps, final int minPts)
throws NotPositiveException {
// Delegate to the full constructor with a default EuclideanDistance measure.
this(eps, minPts, new EuclideanDistance());
}
示例9: KMeansPlusPlusClusterer
import org.apache.commons.math3.ml.distance.EuclideanDistance; //导入依赖的package包/类
/** Build a clusterer.
 * <p>
 * The default strategy for handling empty clusters that may appear during
 * algorithm iterations is to split the cluster with largest distance variance.
 * <p>
 * The euclidean distance will be used as default distance measure.
 *
 * @param k the number of clusters to split the data into
 * @param maxIterations the maximum number of iterations to run the algorithm for.
 *   If negative, no maximum will be used.
 */
public KMeansPlusPlusClusterer(final int k, final int maxIterations) {
// Delegate to the measure-accepting constructor with a default EuclideanDistance.
this(k, maxIterations, new EuclideanDistance());
}
示例10: FuzzyKMeansClusterer
import org.apache.commons.math3.ml.distance.EuclideanDistance; //导入依赖的package包/类
/**
 * Creates a new instance of a FuzzyKMeansClusterer.
 * <p>
 * The euclidean distance will be used as default distance measure.
 *
 * @param k the number of clusters to split the data into
 * @param fuzziness the fuzziness factor, must be &gt; 1.0
 * @throws NumberIsTooSmallException if {@code fuzziness <= 1.0}
 */
public FuzzyKMeansClusterer(final int k, final double fuzziness) throws NumberIsTooSmallException {
// Delegate to the full constructor: -1 means no iteration limit, with a default EuclideanDistance.
this(k, fuzziness, -1, new EuclideanDistance());
}
示例11: WeightedKMeansPlusPlusClusterer
import org.apache.commons.math3.ml.distance.EuclideanDistance; //导入依赖的package包/类
/** Build a clusterer.
 * <p>
 * The default strategy for handling empty clusters that may appear during
 * algorithm iterations is to split the cluster with largest distance variance.
 * <p>
 * The euclidean distance will be used as default distance measure.
 *
 * @param k the number of clusters to split the data into
 * @param maxIterations the maximum number of iterations to run the algorithm for.
 *   If negative, no maximum will be used.
 */
public WeightedKMeansPlusPlusClusterer(final int k, final int maxIterations) {
// Delegate to the measure-accepting constructor with a default EuclideanDistance.
this(k, maxIterations, new EuclideanDistance());
}
示例12: AdaptedIsoClustering
import org.apache.commons.math3.ml.distance.EuclideanDistance; //导入依赖的package包/类
/**
 * Build a clusterer.
 * <p>
 * The default strategy for handling empty clusters that may appear during
 * algorithm iterations is to split the cluster with largest distance variance.
 * <p>
 * The euclidean distance will be used as default distance measure.
 *
 * @param k the number of clusters to split the data into
 * @param maxIterations the maximum number of iterations to run the algorithm for.
 *   If negative, no maximum will be used.
 */
public AdaptedIsoClustering(final int k, final int maxIterations) {
// Delegate to the measure-accepting constructor with a default EuclideanDistance.
this(k, maxIterations, new EuclideanDistance());
}