This page collects typical usage examples of the Java method de.lmu.ifi.dbs.elki.database.relation.Relation.size. If you are unsure what Relation.size does, how to call it, or just want concrete examples, the curated code samples below should help. You can also explore further usage examples of the enclosing class de.lmu.ifi.dbs.elki.database.relation.Relation.
Below, 15 code examples of Relation.size are shown, sorted by popularity by default.
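Before the examples, a minimal self-contained sketch of the most common Relation.size pattern in ELKI code: using the size to dimension a scratch array and a FiniteProgress before a full scan. This is an illustrative sketch only; the class name and the use of DoubleVector are assumptions, not taken from the examples below.

import de.lmu.ifi.dbs.elki.data.DoubleVector;
import de.lmu.ifi.dbs.elki.database.ids.DBIDIter;
import de.lmu.ifi.dbs.elki.database.relation.Relation;
import de.lmu.ifi.dbs.elki.logging.Logging;
import de.lmu.ifi.dbs.elki.logging.progress.FiniteProgress;

public class RelationSizeSketch {
  private static final Logging LOG = Logging.getLogger(RelationSizeSketch.class);

  /** Collect the first coordinate of every vector; size() bounds both the buffer and the progress. */
  public static double[] firstCoordinates(Relation<DoubleVector> relation) {
    double[] buf = new double[relation.size()]; // one slot per object in the relation
    FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("Scanning relation", relation.size(), LOG) : null;
    int pos = 0;
    for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
      buf[pos++] = relation.get(it).doubleValue(0);
      LOG.incrementProcessed(prog);
    }
    LOG.ensureCompleted(prog);
    return buf;
  }
}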
Example 1: testRKNNQueries
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
private void testRKNNQueries(Relation<DoubleVector> rep, RKNNQuery<DoubleVector> lin_rknn_query, RKNNQuery<DoubleVector> preproc_rknn_query, int k) {
ArrayDBIDs sample = DBIDUtil.ensureArray(rep.getDBIDs());
List<? extends DoubleDBIDList> lin_rknn_ids = lin_rknn_query.getRKNNForBulkDBIDs(sample, k);
List<? extends DoubleDBIDList> preproc_rknn_ids = preproc_rknn_query.getRKNNForBulkDBIDs(sample, k);
for(int i = 0; i < rep.size(); i++) {
DoubleDBIDList lin_rknn = lin_rknn_ids.get(i);
DoubleDBIDList pre_rknn = preproc_rknn_ids.get(i);
DoubleDBIDListIter lin = lin_rknn.iter(), pre = pre_rknn.iter();
for(; lin.valid() && pre.valid(); lin.advance(), pre.advance()) {
assertTrue(DBIDUtil.equal(lin, pre) || lin.doubleValue() == pre.doubleValue());
}
assertEquals("rkNN sizes do not agree for k=" + k, lin_rknn.size(), pre_rknn.size());
for(int j = 0; j < lin_rknn.size(); j++) {
assertTrue("rkNNs of linear scan and preprocessor do not match!", DBIDUtil.equal(lin_rknn.get(j), pre_rknn.get(j)));
assertEquals("rkNNs of linear scan and preprocessor do not match!", lin_rknn.get(j).doubleValue(), pre_rknn.get(j).doubleValue(), 0.);
}
}
}
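For context, a hedged sketch of how the two rkNN queries compared above might be set up: a linear-scan query obtained straight from the database, and one backed by MaterializeKNNAndRKNNPreprocessor. The hint constants and the initialize() call are assumptions based on the ELKI 0.7 API and should be checked against the actual test fixture.

// Hedged setup sketch (assumed ELKI 0.7 API), not the actual test fixture:
DistanceQuery<DoubleVector> distq = db.getDistanceQuery(rep, EuclideanDistanceFunction.STATIC);
// Linear scan: hint single-shot use so no index is consulted.
RKNNQuery<DoubleVector> lin_rknn_query = db.getRKNNQuery(distq, DatabaseQuery.HINT_SINGLE);
// Preprocessor: materializes kNN and RkNN lists up front.
MaterializeKNNAndRKNNPreprocessor<DoubleVector> preproc = new MaterializeKNNAndRKNNPreprocessor<>(rep, distq.getDistanceFunction(), k);
preproc.initialize();
RKNNQuery<DoubleVector> preproc_rknn_query = preproc.getRKNNQuery(distq, DatabaseQuery.HINT_OPTIMIZED_ONLY);
testRKNNQueries(rep, lin_rknn_query, preproc_rknn_query, k);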
Example 2: evaluateClusters
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
/**
* Evaluates the quality of the clusters.
*
* @param clusters the clusters to be evaluated
* @param dimensions the dimensions associated with each cluster
* @param database the database holding the objects
* @return a measure for the cluster quality
*/
private double evaluateClusters(ArrayList<PROCLUSCluster> clusters, long[][] dimensions, Relation<V> database) {
double result = 0;
for(int i = 0; i < dimensions.length; i++) {
PROCLUSCluster c_i = clusters.get(i);
double[] centroid_i = c_i.centroid;
long[] dims_i = dimensions[i];
double w_i = 0;
for(int d = BitsUtil.nextSetBit(dims_i, 0); d >= 0; d = BitsUtil.nextSetBit(dims_i, d + 1)) {
w_i += avgDistance(centroid_i, c_i.objectIDs, database, d);
}
w_i /= dimensions.length;
result += c_i.objectIDs.size() * w_i;
}
return result / database.size();
}
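Written as a formula, with k = dimensions.length clusters, cluster C_i with centroid μ_i and selected dimension set D_i, the measure computed above is

w_i = (1/k) · Σ_{d ∈ D_i} avgDistance(μ_i, C_i, d),    quality = (1/|DB|) · Σ_{i=1..k} |C_i| · w_i

Lower values indicate more compact projected clusters. The divisor k in w_i is taken verbatim from the code above; note that the PROCLUS paper normalizes w_i by |D_i| instead.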
Example 3: instantiate
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
/**
* Full instantiation interface.
*
* @param database Database
* @param relation Relation
* @return Instance
*/
public Instance instantiate(Database database, Relation<V> relation) {
DistanceQuery<V> dq = database.getDistanceQuery(relation, EuclideanDistanceFunction.STATIC);
KNNQuery<V> knnq = database.getKNNQuery(dq, settings.k);
WritableDataStore<PCAFilteredResult> storage = DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, PCAFilteredResult.class);
PCARunner pca = settings.pca;
EigenPairFilter filter = settings.filter;
Duration time = LOG.newDuration(this.getClass().getName() + ".preprocessing-time").begin();
FiniteProgress progress = LOG.isVerbose() ? new FiniteProgress(this.getClass().getName(), relation.size(), LOG) : null;
for(DBIDIter iditer = relation.iterDBIDs(); iditer.valid(); iditer.advance()) {
DoubleDBIDList ref = knnq.getKNNForDBID(iditer, settings.k);
PCAResult pcares = pca.processQueryResult(ref, relation);
storage.put(iditer, new PCAFilteredResult(pcares.getEigenPairs(), filter.filter(pcares.getEigenvalues()), 1., 0.));
LOG.incrementProcessed(progress);
}
LOG.ensureCompleted(progress);
LOG.statistics(time.end());
return new Instance(relation.getDBIDs(), storage, relation);
}
Example 4: testSorting
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
@Test
public void testSorting() {
Database db = AbstractSimpleAlgorithmTest.makeSimpleDatabase(filename, -1);
Relation<? extends NumberVector> rel = db.getRelation(TypeUtil.NUMBER_VECTOR_FIELD);
ArrayModifiableDBIDs ids = DBIDUtil.newArray(rel.getDBIDs());
final int size = rel.size();
int dims = RelationUtil.dimensionality(rel);
SortDBIDsBySingleDimension sorter = new VectorUtil.SortDBIDsBySingleDimension(rel);
for(int d = 0; d < dims; d++) {
sorter.setDimension(d);
ids.sort(sorter);
assertEquals("Lost some DBID during sorting?!?", size, DBIDUtil.newHashSet(ids).size());
DBIDArrayIter it = ids.iter();
double prev = rel.get(it).doubleValue(d);
for(it.advance(); it.valid(); it.advance()) {
double next = rel.get(it).doubleValue(d);
assertTrue("Not correctly sorted: " + prev + " > " + next + " at pos " + it.getOffset(), prev <= next);
prev = next;
}
}
}
Example 5: HilbertFeatures
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
/**
* Constructor.
*
* @param relation Relation to index
* @param min Minimums for data space
* @param diameter Diameter of data space
*/
public HilbertFeatures(Relation<O> relation, double[] min, double diameter) {
super();
this.relation = relation;
this.min = min;
this.diameter = diameter;
this.pf = new HilFeature[relation.size()];
int pos = 0;
for(DBIDIter iditer = relation.iterDBIDs(); iditer.valid(); iditer.advance()) {
pf[pos++] = new HilFeature(DBIDUtil.deref(iditer), new ComparableMaxHeap<DoubleDBIDPair>(k));
}
this.out = new ComparatorMinHeap<>(n, new Comparator<HilFeature>() {
@Override
public int compare(HilFeature o1, HilFeature o2) {
return Double.compare(o1.ubound, o2.ubound);
}
});
this.wlb = new ComparatorMinHeap<>(n, new Comparator<HilFeature>() {
@Override
public int compare(HilFeature o1, HilFeature o2) {
return Double.compare(o1.lbound, o2.lbound);
}
});
this.top = new HashSet<>(2 * n);
}
Example 6: EvolutionarySearch
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
/**
* Constructor.
*
* @param relation Database to use
* @param ranges DBID ranges to process
* @param m Population size
* @param random Random generator
*/
public EvolutionarySearch(Relation<V> relation, ArrayList<ArrayList<DBIDs>> ranges, int m, Random random) {
super();
this.ranges = ranges;
this.m = m;
this.dbsize = relation.size();
this.dim = RelationUtil.dimensionality(relation);
this.random = random;
}
Example 7: run
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
/**
* Performs the DiSH algorithm on the given database.
*
* @param db Database to run on
* @param relation Relation to process
* @return Clustering result
*/
public Clustering<SubspaceModel> run(Database db, Relation<V> relation) {
if(mu >= relation.size()) {
throw new AbortException("Parameter mu is chosen unreasonably large. This won't yield meaningful results.");
}
DiSHClusterOrder opticsResult = new Instance(db, relation).run();
if(LOG.isVerbose()) {
LOG.verbose("Compute Clusters.");
}
return computeClusters(relation, opticsResult);
}
Example 8: run
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
/**
* Run the Eclat algorithm
*
* @param db Database to process
* @param relation Bit vector relation
* @return Frequent patterns found
*/
public FrequentItemsetsResult run(Database db, final Relation<BitVector> relation) {
// TODO: implement with resizable arrays, to not need dim.
final int dim = RelationUtil.dimensionality(relation);
final VectorFieldTypeInformation<BitVector> meta = RelationUtil.assumeVectorField(relation);
// Compute absolute minsupport
final int minsupp = getMinimumSupport(relation.size());
LOG.verbose("Build 1-dimensional transaction lists.");
Duration ctime = LOG.newDuration(STAT + "eclat.transposition.time").begin();
DBIDs[] idx = buildIndex(relation, dim, minsupp);
LOG.statistics(ctime.end());
FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("Building frequent itemsets", idx.length, LOG) : null;
Duration etime = LOG.newDuration(STAT + "eclat.extraction.time").begin();
final List<Itemset> solution = new ArrayList<>();
for(int i = 0; i < idx.length; i++) {
LOG.incrementProcessed(prog);
extractItemsets(idx, i, minsupp, solution);
}
LOG.ensureCompleted(prog);
Collections.sort(solution);
LOG.statistics(etime.end());
LOG.statistics(new LongStatistic(STAT + "frequent-itemsets", solution.size()));
return new FrequentItemsetsResult("Eclat", "eclat", solution, meta, relation.size());
}
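The absolute minimum support above is derived from relation.size(). A hedged sketch of how getMinimumSupport presumably converts a relative threshold into an absolute transaction count (the minsupp field name is an assumption of this sketch):

// Hedged sketch: thresholds below 1 are relative to the relation size,
// larger values are treated as absolute transaction counts already.
protected int getMinimumSupport(int size) {
  return (int) ((minsupp < 1.) ? Math.ceil(minsupp * size) : minsupp);
}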
Example 9: run
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
/**
* Run k-medoids
*
* @param database Database
* @param relation relation to use
* @return result
*/
public Clustering<MedoidModel> run(Database database, Relation<V> relation) {
if(relation.size() <= 0) {
return new Clustering<>("PAM Clustering", "pam-clustering");
}
if(k > 0x7FFF) {
throw new NotImplementedException("PAM supports at most " + 0x7FFF + " clusters.");
}
DistanceQuery<V> distQ = DatabaseUtil.precomputedDistanceQuery(database, relation, getDistanceFunction(), LOG);
DBIDs ids = relation.getDBIDs();
// Choose initial medoids
if(LOG.isStatistics()) {
LOG.statistics(new StringStatistic(KEY + ".initialization", initializer.toString()));
}
ArrayModifiableDBIDs medoids = DBIDUtil.newArray(initializer.chooseInitialMedoids(k, ids, distQ));
if(medoids.size() != k) {
throw new AbortException("Initializer " + initializer.toString() + " did not return " + k + " means, but " + medoids.size());
}
// Setup cluster assignment store
WritableIntegerDataStore assignment = DataStoreUtil.makeIntegerStorage(ids, DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, -1);
run(distQ, ids, medoids, assignment);
ArrayModifiableDBIDs[] clusters = ClusteringAlgorithmUtil.partitionsFromIntegerLabels(ids, assignment, k);
// Wrap result
Clustering<MedoidModel> result = new Clustering<>("PAM Clustering", "pam-clustering");
for(DBIDArrayIter it = medoids.iter(); it.valid(); it.advance()) {
result.addToplevelCluster(new Cluster<>(clusters[it.getOffset()], new MedoidModel(DBIDUtil.deref(it))));
}
return result;
}
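A hedged usage sketch for this PAM driver; the ELKIBuilder wiring and the parameter id are assumptions, not part of the example above.

// Hedged usage sketch: run PAM with k = 3 on an existing database/relation.
KMedoidsPAM<DoubleVector> pam = new ELKIBuilder<>(KMedoidsPAM.class) //
    .with(KMeans.K_ID, 3) //
    .build();
Clustering<MedoidModel> clustering = pam.run(database, relation);
for(Cluster<MedoidModel> c : clustering.getAllClusters()) {
  System.out.println("Medoid " + c.getModel().getMedoid() + ": " + c.size() + " objects");
}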
Example 10: run
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
/**
* Run the algorithm.
*
* @param relation data relation.
* @return Outlier result
*/
public OutlierResult run(Relation<O> relation) {
final int k1 = k + 1; // Query size
final double perplexity = k / 3.;
KNNQuery<O> knnq = relation.getKNNQuery(getDistanceFunction(), k1);
final double logPerp = FastMath.log(perplexity);
double[] p = new double[k + 10];
FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("KNNSOS scores", relation.size(), LOG) : null;
WritableDoubleDataStore scores = DataStoreUtil.makeDoubleStorage(relation.getDBIDs(), DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_DB, 1.);
for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
KNNList knns = knnq.getKNNForDBID(it, k1);
if(p.length < knns.size() + 1) {
p = new double[knns.size() + 10];
}
final DoubleDBIDListIter ki = knns.iter();
// Compute affinities
SOS.computePi(it, ki, p, perplexity, logPerp);
// Normalization factor:
double s = SOS.sumOfProbabilities(it, ki, p);
if(s > 0) {
ISOS.nominateNeighbors(it, ki, p, 1. / s, scores);
}
LOG.incrementProcessed(prog);
}
LOG.ensureCompleted(prog);
// Find minimum and maximum.
DoubleMinMax minmax = ISOS.transformScores(scores, relation.getDBIDs(), logPerp, phi);
DoubleRelation scoreres = new MaterializedDoubleRelation("kNN Stochastic Outlier Selection", "knnsos-outlier", scores, relation.getDBIDs());
OutlierScoreMeta meta = new ProbabilisticOutlierScore(minmax.getMin(), minmax.getMax(), 0.);
return new OutlierResult(meta, scoreres);
}
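The perplexity handling above follows SOS: for each object, the affinity to a neighbor is a Gaussian of the distance, with a per-object bandwidth β_i tuned (inside SOS.computePi) so that the entropy of the affinity distribution matches log(perplexity); here perplexity is tied to k via perplexity = k/3. Schematically:

p_{j|i} ∝ exp(−β_i · d(x_i, x_j)²),    with β_i chosen so that H(p_{·|i}) = log(perplexity)

Treat this as a sketch of the idea; the exact distance power is the implementation's choice.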
Example 11: run
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
/**
* Runs the algorithm in the timed evaluation part.
*
* @param database Database context
* @param relation Data relation
*/
public OutlierResult run(Database database, Relation<O> relation) {
final DistanceQuery<O> distanceQuery = database.getDistanceQuery(relation, getDistanceFunction());
KNNQuery<O> knnQuery = database.getKNNQuery(distanceQuery, k + 1); // + query point
FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("Compute kNN weights", relation.size(), LOG) : null;
DoubleMinMax minmax = new DoubleMinMax();
WritableDoubleDataStore knnw_score = DataStoreUtil.makeDoubleStorage(relation.getDBIDs(), DataStoreFactory.HINT_STATIC);
for(DBIDIter iditer = relation.iterDBIDs(); iditer.valid(); iditer.advance()) {
final KNNList knn = knnQuery.getKNNForDBID(iditer, k + 1); // + query point
double skn = 0; // sum of the distances to the k nearest neighbors
int i = 0; // number of neighbors so far
for(DoubleDBIDListIter neighbor = knn.iter(); i < k && neighbor.valid(); neighbor.advance()) {
if(DBIDUtil.equal(iditer, neighbor)) {
continue;
}
skn += neighbor.doubleValue();
++i;
}
if(i < k) {
// Less than k neighbors found
// Approximative index, or k > data set size!
skn = Double.POSITIVE_INFINITY;
}
knnw_score.putDouble(iditer, skn);
minmax.put(skn);
LOG.incrementProcessed(prog);
}
LOG.ensureCompleted(prog);
DoubleRelation res = new MaterializedDoubleRelation("kNN weight Outlier Score", "knnw-outlier", knnw_score, relation.getDBIDs());
OutlierScoreMeta meta = new BasicOutlierScoreMeta(minmax.getMin(), minmax.getMax(), 0., Double.POSITIVE_INFINITY, 0.);
return new OutlierResult(meta, res);
}
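In formula form, the score computed above is the aggregate kNN distance

w(o) = Σ_{i=1..k} d(o, nn_i(o)),

with w(o) = +∞ when fewer than k genuine neighbors are available (an approximate index, or k larger than the data set). Larger sums mean sparser neighborhoods and hence more outlying objects.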
Example 12: instantiate
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
@Override
public Instance instantiate(Database database) {
Relation<V> relation = database.getRelation(getInputTypeRestriction());
DistanceQuery<V> dq = database.getDistanceQuery(relation, distFunc);
RangeQuery<V> rq = database.getRangeQuery(dq);
mvSize.reset();
mvSize2.reset();
mvCorDim.reset();
DataStore<PreDeConModel> storage = preprocess(PreDeConModel.class, relation, rq);
if(LOG.isVerbose()) {
LOG.verbose("Average neighborhood size: " + mvSize.toString());
LOG.verbose("Average correlation dimensionality: " + mvCorDim.toString());
LOG.verbose("Average correlated neighborhood size: " + mvSize2.toString());
final int dim = RelationUtil.dimensionality(relation);
if(mvSize.getMean() < 5 * dim) {
LOG.verbose("The epsilon parameter may be chosen too small.");
}
else if(mvSize.getMean() > .5 * relation.size()) {
LOG.verbose("The epsilon parameter may be chosen too large.");
}
else if(mvSize2.getMean() < 10) {
LOG.verbose("The epsilon parameter may be chosen too large, or delta too small.");
}
else if(mvSize2.getMean() < settings.minpts) {
LOG.verbose("The minPts parameter may be chosen too large.");
}
else {
LOG.verbose("As a first guess, you can try minPts < " + ((int) mvSize2.getMean()) //
+ ", but you will need to experiment with these parameters and epsilon.");
}
}
return new Instance(dq.getRelation().getDBIDs(), storage);
}
Example 13: run
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
@Override
public Clustering<KMeansModel> run(Database database, Relation<V> relation) {
if(relation.size() <= 0) {
return new Clustering<>("k-Means Clustering", "kmeans-clustering");
}
// Choose initial means
if(LOG.isStatistics()) {
LOG.statistics(new StringStatistic(KEY + ".initialization", initializer.toString()));
}
double[][] means = initializer.chooseInitialMeans(database, relation, k, getDistanceFunction());
// Setup cluster assignment store
List<ModifiableDBIDs> clusters = new ArrayList<>();
for(int i = 0; i < k; i++) {
clusters.add(DBIDUtil.newHashSet((int) (relation.size() * 2. / k)));
}
WritableIntegerDataStore assignment = DataStoreUtil.makeIntegerStorage(relation.getDBIDs(), DataStoreFactory.HINT_TEMP | DataStoreFactory.HINT_HOT, -1);
double[] varsum = new double[k];
// Cluster distances
double[][] cdist = new double[k][k];
int[][] cnum = new int[k][k - 1];
IndefiniteProgress prog = LOG.isVerbose() ? new IndefiniteProgress("K-Means iteration", LOG) : null;
DoubleStatistic varstat = LOG.isStatistics() ? new DoubleStatistic(this.getClass().getName() + ".variance-sum") : null;
LongStatistic diststat = LOG.isStatistics() ? new LongStatistic(KEY + ".distance-computations") : null;
int iteration = 0;
for(; maxiter <= 0 || iteration < maxiter; iteration++) {
LOG.incrementProcessed(prog);
recomputeSeperation(means, cdist, cnum, diststat);
boolean changed = assignToNearestCluster(relation, means, clusters, assignment, varsum, cdist, cnum, diststat);
logVarstat(varstat, varsum);
if(LOG.isStatistics()) {
LOG.statistics(diststat);
}
// Stop if no cluster assignment changed.
if(!changed) {
break;
}
// Recompute means.
means = means(clusters, means, relation);
}
LOG.setCompleted(prog);
if(LOG.isStatistics()) {
LOG.statistics(new LongStatistic(KEY + ".iterations", iteration));
}
// Wrap result
Clustering<KMeansModel> result = new Clustering<>("k-Means Clustering", "kmeans-clustering");
for(int i = 0; i < clusters.size(); i++) {
DBIDs ids = clusters.get(i);
if(ids.size() == 0) {
continue;
}
KMeansModel model = new KMeansModel(means[i], varsum[i]);
result.addToplevelCluster(new Cluster<>(ids, model));
}
return result;
}
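The cdist/cnum bookkeeping above caches pairwise distances between means and, per mean, the other means sorted by that distance. This supports the standard triangle-inequality pruning of sort-based k-means variants: by the triangle inequality,

d(c, c') ≥ 2 · d(x, c)   implies   d(x, c') ≥ d(x, c),

so once the sorted list of rivals of the current mean c crosses that threshold, no later rival can be closer and the scan stops early. (The pruning lemma is textbook; that this class is exactly ELKI's sort-means variant is an assumption.)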
Example 14: run
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
@Override
public void run() {
database.initialize();
Relation<O> relation = database.getRelation(distance.getInputTypeRestriction());
DistanceQuery<O> distanceQuery = database.getDistanceQuery(relation, distance);
KNNQuery<O> knnQ = database.getKNNQuery(distanceQuery, DatabaseQuery.HINT_HEAVY_USE);
// open file.
try (RandomAccessFile file = new RandomAccessFile(out, "rw");
FileChannel channel = file.getChannel();
// and acquire a file write lock
FileLock lock = channel.lock()) {
// write magic header
file.writeInt(KNN_CACHE_MAGIC);
int bufsize = k * 12 * 2 + 10; // Initial size, enough for 2 kNN.
ByteBuffer buffer = ByteBuffer.allocateDirect(bufsize);
FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("Computing kNN", relation.size(), LOG) : null;
for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
final KNNList nn = knnQ.getKNNForDBID(it, k);
final int nnsize = nn.size();
// Grow the buffer when needed:
if(nnsize * 12 + 10 > bufsize) {
while(nnsize * 12 + 10 > bufsize) {
bufsize <<= 1;
}
buffer = ByteBuffer.allocateDirect(bufsize);
}
buffer.clear();
ByteArrayUtil.writeUnsignedVarint(buffer, it.internalGetIndex());
ByteArrayUtil.writeUnsignedVarint(buffer, nnsize);
int c = 0;
for(DoubleDBIDListIter ni = nn.iter(); ni.valid(); ni.advance(), c++) {
ByteArrayUtil.writeUnsignedVarint(buffer, ni.internalGetIndex());
buffer.putDouble(ni.doubleValue());
}
if(c != nn.size()) {
throw new AbortException("Sizes did not agree. Cache is invalid.");
}
buffer.flip();
channel.write(buffer);
LOG.incrementProcessed(prog);
}
LOG.ensureCompleted(prog);
lock.release();
}
catch(IOException e) {
LOG.exception(e);
}
// FIXME: close!
}
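For completeness, a hedged sketch of reading the cache format written above (magic int, then per object: varint DBID, varint neighbor count, and count pairs of varint neighbor DBID plus an 8-byte double distance). ByteArrayUtil.readUnsignedVarint is assumed to mirror the writer used above, and mapping the whole file at once is a simplification of this sketch.

// Hedged reader sketch for the cache layout produced by run() above.
try (RandomAccessFile file = new RandomAccessFile(out, "r"); //
    FileChannel channel = file.getChannel()) {
  ByteBuffer buffer = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size());
  if(buffer.getInt() != KNN_CACHE_MAGIC) {
    throw new AbortException("Not a kNN cache file.");
  }
  while(buffer.hasRemaining()) {
    int id = ByteArrayUtil.readUnsignedVarint(buffer); // object DBID
    int nnsize = ByteArrayUtil.readUnsignedVarint(buffer); // neighbor count
    for(int j = 0; j < nnsize; j++) {
      int nid = ByteArrayUtil.readUnsignedVarint(buffer); // neighbor DBID
      double dist = buffer.getDouble(); // neighbor distance
      // consume (id, nid, dist) here, e.g. to rebuild a KNNList
    }
  }
}
catch(IOException e) {
  LOG.exception(e);
}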
Example 15: run
import de.lmu.ifi.dbs.elki.database.relation.Relation; // import the package/class the method depends on
/**
* Run the algorithm
*
* @param relation Data relation
* @return Outlier result
*/
public OutlierResult run(Relation<V> relation) {
// Use an array list of object IDs for fast random access by an offset
ArrayDBIDs objids = DBIDUtil.ensureArray(relation.getDBIDs());
// A bit set to flag objects as anomalous, none at the beginning
long[] bits = BitsUtil.zero(objids.size());
// Masked view of the objects currently considered normal
DBIDs normalObjs = new MaskedDBIDs(objids, bits, true);
// Complementary view: objects currently flagged as anomalous
DBIDs anomalousObjs = new MaskedDBIDs(objids, bits, false);
// resulting scores
WritableDoubleDataStore oscores = DataStoreUtil.makeDoubleStorage(relation.getDBIDs(), DataStoreFactory.HINT_TEMP | DataStoreFactory.HINT_HOT);
// compute loglikelihood
double logLike = relation.size() * logml + loglikelihoodNormal(normalObjs, relation);
// LOG.debugFine("normalsize " + normalObjs.size() + " anormalsize " +
// anomalousObjs.size() + " all " + (anomalousObjs.size() +
// normalObjs.size()));
// LOG.debugFine(logLike + " loglike beginning" +
// loglikelihoodNormal(normalObjs, database));
DoubleMinMax minmax = new DoubleMinMax();
DBIDIter iter = objids.iter();
for(int i = 0; i < objids.size(); i++, iter.advance()) {
// LOG.debugFine("i " + i);
// Change mask to make the current object anomalous
BitsUtil.setI(bits, i);
// Compute new likelihoods
double currentLogLike = normalObjs.size() * logml + loglikelihoodNormal(normalObjs, relation) + anomalousObjs.size() * logl + loglikelihoodAnomalous(anomalousObjs);
// if the loglike increases more than a threshold, object stays in
// anomalous set and is flagged as outlier
final double loglikeGain = currentLogLike - logLike;
oscores.putDouble(iter, loglikeGain);
minmax.put(loglikeGain);
if(loglikeGain > c) {
// flag as outlier
// LOG.debugFine("Outlier: " + curid + " " + (currentLogLike -
// logLike));
// Update best logLike
logLike = currentLogLike;
}
else {
// LOG.debugFine("Inlier: " + curid + " " + (currentLogLike - logLike));
// undo bit set
BitsUtil.clearI(bits, i);
}
}
OutlierScoreMeta meta = new BasicOutlierScoreMeta(minmax.getMin(), minmax.getMax(), Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, 0.0);
DoubleRelation res = new MaterializedDoubleRelation("Gaussian Mixture Outlier Score", "gaussian-mixture-outlier", oscores, relation.getDBIDs());
return new OutlierResult(meta, res);
}
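In formula form, with M the set currently flagged anomalous and logml/logl presumably the log prior weights of the normal and anomalous mixture components, the loop above greedily maximizes

LL(M) = |D∖M| · logml + L_normal(D∖M) + |M| · logl + L_anomalous(M),

moving object o into M exactly when the gain LL(M ∪ {o}) − LL(M) exceeds the threshold c; that gain is also what is reported as the per-object outlier score.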