本文整理汇总了Java中de.lmu.ifi.dbs.elki.database.ids.DBIDs.size方法的典型用法代码示例。如果您正苦于以下问题:Java DBIDs.size方法的具体用法?Java DBIDs.size怎么用?Java DBIDs.size使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类de.lmu.ifi.dbs.elki.database.ids.DBIDs
的用法示例。
在下文中一共展示了DBIDs.size方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: insertAll
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Inserts the specified objects into this index. When the index supports
 * bulk loading, one leaf entry is built per object and all of them are
 * loaded in a single pass; otherwise the objects are inserted one by one.
 *
 * @param ids the objects to be inserted
 */
@Override
public final void insertAll(DBIDs ids) {
  // Nothing to do for empty or single-element sets.
  if(ids.isEmpty() || (ids.size() == 1)) {
    return;
  }
  if(!canBulkLoad()) {
    // Fallback: sequential insertion.
    for(DBIDIter iter = ids.iter(); iter.valid(); iter.advance()) {
      insert(iter);
    }
  }
  else {
    // Build all leaf entries first, then bulk-load them at once.
    List<SpatialEntry> entries = new ArrayList<>(ids.size());
    for(DBIDIter iter = ids.iter(); iter.valid(); iter.advance()) {
      entries.add(createNewLeafEntry(DBIDUtil.deref(iter)));
    }
    bulkLoad(entries);
  }
  doExtraIntegrityChecks();
}
示例2: run
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Run the FastOPTICS algorithm on the given relation and produce a cluster
 * order, expanding from each not-yet-processed point as in classic OPTICS.
 *
 * @param db Database
 * @param rel Relation of vectors to process
 * @return the computed cluster order
 */
public ClusterOrder run(Database db, Relation<V> rel) {
  DBIDs ids = rel.getDBIDs();
  // NOTE(review): distance is hard-wired to Euclidean here, independent of
  // any configured distance function — confirm this is intentional.
  DistanceQuery<V> dq = db.getDistanceQuery(rel, EuclideanDistanceFunction.STATIC);
  // initialize points used and reachability distance
  reachDist = DataStoreUtil.makeDoubleStorage(ids, DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, UNDEFINED_DISTANCE);
  // compute projections, density estimates and neighborhoods
  index.computeSetsBounds(rel, minPts, ids); // project points
  inverseDensities = index.computeAverageDistInSet(); // compute densities
  neighs = index.getNeighs(); // get neighbors of points
  // compute ordering as for OPTICS
  FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("FastOPTICS clustering", ids.size(), LOG) : null;
  processed = DBIDUtil.newHashSet(ids.size());
  order = new ClusterOrder(ids, "FastOPTICS Cluster Order", "fast-optics");
  // Expand the cluster order from every point not yet visited.
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    if(!processed.contains(it)) {
      expandClusterOrder(DBIDUtil.deref(it), order, dq, prog);
    }
  }
  index.logStatistics();
  LOG.ensureCompleted(prog);
  return order;
}
示例3: extractItemsets
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Recursively extract all frequent itemsets extending the current prefix.
 *
 * @param iset transactions supporting the current prefix
 * @param idx per-item transaction index ({@code null} = pruned item)
 * @param buf working buffer holding the current prefix items
 * @param depth current prefix length
 * @param start first item index to try as an extension
 * @param minsupp minimum support threshold
 * @param solution output collection of frequent itemsets
 */
private void extractItemsets(DBIDs iset, DBIDs[] idx, int[] buf, int depth, int start, int minsupp, List<Itemset> solution) {
  // TODO: reuse arrays.
  final int newdepth = depth + 1;
  for(int item = start; item < idx.length; item++) {
    final DBIDs column = idx[item];
    if(column == null) {
      continue;
    }
    // Transactions containing both the prefix and this item.
    final DBIDs support = mergeJoin(iset, column);
    if(support.size() < minsupp) {
      continue; // Not frequent enough.
    }
    buf[depth] = item;
    if(newdepth >= minlength) {
      solution.add(new SparseItemset(Arrays.copyOf(buf, newdepth), support.size()));
    }
    if(newdepth <= maxlength) {
      extractItemsets(support, idx, buf, newdepth, item + 1, minsupp, solution);
    }
  }
}
示例4: run
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Evaluate range queries on a random sample of the relation and log mean
 * and standard deviation of the result sizes, both raw and normalized by
 * the relation size.
 *
 * @param database Database
 * @param relation Relation to query
 * @return {@code null} — results are reported via the statistics log only
 */
public Result run(Database database, Relation<V> relation) {
  DistanceQuery<V> distQuery = database.getDistanceQuery(relation, getDistanceFunction());
  RangeQuery<V> rangeQuery = database.getRangeQuery(distQuery, radius);
  final DBIDs sample = DBIDUtil.randomSample(relation.getDBIDs(), sampling, random);
  MeanVariance stats = new MeanVariance();
  FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("Performing range queries", sample.size(), LOG) : null;
  for(DBIDIter iter = sample.iter(); iter.valid(); iter.advance()) {
    stats.put(rangeQuery.getRangeForDBID(iter, radius).size());
    LOG.incrementProcessed(prog);
  }
  LOG.ensureCompleted(prog);
  // Report raw and size-normalized statistics under this class's name.
  final String prefix = this.getClass().getName();
  LOG.statistics(new DoubleStatistic(prefix + ".mean", stats.getMean()));
  LOG.statistics(new DoubleStatistic(prefix + ".std", stats.getSampleStddev()));
  LOG.statistics(new DoubleStatistic(prefix + ".norm.mean", stats.getMean() / relation.size()));
  LOG.statistics(new DoubleStatistic(prefix + ".norm.std", stats.getSampleStddev() / relation.size()));
  LOG.statistics(new LongStatistic(prefix + ".samplesize", sample.size()));
  return null;
}
示例5: delete
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Removes the objects from the database (by calling
 * {@link #doDelete(DBIDRef)} for each object) and indexes and fires a
 * deletion event.
 *
 * {@inheritDoc}
 */
@Override
public MultipleObjectsBundle delete(DBIDs ids) {
  // Snapshot the data of the doomed objects first — one column per
  // relation — so the caller receives what was deleted.
  MultipleObjectsBundle bundle = new MultipleObjectsBundle();
  for(Relation<?> relation : relations) {
    ArrayList<Object> column = new ArrayList<>(ids.size());
    for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
      column.add(relation.get(it));
    }
    bundle.appendColumn(relation.getDataTypeInformation(), column);
  }
  // Now actually remove the objects from the database.
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    doDelete(it);
  }
  // Notify listeners about the removal.
  eventManager.fireObjectsRemoved(ids);
  return bundle;
}
示例6: getMeanKNNList
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Compute, for each k in [1, kmax], the mean distance to the k-th nearest
 * neighbor over all given objects.
 *
 * @param ids objects to average over
 * @param knnLists precomputed kNN lists, keyed by object id
 * @return array of length {@code settings.kmax}; entry {@code k} holds the
 *         mean distance to the (k+1)-th neighbor (all zeros when
 *         {@code ids} is empty)
 */
private double[] getMeanKNNList(DBIDs ids, Map<DBID, KNNList> knnLists) {
  double[] means = new double[settings.kmax];
  // Guard: without the guard, the division below would be 0/0 and the
  // entire result array would be NaN for an empty input set.
  if(ids.isEmpty()) {
    return means;
  }
  for(DBIDIter iter = ids.iter(); iter.valid(); iter.advance()) {
    DBID id = DBIDUtil.deref(iter);
    KNNList knns = knnLists.get(id);
    // Accumulate the first kmax neighbor distances (list may be shorter).
    int k = 0;
    for(DoubleDBIDListIter it = knns.iter(); k < settings.kmax && it.valid(); it.advance(), k++) {
      means[k] += it.doubleValue();
    }
  }
  final int size = ids.size();
  for(int k = 0; k < settings.kmax; k++) {
    means[k] /= size;
  }
  return means;
}
示例7: computeCOFScores
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Compute Connectivity outlier factors.
 *
 * @param knnq KNN query
 * @param ids IDs to process
 * @param acds Average chaining distances
 * @param cofs Connectivity outlier factor storage
 * @param cofminmax Score minimum/maximum tracker
 */
private void computeCOFScores(KNNQuery<O> knnq, DBIDs ids, DoubleDataStore acds, WritableDoubleDataStore cofs, DoubleMinMax cofminmax) {
  FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("COF for objects", ids.size(), LOG) : null;
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    // Sum the average chaining distances over the k nearest neighbors,
    // excluding the query point itself.
    double acdsum = 0.;
    for(DBIDIter nb = knnq.getKNNForDBID(it, k).iter(); nb.valid(); nb.advance()) {
      if(!DBIDUtil.equal(nb, it)) {
        acdsum += acds.doubleValue(nb);
      }
    }
    // COF: own chaining distance relative to the neighborhood average.
    final double cof;
    if(acdsum > 0.) {
      cof = acds.doubleValue(it) * k / acdsum;
    }
    else {
      cof = acds.doubleValue(it) > 0. ? Double.POSITIVE_INFINITY : 1.;
    }
    cofs.putDouble(it, cof);
    // update minimum and maximum
    cofminmax.put(cof);
    LOG.incrementProcessed(prog);
  }
  LOG.ensureCompleted(prog);
}
示例8: computeSimplifiedLOFs
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Compute the simplified LOF factors.
 *
 * @param ids IDs to compute for
 * @param knnq kNN query class
 * @param slrds Object densities
 * @param lofs SLOF output storage
 * @param lofminmax Minimum and maximum scores
 */
private void computeSimplifiedLOFs(DBIDs ids, KNNQuery<O> knnq, WritableDoubleDataStore slrds, WritableDoubleDataStore lofs, DoubleMinMax lofminmax) {
  FiniteProgress progressLOFs = LOG.isVerbose() ? new FiniteProgress("Simplified LOF scores", ids.size(), LOG) : null;
  for(DBIDIter iter = ids.iter(); iter.valid(); iter.advance()) {
    final double lof;
    final double lrdp = slrds.doubleValue(iter);
    final KNNList neighbors = knnq.getKNNForDBID(iter, k);
    if(!Double.isInfinite(lrdp)) {
      // LOF = mean neighbor density divided by the own density.
      double sum = 0.;
      int count = 0;
      for(DBIDIter neighbor = neighbors.iter(); neighbor.valid(); neighbor.advance()) {
        // skip the point itself
        if(DBIDUtil.equal(neighbor, iter)) {
          continue;
        }
        final double val = slrds.doubleValue(neighbor);
        sum += val;
        count++;
        // An infinite neighbor density dominates the sum — stop early;
        // the resulting lof below becomes infinite.
        if(Double.isInfinite(val)) {
          break;
        }
      }
      lof = sum / (lrdp * count);
    }
    else {
      // Own density is infinite: the point is at least as dense as its
      // neighborhood, so it is not an outlier.
      lof = 1.0;
    }
    lofs.putDouble(iter, lof);
    // update minimum and maximum
    lofminmax.put(lof);
    LOG.incrementProcessed(progressLOFs);
  }
  LOG.ensureCompleted(progressLOFs);
}
示例9: makeDoubleStorage
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Create a writable double-valued data store for the given IDs.
 *
 * A dense array store is used when the IDs form a contiguous range; a
 * hash-map based store otherwise.
 *
 * @param ids IDs to allocate storage for
 * @param hints storage hints (not used by this factory)
 * @return writable double storage
 */
@Override
public WritableDoubleDataStore makeDoubleStorage(DBIDs ids, int hints) {
  if(!(ids instanceof DBIDRange)) {
    return new MapIntegerDBIDDoubleStore(ids.size());
  }
  final DBIDRange range = (DBIDRange) ids;
  return new ArrayDoubleStore(range.size(), range);
}
示例10: makeCluster
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Make the cluster for the given object
 *
 * @param lead Leading object
 * @param depth Linkage depth
 * @param members Member objects (may be {@code null} for a singleton)
 * @return Cluster
 */
protected Cluster<DendrogramModel> makeCluster(DBIDRef lead, double depth, DBIDs members) {
  final String name;
  // Parentheses added for clarity: '&&' binds tighter than '||'.
  if(members == null || (members.size() == 1 && members.contains(lead))) {
    // Singleton cluster containing only the leading object.
    name = "obj_" + DBIDUtil.toString(lead);
    if(members == null) {
      ArrayModifiableDBIDs m = DBIDUtil.newArray(1);
      m.add(lead);
      members = m;
    }
  }
  else if(members.isEmpty()) {
    // Merge node without own members.
    name = "mrg_" + DBIDUtil.toString(lead) + "_" + depth;
  }
  else if(depth < Double.POSITIVE_INFINITY) {
    name = "clu_" + DBIDUtil.toString(lead) + "_" + depth;
  }
  else {
    // Complete data set only?
    name = "top_" + DBIDUtil.toString(lead);
  }
  // Attach a prototype to the dendrogram node when one is available.
  DendrogramModel model;
  if(members != null && !members.isEmpty() && pointerresult instanceof PointerPrototypeHierarchyRepresentationResult) {
    model = new PrototypeDendrogramModel(depth, ((PointerPrototypeHierarchyRepresentationResult) pointerresult).findPrototype(members));
  }
  else {
    model = new DendrogramModel(depth);
  }
  return new Cluster<>(name, members, model);
}
示例11: run
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Run the algorithm, with separate radius relation
 *
 * @param database Database
 * @param relation Relation
 * @param radrel Radius relation
 * @return Null result
 */
public Result run(Database database, Relation<O> relation, Relation<NumberVector> radrel) {
  if(queries != null) {
    throw new AbortException("This 'run' method will not use the given query set!");
  }
  // Get a distance and range query instance.
  DistanceQuery<O> distQuery = database.getDistanceQuery(relation, getDistanceFunction());
  RangeQuery<O> rangeQuery = database.getRangeQuery(distQuery);
  final DBIDs sample = DBIDUtil.randomSample(relation.getDBIDs(), sampling, random);
  FiniteProgress prog = LOG.isVeryVerbose() ? new FiniteProgress("kNN queries", sample.size(), LOG) : null;
  MeanVariance mv = new MeanVariance();
  int hash = 0;
  for(DBIDIter iditer = sample.iter(); iditer.valid(); iditer.advance()) {
    // Per-object query radius from the second relation.
    final double r = radrel.get(iditer).doubleValue(0);
    DoubleDBIDList rres = rangeQuery.getRangeForDBID(iditer, r);
    // Fold the result IDs into a checksum of the whole run.
    int ichecksum = 0;
    for(DBIDIter it = rres.iter(); it.valid(); it.advance()) {
      ichecksum += DBIDUtil.asInteger(it);
    }
    hash = Util.mixHashCodes(hash, ichecksum);
    mv.put(rres.size());
    LOG.incrementProcessed(prog);
  }
  LOG.ensureCompleted(prog);
  if(LOG.isStatistics()) {
    LOG.statistics("Result hashcode: " + hash);
    LOG.statistics("Mean number of results: " + mv.getMean() + " +- " + mv.getNaiveStddev());
  }
  return null;
}
示例12: makeRecordStorage
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Create a writable record store for the given IDs, with one column per
 * data class.
 *
 * A dense array store is used when the IDs form a contiguous range; a
 * hash-map based store otherwise.
 *
 * @param ids IDs to allocate storage for
 * @param hints storage hints (not used by this factory)
 * @param dataclasses column value types
 * @return writable record store
 */
@Override
public WritableRecordStore makeRecordStorage(DBIDs ids, int hints, Class<?>... dataclasses) {
  if(!(ids instanceof DBIDRange)) {
    return new MapIntegerDBIDRecordStore(ids.size(), dataclasses.length);
  }
  final DBIDRange range = (DBIDRange) ids;
  return new ArrayRecordStore(new Object[range.size()][dataclasses.length], range);
}
示例13: computeROCResult
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Materialize the ROC curve and its area under curve for an ordering.
 *
 * @param size expected number of objects (the database size)
 * @param positiveids IDs labeled as positive
 * @param order ranked object IDs to evaluate
 * @return ROC curve together with its AUC value
 */
private ROCResult computeROCResult(int size, SetDBIDs positiveids, DBIDs order) {
  // An incomplete ordering would silently bias the curve — reject it.
  if(order.size() != size) {
    throw new IllegalStateException("Iterable result doesn't match database size - incomplete ordering?");
  }
  XYCurve curve = ROCEvaluation.materializeROC(new DBIDsTest(positiveids), new SimpleAdapter(order.iter()));
  return new ROCResult(curve, XYCurve.areaUnderCurve(curve));
}
示例14: insertAll
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Insert all objects by building one leaf entry per object and delegating
 * to the list-based {@code insertAll} variant.
 *
 * @param ids objects to insert
 */
@Override
public void insertAll(DBIDs ids) {
  List<MTreeEntry> entries = new ArrayList<>(ids.size());
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    final DBID id = DBIDUtil.deref(it);
    // Distance to the parent routing object is not known yet: NaN.
    entries.add(createNewLeafEntry(id, relation.get(id), Double.NaN));
  }
  insertAll(entries);
}
示例15: run
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Run the reference-point outlier algorithm on the given relation.
 *
 * @param database Database
 * @param relation Relation to process
 * @return Outlier result with scores in [0, 1] (higher = more outlying)
 * @throws AbortException if no reference points are available, or if k is
 *         not smaller than the database size
 */
public OutlierResult run(Database database, Relation<? extends NumberVector> relation) {
  @SuppressWarnings("unchecked")
  PrimitiveDistanceQuery<? super NumberVector> distq = (PrimitiveDistanceQuery<? super NumberVector>) database.getDistanceQuery(relation, distanceFunction);
  Collection<? extends NumberVector> refPoints = refp.getReferencePoints(relation);
  if(refPoints.isEmpty()) {
    throw new AbortException("Cannot compute ROS without reference points!");
  }
  DBIDs ids = relation.getDBIDs();
  if(k >= ids.size()) {
    throw new AbortException("k must not be chosen larger than the database size!");
  }
  // storage of distance/score values.
  WritableDoubleDataStore rbod_score = DataStoreUtil.makeDoubleStorage(ids, DataStoreFactory.HINT_STATIC | DataStoreFactory.HINT_HOT, Double.NaN);
  // Compute density estimation: each reference point contributes its
  // distance vector to the per-object density store.
  for(NumberVector refPoint : refPoints) {
    DoubleDBIDList referenceDists = computeDistanceVector(refPoint, relation, distq);
    updateDensities(rbod_score, referenceDists);
  }
  // compute maximum density
  DoubleMinMax mm = new DoubleMinMax();
  for(DBIDIter iditer = relation.iterDBIDs(); iditer.valid(); iditer.advance()) {
    mm.put(rbod_score.doubleValue(iditer));
  }
  // compute ROS: invert and rescale densities into [0, 1] scores, so the
  // sparsest object receives the highest outlier score.
  double scale = mm.getMax() > 0. ? 1. / mm.getMax() : 1.;
  mm.reset(); // Reuse
  for(DBIDIter iditer = relation.iterDBIDs(); iditer.valid(); iditer.advance()) {
    double score = 1 - (rbod_score.doubleValue(iditer) * scale);
    mm.put(score);
    rbod_score.putDouble(iditer, score);
  }
  DoubleRelation scoreResult = new MaterializedDoubleRelation("Reference-points Outlier Scores", "reference-outlier", rbod_score, relation.getDBIDs());
  OutlierScoreMeta scoreMeta = new BasicOutlierScoreMeta(mm.getMin(), mm.getMax(), 0., 1., 0.);
  OutlierResult result = new OutlierResult(scoreMeta, scoreResult);
  // adds reference points to the result. header information for the
  // visualizer to find the reference points in the result
  result.addChildResult(new ReferencePointsResult<>("Reference points", "reference-points", refPoints));
  return result;
}