本文整理汇总了Java中de.lmu.ifi.dbs.elki.database.ids.DBIDs类的典型用法代码示例。如果您正苦于以下问题:Java DBIDs类的具体用法?Java DBIDs怎么用?Java DBIDs使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
DBIDs类属于de.lmu.ifi.dbs.elki.database.ids包,在下文中一共展示了DBIDs类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: computeIDs
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
/**
 * Estimate the intrinsic dimensionality of every object.
 *
 * @param ids the DBIDs to process
 * @param knnQ the KNN query
 * @return The computed intrinsic dimensionalities.
 */
protected DoubleDataStore computeIDs(DBIDs ids, KNNQuery<O> knnQ) {
  WritableDoubleDataStore dims = DataStoreUtil.makeDoubleStorage(ids, DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP);
  FiniteProgress progress = LOG.isVerbose() ? new FiniteProgress("Intrinsic dimensionality", ids.size(), LOG) : null;
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    double estimate;
    try {
      estimate = estimator.estimate(knnQ, it, k_c + 1);
    }
    catch(ArithmeticException e) {
      // Estimation may fail (e.g. too many duplicate distances); fall back to 0.
      estimate = 0.;
    }
    dims.putDouble(it, estimate);
    LOG.incrementProcessed(progress);
  }
  LOG.ensureCompleted(progress);
  return dims;
}
示例2: runOnlineLOF
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
/**
 * Run OnlineLOF (with insertions and removals) on database.
 *
 * @param db the updatable database to run on
 * @return the outlier result produced before the updates
 */
@SuppressWarnings("unchecked")
private static OutlierResult runOnlineLOF(UpdatableDatabase db) {
  Relation<DoubleVector> relation = db.getRelation(TypeUtil.DOUBLE_VECTOR_FIELD);
  // Configure and run the incremental LOF variant.
  OnlineLOF<DoubleVector> algorithm = new OnlineLOF<>(k, k, neighborhoodDistanceFunction, reachabilityDistanceFunction);
  OutlierResult result = algorithm.run(db);
  // Generate random vectors matching the relation's dimensionality.
  NumberVector.Factory<DoubleVector> factory = RelationUtil.getNumberVectorFactory(relation);
  int dim = RelationUtil.dimensionality(relation);
  Random rnd = new Random(seed);
  ArrayList<DoubleVector> newObjects = new ArrayList<>(size);
  for(int i = 0; i < size; i++) {
    newObjects.add(VectorUtil.randomVector(factory, dim, rnd));
  }
  // Insert the new objects, then remove the very same objects again,
  // exercising both update paths of OnlineLOF.
  DBIDs inserted = db.insert(MultipleObjectsBundle.makeSimple(relation.getDataTypeInformation(), newObjects));
  db.delete(inserted);
  return result;
}
示例3: computeSimplifiedLRDs
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
/**
 * Compute the simplified local reachability densities.
 *
 * @param ids IDs to process
 * @param knnq kNN query class
 * @param lrds Density output
 */
private void computeSimplifiedLRDs(DBIDs ids, KNNQuery<O> knnq, WritableDoubleDataStore lrds) {
  FiniteProgress progress = LOG.isVerbose() ? new FiniteProgress("Densities", ids.size(), LOG) : null;
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    final KNNList knn = knnq.getKNNForDBID(it, k);
    double distSum = 0.0;
    int numNeighbors = 0;
    for(DoubleDBIDListIter neighbor = knn.iter(); neighbor.valid(); neighbor.advance()) {
      // The query point may be contained in its own kNN list; skip it.
      if(DBIDUtil.equal(neighbor, it)) {
        continue;
      }
      distSum += neighbor.doubleValue();
      numNeighbors++;
    }
    // A zero distance sum would divide by zero; treat as infinite density.
    final double density = (distSum > 0) ? (numNeighbors / distSum) : Double.POSITIVE_INFINITY;
    lrds.putDouble(it, density);
    LOG.incrementProcessed(progress);
  }
  LOG.ensureCompleted(progress);
}
示例4: updateWeights
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
/**
 * Update the weight list after adding a new point.
 *
 * @param weights Weight list
 * @param ids IDs
 * @param latest Added ID
 * @param distQ Distance query
 * @return Weight sum
 * @param <T> Object type
 */
protected <T> double updateWeights(WritableDoubleDataStore weights, DBIDs ids, T latest, DistanceQuery<? super T> distQ) {
  double total = 0.;
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    double current = weights.doubleValue(it);
    // Non-positive weight marks a duplicate or an already chosen point.
    if(current <= 0.) {
      continue;
    }
    // Shrink the stored weight when the newly added point is closer.
    final double candidate = distQ.distance(latest, it);
    if(candidate < current) {
      weights.putDouble(it, candidate);
      current = candidate;
    }
    total += current;
  }
  return total;
}
示例5: extractItemsets
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
/**
 * Recursively extend the current itemset with further frequent items.
 *
 * @param iset IDs supporting the current itemset
 * @param idx Per-item DBID lists ({@code null} entries were pruned as infrequent)
 * @param buf Working buffer of chosen item numbers
 * @param depth Number of items chosen so far
 * @param start First item index to consider for extension
 * @param minsupp Minimum support
 * @param solution Output list of frequent itemsets
 */
private void extractItemsets(DBIDs iset, DBIDs[] idx, int[] buf, int depth, int start, int minsupp, List<Itemset> solution) {
  // TODO: reuse arrays.
  final int newDepth = depth + 1;
  for(int item = start; item < idx.length; item++) {
    // Pruned (infrequent) items carry no index.
    if(idx[item] == null) {
      continue;
    }
    DBIDs support = mergeJoin(iset, idx[item]);
    if(support.size() < minsupp) {
      continue;
    }
    buf[depth] = item;
    int[] itemset = Arrays.copyOf(buf, newDepth);
    if(newDepth >= minlength) {
      solution.add(new SparseItemset(itemset, support.size()));
    }
    // Recurse only while longer itemsets are still wanted.
    if(newDepth <= maxlength) {
      extractItemsets(support, idx, buf, newDepth, item + 1, minsupp, solution);
    }
  }
}
示例6: applyPrescaling
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
/**
 * Prescale each vector (except when in {@code skip}) with the given scaling
 * function.
 *
 * @param scaling Scaling function, or {@code null} for no scaling
 * @param relation Relation to read
 * @param skip DBIDs to pass unmodified
 * @return New relation
 */
public static Relation<NumberVector> applyPrescaling(ScalingFunction scaling, Relation<NumberVector> relation, DBIDs skip) {
  // Without a scaling function, the input relation can be used as-is.
  if(scaling == null) {
    return relation;
  }
  DBIDs ids = relation.getDBIDs();
  NumberVector.Factory<NumberVector> factory = RelationUtil.getNumberVectorFactory(relation);
  WritableDataStore<NumberVector> scaled = DataStoreUtil.makeStorage(ids, DataStoreFactory.HINT_HOT, NumberVector.class);
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    double[] values = relation.get(it).toArray();
    if(!skip.contains(it)) {
      applyScaling(values, scaling);
    }
    scaled.put(it, factory.newNumberVector(values, ArrayLikeUtil.DOUBLEARRAYADAPTER));
  }
  return new MaterializedRelation<>(relation.getDataTypeInformation(), ids, "rescaled", scaled);
}
示例7: buildIndex
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
/**
 * Build an inverted index: for each item (dimension), the IDs of the
 * transactions containing it. Items with support below {@code minsupp}
 * are dropped (set to {@code null}).
 *
 * @param relation Data relation of bit vectors
 * @param dim Number of items (dimensions)
 * @param minsupp Minimum support
 * @return Per-item DBID lists, {@code null} for infrequent items
 */
private DBIDs[] buildIndex(Relation<BitVector> relation, int dim, int minsupp) {
  ArrayModifiableDBIDs[] index = new ArrayModifiableDBIDs[dim];
  for(int d = 0; d < dim; d++) {
    index[d] = DBIDUtil.newArray();
  }
  // Invert: record each transaction under every item it contains.
  for(DBIDIter it = relation.iterDBIDs(); it.valid(); it.advance()) {
    SparseFeatureVector<?> vec = relation.get(it);
    // TODO: only count those which satisfy minlength?
    for(int pos = vec.iter(); vec.iterValid(pos); pos = vec.iterAdvance(pos)) {
      index[vec.iterDim(pos)].add(it);
    }
  }
  // Forget non-frequent 1-itemsets; keep the rest sorted.
  for(int d = 0; d < dim; d++) {
    if(index[d].size() < minsupp) {
      index[d] = null;
    }
    else {
      index[d].sort();
    }
  }
  return index;
}
示例8: instantiate
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
@Override
public ProjectedIndex<O, I> instantiate(Relation<O> relation) {
  // Bail out if the projection cannot consume this relation's data type.
  if(!proj.getInputDataTypeInformation().isAssignableFromType(relation.getDataTypeInformation())) {
    return null;
  }
  // FIXME: non re-entrant!
  proj.initialize(relation.getDataTypeInformation());
  final Relation<I> view;
  if(materialize) {
    // Eagerly project every object into a materialized relation.
    DBIDs ids = relation.getDBIDs();
    WritableDataStore<I> projected = DataStoreUtil.makeStorage(ids, DataStoreFactory.HINT_DB, proj.getOutputDataTypeInformation().getRestrictionClass());
    for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
      projected.put(it, proj.project(relation.get(it)));
    }
    view = new MaterializedRelation<>("Projected Index", "projected-index", proj.getOutputDataTypeInformation(), projected, ids);
  }
  else {
    // Project lazily, on access.
    view = new ProjectedView<>(relation, proj);
  }
  Index inneri = inner.instantiate(view);
  return (inneri == null) ? null : new ProjectedIndex<>(relation, proj, view, inneri, norefine, kmulti);
}
示例9: varianceOfCluster
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
/**
 * Variance contribution of a single cluster.
 *
 * If possible, this information is reused from the clustering process (when a
 * KMeansModel is returned).
 *
 * @param cluster Cluster to access
 * @param distanceFunction Distance function
 * @param relation Data relation
 * @param <V> Vector type
 * @return Cluster variance
 */
public static <V extends NumberVector> double varianceOfCluster(Cluster<? extends MeanModel> cluster, NumberVectorDistanceFunction<? super V> distanceFunction, Relation<V> relation) {
  MeanModel model = cluster.getModel();
  // Reuse the variance computed during clustering when available.
  if(model instanceof KMeansModel) {
    return ((KMeansModel) model).getVarianceContribution();
  }
  // Otherwise recompute as the sum of squared distances to the mean.
  DoubleVector center = DoubleVector.wrap(model.getMean());
  final boolean isSquared = distanceFunction.isSquared();
  double sum = 0.;
  for(DBIDIter it = cluster.getIDs().iter(); it.valid(); it.advance()) {
    final double d = distanceFunction.distance(relation.get(it), center);
    // Square only if the distance function is not already squared.
    sum += isSquared ? d : d * d;
  }
  return sum;
}
示例10: removeDBIDs
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
@Override
public boolean removeDBIDs(DBIDs ids) {
  boolean changed = false;
  for(DBIDIter toRemove = ids.iter(); toRemove.valid(); toRemove.advance()) {
    final int key = toRemove.internalGetIndex();
    // TODO: when sorted, use binary search!
    for(int pos = 0; pos < size; pos++) {
      if(store[pos] == key) {
        // Swap-remove: move the last element into the freed slot.
        store[pos] = store[--size];
        changed = true;
        break;
      }
    }
  }
  return changed;
}
示例11: run
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
/**
 * Run the COPAC algorithm.
 *
 * @param database Database
 * @param relation Vector field relation
 * @return COPAC clustering
 */
public Clustering<DimensionModel> run(Database database, Relation<V> relation) {
  // COPAC neighborhood and core predicates drive a generalized DBSCAN run.
  COPACNeighborPredicate.Instance npred = new COPACNeighborPredicate<V>(settings).instantiate(database, relation);
  CorePredicate.Instance<DBIDs> cpred = new MinPtsCorePredicate(settings.minpts).instantiate(database);
  Clustering<Model> dbscanResult = new GeneralizedDBSCAN.Instance<>(npred, cpred, false).run();
  Clustering<DimensionModel> result = new Clustering<>("COPAC clustering", "copac-clustering");
  // Re-wrap the detected clusters with their correlation dimensionality.
  // Generalized DBSCAN clusterings will be flat.
  for(It<Cluster<Model>> it = dbscanResult.iterToplevelClusters(); it.valid(); it.advance()) {
    Cluster<Model> cluster = it.get();
    if(cluster.size() > 0) {
      int dim = npred.dimensionality(cluster.getIDs().iter());
      result.addToplevelCluster(new Cluster<>(cluster.getIDs(), new DimensionModel(dim)));
    }
  }
  return result;
}
示例12: loglikelihoodNormal
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
/**
 * Computes the loglikelihood of all normal objects, under a single
 * multivariate Gaussian model fit to these objects.
 *
 * @param objids Object IDs for 'normal' objects.
 * @param relation Database
 * @return loglikelihood for normal objects
 */
private double loglikelihoodNormal(DBIDs objids, Relation<V> relation) {
  if(objids.isEmpty()) {
    return 0;
  }
  CovarianceMatrix builder = CovarianceMatrix.make(relation, objids);
  double[] mean = builder.getMeanVector();
  double[][] covarianceMatrix = builder.destroyToSampleMatrix();
  // TODO: the covariance matrix may be singular — handle gracefully.
  double[][] covInv = inverse(covarianceMatrix);
  double covarianceDet = new LUDecomposition(covarianceMatrix).det();
  // Normalization constant of the Gaussian density.
  double fakt = 1.0 / FastMath.sqrt(MathUtil.powi(MathUtil.TWOPI, RelationUtil.dimensionality(relation)) * covarianceDet);
  // Work in log space: log(fakt * exp(-d/2)) == log(fakt) - d/2.
  // The previous form computed exp() first, which underflows to 0 for
  // objects far from the mean and then yields log(0) = -Infinity.
  // Hoisting log(fakt) out of the loop also avoids recomputing it.
  final double logfakt = FastMath.log(fakt);
  // for each object, accumulate the log-density
  double prob = 0;
  for(DBIDIter iter = objids.iter(); iter.valid(); iter.advance()) {
    double[] x = minusEquals(relation.get(iter).toArray(), mean);
    // Squared Mahalanobis distance to the mean.
    double mDist = transposeTimesTimes(x, covInv, x);
    prob += logfakt - mDist * .5;
  }
  return prob;
}
示例13: makeUnmodifiable
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
@Override
public StaticDBIDs makeUnmodifiable(DBIDs existing) {
  // Already static/unmodifiable — return unchanged.
  if(existing instanceof StaticDBIDs) {
    return (StaticDBIDs) existing;
  }
  // Check order matters: wrap in the most specific unmodifiable adapter
  // first, so array- and integer-based implementations keep their
  // specialized (faster) access paths.
  if(existing instanceof IntegerArrayDBIDs) {
    return new UnmodifiableIntegerArrayDBIDs((IntegerArrayDBIDs) existing);
  }
  if(existing instanceof IntegerDBIDs) {
    return new UnmodifiableIntegerDBIDs((IntegerDBIDs) existing);
  }
  if(existing instanceof ArrayDBIDs) {
    return new UnmodifiableArrayDBIDs((ArrayDBIDs) existing);
  }
  // Generic fallback wrapper for any other DBIDs implementation.
  return new UnmodifiableDBIDs(existing);
}
示例14: instantiate
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
@Override
public ProjectedIndex<O, O> instantiate(Relation<O> relation) {
  // The projection must be able to consume the relation's data type.
  if(!proj.getInputDataTypeInformation().isAssignableFromType(relation.getDataTypeInformation())) {
    return null;
  }
  proj.initialize(relation.getDataTypeInformation());
  Relation<O> view = null;
  if(materialize) {
    // Precompute the ECEF projection of every object.
    DBIDs ids = relation.getDBIDs();
    WritableDataStore<O> projected = DataStoreUtil.makeStorage(ids, DataStoreFactory.HINT_DB, proj.getOutputDataTypeInformation().getRestrictionClass());
    for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
      projected.put(it, proj.project(relation.get(it)));
    }
    view = new MaterializedRelation<>("ECEF Projection", "ecef-projection", proj.getOutputDataTypeInformation(), projected, ids);
  }
  else {
    // Project lazily, on access.
    view = new ProjectedView<>(relation, proj);
  }
  Index inneri = inner.instantiate(view);
  return (inneri == null) ? null : new LngLatAsECEFIndex<>(relation, proj, view, inneri, norefine);
}
示例15: ZCurveTransformer
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入依赖的package包/类
/**
* Constructor.
*
* @param relation Relation to transform
* @param ids IDs subset to process
*/
public ZCurveTransformer(Relation<? extends NumberVector> relation, DBIDs ids) {
this.dimensionality = RelationUtil.dimensionality(relation);
this.minValues = new double[dimensionality];
this.maxValues = new double[dimensionality];
// Compute scaling of vector space
Arrays.fill(minValues, Double.POSITIVE_INFINITY);
Arrays.fill(maxValues, Double.NEGATIVE_INFINITY);
for (DBIDIter iter = ids.iter(); iter.valid(); iter.advance()) {
NumberVector vector = relation.get(iter);
for(int dim = 0; dim < dimensionality; ++dim) {
double dimValue = vector.doubleValue(dim);
minValues[dim] = Math.min(minValues[dim], dimValue);
maxValues[dim] = Math.max(maxValues[dim], dimValue);
}
}
}