本文整理汇总了Java中de.lmu.ifi.dbs.elki.database.ids.DBIDs.iter方法的典型用法代码示例。如果您正苦于以下问题:Java DBIDs.iter方法的具体用法?Java DBIDs.iter怎么用?Java DBIDs.iter使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类de.lmu.ifi.dbs.elki.database.ids.DBIDs
的用法示例。
在下文中一共展示了DBIDs.iter方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: applyPrescaling
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
* Prescale each vector (except when in {@code skip}) with the given scaling
* function.
*
* @param scaling Scaling function
* @param relation Relation to read
* @param skip DBIDs to pass unmodified
* @return New relation
*/
/**
 * Rescale every vector of a relation with the given scaling function,
 * passing the vectors listed in {@code skip} through unmodified.
 *
 * @param scaling Scaling function; {@code null} returns the input relation unchanged
 * @param relation Relation to read the vectors from
 * @param skip DBIDs whose vectors are copied without rescaling
 * @return Materialized relation holding the rescaled vectors
 */
public static Relation<NumberVector> applyPrescaling(ScalingFunction scaling, Relation<NumberVector> relation, DBIDs skip) {
  if(scaling == null) {
    return relation; // Nothing to do.
  }
  final NumberVector.Factory<NumberVector> factory = RelationUtil.getNumberVectorFactory(relation);
  final DBIDs ids = relation.getDBIDs();
  WritableDataStore<NumberVector> contents = DataStoreUtil.makeStorage(ids, DataStoreFactory.HINT_HOT, NumberVector.class);
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    final double[] buf = relation.get(it).toArray();
    if(!skip.contains(it)) {
      applyScaling(buf, scaling);
    }
    contents.put(it, factory.newNumberVector(buf, ArrayLikeUtil.DOUBLEARRAYADAPTER));
  }
  return new MaterializedRelation<>(relation.getDataTypeInformation(), ids, "rescaled", contents);
}
示例2: varianceOfCluster
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
* Variance contribution of a single cluster.
*
* If possible, this information is reused from the clustering process (when a
* KMeansModel is returned).
*
* @param cluster Cluster to access
* @param distanceFunction Distance function
* @param relation Data relation
* @param <V> Vector type
* @return Cluster variance
*/
/**
 * Variance contribution of a single cluster.
 *
 * If the clustering process already tracked this value (a
 * {@link KMeansModel} was returned), it is reused; otherwise the sum of
 * squared distances to the cluster mean is recomputed.
 *
 * @param cluster Cluster to access
 * @param distanceFunction Distance function
 * @param relation Data relation
 * @param <V> Vector type
 * @return Cluster variance
 */
public static <V extends NumberVector> double varianceOfCluster(Cluster<? extends MeanModel> cluster, NumberVectorDistanceFunction<? super V> distanceFunction, Relation<V> relation) {
  final MeanModel model = cluster.getModel();
  // Fast path: k-means already tracked the variance contribution.
  if(model instanceof KMeansModel) {
    return ((KMeansModel) model).getVarianceContribution();
  }
  // Slow path: recompute from the members and the mean.
  final DoubleVector mean = DoubleVector.wrap(model.getMean());
  final boolean squared = distanceFunction.isSquared();
  double sum = 0.;
  for(DBIDIter it = cluster.getIDs().iter(); it.valid(); it.advance()) {
    final double d = distanceFunction.distance(relation.get(it), mean);
    // Square unless the distance function is already squared.
    sum += squared ? d : d * d;
  }
  return sum;
}
示例3: step4
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
* Fourth step: Actualize the clusters if necessary
*
* @param id the id of the current object
* @param pi Pi data store
* @param lambda Lambda data store
* @param processedIDs the already processed ids
*/
/**
 * Fourth step of the pointer-representation update: redirect the parent
 * pointer of every processed object whose merge level is not below that of
 * its current parent.
 *
 * @param id the id of the current (newly inserted) object
 * @param pi Pi data store (parent pointers)
 * @param lambda Lambda data store (merge levels)
 * @param processedIDs the already processed ids
 */
private void step4(DBIDRef id, WritableDBIDDataStore pi, WritableDoubleDataStore lambda, DBIDs processedIDs) {
  DBIDVar parent = DBIDUtil.newVar();
  // for i = 1..n
  for(DBIDIter it = processedIDs.iter(); it.valid(); it.advance()) {
    final double l_i = lambda.doubleValue(it);
    pi.assignVar(it, parent); // parent = pi(it)
    // if L(i) >= L(P(i)): P(i) = n+1
    if(l_i >= lambda.doubleValue(parent)) {
      pi.put(it, id);
    }
  }
}
示例4: assignToNearestCluster
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
* Returns a list of clusters. The k<sup>th</sup> cluster contains the ids of
* those FeatureVectors, that are nearest to the k<sup>th</sup> mean.
*
* @param relation the database to cluster
* @param ids IDs to process
* @param oldmeans a list of k means
* @param meanshift delta to apply to each mean
* @param changesize New cluster sizes
* @param clusters cluster assignment
* @param assignment Current cluster assignment
* @param varsum Sum of variances
* @return true when the object was reassigned
*/
/**
 * Assign each object to the nearest of the k means, accumulating the
 * per-cluster variance sums and delegating the actual (re)assignment
 * bookkeeping to {@code updateAssignment}.
 *
 * @param relation the database to cluster
 * @param ids IDs to process
 * @param oldmeans a list of k means
 * @param meanshift delta to apply to each mean
 * @param changesize New cluster sizes
 * @param clusters cluster assignment
 * @param assignment Current cluster assignment
 * @param varsum Sum of variances
 * @return true when at least one object was reassigned
 */
protected boolean assignToNearestCluster(Relation<V> relation, DBIDs ids, double[][] oldmeans, double[][] meanshift, int[] changesize, List<? extends ModifiableDBIDs> clusters, WritableIntegerDataStore assignment, double[] varsum) {
  boolean changed = false;
  final NumberVectorDistanceFunction<? super V> df = getDistanceFunction();
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    final V fv = relation.get(it);
    // Linear scan over the k means for the closest one.
    int best = 0;
    double bestdist = Double.POSITIVE_INFINITY;
    for(int i = 0; i < k; i++) {
      final double dist = df.distance(fv, DoubleVector.wrap(oldmeans[i]));
      if(dist < bestdist) {
        best = i;
        bestdist = dist;
      }
    }
    varsum[best] += bestdist;
    changed |= updateAssignment(it, fv, clusters, assignment, meanshift, changesize, best);
  }
  return changed;
}
示例5: buildDerivatorDB
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
* Builds a database for the derivator consisting of the ids in the specified
* interval.
*
* @param relation the database storing the parameterization functions
* @param interval the interval to build the database from
* @return a database for the derivator consisting of the ids in the specified
* interval
*/
/**
 * Build a proxy database for the derivator, consisting of the ids in the
 * specified interval projected to their column vectors.
 *
 * @param relation the database storing the parameterization functions
 * @param interval the interval to build the database from
 * @return a database for the derivator consisting of the ids in the
 *         specified interval
 */
private Database buildDerivatorDB(Relation<ParameterizationFunction> relation, CASHInterval interval) {
  final DBIDs ids = interval.getIDs();
  ProxyDatabase proxy = new ProxyDatabase(ids);
  final int dim = dimensionality(relation);
  SimpleTypeInformation<DoubleVector> type = new VectorFieldTypeInformation<>(DoubleVector.FACTORY, dim);
  WritableDataStore<DoubleVector> prep = DataStoreUtil.makeStorage(ids, DataStoreFactory.HINT_HOT, DoubleVector.class);
  // Project: copy each member's column vector into the new store.
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    prep.put(it, DoubleVector.wrap(relation.get(it).getColumnVector()));
  }
  if(LOG.isDebugging()) {
    LOG.debugFine("db fuer derivator : " + ids.size());
  }
  proxy.addRelation(new MaterializedRelation<>(type, ids, null, prep));
  return proxy;
}
示例6: instantiate
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Instantiate the ECEF-projected index for a relation, either as a
 * materialized projection or as a lazy projected view.
 *
 * @param relation Relation to index
 * @return the index, or {@code null} when the projection does not accept
 *         the relation's type or the inner index could not be instantiated
 */
@Override
public ProjectedIndex<O, O> instantiate(Relation<O> relation) {
  // The projection must accept the relation's data type.
  if(!proj.getInputDataTypeInformation().isAssignableFromType(relation.getDataTypeInformation())) {
    return null;
  }
  proj.initialize(relation.getDataTypeInformation());
  final Relation<O> view;
  if(!materialize) {
    view = new ProjectedView<>(relation, proj); // Lazy: project on access.
  }
  else {
    // Eager: project every object into a materialized relation.
    final DBIDs ids = relation.getDBIDs();
    WritableDataStore<O> content = DataStoreUtil.makeStorage(ids, DataStoreFactory.HINT_DB, proj.getOutputDataTypeInformation().getRestrictionClass());
    for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
      content.put(it, proj.project(relation.get(it)));
    }
    view = new MaterializedRelation<>("ECEF Projection", "ecef-projection", proj.getOutputDataTypeInformation(), content, ids);
  }
  final Index inneri = inner.instantiate(view);
  return inneri != null ? new LatLngAsECEFIndex<>(relation, proj, view, inneri, norefine) : null;
}
示例7: partitionsFromIntegerLabels
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
* Collect clusters from their [0;k-1] integer labels.
*
* @param ids Objects
* @param assignment Cluster assignment
* @param k Number of labels (must be labeled 0 to k-1)
* @return Partitions
*/
/**
 * Collect clusters from their [0;k-1] integer labels, using two passes so
 * each result array is allocated at its exact final size.
 *
 * @param ids Objects
 * @param assignment Cluster assignment
 * @param k Number of labels (must be labeled 0 to k-1)
 * @return Partitions
 */
public static ArrayModifiableDBIDs[] partitionsFromIntegerLabels(DBIDs ids, IntegerDataStore assignment, int k) {
  // First pass: count cluster sizes.
  final int[] sizes = new int[k];
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    ++sizes[assignment.intValue(it)];
  }
  ArrayModifiableDBIDs[] clusters = new ArrayModifiableDBIDs[k];
  for(int i = 0; i < k; i++) {
    clusters[i] = DBIDUtil.newArray(sizes[i]);
  }
  // Second pass: distribute the ids into their clusters.
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    clusters[assignment.intValue(it)].add(it);
  }
  return clusters;
}
示例8: getKNNForObject
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Find the k nearest neighbors of a query object by refining the candidate
 * set with exact distance computations.
 *
 * @param obj query object
 * @param k number of neighbors requested
 * @return k-nearest-neighbor list
 */
@Override
public KNNList getKNNForObject(V obj, int k) {
  final DBIDs candidates = getCandidates(obj);
  KNNHeap heap = DBIDUtil.newHeap(k);
  // Refine: compute the exact distance for each candidate.
  for(DBIDIter it = candidates.iter(); it.valid(); it.advance()) {
    final double dist = distanceQuery.distance(obj, it);
    super.incRefinements(1);
    heap.insert(dist, it);
  }
  return heap.toKNNList();
}
示例9: computeVOVs
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
* Compute variance of volumes.
*
* @param knnq KNN query
* @param ids IDs to process
* @param vols Volumes
* @param vovs Variance of Volume storage
* @param vovminmax Score minimum/maximum tracker
*/
/**
 * Compute the Variance of Volumes (VOV) score of each object from the
 * precomputed per-object volumes of its k-nearest-neighborhood.
 *
 * @param knnq KNN query
 * @param ids IDs to process
 * @param vols Precomputed volumes
 * @param vovs Variance of Volume output storage
 * @param vovminmax Score minimum/maximum tracker
 */
private void computeVOVs(KNNQuery<O> knnq, DBIDs ids, DoubleDataStore vols, WritableDoubleDataStore vovs, DoubleMinMax vovminmax) {
  FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("Variance of Volume", ids.size(), LOG) : null;
  boolean warned = false; // Emit the precision warning at most once.
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    final KNNList knns = knnq.getKNNForDBID(it, k);
    DoubleDBIDListIter nit = knns.iter();
    // First pass over the neighborhood: mean volume.
    double vbar = 0.;
    while(nit.valid()) {
      vbar += vols.doubleValue(nit);
      nit.advance();
    }
    vbar /= knns.size();
    // Second pass: sum of squared deviations from the mean.
    double vov = 0.;
    for(nit.seek(0); nit.valid(); nit.advance()) {
      final double dv = vols.doubleValue(nit) - vbar;
      vov += dv * dv;
    }
    if(!(vov < Double.POSITIVE_INFINITY) && !warned) {
      LOG.warning("Variance of Volumes has hit double precision limits, results are not reliable.");
      warned = true;
    }
    // Sample variance; infinite when degenerate or overflowed.
    vov = (knns.size() > 1 && vov < Double.POSITIVE_INFINITY) ? vov / (knns.size() - 1) : Double.POSITIVE_INFINITY;
    vovs.putDouble(it, vov);
    // Track minimum and maximum for metadata.
    vovminmax.put(vov);
    LOG.incrementProcessed(prog);
  }
  LOG.ensureCompleted(prog);
}
示例10: step3
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
* Third step: Determine the values for P and L
*
* @param id the id of the object to be inserted into the pointer
* representation
* @param pi Pi data store
* @param lambda Lambda data store
* @param processedIDs the already processed ids
* @param m Data store
*/
/**
 * Third step of the pointer-representation update: determine the new
 * values for P (parent pointer) and L (merge level).
 *
 * The order of reads and writes to {@code m} within one iteration is
 * significant: the parent's M value is read before it may be lowered.
 *
 * @param id the id of the object to be inserted into the pointer
 *        representation (object n+1)
 * @param pi Pi data store (parent pointers)
 * @param lambda Lambda data store (merge levels)
 * @param processedIDs the already processed ids
 * @param m Data store of distances between the new object and the
 *        processed objects
 */
private void step3(DBIDRef id, WritableDBIDDataStore pi, WritableDoubleDataStore lambda, DBIDs processedIDs, WritableDoubleDataStore m) {
  DBIDVar p_i = DBIDUtil.newVar();
  // for i = 1..n
  for(DBIDIter it = processedIDs.iter(); it.valid(); it.advance()) {
    double l_i = lambda.doubleValue(it);
    double m_i = m.doubleValue(it);
    pi.assignVar(it, p_i); // p_i = pi(it)
    double mp_i = m.doubleValue(p_i);
    // if L(i) >= M(i)
    if(l_i >= m_i) {
      // M(P(i)) = min { M(P(i)), L(i) }
      if(l_i < mp_i) {
        m.putDouble(p_i, l_i);
      }
      // L(i) = M(i)
      lambda.putDouble(it, m_i);
      // P(i) = n+1;
      pi.put(it, id);
    }
    else {
      // M(P(i)) = min { M(P(i)), M(i) }
      if(m_i < mp_i) {
        m.putDouble(p_i, m_i);
      }
    }
  }
}
示例11: jaccardCoefficient
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
* Compute the Jaccard coefficient
*
* @param neighbors1 SORTED neighbor ids of first
* @param neighbors2 SORTED neighbor ids of second
* @return Jaccard coefficient
*/
/**
 * Compute the Jaccard coefficient |A intersect B| / |A union B| of two
 * sorted neighbor sets, via a single sorted merge.
 *
 * NOTE(review): when both inputs are empty this returns 0/0 = NaN —
 * confirm callers never pass two empty neighborhoods.
 *
 * @param neighbors1 SORTED neighbor ids of first object
 * @param neighbors2 SORTED neighbor ids of second object
 * @return Jaccard coefficient
 */
static protected double jaccardCoefficient(DBIDs neighbors1, DBIDs neighbors2) {
  int shared = 0, total = 0;
  DBIDIter i1 = neighbors1.iter(), i2 = neighbors2.iter();
  // Merge the two sorted lists, counting union and intersection sizes.
  while(i1.valid() && i2.valid()) {
    final int c = DBIDUtil.compare(i1, i2);
    total++;
    if(c == 0) { // Present in both sets.
      shared++;
      i1.advance();
      i2.advance();
    }
    else if(c < 0) { // Only in the first set.
      i1.advance();
    }
    else { // Only in the second set.
      i2.advance();
    }
  }
  // Remaining elements are unique to one of the sets.
  while(i1.valid()) {
    total++;
    i1.advance();
  }
  while(i2.valid()) {
    total++;
    i2.advance();
  }
  return shared / (double) total;
}
示例12: getReferencePoints
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
 * Draw a random sample of the relation as reference points; falls back to
 * the full relation when the sample size exceeds the database size.
 *
 * @param db Relation to sample from
 * @return sampled reference points
 */
@Override
public Collection<? extends NumberVector> getReferencePoints(Relation<? extends NumberVector> db) {
  if(samplesize >= db.size()) {
    // Cannot sample more points than exist — use the whole relation.
    LoggingUtil.warning("Requested sample size is larger than database size!");
    return new RelationUtil.CollectionFromRelation<>(db);
  }
  final DBIDs sample = DBIDUtil.randomSample(db.getDBIDs(), samplesize, rnd);
  ArrayList<NumberVector> points = new ArrayList<>(sample.size());
  for(DBIDIter it = sample.iter(); it.valid(); it.advance()) {
    points.add(db.get(it));
  }
  return points;
}
示例13: computeLOFs
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
* Computes the Local outlier factor (LOF) of the specified objects.
*
* @param knnq the precomputed neighborhood of the objects w.r.t. the
* reference distance
* @param ids IDs to process
* @param lrds Local reachability distances
* @param lofs Local outlier factor storage
* @param lofminmax Score minimum/maximum tracker
*/
/**
 * Compute the Local Outlier Factor (LOF) of the specified objects.
 *
 * Objects whose own local reachability density is infinite receive the
 * neutral score 1.0.
 *
 * @param knnq the precomputed neighborhood of the objects w.r.t. the
 *        reference distance
 * @param ids IDs to process
 * @param lrds Local reachability densities
 * @param lofs Local outlier factor output storage
 * @param lofminmax Score minimum/maximum tracker
 */
protected void computeLOFs(KNNQuery<O> knnq, DBIDs ids, DoubleDataStore lrds, WritableDoubleDataStore lofs, DoubleMinMax lofminmax) {
  FiniteProgress progressLOFs = LOG.isVerbose() ? new FiniteProgress("LOF_SCORE for objects", ids.size(), LOG) : null;
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    final double lrdp = lrds.doubleValue(it);
    final KNNList neighbors = knnq.getKNNForDBID(it, krefer);
    double lof = 1.0; // Neutral score when lrdp is infinite.
    if(!Double.isInfinite(lrdp)) {
      double sum = 0.;
      int count = 0;
      for(DBIDIter nb = neighbors.iter(); nb.valid(); nb.advance()) {
        if(DBIDUtil.equal(nb, it)) {
          continue; // Skip the point itself.
        }
        final double val = lrds.doubleValue(nb);
        sum += val;
        count++;
        if(Double.isInfinite(val)) {
          break; // Sum is already infinite; further terms cannot change it.
        }
      }
      lof = sum / (lrdp * count);
    }
    lofs.putDouble(it, lof);
    // Track minimum and maximum for metadata.
    lofminmax.put(lof);
    LOG.incrementProcessed(progressLOFs);
  }
  LOG.ensureCompleted(progressLOFs);
}
示例14: computeCoreDists
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
* Compute the core distances for all objects.
*
* @param ids Objects
* @param knnQ kNN query
* @param minPts Minimum neighborhood size
* @return Data store with core distances
*/
/**
 * Compute the core distance — the kNN distance at {@code minPts} — for
 * every object.
 *
 * @param ids Objects
 * @param knnQ kNN query
 * @param minPts Minimum neighborhood size
 * @return Data store with core distances
 */
protected WritableDoubleDataStore computeCoreDists(DBIDs ids, KNNQuery<O> knnQ, int minPts) {
  final Logging LOG = getLogger();
  final WritableDoubleDataStore coredists = DataStoreUtil.makeDoubleStorage(ids, DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_DB);
  FiniteProgress cprog = LOG.isVerbose() ? new FiniteProgress("Computing core sizes", ids.size(), LOG) : null;
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    // Core distance = distance to the minPts-th nearest neighbor.
    coredists.put(it, knnQ.getKNNForDBID(it, minPts).getKNNDistance());
    LOG.incrementProcessed(cprog);
  }
  LOG.ensureCompleted(cprog);
  return coredists;
}
示例15: computeIDOS
import de.lmu.ifi.dbs.elki.database.ids.DBIDs; //导入方法依赖的package包/类
/**
* Computes all IDOS scores.
*
* @param ids the DBIDs to process
* @param knnQ the KNN query
* @param intDims Precomputed intrinsic dimensionalities
* @param idosminmax Output of minimum and maximum, for metadata
* @return ID scores
*/
/**
 * Compute all IDOS (intrinsic-dimensionality outlier) scores.
 *
 * NOTE(review): when no neighbor is used (the neighborhood contains only
 * the query point) the score divides by zero — confirm k_r >= 1 and
 * neighborhoods are nonempty.
 *
 * @param ids the DBIDs to process
 * @param knnQ the KNN query
 * @param intDims Precomputed intrinsic dimensionalities
 * @param idosminmax Output of minimum and maximum, for metadata
 * @return ID scores
 */
protected DoubleDataStore computeIDOS(DBIDs ids, KNNQuery<O> knnQ, DoubleDataStore intDims, DoubleMinMax idosminmax) {
  WritableDoubleDataStore ldms = DataStoreUtil.makeDoubleStorage(ids, DataStoreFactory.HINT_STATIC);
  FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("ID Outlier Scores for objects", ids.size(), LOG) : null;
  for(DBIDIter it = ids.iter(); it.valid(); it.advance()) {
    final KNNList neighbors = knnQ.getKNNForDBID(it, k_r);
    // Sum of reciprocal intrinsic dimensionalities over the neighborhood.
    double invsum = 0.;
    int used = 0;
    for(DoubleDBIDListIter nb = neighbors.iter(); nb.valid(); nb.advance()) {
      if(DBIDUtil.equal(it, nb)) {
        continue; // Skip the query point itself.
      }
      final double id = intDims.doubleValue(nb);
      invsum += id > 0 ? 1.0 / id : 0.;
      if(++used == k_r) {
        break; // Always stop after at most k_r elements.
      }
    }
    final double id_q = intDims.doubleValue(it);
    final double idos = id_q > 0 ? id_q * invsum / used : 0.;
    ldms.putDouble(it, idos);
    idosminmax.put(idos);
    LOG.incrementProcessed(prog);
  }
  LOG.ensureCompleted(prog);
  return ldms;
}