This article collects typical usage examples of the C++ method DataStore::activeIndicesBuffer. If you are unsure how DataStore::activeIndicesBuffer works or how to call it, the curated example below may help; you can also look further into the DataStore class for context.
One code example of DataStore::activeIndicesBuffer is shown below.
Example 1: getTargetLayer
double L2NormProbe::getValueInternal(double timevalue, int index) {
   if (index < 0 || index >= getParent()->getNBatch()) { return PV_FAILURE; }
   PVLayerLoc const * loc = getTargetLayer()->getLayerLoc();
   int const nx = loc->nx;
   int const ny = loc->ny;
   int const nf = loc->nf;
   PVHalo const * halo = &loc->halo;
   int const lt = halo->lt;
   int const rt = halo->rt;
   int const dn = halo->dn;
   int const up = halo->up;
   double l2normsq = 0.0;
   pvadata_t const * aBuffer = getTargetLayer()->getLayerData() + index * getTargetLayer()->getNumExtended();
   if (getMaskLayer()) {
      PVLayerLoc const * maskLoc = getMaskLayer()->getLayerLoc();
      PVHalo const * maskHalo = &maskLoc->halo;
      pvadata_t const * maskLayerData = getMaskLayer()->getLayerData() + index*getMaskLayer()->getNumExtended(); // Is there a DataStore method to return the part of the layer data for a given batch index?
      int const maskLt = maskHalo->lt;
      int const maskRt = maskHalo->rt;
      int const maskDn = maskHalo->dn;
      int const maskUp = maskHalo->up;
      if (maskHasSingleFeature()) {
         assert(getTargetLayer()->getNumNeurons()==nx*ny*nf);
         int nxy = nx*ny;
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for reduction(+ : l2normsq)
#endif // PV_USE_OPENMP_THREADS
         for (int kxy=0; kxy<nxy; kxy++) {
            int kexMask = kIndexExtended(kxy, nx, ny, 1, maskLt, maskRt, maskDn, maskUp);
            if (maskLayerData[kexMask]) {
               int featureBase = kxy*nf;
               for (int f=0; f<nf; f++) {
                  int kex = kIndexExtended(featureBase++, nx, ny, nf, lt, rt, dn, up);
                  pvadata_t val = aBuffer[kex];
                  l2normsq += val*val;
               }
            }
         }
      }
      else {
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for reduction(+ : l2normsq)
#endif // PV_USE_OPENMP_THREADS
         for (int k=0; k<getTargetLayer()->getNumNeurons(); k++) {
            int kex = kIndexExtended(k, nx, ny, nf, lt, rt, dn, up);
            int kexMask = kIndexExtended(k, nx, ny, nf, maskLt, maskRt, maskDn, maskUp);
            // A zero mask value zeroes the contribution, so no branch is needed here.
            pvadata_t val = aBuffer[kex];
            l2normsq += maskLayerData[kexMask] * val*val;
         }
      }
   }
   else {
      if (getTargetLayer()->getSparseFlag()) {
         // Sparse layers publish their active neurons through the DataStore;
         // activeIndicesBuffer returns the extended indices of the active
         // neurons for the given batch index.
         DataStore * store = parent->icCommunicator()->publisherStore(getTargetLayer()->getLayerId());
         int numActive = (int) store->numActiveBuffer(index)[0];
         unsigned int const * activeList = store->activeIndicesBuffer(index);
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for reduction(+ : l2normsq)
#endif // PV_USE_OPENMP_THREADS
         for (int k=0; k<numActive; k++) {
            int extIndex = activeList[k];
            // Contributions from neurons in the border (halo) region are zeroed out.
            int inRestricted = !extendedIndexInBorderRegion(extIndex, nx, ny, nf, halo->lt, halo->rt, halo->dn, halo->up);
            pvadata_t val = inRestricted * fabsf(aBuffer[extIndex]);
            l2normsq += val*val;
         }
      }
      else {
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for reduction(+ : l2normsq)
#endif // PV_USE_OPENMP_THREADS
         for (int k=0; k<getTargetLayer()->getNumNeurons(); k++) {
            int kex = kIndexExtended(k, nx, ny, nf, lt, rt, dn, up);
            pvadata_t val = aBuffer[kex];
            l2normsq += val*val;
         }
      }
   }
   return l2normsq;
}
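
The essential pattern for DataStore::activeIndicesBuffer appears in the sparse branch above: pair it with numActiveBuffer so the loop visits only the active neurons rather than the whole extended buffer. Below is a minimal sketch of that pattern, assuming the same accessors used in Example 1; sumActiveValues is a hypothetical helper name for illustration, not part of the library.

// Minimal sketch, assuming the DataStore accessors shown in Example 1.
// sumActiveValues is a hypothetical helper, not a library function.
double sumActiveValues(DataStore * store, pvadata_t const * aBuffer, int batchIndex) {
   int numActive = (int) store->numActiveBuffer(batchIndex)[0];              // number of active neurons in this batch element
   unsigned int const * activeList = store->activeIndicesBuffer(batchIndex); // extended indices of those neurons
   double sum = 0.0;
   for (int k = 0; k < numActive; k++) {
      sum += aBuffer[activeList[k]]; // activeList entries index into the extended activity buffer
   }
   return sum;
}

Iterating only the active list is what makes the sparse branch cheaper than the dense branch in Example 1, which visits every restricted neuron and computes kIndexExtended for each one.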