This article collects typical usage examples of the C++ method DataSpace::productOfComponents. If you are wondering what DataSpace::productOfComponents does, how to use it, or are looking for real-world examples of it, the hand-picked code samples below may help. You can also read more about its enclosing class, DataSpace.
The following shows 7 code examples of DataSpace::productOfComponents, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
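Before looking at the examples: productOfComponents simply multiplies the extents of all dimensions of a DataSpace and returns the total number of elements it describes. A minimal sketch of the idea (the SimpleExtent type below is illustrative only, not the actual PMacc DataSpace implementation):

// Illustrative stand-in for DataSpace<DIM>: a fixed per-dimension extent.
template<unsigned DIM>
struct SimpleExtent
{
    int extent[DIM];

    // Multiply all per-dimension extents, mirroring what
    // DataSpace::productOfComponents returns.
    int productOfComponents() const
    {
        int product = 1;
        for (unsigned d = 0; d < DIM; ++d)
            product *= extent[d];
        return product;
    }
};

// Usage: a 3D extent of 16 x 8 x 4 cells describes 512 elements in total.
// SimpleExtent<3> size{{16, 8, 4}};
// int elements = size.productOfComponents(); // 512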
Example 1: MappedBufferIntern
/** constructor
*
* @param size extent for each dimension (in elements)
*/
MappedBufferIntern(DataSpace<DIM> size):
DeviceBuffer<TYPE, DIM>(size, size),
pointer(nullptr), ownPointer(true)
{
#if( PMACC_CUDA_ENABLED == 1 )
CUDA_CHECK((cuplaError_t)cudaHostAlloc(&pointer, size.productOfComponents() * sizeof (TYPE), cudaHostAllocMapped));
#else
pointer = new TYPE[size.productOfComponents()];
#endif
reset(false);
}
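In this constructor productOfComponents determines the allocation size: the total element count times sizeof(TYPE). With CUDA enabled the memory is requested as mapped, page-locked host memory; otherwise a plain heap array is used. A hypothetical instantiation (element type and extents are made up for illustration, assuming the usual per-dimension DataSpace constructor):

// 2D buffer of 64 x 32 floats: 2048 elements, i.e. 8192 bytes of mapped host memory
MappedBufferIntern<float, 2> buffer(DataSpace<2>(64, 32));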
Example 2: init
virtual void init()
{
// __startAtomicTransaction(__getTransactionEvent());
size_t current_size = host->getCurrentSize();
DataSpace<DIM> hostCurrentSize = host->getCurrentDataSpace(current_size);
if (host->is1D() && device->is1D())
fastCopy(host->getPointer(), device->getPointer(), hostCurrentSize.productOfComponents());
else
copy(hostCurrentSize);
device->setCurrentSize(current_size);
this->activate();
// __setTransactionEvent(__endTransaction());
}
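When both the host and the device buffer are contiguous ("1D"), the copy can treat the whole extent as a flat array of productOfComponents elements; otherwise a dimension-aware copy is used. A rough, assumption-laden stand-in for the flat path (fastCopy's real implementation is not shown on this page):

#include <algorithm>
#include <cstddef>

// Illustrative flat copy: for contiguous data the N-dimensional extent
// collapses to a single element count, so one linear copy suffices.
template<typename T>
void flatCopy(const T* src, T* dst, std::size_t elementCount)
{
    std::copy(src, src + elementCount, dst);
}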
Example 3: addExchangeBuffer
/**
 * Add an Exchange in dedicated memory space.
 *
 * An Exchange is added to this GridBuffer. The exchange buffers use
 * their own memory instead of using the GridBuffer's memory space.
 *
 * @param receive a Mask which describes the directions for the exchange
 * @param dataSpace size of the newly created exchange buffer in each dimension
 * @param communicationTag unique tag used for the communication of this exchange
 * @param sizeOnDevice if true, internal buffers have their size information on the device, too
 */
void addExchangeBuffer(const Mask &receive, const DataSpace<DIM> &dataSpace, uint32_t communicationTag, bool sizeOnDevice = false)
{
if (hasOneExchange && (communicationTag != lastUsedCommunicationTag))
throw std::runtime_error("It is not allowed to give the same GridBuffer different communicationTags");
lastUsedCommunicationTag = communicationTag;
/*don't create buffer with 0 (zero) elements*/
if (dataSpace.productOfComponents() != 0)
{
receiveMask = receiveMask + receive;
sendMask = this->receiveMask.getMirroredMask();
Mask send = receive.getMirroredMask();
for (uint32_t ex = 1; ex < 27; ++ex)
{
if (send.isSet(ex))
{
uint32_t uniqCommunicationTag = (communicationTag << 5) | ex;
if (!hasOneExchange && !privateGridBuffer::UniquTag::getInstance().isTagUniqu(uniqCommunicationTag))
{
std::stringstream message;
message << "unique exchange communication tag ("
<< uniqCommunicationTag << ") which is created from communicationTag ("
<< communicationTag << ") is already used for another GridBuffer exchange";
throw std::runtime_error(message.str());
}
hasOneExchange = true;
if (sendExchanges[ex] != NULL)
{
throw std::runtime_error("Exchange already added!");
}
//GridLayout<DIM> memoryLayout(size);
maxExchange = std::max(maxExchange, ex + 1u);
sendExchanges[ex] = new ExchangeIntern<BORDERTYPE, DIM > (/*memoryLayout*/ dataSpace,
ex, uniqCommunicationTag, sizeOnDevice);
ExchangeType recvex = Mask::getMirroredExchangeType(ex);
maxExchange = std::max(maxExchange, recvex + 1u);
receiveExchanges[recvex] = new ExchangeIntern<BORDERTYPE, DIM > (/*memoryLayout*/ dataSpace,
recvex, uniqCommunicationTag, sizeOnDevice);
}
}
}
}
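Here productOfComponents guards against creating zero-sized exchange buffers, and the tag arithmetic makes each exchange unique: with at most 27 exchange directions (a 3x3x3 neighbourhood), the direction index fits into 5 bits, so shifting the user's communicationTag left by 5 and OR-ing in the direction yields one tag per (tag, direction) pair. A small standalone illustration of that encoding (not part of the library):

#include <cstdint>
#include <iostream>

// Encode a user communication tag together with an exchange direction (1..26).
// The direction occupies the lowest 5 bits, exactly as in the snippet above.
uint32_t encodeExchangeTag(uint32_t communicationTag, uint32_t exchangeDir)
{
    return (communicationTag << 5) | exchangeDir;
}

int main()
{
    // communicationTag 3, direction 12  ->  (3 << 5) | 12 = 96 + 12 = 108
    std::cout << encodeExchangeTag(3u, 12u) << '\n';
    return 0;
}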
Example 4: init
/*! initializes all processes to build a 3D-grid
*
* @param numberProcesses number of GPU nodes (MPI processes) in each dimension
* @param periodic specifying whether the grid is periodic (1) or not (0) in each dimension
*
* \warning throws std::invalid_argument if the product of the per-dimension process counts does not equal the total number of MPI ranks
*/
void init(DataSpace<DIM3> numberProcesses, DataSpace<DIM3> periodic) throw (std::invalid_argument)
{
this->periodic = periodic;
//check if parameters are correct
MPI_CHECK(MPI_Comm_size(MPI_COMM_WORLD, &mpiSize));
if (numberProcesses.productOfComponents() != mpiSize)
{
throw std::invalid_argument("wrong parameters or wrong mpirun-call!");
}
//1. create Communicator (computing_comm) of computing nodes (ranks 0...n)
MPI_Comm computing_comm = MPI_COMM_WORLD;
yoffset = 0;
// 2. create topology
//int dims[3];
dims[0] = numberProcesses.x();
dims[1] = numberProcesses.y();
dims[2] = numberProcesses.z();
topology = MPI_COMM_NULL;
int periods[] = {periodic.x(), periodic.y(), periodic.z()};
/*create new communicator based on cartesian coordinates*/
MPI_CHECK(MPI_Cart_create(computing_comm, DIM, dims, periods, 0, &topology));
// 3. update Host rank
hostRank = UpdateHostRank();
//4. update Coordinates
updateCoordinates();
}
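In this example productOfComponents acts as a sanity check: the requested process grid must contain exactly as many ranks as the MPI job was started with, otherwise std::invalid_argument is thrown. A hypothetical call (the manager object and the DataSpace<DIM3>(x, y, z) constructor are assumed here for illustration):

// Started with `mpirun -n 4`: a 2 x 2 x 1 grid matches the 4 ranks,
// while a 2 x 2 x 2 grid (8 ranks) would throw std::invalid_argument.
manager.init(DataSpace<DIM3>(2, 2, 1), DataSpace<DIM3>(1, 1, 0));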
Example 5: assign
DINLINE void
ComputeGridValuePerFrame<T_ParticleShape, T_DerivedAttribute>::operator()
(
T_Acc const & acc,
FrameType& frame,
const int localIdx,
const TVecSuperCell superCell,
BoxTmp& tmpBox
)
{
/* \todo in the future and if useful, the functor can be a parameter */
T_DerivedAttribute particleAttribute;
auto particle = frame[localIdx];
/* particle attributes: in-cell position and generic, derived attribute */
const floatD_X pos = particle[position_];
const auto particleAttr = particleAttribute( particle );
/** Shift to the cell the particle belongs to
* range of particleCell: [DataSpace<simDim>::create(0), TVecSuperCell]
*/
const int particleCellIdx = particle[localCellIdx_];
const DataSpace<TVecSuperCell::dim> particleCell(
DataSpaceOperations<TVecSuperCell::dim>::map( superCell, particleCellIdx )
);
auto fieldTmpShiftToParticle = tmpBox.shift(particleCell);
/* loop around the particle's cell (according to shape) */
const DataSpace<simDim> lowMargin(LowerMargin().toRT());
const DataSpace<simDim> upMargin(UpperMargin().toRT());
const DataSpace<simDim> marginSpace(upMargin + lowMargin + 1);
const int numWriteCells = marginSpace.productOfComponents();
for (int i = 0; i < numWriteCells; ++i)
{
/** for the current cell i the multi dimensional index currentCell is only positive:
* allowed range = [DataSpace<simDim>::create(0), LowerMargin+UpperMargin]
*/
const DataSpace<simDim> currentCell = DataSpaceOperations<simDim>::map(marginSpace, i);
/** calculate the offset between the current cell i with simDim index currentCell
* and the cell of the particle (particleCell) in cells
*/
const DataSpace<simDim> offsetParticleCellToCurrentCell = currentCell - lowMargin;
/** assign particle contribution component-wise to the lower left corner of
* the cell i
* \todo take care of non-yee cells
*/
float_X assign( 1.0 );
for (uint32_t d = 0; d < simDim; ++d)
assign *= AssignmentFunction()(float_X(offsetParticleCellToCurrentCell[d]) - pos[d]);
/** add contribution of the particle times the generic attribute
* to cell i
* note: the .x() is used because FieldTmp is a scalar field with only
* one "x" component
*/
atomicAdd(
&(fieldTmpShiftToParticle(offsetParticleCellToCurrentCell).x()),
assign * particleAttr,
::alpaka::hierarchy::Threads{}
);
}
}
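The loop bound numWriteCells is again a productOfComponents: the particle shape's support, LowerMargin + UpperMargin + 1 per dimension, spans marginSpace, and its product is the number of cells the particle contributes to. A worked example under assumed margins (the actual margins depend on the chosen particle shape):

// Assumed 3D case with lower margin 1 and upper margin 1 in every dimension:
// marginSpace = 1 + 1 + 1 = 3 per dimension  ->  3 * 3 * 3 = 27 cells receive a contribution.
const int marginExtent[3] = {3, 3, 3};
const int numWriteCells = marginExtent[0] * marginExtent[1] * marginExtent[2]; // 27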
Example 6: Buffer
/**
* constructor
* @param dataSpace extent of the buffer in each dimension (in elements)
*/
Buffer(DataSpace<DIM> dataSpace) :
data_space(dataSpace),data1D(true)
{
CUDA_CHECK(cudaMallocHost(&current_size, sizeof (size_t)));
*current_size = dataSpace.productOfComponents();
}
Example 7: HeapBuffer
/**
* Constructor
* \see GridBuffer
*/
HeapBuffer(DataSpace<DIM1> dataSpace) :
GridBuffer<VALUE, DIM1, BORDERVALUE>(dataSpace), ringBuffer(NULL)
{
ringBuffer = new RingBuffer<TYPE, TYPE > (dataSpace.productOfComponents());
}
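In these last two constructors productOfComponents again supplies the total element count: in Example 6 it initializes the buffer's logical size to the full extent, and in Example 7 it sets the capacity of the newly created RingBuffer.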