This article collects typical usage examples of the C++ MPI_Address function. If you are wondering how MPI_Address is actually used in C++ code, the curated examples here should help.
Below, 15 code examples of MPI_Address are shown, ordered by popularity by default.
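A note on the API before the examples: MPI_Address and MPI_Type_struct were deprecated in MPI-2 and removed from MPI-3, where MPI_Get_address, MPI_Type_create_struct and MPI_Type_create_resized take their place. The following minimal sketch shows the same address-based pattern the examples below rely on, written against the current API; the Particle struct and the buildParticleType function are hypothetical and exist only for illustration.
#include <mpi.h>
#include <cstddef>

// Hypothetical record type used only for this sketch.
struct Particle {
  double position[3];
  int    id;
};

// Build and commit an MPI datatype describing Particle, using the MPI-3
// replacements for MPI_Address/MPI_Type_struct.
MPI_Datatype buildParticleType() {
  Particle dummy;
  MPI_Aint base, addr[2];
  MPI_Get_address(&dummy, &base);
  MPI_Get_address(&dummy.position[0], &addr[0]);
  MPI_Get_address(&dummy.id, &addr[1]);

  int          blocklen[2] = {3, 1};
  MPI_Aint     disp[2]     = {addr[0] - base, addr[1] - base};
  MPI_Datatype types[2]    = {MPI_DOUBLE, MPI_INT};

  MPI_Datatype tmp, result;
  MPI_Type_create_struct(2, blocklen, disp, types, &tmp);
  // Pin the extent to sizeof(Particle) so that sending arrays of Particle
  // strides correctly; this replaces the MPI_UB marker used below.
  MPI_Type_create_resized(tmp, 0, static_cast<MPI_Aint>(sizeof(Particle)), &result);
  MPI_Type_free(&tmp);
  MPI_Type_commit(&result);
  return result;
}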
Example 1: MPI_Address
void peano::applications::faxen::repositories::FaxenBatchJobRepositoryStatePacked::initDatatype() {
const int Attributes = 2;
MPI_Datatype subtypes[Attributes] = {
MPI_INT, //action
MPI_UB // end/displacement flag
};
int blocklen[Attributes] = {
1, //action
1 // end/displacement flag
};
MPI_Aint disp[Attributes];
FaxenBatchJobRepositoryStatePacked dummyFaxenBatchJobRepositoryStatePacked[2];
MPI_Aint base;
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[0]))), &base);
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[0]._persistentRecords._action))), &disp[0] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[1]._persistentRecords._action))), &disp[1] );
for (int i=1; i<Attributes; i++) {
assertion1( disp[i] > disp[i-1], i );
}
for (int i=0; i<Attributes; i++) {
disp[i] -= base;
}
MPI_Type_struct( Attributes, blocklen, disp, subtypes, &FaxenBatchJobRepositoryStatePacked::Datatype );
MPI_Type_commit( &FaxenBatchJobRepositoryStatePacked::Datatype );
}
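The two-element dummy array plus the trailing MPI_UB entry is how these generated Peano routines fix the extent of the datatype: the address of _action in dummyFaxenBatchJobRepositoryStatePacked[1] acts as the upper-bound marker, so the extent comes out as the size of one record and arrays of records can be sent with count > 1. Once initDatatype() has run on every rank, the committed handle is used like any predefined datatype; a minimal usage sketch, assuming rank holds the caller's rank in MPI_COMM_WORLD:
FaxenBatchJobRepositoryStatePacked state;
if (rank == 0) {
  MPI_Send(&state, 1, FaxenBatchJobRepositoryStatePacked::Datatype,
           1, 0, MPI_COMM_WORLD);
}
else if (rank == 1) {
  MPI_Recv(&state, 1, FaxenBatchJobRepositoryStatePacked::Datatype,
           0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}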
Example 2: MPI_Address
void peano::applications::latticeboltzmann::blocklatticeboltzmann::repositories::BlockLatticeBoltzmannBatchJobRepositoryState::initDatatype() {
const int Attributes = 3;
MPI_Datatype subtypes[Attributes] = {
MPI_INT, //action
MPI_CHAR, //reduceState
MPI_UB // end/displacement flag
};
int blocklen[Attributes] = {
1, //action
1, //reduceState
1 // end/displacement flag
};
MPI_Aint disp[Attributes];
BlockLatticeBoltzmannBatchJobRepositoryState dummyBlockLatticeBoltzmannBatchJobRepositoryState[2];
MPI_Aint base;
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]))), &base);
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]._persistentRecords._action))), &disp[0] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]._persistentRecords._reduceState))), &disp[1] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[1]._persistentRecords._action))), &disp[2] );
for (int i=1; i<Attributes; i++) {
assertion1( disp[i] > disp[i-1], i );
}
for (int i=0; i<Attributes; i++) {
disp[i] -= base;
}
MPI_Type_struct( Attributes, blocklen, disp, subtypes, &BlockLatticeBoltzmannBatchJobRepositoryState::Datatype );
MPI_Type_commit( &BlockLatticeBoltzmannBatchJobRepositoryState::Datatype );
}
Example 3: mytype_commit
void mytype_commit(struct mystruct value){
MPI_Aint indices[3];
int blocklens[3];
MPI_Datatype old_types[3];
old_types[0] = MPI_CHAR;
old_types[1] = MPI_INT;
old_types[2] = MPI_DOUBLE;
blocklens[0] = 1;
blocklens[1] = 3;
blocklens[2] = 5;
MPI_Address(&value.ch, &indices[0]);
MPI_Address(&value.a, &indices[1]);
MPI_Address(&value.x, &indices[2]);
indices[2] = indices[2] - indices[0];
indices[1] = indices[1] - indices[0];
indices[0] = 0;
MPI_Type_struct(3,blocklens,indices,old_types,&mpistruct);
MPI_Type_commit(&mpistruct);
}
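The block lengths {1, 3, 5} and the type list suggest a member layout along the following lines; this is an assumption for illustration, since the snippet does not show the definitions of struct mystruct or of the file-scope handle mpistruct:
// Assumed definitions consistent with blocklens = {1, 3, 5} (hypothetical).
struct mystruct {
  char   ch;    // 1 x MPI_CHAR
  int    a[3];  // 3 x MPI_INT
  double x[5];  // 5 x MPI_DOUBLE
};
MPI_Datatype mpistruct;  // committed by mytype_commit()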
Example 4: Build_type
void Build_type( float* a, float* b, int* n /* int, to match MPI_INT below */, MPI_Datatype* point_t ) {
int block_lengths[3];
MPI_Aint displacements[3];
MPI_Datatype typelist[3];
MPI_Aint start_address;
MPI_Aint address;
block_lengths[0] = block_lengths[1] = block_lengths[2] = 1;
typelist[0] = MPI_FLOAT;
typelist[1] = MPI_FLOAT;
typelist[2] = MPI_INT;
displacements[0] = 0;
MPI_Address(a, &start_address);
MPI_Address(b, &address);
displacements[1] = address - start_address;
MPI_Address(n, &address);
displacements[2] = address - start_address;
MPI_Type_struct(3, block_lengths, displacements, typelist, point_t);
MPI_Type_commit(point_t);
}
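This is the classic textbook pattern for shipping a few input parameters in one message: every rank builds the type relative to its own local variables, so a broadcast rooted at rank 0 fills the matching variables everywhere. A usage sketch under assumptions (rank, and how a, b and n are obtained on the root, are not part of the excerpt):
float a, b;
int   n;
MPI_Datatype point_t;
if (rank == 0) {
  // read a, b and n on the root (details assumed)
}
Build_type(&a, &b, &n, &point_t);
MPI_Bcast(&a, 1, point_t, 0, MPI_COMM_WORLD);
MPI_Type_free(&point_t);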
Example 5: InitializeMPIStuff
static void
InitializeMPIStuff(void)
{
const int n = 5;
int lengths[n] = {1, 1, 1, 1, 1};
MPI_Aint displacements[n] = {0, 0, 0, 0, 0};
MPI_Datatype types[n] = {MPI_FLOAT,
MPI_UNSIGNED_CHAR,
MPI_UNSIGNED_CHAR,
MPI_UNSIGNED_CHAR,
MPI_UNSIGNED_CHAR};
// create the MPI data type for Pixel
Pixel onePixel;
MPI_Address(&onePixel.z, &displacements[0]);
MPI_Address(&onePixel.r, &displacements[1]);
MPI_Address(&onePixel.g, &displacements[2]);
MPI_Address(&onePixel.b, &displacements[3]);
MPI_Address(&onePixel.a, &displacements[4]);
for (int i = n-1; i >= 0; i--)
displacements[i] -= displacements[0];
MPI_Type_struct(n, lengths, displacements, types,
&mpiTypePixel);
MPI_Type_commit(&mpiTypePixel);
// and the merge operation for a reduction
MPI_Op_create((MPI_User_function *)MergePixelBuffersOp, 1,
&mpiOpMergePixelBuffers);
}
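A plausible use of the committed pixel type together with the custom reduction operator is depth-compositing the per-rank pixel buffers onto one rank; a sketch under assumptions (npixels, the Pixel definition and the exact semantics of MergePixelBuffersOp are not shown in the excerpt):
std::vector<Pixel> localPixels(npixels), mergedPixels(npixels);
// ... each rank renders into localPixels ...
MPI_Reduce(localPixels.data(), mergedPixels.data(), npixels,
           mpiTypePixel, mpiOpMergePixelBuffers, 0, MPI_COMM_WORLD);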
Example 6: MPI_Address
void peano::applications::latticeboltzmann::blocklatticeboltzmann::forcerecords::BlockPositionPacked::initDatatype() {
const int Attributes = 2;
MPI_Datatype subtypes[Attributes] = {
MPI_DOUBLE, //_blockPosition
MPI_UB // end/displacement flag
};
int blocklen[Attributes] = {
DIMENSIONS, //_blockPosition
1 // end/displacement flag
};
MPI_Aint disp[Attributes];
BlockPositionPacked dummyBlockPositionPacked[2];
MPI_Aint base;
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockPositionPacked[0]))), &base);
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockPositionPacked[0]._persistentRecords._blockPosition[0]))), &disp[0] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyBlockPositionPacked[1]._persistentRecords._blockPosition[0])), &disp[1] );
for (int i=1; i<Attributes; i++) {
assertion1( disp[i] > disp[i-1], i );
}
for (int i=0; i<Attributes; i++) {
disp[i] -= base;
}
MPI_Type_struct( Attributes, blocklen, disp, subtypes, &BlockPositionPacked::Datatype );
MPI_Type_commit( &BlockPositionPacked::Datatype );
}
Example 7: MPI_Address
void peano::applications::navierstokes::prototype1::repositories::PrototypeRepositoryStatePacked::initDatatype() {
const int Attributes = 3;
MPI_Datatype subtypes[Attributes] = {
MPI_INT, //action
MPI_CHAR, //reduceState
MPI_UB // end/displacement flag
};
int blocklen[Attributes] = {
1, //action
1, //reduceState
1 // end/displacement flag
};
MPI_Aint disp[Attributes];
PrototypeRepositoryStatePacked dummyPrototypeRepositoryStatePacked[2];
MPI_Aint base;
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]))), &base);
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]._persistentRecords._action))), &disp[0] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]._persistentRecords._reduceState))), &disp[1] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[1]._persistentRecords._action))), &disp[2] );
for (int i=1; i<Attributes; i++) {
assertion1( disp[i] > disp[i-1], i );
}
for (int i=0; i<Attributes; i++) {
disp[i] -= base;
}
MPI_Type_struct( Attributes, blocklen, disp, subtypes, &PrototypeRepositoryStatePacked::Datatype );
MPI_Type_commit( &PrototypeRepositoryStatePacked::Datatype );
}
Example 8: Build_derived_type
void Build_derived_type(border* indata, MPI_Datatype* message_type_ptr){
int block_lengths[3];
MPI_Aint displacements[3];
MPI_Aint addresses[4];
MPI_Datatype typelist[3];
/* Builds a derived datatype containing three ints */
/* First, specify the type of each member */
typelist[0]=MPI_INT;
typelist[1]=MPI_INT;
typelist[2]=MPI_INT;
/* Specify how many elements of each type there are */
block_lengths[0]=block_lengths[1]=block_lengths[2] = 1;
/* Compute the displacements of the members relative to indata */
MPI_Address(indata, &addresses[0]);
MPI_Address(&(indata->left), &addresses[1]);
MPI_Address(&(indata->right), &addresses[2]);
MPI_Address(&(indata->length), &addresses[3]);
displacements[0]=addresses[1]-addresses[0];
displacements[1]=addresses[2]-addresses[0];
displacements[2]=addresses[3]-addresses[0];
/* Create the derived type */
MPI_Type_struct(3, block_lengths, displacements,typelist, message_type_ptr);
/* Commit it so it can be used */
MPI_Type_commit(message_type_ptr);
} /* Build_derived_type */
Example 9: MPI_Address
void peano::integration::partitioncoupling::builtin::records::ForceTorquePacked::initDatatype() {
const int Attributes = 3;
MPI_Datatype subtypes[Attributes] = {
MPI_DOUBLE, //_translationalForce
MPI_DOUBLE, //_torque
MPI_UB // end/displacement flag
};
int blocklen[Attributes] = {
3, //_translationalForce
3, //_torque
1 // end/displacement flag
};
MPI_Aint disp[Attributes];
ForceTorquePacked dummyForceTorquePacked[2];
MPI_Aint base;
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]))), &base);
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]._persistentRecords._translationalForce[0]))), &disp[0] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]._persistentRecords._torque[0]))), &disp[1] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyForceTorquePacked[1]._persistentRecords._translationalForce[0])), &disp[2] );
for (int i=1; i<Attributes; i++) {
assertion1( disp[i] > disp[i-1], i );
}
for (int i=0; i<Attributes; i++) {
disp[i] -= base;
}
MPI_Type_struct( Attributes, blocklen, disp, subtypes, &ForceTorquePacked::Datatype );
MPI_Type_commit( &ForceTorquePacked::Datatype );
}
Example 10: MPI_Address
void tarch::parallel::messages::RegisterAtNodePoolMessagePacked::initDatatype() {
const int Attributes = 2;
MPI_Datatype subtypes[Attributes] = {
MPI_SHORT, //nodeName
MPI_UB // end/displacement flag
};
int blocklen[Attributes] = {
MPI_MAX_NAME_STRING_ADDED_ONE, //nodeName
1 // end/displacement flag
};
MPI_Aint disp[Attributes];
RegisterAtNodePoolMessagePacked dummyRegisterAtNodePoolMessagePacked[2];
MPI_Aint base;
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegisterAtNodePoolMessagePacked[0]))), &base);
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegisterAtNodePoolMessagePacked[0]._persistentRecords._nodeName[0]))), &disp[0] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyRegisterAtNodePoolMessagePacked[1]._persistentRecords._nodeName[0])), &disp[1] );
for (int i=1; i<Attributes; i++) {
assertion1( disp[i] > disp[i-1], i );
}
for (int i=0; i<Attributes; i++) {
disp[i] -= base;
}
MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegisterAtNodePoolMessagePacked::Datatype );
MPI_Type_commit( &RegisterAtNodePoolMessagePacked::Datatype );
}
Example 11: initialiseType
/**
* Initialises the command-package MPI type; we use this to illustrate how additional information (in this
* case the parent rank) can be associated with commands.
*/
static void initialiseType() {
struct PP_Control_Package package;
MPI_Aint pckAddress, dataAddress;
MPI_Address(&package, &pckAddress);
MPI_Address(&package.data, &dataAddress);
int blocklengths[2] = {1, 1}, nitems = 2;
MPI_Datatype types[2] = {MPI_CHAR, MPI_INT};
MPI_Aint offsets[2] = {0, dataAddress - pckAddress};
MPI_Type_create_struct(nitems, blocklengths, offsets, types, &PP_COMMAND_TYPE);
MPI_Type_commit(&PP_COMMAND_TYPE);
}
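Unlike most of the examples here, this one already uses MPI_Type_create_struct, the replacement for the deprecated MPI_Type_struct. A hypothetical usage, assuming a workerRank variable and tag 0 that do not appear in the excerpt:
struct PP_Control_Package cmd;
// ... fill in the command and the parent-rank payload ...
MPI_Send(&cmd, 1, PP_COMMAND_TYPE, workerRank, 0, MPI_COMM_WORLD);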
Example 12: MPI_Address
void peano::applications::puregrid::records::RegularGridStatePacked::initDatatype() {
const int Attributes = 9;
MPI_Datatype subtypes[Attributes] = {
MPI_INT, //maxRefinementsPerIteration
MPI_DOUBLE, //meshWidth
MPI_DOUBLE, //numberOfInnerVertices
MPI_DOUBLE, //numberOfBoundaryVertices
MPI_DOUBLE, //numberOfOuterVertices
MPI_DOUBLE, //numberOfInnerCells
MPI_DOUBLE, //numberOfOuterCells
MPI_SHORT, //_packedRecords0
MPI_UB // end/displacement flag
};
int blocklen[Attributes] = {
1, //maxRefinementsPerIteration
DIMENSIONS, //meshWidth
1, //numberOfInnerVertices
1, //numberOfBoundaryVertices
1, //numberOfOuterVertices
1, //numberOfInnerCells
1, //numberOfOuterCells
1, //_packedRecords0
1 // end/displacement flag
};
MPI_Aint disp[Attributes];
RegularGridStatePacked dummyRegularGridStatePacked[2];
MPI_Aint base;
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]))), &base);
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._maxRefinementsPerIteration))), &disp[0] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._meshWidth[0]))), &disp[1] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfInnerVertices))), &disp[2] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfBoundaryVertices))), &disp[3] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfOuterVertices))), &disp[4] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfInnerCells))), &disp[5] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfOuterCells))), &disp[6] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._packedRecords0))), &disp[7] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[1]._persistentRecords._maxRefinementsPerIteration))), &disp[8] );
for (int i=1; i<Attributes; i++) {
assertion1( disp[i] > disp[i-1], i );
}
for (int i=0; i<Attributes; i++) {
disp[i] -= base;
}
MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridStatePacked::Datatype );
MPI_Type_commit( &RegularGridStatePacked::Datatype );
}
Example 13: MPI_Address
void peano::applications::poisson::multigrid::records::RegularGridState::initDatatype() {
const int Attributes = 9;
MPI_Datatype subtypes[Attributes] = {
MPI_DOUBLE, //omega
MPI_DOUBLE, //meshWidth
MPI_DOUBLE, //numberOfInnerVertices
MPI_DOUBLE, //numberOfBoundaryVertices
MPI_DOUBLE, //numberOfOuterVertices
MPI_DOUBLE, //numberOfInnerCells
MPI_DOUBLE, //numberOfOuterCells
MPI_CHAR, //gridIsStationary
MPI_UB // end/displacement flag
};
int blocklen[Attributes] = {
1, //omega
DIMENSIONS, //meshWidth
1, //numberOfInnerVertices
1, //numberOfBoundaryVertices
1, //numberOfOuterVertices
1, //numberOfInnerCells
1, //numberOfOuterCells
1, //gridIsStationary
1 // end/displacement flag
};
MPI_Aint disp[Attributes];
RegularGridState dummyRegularGridState[2];
MPI_Aint base;
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]))), &base);
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._omega))), &disp[0] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._meshWidth[0]))), &disp[1] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfInnerVertices))), &disp[2] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfBoundaryVertices))), &disp[3] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfOuterVertices))), &disp[4] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfInnerCells))), &disp[5] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfOuterCells))), &disp[6] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._gridIsStationary))), &disp[7] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[1]._persistentRecords._omega))), &disp[8] );
for (int i=1; i<Attributes; i++) {
assertion1( disp[i] > disp[i-1], i );
}
for (int i=0; i<Attributes; i++) {
disp[i] -= base;
}
MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridState::Datatype );
MPI_Type_commit( &RegularGridState::Datatype );
}
Example 14: make_maskbase_struct
void make_maskbase_struct(void)
{
int blockcounts[2] = { 6, 5 };
MPI_Datatype types[2] = { MPI_DOUBLE, MPI_INT };
MPI_Aint displs[2];
maskbase mbase;
MPI_Address(&mbase.timesigma, &displs[0]);
MPI_Address(&mbase.numchan, &displs[1]);
displs[1] -= displs[0];
displs[0] = 0;
MPI_Type_struct(2, blockcounts, displs, types, &maskbase_type);
MPI_Type_commit(&maskbase_type);
}
Example 15: append_to_message_real
//!
//! \brief Appends this cell's field addresses and element counts to an outgoing message description.
//!
void append_to_message_real(
std::vector< MPI_Aint >& displ
, std::vector< int >& count
)
{
MPI_Aint addr;
// Append cell composition
MPI_Address(&phi[0],&addr);
displ.push_back(addr);
count.push_back(phi.size());
// Append other properties
MPI_Address(&scalars,&addr);
displ.push_back(addr);
count.push_back(scalars.size());
}
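The function only collects absolute addresses and element counts; presumably a caller later folds them into a single datatype and transfers the data relative to MPI_BOTTOM. A sketch of what that could look like, assuming both phi and scalars hold doubles and that dest and tag are provided elsewhere:
MPI_Datatype msgType;
MPI_Type_create_hindexed(static_cast<int>(count.size()), count.data(),
                         displ.data(), MPI_DOUBLE, &msgType);
MPI_Type_commit(&msgType);
MPI_Send(MPI_BOTTOM, 1, msgType, dest, tag, MPI_COMM_WORLD);
MPI_Type_free(&msgType);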