本文整理汇总了C++中MPI_Type_struct函数的典型用法代码示例。如果您正苦于以下问题:C++ MPI_Type_struct函数的具体用法?C++ MPI_Type_struct怎么用?C++ MPI_Type_struct使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了MPI_Type_struct函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: DefineMPITypes
DefineMPITypes()
{
Winspecs winspecs;
Flags flags;
rect rectangle;
int len[3], disp[3];
MPI_Datatype types[3];
NUM_type = MPI_DOUBLE;
MPI_Type_contiguous(6, MPI_INT, &winspecs_type);
MPI_Type_commit(&winspecs_type);
len[0] = 10;
len[1] = 2;
len[2] = 6;
disp[0] = (int) ((char *) (&(flags.breakout)) - (char *) (&(flags)));
disp[1] = (int) ((char *) (&(flags.boundary_sq)) - (char *) (&(flags)));
disp[2] = (int) ((char *) (&(flags.rmin)) - (char *) (&(flags)));
types[0] = MPI_INT;
types[1] = MPI_DOUBLE;
types[2] = NUM_type;
MPI_Type_struct(3, len, disp, types, &flags_type);
MPI_Type_commit(&flags_type);
len[0] = 5;
disp[0] = (int) ((char *) (&(rectangle.l)) - (char *) (&(rectangle)));
types[0] = MPI_INT;
MPI_Type_struct(1, len, disp, types, &rect_type);
MPI_Type_commit(&rect_type);
return 0;
}
示例2: main
int main(int argc, char *argv[])
{
MPI_Datatype mystruct, vecs[3];
MPI_Aint stride = 5, displs[3];
int i=0, blockcount[3];
int errs=0;
MTest_Init( &argc, &argv );
for(i = 0; i < 3; i++)
{
MPI_Type_hvector(i, 1, stride, MPI_INT, &vecs[i]);
MPI_Type_commit(&vecs[i]);
blockcount[i]=1;
}
displs[0]=0; displs[1]=-100; displs[2]=-200; /* irrelevant */
MPI_Type_struct(3, blockcount, displs, vecs, &mystruct);
MPI_Type_commit(&mystruct);
MPI_Type_free(&mystruct);
for(i = 0; i < 3; i++)
{
MPI_Type_free(&vecs[i]);
}
/* this time with the first argument always 0 */
for(i = 0; i < 3; i++)
{
MPI_Type_hvector(0, 1, stride, MPI_INT, &vecs[i]);
MPI_Type_commit(&vecs[i]);
blockcount[i]=1;
}
displs[0]=0; displs[1]=-100; displs[2]=-200; /* irrelevant */
MPI_Type_struct(3, blockcount, displs, vecs, &mystruct);
MPI_Type_commit(&mystruct);
MPI_Type_free(&mystruct);
for(i = 0; i < 3; i++)
{
MPI_Type_free(&vecs[i]);
}
MTest_Finalize( errs );
MPI_Finalize();
return 0;
}
示例3: MPI_Address
/*
 * Builds the MPI datatype for RegularGridCell (a record with no
 * transferable payload): a single MPI_UB entry whose displacement fixes
 * the type's extent to one full record, so arrays of cells are strided
 * correctly on send/receive.
 *
 * Fix over the original: disp[0] was never written before being used
 * (the address loop was missing for the extent entry), so the datatype
 * was built from an uninitialized displacement.  The sibling generated
 * initDatatype() methods in this file place the MPI_UB entry at the
 * start of the second dummy record; do the same here.
 */
void peano::applications::poisson::jacobitutorial::records::RegularGridCell::initDatatype() {
   const int Attributes = 1;
   MPI_Datatype subtypes[Attributes] = {
      MPI_UB // end/displacement flag
   };
   int blocklen[Attributes] = {
      1 // end/displacement flag
   };
   MPI_Aint disp[Attributes];
   RegularGridCell dummyRegularGridCell[2];
   MPI_Aint base;
   MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridCell[0]))), &base);
   /* Extent marker: address of the second element, i.e. base + sizeof(record). */
   MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridCell[1]))), &disp[0] );
   for (int i=1; i<Attributes; i++) {
      assertion1( disp[i] > disp[i-1], i );
   }
   for (int i=0; i<Attributes; i++) {
      disp[i] -= base; /* make displacements relative to the record start */
   }
   MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridCell::Datatype );
   MPI_Type_commit( &RegularGridCell::Datatype );
}
示例4: offsetof
void BBLSGraph::createDatatypes() {
// BBLSNode struct
int block_lengths[5];
block_lengths[0] = 1;
block_lengths[1] = 1;
block_lengths[2] = 1;
block_lengths[3] = 1;
block_lengths[4] = 1;
MPI_Aint displacements[5];
displacements[0] = offsetof(BBLSNode, type);
displacements[1] = offsetof(BBLSNode, output);
displacements[2] = offsetof(BBLSNode, inputLeft);
displacements[3] = offsetof(BBLSNode, inputRight);
displacements[4] = sizeof(BBLSNode);
MPI_Datatype types[5];
types[0] = MPI_INT;
types[1] = MPI_UNSIGNED;
types[2] = MPI_UNSIGNED;
types[3] = MPI_UNSIGNED;
types[4] = MPI_UB;
MPI_Type_struct(5, block_lengths, displacements, types, &mpi_nodeType);
MPI_Type_commit(&mpi_nodeType);
// 3 BBLSNodes
MPI_Type_contiguous(3, mpi_nodeType, &mpi_threeNodes);
MPI_Type_commit(&mpi_threeNodes);
}
示例5: FC_FUNC
/* Fortran binding shim for MPI_TYPE_STRUCT.  FC_FUNC expands to the
 * compiler-specific Fortran symbol name (e.g. mpi_type_struct_); Fortran
 * passes every argument by reference, hence all-pointer parameters, and
 * the C return status is written through ierror.
 * NOTE(review): displacements arrives as long* but MPI_Type_struct takes
 * MPI_Aint*.  These are the same type on common LP64 builds, but that is
 * not guaranteed — confirm against the target MPI's definition of
 * MPI_Aint. */
FC_FUNC( mpi_type_struct, MPI_TYPE_STRUCT )
(int * count, int * blocklens, long * displacements,
int *oldtypes_ptr, int *newtype, int *ierror)
{
*ierror=MPI_Type_struct(*count, blocklens, displacements,
oldtypes_ptr, newtype);
}
示例6: MPI_Address
/* Generated MPI-datatype registration for PrototypeRepositoryStatePacked.
 * Pattern: take the address of dummy[0] as base, the addresses of each
 * transferred field as raw displacements, and the address of the same
 * field in dummy[1] as the MPI_UB extent marker (so the datatype extent
 * equals one full record, padding included).  All displacements are then
 * rebased relative to the record start before MPI_Type_struct. */
void peano::applications::navierstokes::prototype1::repositories::PrototypeRepositoryStatePacked::initDatatype() {
const int Attributes = 3;
MPI_Datatype subtypes[Attributes] = {
MPI_INT, //action
MPI_CHAR, //reduceState
MPI_UB // end/displacement flag
};
int blocklen[Attributes] = {
1, //action
1, //reduceState
1 // end/displacement flag
};
MPI_Aint disp[Attributes];
PrototypeRepositoryStatePacked dummyPrototypeRepositoryStatePacked[2];
MPI_Aint base;
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]))), &base);
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]._persistentRecords._action))), &disp[0] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]._persistentRecords._reduceState))), &disp[1] );
// Second array element: marks the end of one record (sets the extent).
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[1]._persistentRecords._action))), &disp[2] );
for (int i=1; i<Attributes; i++) {
// Sanity check: field addresses must be strictly increasing.
assertion1( disp[i] > disp[i-1], i );
}
for (int i=0; i<Attributes; i++) {
// Rebase absolute addresses to offsets from the record start.
disp[i] -= base;
}
MPI_Type_struct( Attributes, blocklen, disp, subtypes, &PrototypeRepositoryStatePacked::Datatype );
MPI_Type_commit( &PrototypeRepositoryStatePacked::Datatype );
}
示例7: MPI_Address
/*
 * Builds the MPI datatype for TestCell (a record with no transferable
 * payload): a single MPI_UB entry whose displacement fixes the type's
 * extent to one full record.
 *
 * Fix over the original: disp[0] was never assigned before being used
 * (rebased and handed to MPI_Type_struct), i.e. an uninitialized read.
 * Following the sibling generated initDatatype() methods, the MPI_UB
 * entry must point at the start of the second dummy record.
 */
void peano::kernel::regulargrid::tests::records::TestCell::initDatatype() {
   const int Attributes = 1;
   MPI_Datatype subtypes[Attributes] = {
      MPI_UB // end/displacement flag
   };
   int blocklen[Attributes] = {
      1 // end/displacement flag
   };
   MPI_Aint disp[Attributes];
   TestCell dummyTestCell[2];
   MPI_Aint base;
   MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyTestCell[0]))), &base);
   /* Extent marker: address of the second element, i.e. base + sizeof(record). */
   MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyTestCell[1]))), &disp[0] );
   for (int i=1; i<Attributes; i++) {
      assertion1( disp[i] > disp[i-1], i );
   }
   for (int i=0; i<Attributes; i++) {
      disp[i] -= base; /* make displacements relative to the record start */
   }
   MPI_Type_struct( Attributes, blocklen, disp, subtypes, &TestCell::Datatype );
   MPI_Type_commit( &TestCell::Datatype );
}
示例8: main
/* Error-path test: all ranks except the last broadcast with a hand-built
 * {int, float} struct type, while the last rank uses MPI_FLOAT_INT.
 * The hand-built type deliberately reverses the member order/offsets
 * relative to MPI_FLOAT_INT, producing mismatched type signatures across
 * the collective — the "pathological case" the comment refers to.  Do
 * not "fix" the mismatch; it is the point of the test.  dbuff is a
 * double purely to provide 8 aligned bytes of buffer space. */
int main( int argc, char *argv[] )
{
int rank, size;
double dbuff = 0x0;
MPI_Init( &argc, &argv );
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
MPI_Comm_size( MPI_COMM_WORLD, &size );
if ( rank != size-1 ) {
/* create pathological case */
MPI_Datatype types[2] = { MPI_INT, MPI_FLOAT };
int blks[2] = { 1, 1};
MPI_Aint displs[2] = {0, sizeof(float) };
MPI_Datatype flt_int_type;
MPI_Type_struct( 2, blks, displs, types, &flt_int_type );
MPI_Type_commit( &flt_int_type );
MPI_Bcast( &dbuff, 1, flt_int_type, 0, MPI_COMM_WORLD );
MPI_Type_free( &flt_int_type );
}
else
MPI_Bcast( &dbuff, 1, MPI_FLOAT_INT, 0, MPI_COMM_WORLD );
MPI_Finalize();
return 0;
}
示例9: MPI_Address
/* Generated MPI-datatype registration for BlockPositionPacked.
 * The record carries one DIMENSIONS-sized double vector (_blockPosition);
 * the trailing MPI_UB entry, taken from the same field in dummy[1], pins
 * the datatype extent to one full record so arrays stride correctly.
 * All displacements are rebased against the record's base address. */
void peano::applications::latticeboltzmann::blocklatticeboltzmann::forcerecords::BlockPositionPacked::initDatatype() {
const int Attributes = 2;
MPI_Datatype subtypes[Attributes] = {
MPI_DOUBLE, //_blockPosition
MPI_UB // end/displacement flag
};
int blocklen[Attributes] = {
DIMENSIONS, //_blockPosition
1 // end/displacement flag
};
MPI_Aint disp[Attributes];
BlockPositionPacked dummyBlockPositionPacked[2];
MPI_Aint base;
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockPositionPacked[0]))), &base);
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockPositionPacked[0]._persistentRecords._blockPosition[0]))), &disp[0] );
// Second array element: marks the end of one record (sets the extent).
MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyBlockPositionPacked[1]._persistentRecords._blockPosition[0])), &disp[1] );
for (int i=1; i<Attributes; i++) {
// Sanity check: field addresses must be strictly increasing.
assertion1( disp[i] > disp[i-1], i );
}
for (int i=0; i<Attributes; i++) {
// Rebase absolute addresses to offsets from the record start.
disp[i] -= base;
}
MPI_Type_struct( Attributes, blocklen, disp, subtypes, &BlockPositionPacked::Datatype );
MPI_Type_commit( &BlockPositionPacked::Datatype );
}
示例10: Build_type
/* Builds an MPI struct datatype describing three scalars (a, b, n) laid
 * out wherever the caller's variables happen to live: displacements of b
 * and n are measured from a, and a buffer passed to MPI with this type
 * must therefore start at a's address (displacement 0).
 * NOTE(review): n is declared float* but is described as MPI_INT in
 * typelist[2] — either the parameter type or the MPI type is wrong.
 * Verify against the caller before relying on this type for n. */
void Build_type( float* a, float* b, float* n, MPI_Datatype* point_t ) {
int block_lengths[3];
MPI_Aint displacements[3];
MPI_Datatype typelist[3];
MPI_Aint start_address;
MPI_Aint address;
block_lengths[0] = block_lengths[1] = block_lengths[2] = 1;
typelist[0] = MPI_FLOAT;
typelist[1] = MPI_FLOAT;
typelist[2] = MPI_INT;
// a is the origin of the type, so its displacement is zero by definition.
displacements[0] = 0;
MPI_Address(a, &start_address);
MPI_Address(b, &address);
displacements[1] = address - start_address;
MPI_Address(n, &address);
displacements[2] = address - start_address;
MPI_Type_struct(3, block_lengths, displacements, typelist, point_t);
MPI_Type_commit(point_t);
}
示例11: MPI_Address
/* Generated MPI-datatype registration for
 * BlockLatticeBoltzmannBatchJobRepositoryState: an int action, a char
 * reduceState, and an MPI_UB extent marker taken from the same field of
 * a second dummy record (extent = one full record, padding included).
 * Displacements are rebased against the record's base address. */
void peano::applications::latticeboltzmann::blocklatticeboltzmann::repositories::BlockLatticeBoltzmannBatchJobRepositoryState::initDatatype() {
const int Attributes = 3;
MPI_Datatype subtypes[Attributes] = {
MPI_INT, //action
MPI_CHAR, //reduceState
MPI_UB // end/displacement flag
};
int blocklen[Attributes] = {
1, //action
1, //reduceState
1 // end/displacement flag
};
MPI_Aint disp[Attributes];
BlockLatticeBoltzmannBatchJobRepositoryState dummyBlockLatticeBoltzmannBatchJobRepositoryState[2];
MPI_Aint base;
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]))), &base);
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]._persistentRecords._action))), &disp[0] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]._persistentRecords._reduceState))), &disp[1] );
// Second array element: marks the end of one record (sets the extent).
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[1]._persistentRecords._action))), &disp[2] );
for (int i=1; i<Attributes; i++) {
// Sanity check: field addresses must be strictly increasing.
assertion1( disp[i] > disp[i-1], i );
}
for (int i=0; i<Attributes; i++) {
// Rebase absolute addresses to offsets from the record start.
disp[i] -= base;
}
MPI_Type_struct( Attributes, blocklen, disp, subtypes, &BlockLatticeBoltzmannBatchJobRepositoryState::Datatype );
MPI_Type_commit( &BlockLatticeBoltzmannBatchJobRepositoryState::Datatype );
}
示例12: MPI_Address
/* Generated MPI-datatype registration for ForceTorquePacked: two
 * 3-component double vectors (_translationalForce, _torque) plus an
 * MPI_UB extent marker taken from the first field of a second dummy
 * record, so the datatype extent equals one full record.
 * Displacements are rebased against the record's base address. */
void peano::integration::partitioncoupling::builtin::records::ForceTorquePacked::initDatatype() {
const int Attributes = 3;
MPI_Datatype subtypes[Attributes] = {
MPI_DOUBLE, //_translationalForce
MPI_DOUBLE, //_torque
MPI_UB // end/displacement flag
};
int blocklen[Attributes] = {
3, //_translationalForce
3, //_torque
1 // end/displacement flag
};
MPI_Aint disp[Attributes];
ForceTorquePacked dummyForceTorquePacked[2];
MPI_Aint base;
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]))), &base);
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]._persistentRecords._translationalForce[0]))), &disp[0] );
MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]._persistentRecords._torque[0]))), &disp[1] );
// Second array element: marks the end of one record (sets the extent).
MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyForceTorquePacked[1]._persistentRecords._translationalForce[0])), &disp[2] );
for (int i=1; i<Attributes; i++) {
// Sanity check: field addresses must be strictly increasing.
assertion1( disp[i] > disp[i-1], i );
}
for (int i=0; i<Attributes; i++) {
// Rebase absolute addresses to offsets from the record start.
disp[i] -= base;
}
MPI_Type_struct( Attributes, blocklen, disp, subtypes, &ForceTorquePacked::Datatype );
MPI_Type_commit( &ForceTorquePacked::Datatype );
}
示例13: rnemd_init
/* Initialises the RNEMD (reverse non-equilibrium MD) module: validates
 * the configured slab/swap/direction parameters, registers an MPI
 * datatype mirroring struct rnemd_list, queries the grid communicator,
 * and allocates the swap-candidate list.
 * Relies on file/module globals: N, sw, dd, dg (configuration), inuse,
 * rnemd_type, rank, size, max, list, t0, comm_grid. */
void rnemd_init(struct beads *b)
{
inuse = 1;
/* dd = velocity component to swap, dg = gradient axis; both must be 0..2. */
if (N % 2 == 1)
fatal(EINVAL, "rnemd: N must be even");
if (dd < 0 || dd >= 3 || dg < 0 || dg >= 3)
fatal(EINVAL, "rnemd: gradient / velocity invalid");
printf("rnemd: slabs=%ld swaps=%ld gradient=%ld velocity=%ld\n",
N, sw, dg, dd);
assert(N && sw);
/* Describe struct rnemd_list for MPI: one double (v) and two ints (pos).
 * Offsets are computed from a stack dummy via pointer arithmetic.
 * NOTE(review): indices is ptrdiff_t[] while MPI_Type_struct expects
 * MPI_Aint* -- identical on common platforms, but not guaranteed. */
int blocklens[2] = {1, 2};
struct rnemd_list q;
ptrdiff_t indices[2] = {(ptrdiff_t)&q.v - (ptrdiff_t)&q,
(ptrdiff_t)&q.pos - (ptrdiff_t)&q};
MPI_Datatype old_types[2] = {MPI_DOUBLE, MPI_INT};
MPI_Type_struct(ARRAY_SIZE(blocklens), blocklens, indices, old_types, &rnemd_type);
MPI_Type_commit(&rnemd_type);
MPI_Comm_rank(comm_grid, &rank);
MPI_Comm_size(comm_grid, &size);
/* Capacity: at least 0x10000 entries, or 2 swaps per rank if larger. */
max = MAX(0x10000, 2 * sw * size);
list = calloc(max, sizeof(*list));
if (list == NULL) novm("rnemd: list");
t0 = b->time; /* remember the start time for rate accounting */
}
示例14: Build_derived_type
/* Builds an MPI derived datatype describing the three int members of a
 * border struct (left, right, length), with displacements measured from
 * the struct's own address so the committed type can send/receive whole
 * border instances. */
void Build_derived_type(border* indata, MPI_Datatype* message_type_ptr){
int block_lengths[3];
MPI_Aint displacements[3];
MPI_Aint addresses[4];
MPI_Datatype typelist[3];
/* Create a derived datatype containing three ints */
/* First, specify the element types */
typelist[0]=MPI_INT;
typelist[1]=MPI_INT;
typelist[2]=MPI_INT;
/* Specify the number of elements of each type */
block_lengths[0]=block_lengths[1]=block_lengths[2] = 1;
/* Compute the element displacements relative to indata */
MPI_Address(indata, &addresses[0]);
MPI_Address(&(indata->left), &addresses[1]);
MPI_Address(&(indata->right), &addresses[2]);
MPI_Address(&(indata->length), &addresses[3]);
displacements[0]=addresses[1]-addresses[0];
displacements[1]=addresses[2]-addresses[0];
displacements[2]=addresses[3]-addresses[0];
/* Create the derived type */
MPI_Type_struct(3, block_lengths, displacements,typelist, message_type_ptr);
/* Commit it so it can be used in communication */
MPI_Type_commit(message_type_ptr);
} /* Build_derived_type */
示例15: mytype_commit
void mytype_commit(struct mystruct value){
MPI_Aint indices[3];
int blocklens[3];
MPI_Datatype old_types[3];
old_types[0] = MPI_CHAR;
old_types[1] = MPI_INT;
old_types[2] = MPI_DOUBLE;
blocklens[0] = 1;
blocklens[1] = 3;
blocklens[2] = 5;
MPI_Address(&value.ch, &indices[0]);
MPI_Address(&value.a, &indices[1]);
MPI_Address(&value.x, &indices[2]);
indices[2] = indices[2] - indices[0];
indices[1] = indices[1] - indices[0];
indices[0] = 0;
MPI_Type_struct(3,blocklens,indices,old_types,&mpistruct);
MPI_Type_commit(&mpistruct);
}