This page collects typical usage examples of the C++ function MPI_Type_contiguous. If you have been wondering what exactly MPI_Type_contiguous does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
A total of 15 MPI_Type_contiguous code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better C++ code examples.
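Before the examples, a quick reminder of the basic calling pattern: MPI_Type_contiguous(count, oldtype, &newtype) builds a new datatype made of count consecutive copies of oldtype; the new type must be committed with MPI_Type_commit before it is used in communication and should eventually be released with MPI_Type_free. The following minimal sketch (buffer and type names are made up for illustration) shows the full life cycle; compile with mpicc and run with at least two ranks, e.g. mpirun -np 2 ./a.out.
#include <mpi.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    double buf[12];                                  /* four "triples" of doubles */
    for (int i = 0; i < 12; i++) buf[i] = i;

    MPI_Datatype triple;
    MPI_Type_contiguous(3, MPI_DOUBLE, &triple);     /* 3 consecutive MPI_DOUBLEs */
    MPI_Type_commit(&triple);                        /* required before use in communication */

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0)
        MPI_Send(buf, 4, triple, 1, 0, MPI_COMM_WORLD);   /* sends 4 * 3 = 12 doubles */
    else if (rank == 1)
        MPI_Recv(buf, 4, triple, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    MPI_Type_free(&triple);
    MPI_Finalize();
    return 0;
}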
Example 1: _setup_mpsort_mpi
static void
_setup_mpsort_mpi(struct crmpistruct * o,
struct crstruct * d,
void * myoutbase, size_t myoutnmemb,
MPI_Comm comm)
{
o->comm = comm;
MPI_Comm_size(comm, &o->NTask);
MPI_Comm_rank(comm, &o->ThisTask);
o->mybase = d->base;
o->mynmemb = d->nmemb;
o->myoutbase = myoutbase;
o->myoutnmemb = myoutnmemb;
MPI_Allreduce(&o->mynmemb, &o->nmemb, 1, MPI_TYPE_PTRDIFF, MPI_SUM, comm);
MPI_Allreduce(&o->myoutnmemb, &o->outnmemb, 1, MPI_TYPE_PTRDIFF, MPI_SUM, comm);
if(o->outnmemb != o->nmemb) {
fprintf(stderr, "total number of items in the item does not match the input %ld != %ld\n",
o->outnmemb, o->nmemb);
abort();
}
MPI_Type_contiguous(d->rsize, MPI_BYTE, &o->MPI_TYPE_RADIX);
MPI_Type_commit(&o->MPI_TYPE_RADIX);
MPI_Type_contiguous(d->size, MPI_BYTE, &o->MPI_TYPE_DATA);
MPI_Type_commit(&o->MPI_TYPE_DATA);
}
Example 2: create_indexed_gap_ddt
static MPI_Datatype
create_indexed_gap_ddt( void )
{
ddt_gap dt[2];
MPI_Datatype dt1, dt2, dt3;
int bLength[2] = { 2, 1 };
MPI_Datatype types[2] = { MPI_INT, MPI_FLOAT };
MPI_Aint displ[2];
MPI_Get_address( &(dt[0].is[0].i[0]), &(displ[0]) );
MPI_Get_address( &(dt[0].is[0].f), &(displ[1]) );
displ[1] -= displ[0];
displ[0] -= displ[0];
MPI_Type_create_struct( 2, bLength, displ, types, &dt1 );
/*MPI_DDT_DUMP( dt1 );*/
MPI_Type_contiguous( 3, dt1, &dt2 );
/*MPI_DDT_DUMP( dt2 );*/
bLength[0] = 1;
bLength[1] = 1;
MPI_Get_address( &(dt[0].v1), &(displ[0]) );
MPI_Get_address( &(dt[0].is[0]), &(displ[1]) );
displ[1] -= displ[0];
displ[0] -= displ[0];
types[0] = MPI_INT;
types[1] = dt2;
MPI_Type_create_struct( 2, bLength, displ, types, &dt3 );
/*MPI_DDT_DUMP( dt3 );*/
MPI_Type_free( &dt1 );
MPI_Type_free( &dt2 );
MPI_Type_contiguous( 10, dt3, &dt1 );
MPI_DDT_DUMP( dt1 );
MPI_Type_free( &dt3 );
MPI_Type_commit( &dt1 );
return dt1;
}
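When building nested types like this, it helps to verify the result with MPI_Type_size and MPI_Type_get_extent: the size counts only the bytes actually carried (the int/float payload), while the extent also includes the alignment gaps that give this test its name. A small fragment to drop into an initialized MPI program, assuming the ddt_gap layout and the function above:
MPI_Datatype ddt = create_indexed_gap_ddt();
int type_size;
MPI_Aint lb, extent;
MPI_Type_size(ddt, &type_size);          /* payload bytes only */
MPI_Type_get_extent(ddt, &lb, &extent);  /* includes the gaps */
printf("size = %d bytes, extent = %ld bytes\n", type_size, (long) extent);
MPI_Type_free(&ddt);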
Example 3: MPIOI_Type_block
/* Returns MPI_SUCCESS on success, an MPI error code on failure. Code above
* needs to call MPIO_Err_return_xxx.
*/
int MPIOI_Type_block(int *array_of_gsizes, int dim, int ndims, int nprocs,
int rank, int darg, int order, MPI_Aint orig_extent,
MPI_Datatype type_old, MPI_Datatype *type_new,
MPI_Aint *st_offset)
{
/* nprocs = no. of processes in dimension dim of grid
rank = coordinate of this process in dimension dim */
int blksize, global_size, mysize, i, j;
MPI_Aint stride;
global_size = array_of_gsizes[dim];
if (darg == MPI_DISTRIBUTE_DFLT_DARG)
blksize = (global_size + nprocs - 1)/nprocs;
else {
blksize = darg;
/* --BEGIN ERROR HANDLING-- */
if (blksize <= 0) {
return MPI_ERR_ARG;
}
if (blksize * nprocs < global_size) {
return MPI_ERR_ARG;
}
/* --END ERROR HANDLING-- */
}
j = global_size - blksize*rank;
mysize = ADIOI_MIN(blksize, j);
if (mysize < 0) mysize = 0;
stride = orig_extent;
if (order == MPI_ORDER_FORTRAN) {
if (dim == 0)
MPI_Type_contiguous(mysize, type_old, type_new);
else {
for (i=0; i<dim; i++) stride *= array_of_gsizes[i];
MPI_Type_hvector(mysize, 1, stride, type_old, type_new);
}
}
else {
if (dim == ndims-1)
MPI_Type_contiguous(mysize, type_old, type_new);
else {
for (i=ndims-1; i>dim; i--) stride *= array_of_gsizes[i];
MPI_Type_hvector(mysize, 1, stride, type_old, type_new);
}
}
*st_offset = blksize * rank;
/* in terms of no. of elements of type oldtype in this dimension */
if (mysize == 0) *st_offset = 0;
return MPI_SUCCESS;
}
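As a worked example of the distribution arithmetic: with global_size = 10 elements over nprocs = 4 processes and the default distribution argument, blksize = ceil(10/4) = 3; rank 2 then gets mysize = min(3, 10 - 3*2) = 3 elements starting at st_offset = 6, while rank 3 gets min(3, 10 - 3*3) = 1 element starting at offset 9.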
Example 4: MPI_File_get_view
/*@
MPI_File_get_view - Returns the file view
Input Parameters:
. fh - file handle (handle)
Output Parameters:
. disp - displacement (nonnegative integer)
. etype - elementary datatype (handle)
. filetype - filetype (handle)
. datarep - data representation (string)
.N fortran
@*/
int MPI_File_get_view(MPI_File fh, MPI_Offset * disp, MPI_Datatype * etype,
MPI_Datatype * filetype, char *datarep)
{
int error_code;
ADIO_File adio_fh;
static char myname[] = "MPI_FILE_GET_VIEW";
int i, j, k, combiner;
MPI_Datatype copy_etype, copy_filetype;
ROMIO_THREAD_CS_ENTER();
adio_fh = MPIO_File_resolve(fh);
/* --BEGIN ERROR HANDLING-- */
MPIO_CHECK_FILE_HANDLE(adio_fh, myname, error_code);
if (datarep == NULL) {
error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_ARG, "**iodatarepnomem", 0);
error_code = MPIO_Err_return_file(adio_fh, error_code);
goto fn_exit;
}
/* --END ERROR HANDLING-- */
*disp = adio_fh->disp;
ADIOI_Strncpy(datarep,
(adio_fh->is_external32 ? "external32" : "native"), MPI_MAX_DATAREP_STRING);
MPI_Type_get_envelope(adio_fh->etype, &i, &j, &k, &combiner);
if (combiner == MPI_COMBINER_NAMED)
*etype = adio_fh->etype;
else {
/* FIXME: It is wrong to use MPI_Type_contiguous; the user could choose to
* re-implement MPI_Type_contiguous in an unexpected way. Either use
* MPID_Barrier as in MPICH or PMPI_Type_contiguous */
MPI_Type_contiguous(1, adio_fh->etype, ©_etype);
/* FIXME: Ditto for MPI_Type_commit - use NMPI or PMPI */
MPI_Type_commit(©_etype);
*etype = copy_etype;
}
/* FIXME: Ditto for MPI_Type_xxx - use NMPI or PMPI */
MPI_Type_get_envelope(adio_fh->filetype, &i, &j, &k, &combiner);
if (combiner == MPI_COMBINER_NAMED)
*filetype = adio_fh->filetype;
else {
MPI_Type_contiguous(1, adio_fh->filetype, ©_filetype);
MPI_Type_commit(©_filetype);
*filetype = copy_filetype;
}
fn_exit:
ROMIO_THREAD_CS_EXIT();
return MPI_SUCCESS;
}
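From the caller's side, the datatypes returned by MPI_File_get_view for non-named types are duplicates that the user must free. A minimal usage sketch (the file name and open flags are illustrative):
MPI_File fh;
MPI_Offset disp;
MPI_Datatype etype, filetype;
char datarep[MPI_MAX_DATAREP_STRING];

MPI_File_open(MPI_COMM_WORLD, "data.bin", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
MPI_File_get_view(fh, &disp, &etype, &filetype, datarep);
/* ... use disp, etype, filetype ... */

int ni, na, nd, combiner;
MPI_Type_get_envelope(filetype, &ni, &na, &nd, &combiner);
if (combiner != MPI_COMBINER_NAMED)   /* only derived types were duplicated above */
    MPI_Type_free(&filetype);
MPI_Type_get_envelope(etype, &ni, &na, &nd, &combiner);
if (combiner != MPI_COMBINER_NAMED)
    MPI_Type_free(&etype);
MPI_File_close(&fh);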
Example 5: CMPICommunicator::Init
void CMPICommunicator::Init(int argc, char *argv[])
{
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Type_contiguous(sizeof(Cell), MPI_BYTE, &cellDatatype);
MPI_Type_commit(&cellDatatype);
MPI_Type_contiguous(sizeof(Status), MPI_BYTE, &statusDatatype);
MPI_Type_commit(&statusDatatype);
}
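Note that wrapping a C++ struct as sizeof(...) raw bytes like this is simple and fast, but it transfers the in-memory representation verbatim: it assumes every rank uses the same architecture, compiler padding, and endianness, so it is best suited to homogeneous clusters. A field-by-field MPI_Type_create_struct (as in Example 7 and Example 14 below) is the portable alternative.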
Example 6: Construct_MPI_Datatypes
void Construct_MPI_Datatypes(int rows, int cols)
{
// Contiguous memory vector
MPI_Type_contiguous(cols, MPI_DOUBLE, &MPI_Vector);
MPI_Type_commit(&MPI_Vector);
// Contiguous memory matrix
MPI_Type_contiguous(rows, MPI_Vector, &MPI_Matrix);
MPI_Type_commit(&MPI_Matrix);
return;
}
Example 7: Build_matrix_type
void Build_matrix_type(
LOCAL_MATRIX_T* local_A /* in */) {
MPI_Datatype temp_mpi_t;
int block_lengths[2];
MPI_Aint displacements[2];
MPI_Datatype typelist[2];
MPI_Aint start_address;
MPI_Aint address;
MPI_Type_contiguous(Order(local_A)*Order(local_A),
MPI_FLOAT, &temp_mpi_t);
block_lengths[0] = block_lengths[1] = 1;
typelist[0] = MPI_INT;
typelist[1] = temp_mpi_t;
MPI_Get_address(local_A, &start_address);
MPI_Get_address(&(local_A->n_bar), &address);
displacements[0] = address - start_address;
MPI_Get_address(local_A->entries, &address);
displacements[1] = address - start_address;
MPI_Type_create_struct(2, block_lengths, displacements,
typelist, &local_matrix_mpi_t);
MPI_Type_commit(&local_matrix_mpi_t);
} /* Build_matrix_type */
Example 8: dgraphAllreduceMaxSum2
int
dgraphAllreduceMaxSum2 (
Gnum * reduloctab, /* Pointer to array of local Gnum data */
Gnum * reduglbtab, /* Pointer to array of reduced Gnum data */
int redumaxsumnbr, /* Number of max + sum Gnum operations */
MPI_User_function * redufuncptr, /* Pointer to operator function */
MPI_Comm proccomm) /* Communicator to be used for reduction */
{
MPI_Datatype redutypedat; /* Data type for finding best separator */
MPI_Op reduoperdat; /* Handle of MPI operator for finding best separator */
if ((MPI_Type_contiguous (redumaxsumnbr, GNUM_MPI, &redutypedat) != MPI_SUCCESS) ||
(MPI_Type_commit (&redutypedat) != MPI_SUCCESS) ||
(MPI_Op_create (redufuncptr, 1, &reduoperdat) != MPI_SUCCESS)) {
errorPrint ("dgraphAllreduceMaxSum: communication error (1)");
return (1);
}
if (MPI_Allreduce (reduloctab, reduglbtab, 1, redutypedat, reduoperdat, proccomm) != MPI_SUCCESS) {
errorPrint ("dgraphAllreduceMaxSum: communication error (2)");
return (1);
}
if ((MPI_Op_free (&reduoperdat) != MPI_SUCCESS) ||
(MPI_Type_free (&redutypedat) != MPI_SUCCESS)) {
errorPrint ("dgraphAllreduceMaxSum: communication error (3)");
return (1);
}
return (0);
}
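The design trick here is that MPI_Allreduce is posted with count = 1 of the contiguous type, so MPI hands the user-defined operator all redumaxsumnbr Gnum values as a single element; the operator supplied by the caller can then, for instance, take the maximum over the first few entries and the sum over the rest, combining two logical reductions into one collective call (and one communication latency).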
Example 9: gather
void gather(int rank, int size, const int gran, body *bodies){
int i, j;
int sendto = (rank + 1) % size;
int recvfrom = ((rank + size) - 1) % size;
MPI_Datatype bodytype;
MPI_Type_contiguous(3, MPI_DOUBLE, &bodytype);
MPI_Type_commit(&bodytype);
MPI_Status status;
body *outbuf = (body *) malloc(gran*sizeof(body));
if (rank != 0) {
//memcpy(outbuf, bodies, gran*sizeof(body));
MPI_Send(bodies, gran, bodytype, recvfrom, 0, MPI_COMM_WORLD);
for(i=0; i<size-rank-1; i++){
MPI_Recv(outbuf, gran, bodytype, sendto, 0, MPI_COMM_WORLD, &status);
MPI_Send(outbuf, gran, bodytype, recvfrom, 0, MPI_COMM_WORLD);
}
}
else {
FILE *oFile;
oFile = fopen("peval_out.txt", "w");
//memcpy(outbuf, bodies, gran*sizeof(body));
for(j=0; j<gran; j++)
fprintf(oFile, "%15.10f %15.10f %15.10f\n", bodies[j].x, bodies[j].y, bodies[j].m);
for(i=0; i<size-rank-1; i++){
MPI_Recv(outbuf, gran, bodytype, sendto, 0, MPI_COMM_WORLD, &status);
for(j=0; j<gran; j++)
fprintf(oFile, "%15.10f %15.10f %15.10f\n", outbuf[j].x, outbuf[j].y, outbuf[j].m);
}
fclose(oFile);
}
MPI_Type_free(&bodytype);
free(outbuf);
}
Example 10: mpiReduce_pickerV3
void mpiReduce_pickerV3(float *resDataAbsMaxPaddedGlobal,
size_t *resDataMaxIndPaddedGlobal,
size_t resSize,
eXCorrMerge bAbs)
{
resSizeMPI = resSize;
MPI_Datatype mpiType;
MPI_Type_contiguous((int) 2, MPI_FLOAT, &mpiType);
MPI_Type_commit(&mpiType);
float *resDataGlobalNode = NULL;
float *resDataGlobalNodeReduce = NULL;
array_new(resDataGlobalNode, 2*resSize);
array_new(resDataGlobalNodeReduce, 2*resSize);
memcpy(resDataGlobalNode,
resDataAbsMaxPaddedGlobal,
resSize*sizeof(float));
mpiOp_array_typecast(resDataMaxIndPaddedGlobal,
resDataGlobalNode+resSize,
resSize);
MPI_Op mpiOp;
switch (bAbs) {
case XCORR_MERGE_NEGATIVE:
MPI_Op_create((MPI_User_function *) mpiOp_xcorrMergeResultGlobalV3Abs,
1, // commutative
&mpiOp);
break;
case XCORR_MERGE_POSITIVE:
MPI_Op_create((MPI_User_function *) mpiOp_xcorrMergeResultGlobalV3,
1, // commutative
&mpiOp);
break;
default:
ERROR("mpiReduce_pickerV3", "unsupported merging mode");
}
MPI_Reduce(resDataGlobalNode,
resDataGlobalNodeReduce,
(int) resSize, // resSize elements of size 2*sizeof(float)
mpiType,
mpiOp,
0,
MPI_COMM_WORLD);
MPI_Op_free(&mpiOp);
memcpy(resDataAbsMaxPaddedGlobal,
resDataGlobalNodeReduce,
resSize*sizeof(float));
mpiOp_array_typecast(resDataGlobalNodeReduce+resSize,
resDataMaxIndPaddedGlobal,
resSize);
array_delete(resDataGlobalNode);
array_delete(resDataGlobalNodeReduce);
MPI_Type_free(&mpiType);
}
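This reduction effectively merges a value array and an index array in one call; the global resSizeMPI saved at the top tells the user-defined operator where the index half of each buffer starts. For the simpler case of reducing a single value together with its location, MPI's built-in pair types MPI_FLOAT_INT / MPI_DOUBLE_INT with MPI_MAXLOC or MPI_MINLOC can be used instead; the custom type and operator here are needed because the indices are size_t values and the merge can be based on absolute values (the bAbs switch).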
Example 11: create_pattern
void create_pattern(gchar* name, PatternType type, gint iter, gint elem, gint level, GroupBlock* group)
{
Verbose("Creating pattern%d \"%s\" elem %d level %d\n", type, name, elem, level);
Pattern* pattern = pattern_new(type, iter, elem, level);
gint groupSize = (group? group->groupsize : size);
gint groupRank;
if (group)
MPI_Comm_rank(group->mpicomm, &groupRank);
else
groupRank = rank;
Verbose("GroupSize = %d, GroupRank = %d\n", groupSize, groupRank);
MPI_Type_contiguous(elem, MPI_BYTE, &pattern->eType);
MPI_Type_commit(&pattern->eType);
pattern->type_size = 1;
switch (type) {
/* contiguous data */
case PATTERN1: {
int array_sizes[] = { groupSize };
int array_subsizes[] = { 1 };
int array_starts[] = { groupRank };
MPI_Type_create_subarray(
1, /* number of array dimensions*/
array_sizes, /* number of eTypes in each dimension of the full array*/
array_subsizes, /* number of eTypes in each dimension of the subarray */
array_starts, /* starting coordinates of the subarray in each dimension*/
MPI_ORDER_C, /* array storage order flag (state) */
pattern->eType, /* eType (old datatype) */
&pattern->datatype);
MPI_Type_commit(&pattern->datatype);
break;
}
/* non-contiguous data */
case PATTERN2: {
int array_sizes[] = { iter, groupSize };
int array_subsizes[] = { iter, 1 };
int array_starts[] = { 0, groupRank };
MPI_Type_create_subarray(
2, /* number of array dimensions*/
array_sizes, /* number of eTypes in each dimension of the full array*/
array_subsizes, /* number of eTypes in each dimension of the subarray */
array_starts, /* starting coordinates of the subarray in each dimension*/
MPI_ORDER_C, /* array storage order flag (state) */
pattern->eType, /* eType (old datatype) */
&pattern->datatype);
MPI_Type_commit(&pattern->datatype);
break;
}
default: Error("Pattern%d not yet supported!\n", type);
}
g_hash_table_insert(patternMap, name, pattern);
}
Example 12: parms_InitComplex
/* ----------------- Initialize complex data type and ops for MPI ----*/
void parms_InitComplex()
{
MPI_Type_contiguous(2, MPI_DOUBLE, &MPI_CMPLX);
MPI_Type_commit( &MPI_CMPLX );
MPI_Op_create((MPI_User_function *)complex_sum, true, &MPI_CMPLX_SUM);
}
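complex_sum itself is not shown in the listing. A user-defined reduction operator compatible with the MPI_User_function signature and the two-doubles-per-complex layout above could look like the following sketch (the function body is illustrative, only the name and layout come from the code above):
static void complex_sum(void *in, void *inout, int *len, MPI_Datatype *dtype)
{
    /* each element of MPI_CMPLX is two doubles: (real, imaginary) */
    double *a = (double *) in;
    double *b = (double *) inout;
    for (int i = 0; i < *len; i++) {
        b[2*i]     += a[2*i];      /* real part */
        b[2*i + 1] += a[2*i + 1];  /* imaginary part */
    }
}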
Example 13: main
int main(int argc, char *argv[])
{
int rank;
MPI_Status status;
MPI_Datatype type;
double buffer[10] = {
1.11, 2.22, 3.33, 4.44, 5.55, 6.66, 7.77, 8.88, 9.99, 10.1010
};
MPI_Init(&argc, &argv);
MPI_Type_contiguous(5, MPI_DOUBLE, &type);
MPI_Type_commit(&type);   /* the type must be committed before it is used in MPI_Send/MPI_Recv */
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0)
{
MPI_Send(buffer, 2, type, 1, 123, MPI_COMM_WORLD);
}
else if (rank == 1)
{
double b[10];
MPI_Recv(b, 2, type, 0, 123, MPI_COMM_WORLD, &status);
}
MPI_Type_free(&type);
MPI_Finalize();
return 0;
}
Example 14: BBLSGraph::createDatatypes
void BBLSGraph::createDatatypes() {
// BBLSNode struct
int block_lengths[5];
block_lengths[0] = 1;
block_lengths[1] = 1;
block_lengths[2] = 1;
block_lengths[3] = 1;
block_lengths[4] = 1;
MPI_Aint displacements[5];
displacements[0] = offsetof(BBLSNode, type);
displacements[1] = offsetof(BBLSNode, output);
displacements[2] = offsetof(BBLSNode, inputLeft);
displacements[3] = offsetof(BBLSNode, inputRight);
displacements[4] = sizeof(BBLSNode);
MPI_Datatype types[5];
types[0] = MPI_INT;
types[1] = MPI_UNSIGNED;
types[2] = MPI_UNSIGNED;
types[3] = MPI_UNSIGNED;
types[4] = MPI_UB;
MPI_Type_struct(5, block_lengths, displacements, types, &mpi_nodeType);
MPI_Type_commit(&mpi_nodeType);
// 3 BBLSNodes
MPI_Type_contiguous(3, mpi_nodeType, &mpi_threeNodes);
MPI_Type_commit(&mpi_threeNodes);
}
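MPI_Type_struct and the MPI_UB marker used above were deprecated in MPI-2 and removed in MPI-3. On current implementations the same padded struct type is usually built with MPI_Type_create_struct plus MPI_Type_create_resized, roughly as in this sketch (field names follow the BBLSNode usage above; everything else is an assumption):
int block_lengths[4] = { 1, 1, 1, 1 };
MPI_Aint displacements[4] = {
    offsetof(BBLSNode, type),
    offsetof(BBLSNode, output),
    offsetof(BBLSNode, inputLeft),
    offsetof(BBLSNode, inputRight)
};
MPI_Datatype types[4] = { MPI_INT, MPI_UNSIGNED, MPI_UNSIGNED, MPI_UNSIGNED };

MPI_Datatype tmpType;
MPI_Type_create_struct(4, block_lengths, displacements, types, &tmpType);
/* stretch the extent to sizeof(BBLSNode) so arrays of nodes have the right stride */
MPI_Type_create_resized(tmpType, 0, sizeof(BBLSNode), &mpi_nodeType);
MPI_Type_free(&tmpType);
MPI_Type_commit(&mpi_nodeType);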
Example 15: escrita
void escrita()
{
int i;
MPI_Type_contiguous(TAMTUPLA, MPI_INT,&tupla);
MPI_Type_commit(&tupla);
ret = MPI_File_open( MPI_COMM_WORLD, "arquivofinal.dat",
MPI_MODE_WRONLY | MPI_MODE_CREATE,
MPI_INFO_NULL, &arquivofinal);
if (ret == 0)
printf("Arquivo final aberto com sucesso no processo %d \n", meu_ranque);
else
{
printf("Arquivo final aberto com erro no processo %d \n", meu_ranque);
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_File_set_view( arquivofinal, 0,
MPI_INT, MPI_INT,
"native", MPI_INFO_NULL);
for (i = 0; i < TAMBUF; i+=TAMTUPLA)
MPI_File_write_ordered( arquivofinal, buf_leitura + i, 1, tupla, MPI_STATUS_IGNORE);
MPI_File_close(&arquivofinal);
}