This article collects typical usage examples of the C++ MPI_Type_vector function. If you are wondering what exactly MPI_Type_vector does, how to call it, or what real uses look like, the hand-picked code examples below should help.
The following presents 15 code examples of MPI_Type_vector, ordered by popularity.
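Before the examples, a minimal sketch of the call itself: MPI_Type_vector(count, blocklength, stride, oldtype, &newtype) describes count blocks of blocklength elements of oldtype, with the starts of consecutive blocks stride elements apart. The two-process program below is illustrative only (buffer size, values, and tag are arbitrary):

#include <mpi.h>

/* Minimal sketch (run with at least two processes): send every 4th element of a
 * 40-element buffer as one message, i.e. 10 blocks of 1 double with stride 4. */
int main(int argc, char *argv[])
{
    double buf[40];
    MPI_Datatype strided;
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (int i = 0; i < 40; i++)
        buf[i] = i;
    MPI_Type_vector(10, 1, 4, MPI_DOUBLE, &strided);  /* count, blocklength, stride, oldtype */
    MPI_Type_commit(&strided);
    if (rank == 0)
        MPI_Send(buf, 1, strided, 1, 0, MPI_COMM_WORLD);  /* transfers buf[0], buf[4], ..., buf[36] */
    else if (rank == 1)
        MPI_Recv(buf, 10, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);  /* lands in buf[0..9] */
    MPI_Type_free(&strided);
    MPI_Finalize();
    return 0;
}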
Example 1: makeHDF5type1
int makeHDF5type1(MPI_Datatype *type)
{
MPI_Datatype ctg, vect, structype, vec2, structype2,
vec3, structype3, vec4, structype4, vec5;
int b[3];
MPI_Aint d[3];
MPI_Datatype t[3];
MPI_Type_contiguous(4, MPI_BYTE, &ctg);
MPI_Type_vector(1, 5, 1, ctg, &vect);
b[0] = b[1] = b[2] = 1;
d[0] = 0; d[1] = 20; d[2] = 40;
t[0] = MPI_LB; t[1] = vect; t[2] = MPI_UB;
MPI_Type_create_struct(3, b, d, t, &structype);
MPI_Type_vector(1, 5, 1, structype, &vec2);
b[0] = b[1] = b[2] = 1;
d[0] = 0; d[1] = 0; d[2] = 400;
t[0] = MPI_LB; t[1] = vec2; t[2] = MPI_UB;
MPI_Type_create_struct(3, b, d, t, &structype2);
MPI_Type_vector(1, 5, 1, structype2, &vec3);
b[0] = b[1] = b[2] = 1;
d[0] = 0; d[1] = 0; d[2] = 4000;
t[0] = MPI_LB; t[1] = vec3; t[2] = MPI_UB;
MPI_Type_create_struct(3, b, d, t, &structype3);
MPI_Type_vector(1, 5, 1, structype3, &vec4);
b[0] = b[1] = b[2] = 1;
d[0] = 0; d[1] = 0; d[2] = 40000;
t[0] = MPI_LB; t[1] = vec4; t[2] = MPI_UB;
MPI_Type_create_struct(3, b, d, t, &structype4);
MPI_Type_vector(1, 1, 1, structype4, &vec5);
b[0] = b[1] = b[2] = 1;
d[0] = 0; d[1] = 160000; d[2] = 200000;
t[0] = MPI_LB; t[1] = vec5; t[2] = MPI_UB;
MPI_Type_create_struct(3, b, d, t, type);
MPI_Type_free(&ctg);
MPI_Type_free(&vect);
MPI_Type_free(&structype);
MPI_Type_free(&vec2);
MPI_Type_free(&structype2);
MPI_Type_free(&vec3);
MPI_Type_free(&structype3);
MPI_Type_free(&vec4);
MPI_Type_free(&structype4);
MPI_Type_free(&vec5);
MPI_Type_commit(type);
return 0;
}
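Example 1 marks the type bounds with MPI_LB and MPI_UB, which were removed in MPI-3.0. A minimal sketch of the same bound-setting step for the first level using the current API; this is an assumed equivalent construction, not the original author's code:

MPI_Datatype placed, structype;
MPI_Aint disp = 20;
int blocklen = 1;
MPI_Type_create_struct(1, &blocklen, &disp, &vect, &placed);  /* place vect at byte offset 20 */
MPI_Type_create_resized(placed, 0, 40, &structype);           /* lb = 0, extent = 40 bytes */
MPI_Type_free(&placed);
/* For the later levels, where the inner vector sits at offset 0, a plain
 * MPI_Type_create_resized(vec2, 0, 400, &structype2) is enough. */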
Example 2: MPI_Init
void Communicator::init(int argc, char *argv[])
{
MPI_Init(&argc, &argv);
MPI_Type_vector(1, 2, 2, MPI_DOUBLE, &MPI_VECTOR2D_);
MPI_Type_vector(1, 4, 4, MPI_DOUBLE, &MPI_TENSOR2D_);
MPI_Type_commit(&MPI_VECTOR2D_);
MPI_Type_commit(&MPI_TENSOR2D_);
}
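Since both calls use count = 1, the stride argument has no effect here; the same types could be built with MPI_Type_contiguous, as in this equivalent sketch:

MPI_Type_contiguous(2, MPI_DOUBLE, &MPI_VECTOR2D_);   /* a 2D vector: two doubles */
MPI_Type_contiguous(4, MPI_DOUBLE, &MPI_TENSOR2D_);   /* a 2D tensor: four doubles */
MPI_Type_commit(&MPI_VECTOR2D_);
MPI_Type_commit(&MPI_TENSOR2D_);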
Example 3: heatMPIGather
/******************************************************
* Gathers all data on process 0
*
* For output and total energy calculation it is
* necessary to receive all sub-grids on process 0.
*
* It is a simple, but non-optimal implementation.
******************************************************/
void heatMPIGather (heatGrid *grid, dataMPI* mympi)
{
int block_size[4]; /*stores: x_start,y_start, num_cells_x, num_cells_y*/
MPI_Datatype blocktype;
MPI_Status status;
int i, size;
/*Slaves send data*/
if (mympi->rank != 0)
{
/*Prepare block info to be sent*/
block_size[0] = mympi->start_x;
block_size[1] = mympi->start_y;
block_size[2] = mympi->num_cells_x;
block_size[3] = mympi->num_cells_y;
/* Create datatype to communicate one block*/
MPI_Type_vector (
mympi->num_cells_y-1, /* #blocks */
mympi->num_cells_x, /* #elements per block */
grid->xsize+2, /* #stride */
MPI_DOUBLE, /* old type */
&blocktype /* new type */ );
MPI_Type_commit (&blocktype);
MPI_Send (block_size, 4, MPI_INT, 0, 123, MPI_COMM_WORLD);
MPI_Send (&grid->theta[mympi->start_y][mympi->start_x], 1, blocktype, 0, 123, MPI_COMM_WORLD);
MPI_Type_free (&blocktype);
}
else
/*Master Receives data*/
{
MPI_Comm_size (MPI_COMM_WORLD, &size);
for (i = 1; i < size; i++)
{
/*Receive Block Info*/
MPI_Recv (block_size, 4, MPI_INT, i, 123, MPI_COMM_WORLD, &status);
/* Create datatype to communicate one block*/
MPI_Type_vector (
block_size[3], /* #blocks */
block_size[2], /* #elements per block */
grid->xsize+2, /* #stride */
MPI_DOUBLE, /* old type */
&blocktype /* new type */ );
MPI_Type_commit (&blocktype);
MPI_Recv (&grid->theta[block_size[1]][block_size[0]], 1, blocktype, i, 123, MPI_COMM_WORLD, &status);
MPI_Type_free (&blocktype);
}
}
}
Example 4: main
int main(int argc, char *argv[])
{
int rank;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Datatype type;
MPI_Type_contiguous(2, MPI_INT, &type);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0)
{
int buffer[1000];
for (int i = 0; i < 1000; i++) {
buffer[i] = i;
}
// vtype = 2 blocks of 3 'type' elements (6 ints each); block starts are 5 'type' elements (10 ints) apart
MPI_Datatype vtype;
MPI_Type_vector(2, 3, 5, type, &vtype);
MPI_Type_commit(&vtype);
MPI_Send(buffer, 4, vtype, 1, 123, MPI_COMM_WORLD);
MPI_Send(buffer, 4, vtype, 1, 123, MPI_COMM_WORLD);
}
else if (rank == 1)
{
int buffer1[1000], buffer2[1000];
for (int i = 0; i < 1000; i++) {
buffer1[i] = -1;
buffer2[i] = -1;
}
MPI_Recv(buffer1, 48, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
for (int i = 0; i < 50; i++) {
printf("%i ", buffer1[i]);
}
printf("\n");
MPI_Datatype vtype;
MPI_Type_vector(4, 6, 7, MPI_INT, &vtype);
MPI_Type_commit(&vtype);
MPI_Recv(buffer2, 2, vtype, 0, 123, MPI_COMM_WORLD, &status);
for (int i = 0; i < 50; i++) {
printf("%i ", buffer2[i]);
}
printf("\n");
}
MPI_Finalize();
return 0;
}
Example 5: Setup_MPI_Datatypes
void Setup_MPI_Datatypes()
{
Debug("Setup_MPI_Datatypes", 0);
/* Datatype for vertical data exchange (Y_DIR) */
MPI_Type_vector(dim[X_DIR] - 2, 1, dim[Y_DIR], MPI_DOUBLE, &border_type[Y_DIR]);
MPI_Type_commit(&border_type[Y_DIR]);
/* Datatype for horizontal data exchange (X_DIR) */
MPI_Type_vector(dim[Y_DIR] - 2, 1, 1, MPI_DOUBLE, &border_type[X_DIR]);
MPI_Type_commit(&border_type[X_DIR]);
}
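A hedged sketch of how border_type[Y_DIR] might be used in the halo exchange; the flat row-major field phi, the neighbour ranks left/right from MPI_Cart_shift, and the communicator comm are assumptions, not part of the original example:

/* send our first interior column to the left neighbour and fill the right
 * ghost column with the column received from the right neighbour */
MPI_Sendrecv(&phi[1 * dim[Y_DIR] + 1],              1, border_type[Y_DIR], left,  0,
             &phi[1 * dim[Y_DIR] + dim[Y_DIR] - 1], 1, border_type[Y_DIR], right, 0,
             comm, MPI_STATUS_IGNORE);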
Example 6: get_datatypes
int get_datatypes( int *grid, int *start, int *end, \
MPI_Datatype *faces, int msg_fac )
{
int count, blocklength;
int stride;
MPI_Aint extent, i;
MPI_Datatype z_row, oneface[3];
/* set up datatype for x_faces */
count = end[1] - start[1] + 1;
blocklength = end[2] - start[2] + 1;
stride = grid[2];
MPI_Type_vector( count, blocklength, stride, MPI_DOUBLE, \
&oneface[0] );
/* set up datatype for y_faces */
count = end[0] - start[0] + 1;
blocklength = end[2] - start[2] + 1;
stride = grid[1] * grid[2];
MPI_Type_vector( count, blocklength, stride, MPI_DOUBLE, \
&oneface[1] );
/* set up datatype for z_faces */
count = end[1] - start[1] + 1;
blocklength = 1;
stride = grid[2];
MPI_Type_vector( count, blocklength, stride, MPI_DOUBLE, \
&z_row );
MPI_Type_commit( &z_row );
count = end[0] - start[0] + 1;
blocklength = 1;
MPI_Type_extent( MPI_DOUBLE, &extent );
extent = grid[1] * grid[2] * extent;
MPI_Type_hvector( count, blocklength, extent, z_row, \
&oneface[2] );
for( i=0 ; i<3 ; i++ )
{
MPI_Type_commit( &oneface[i] );
MPI_Type_vector( msg_fac, 1, 0, oneface[i], &faces[i] );
MPI_Type_commit( &faces[i] );
MPI_Type_free( &oneface[i] );
}
/* Free the z_row Type */
MPI_Type_free( &z_row );
return 0;
}
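MPI_Type_extent and MPI_Type_hvector in example 6 were deprecated long ago and removed in MPI-3.0. A sketch of the z-face construction with the current calls (same variables as above, plus an extra lb):

MPI_Aint lb;
MPI_Type_get_extent(MPI_DOUBLE, &lb, &extent);
extent = grid[1] * grid[2] * extent;
MPI_Type_create_hvector(count, blocklength, extent, z_row, &oneface[2]);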
Example 7: main
int main(int argc, char **argv)
{
MPI_Datatype vec;
MPI_Comm comm;
double *vecin, *vecout;
int minsize = 2, count;
int root, i, n, stride, errs = 0;
int rank, size;
MTest_Init(&argc, &argv);
while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
if (comm == MPI_COMM_NULL)
continue;
/* Determine the sender and receiver */
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
for (root = 0; root < size; root++) {
for (count = 1; count < 65000; count = count * 2) {
n = 12;
stride = 10;
vecin = (double *) malloc(n * stride * size * sizeof(double));
vecout = (double *) malloc(size * n * sizeof(double));
MPI_Type_vector(n, 1, stride, MPI_DOUBLE, &vec);
MPI_Type_commit(&vec);
for (i = 0; i < n * stride; i++)
vecin[i] = -2;
for (i = 0; i < n; i++)
vecin[i * stride] = rank * n + i;
MPI_Gather(vecin, 1, vec, vecout, n, MPI_DOUBLE, root, comm);
if (rank == root) {
for (i = 0; i < n * size; i++) {
if (vecout[i] != i) {
errs++;
if (errs < 10) {
fprintf(stderr, "vecout[%d]=%d\n", i, (int) vecout[i]);
}
}
}
}
MPI_Type_free(&vec);
free(vecin);
free(vecout);
}
}
MTestFreeComm(&comm);
}
/* do a zero length gather */
MPI_Gather(NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD);
MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
Example 8: partition_matrix
double* partition_matrix(double *a,
int N, int gd,
MPI_Datatype *type_block)
{
MPI_Datatype type_block_tmp;
int NB = N/gd;
double* b = malloc(NB*NB*sizeof(double));
MPI_Type_vector(NB, NB, N, MPI_DOUBLE, &type_block_tmp);
MPI_Type_create_resized(type_block_tmp, 0, sizeof(double), type_block);
MPI_Type_commit(type_block);
int counts[gd*gd];
int disps[gd*gd];
for (int i=0; i<gd; i++) {
for (int j=0; j<gd; j++) {
disps[i*gd+j] = i*N*NB+j*NB;
counts [i*gd+j] = 1;
}
}
MPI_Scatterv(a, counts, disps, *type_block, b, NB*NB, MPI_DOUBLE, 0, MPI_COMM_WORLD);
return b;
}
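A possible calling sequence, as a sketch only: the sizes, the way the global matrix is filled, and the assumption that the job is started with gd*gd processes are all illustrative, and <stdlib.h> is assumed for malloc/free.

int main(int argc, char *argv[])
{
    int N = 8, gd = 2, rank;
    MPI_Datatype type_block;
    double *a = NULL, *b;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0) {
        a = malloc(N * N * sizeof(double));
        for (int i = 0; i < N * N; i++) a[i] = i;
    }
    b = partition_matrix(a, N, gd, &type_block);  /* every rank receives one NB x NB block */
    /* ... work on the local block b ... */
    MPI_Type_free(&type_block);
    free(b);
    free(a);           /* free(NULL) is a no-op on non-root ranks */
    MPI_Finalize();
    return 0;
}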
Example 9: type_create_contiguous_x
static int type_create_contiguous_x(MPI_Count count,
MPI_Datatype oldtype, MPI_Datatype *newtype)
{
/* to make 'count' fit MPI-3 type processing routines (which take integer
* counts), we construct a type consisting of N INT_MAX chunks followed by
* a remainder. e.g for a count of 4000000000 bytes you would end up with
* one 2147483647-byte chunk followed immediately by a 1852516353-byte
* chunk */
MPI_Datatype chunks, remainder;
MPI_Aint lb, extent, disps[2];
int blocklens[2];
MPI_Datatype types[2];
MPI_Count c = count/INT_MAX;
MPI_Count r = count%INT_MAX;
MPI_Type_vector(c, INT_MAX, INT_MAX, oldtype, &chunks);
MPI_Type_contiguous(r, oldtype, &remainder);
MPI_Type_get_extent(oldtype, &lb, &extent);
blocklens[0] = 1; blocklens[1] = 1;
disps[0] = 0; disps[1] = c*extent*INT_MAX;
types[0] = chunks; types[1] = remainder;
MPI_Type_create_struct(2, blocklens, disps, types, newtype);
MPI_Type_free(&chunks);
MPI_Type_free(&remainder);
return MPI_SUCCESS;
}
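The helper above is static in its original source, so treat the call below purely as an illustration of describing a buffer larger than INT_MAX elements; the 3 GiB byte count and the commented-out MPI_File_write call are assumptions:

MPI_Datatype bigtype;
MPI_Count nbytes = (MPI_Count)3 * 1024 * 1024 * 1024;   /* 3 GiB, larger than INT_MAX */
type_create_contiguous_x(nbytes, MPI_BYTE, &bigtype);
MPI_Type_commit(&bigtype);
/* e.g. MPI_File_write(fh, buf, 1, bigtype, MPI_STATUS_IGNORE); */
MPI_Type_free(&bigtype);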
Example 10: distribute_matrix
void distribute_matrix(ATYPE *root_matrix, ATYPE *local_matrix, int local_rank, int proc_size, long partition, uint N){
int sendcounts[proc_size], displs[proc_size];
ATYPE *sendbuffer=NULL;
MPI_Datatype MPI_type, MPI_type2;
int rest = N - (partition * ( proc_size - 1) );
MPI_Type_vector(N, 1, N, ATYPE_MPI, &MPI_type2);
MPI_Type_create_resized( MPI_type2, 0, sizeof(ATYPE), &MPI_type);
MPI_Type_commit(&MPI_type);
for ( int i=0 ; i<proc_size ; ++i ){
if ( i == proc_size - 1 ) {
sendcounts[i] = rest;
}
else {
sendcounts[i] = partition;
}
displs[i] = i*partition;
}
if ( local_rank == root )
sendbuffer = &(root_matrix[0]);
MPI_Scatterv( sendbuffer, sendcounts, displs, MPI_type, &(local_matrix[0]), partition*N, ATYPE_MPI, root, MPI_COMM_WORLD );
MPI_Type_free(&MPI_type);
}
Example 11: init_mpi
static void init_mpi(void)
{
MPI_Comm_size(MPI_COMM_WORLD, &nproc); //get the number of processes
int dim = 2; //number of dimension
int procs[2] = {0,0}; //[0]: number of divisions in x, [1]: number of divisions in y
int period[2] = {0,0}; //boundary condition, 0 = fixed (non-periodic) boundary
MPI_Comm grid_comm;
int reorder = 1; //re-distribute rank flag
MPI_Dims_create(nproc, dim, procs); //automatically compute how many divisions to use in each direction
MPI_Cart_create(MPI_COMM_WORLD, 2, procs, period, reorder, &grid_comm); //partition the domain automatically => procs and grid_comm are modified
MPI_Cart_shift(grid_comm, 0, 1, &ltRank, &rtRank);
MPI_Cart_shift(grid_comm, 1, 1, &bmRank, &tpRank);
//find this process's position in the process grid (which row and column it is in)
int coordinates[2];
MPI_Comm_rank(grid_comm, &rank);
MPI_Cart_coords(grid_comm, rank, 2, coordinates);
SUB_N_X = N_PX / procs[0];
SUB_N_Y = N_PY / procs[1];
SUB_N_PX = SUB_N_X + 2; //2 larger to hold the ghost cells (where values from the neighbouring subdomains are stored)
SUB_N_PY = SUB_N_Y + 2;
SUB_N_CELL = SUB_N_PX*SUB_N_PY;
offsetX = coordinates[0] * SUB_N_X; //offset in cells, not a rank index, so it advances in steps of SUB_N_X
offsetY = coordinates[1] * SUB_N_Y;
/* This picks one element every SUB_N_PY entries (a gap of SUB_N_PY-1 between consecutive elements), repeated for SUB_N_X rows */
MPI_Type_vector(SUB_N_X, 1, SUB_N_PY, MPI_C_DOUBLE_COMPLEX, &X_DIRECTION_DOUBLE_COMPLEX);
MPI_Type_commit(&X_DIRECTION_DOUBLE_COMPLEX);
}
Example 12: transpose_type
/* Extract an m x n submatrix within an m x N matrix and transpose it.
Assume storage by rows; the defined datatype accesses by columns */
MPI_Datatype transpose_type(int N, int m, int n, MPI_Datatype type)
/* computes a datatype for the transpose of an mxn matrix
with entries of type type */
{
MPI_Datatype subrow, subrow1, submatrix;
MPI_Aint lb, extent;
MPI_Type_vector(m, 1, N, type, &subrow);
MPI_Type_get_extent(type, &lb, &extent);
MPI_Type_create_resized(subrow, 0, extent, &subrow1);
MPI_Type_contiguous(n, subrow1, &submatrix);
MPI_Type_commit(&submatrix);
MPI_Type_free( &subrow );
MPI_Type_free( &subrow1 );
/* Add a consistency test: the size of submatrix should be
n * m * sizeof(type) and the extent should be ((m-1)*N+n) * sizeof(type) */
{
int tsize;
MPI_Aint textent, llb;
MPI_Type_size( type, &tsize );
MPI_Type_get_true_extent( submatrix, &llb, &textent );
if (textent != tsize * (N * (m-1)+n)) {
fprintf( stderr, "Transpose Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
(long)textent, (long)(tsize * (N * (m-1)+n)), N, n, m );
}
}
return(submatrix);
}
Example 13: submatrix_type
/* Define an n x m submatrix in a n x M local matrix (this is the
destination in the transpose matrix */
MPI_Datatype submatrix_type(int M, int m, int n, MPI_Datatype type)
/* computes a datatype for an mxn submatrix within an MxN matrix
with entries of type type */
{
/* MPI_Datatype subrow; */
MPI_Datatype submatrix;
/* The book, MPI: The Complete Reference, has the wrong type constructor
here. Since the stride in the vector type is relative to the input
type, the stride in the book's code is n times as long as is intended.
Since n may not exactly divide N, it is better to simply use the
blocklength argument in Type_vector */
/*
MPI_Type_contiguous(n, type, &subrow);
MPI_Type_vector(m, 1, N, subrow, &submatrix);
*/
MPI_Type_vector(n, m, M, type, &submatrix );
MPI_Type_commit(&submatrix);
/* Add a consistency test: the size of submatrix should be
n * m * sizeof(type) and the extent should be ((n-1)*M+m) * sizeof(type) */
{
int tsize;
MPI_Aint textent, lb;
MPI_Type_size( type, &tsize );
MPI_Type_get_extent( submatrix, &lb, &textent );
if (textent != tsize * (M * (n-1)+m)) {
fprintf( stderr, "Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
(long)textent, (long)(tsize * (M * (n-1)+m)), M, n, m );
}
}
return(submatrix);
}
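Examples 12 and 13 come from the same test and are meant to be paired. A hedged sketch of using them together to transpose the top-left m x n block of a row-major m x N matrix a into the top-left n x m block of a row-major n x M matrix at; the buffers, sizes, and the self-Sendrecv are assumptions:

MPI_Datatype xpose = transpose_type(N, m, n, MPI_DOUBLE);   /* reads a column by column */
MPI_Datatype dest  = submatrix_type(M, m, n, MPI_DOUBLE);   /* writes at row by row */
MPI_Sendrecv(a,  1, xpose, rank, 0,
             at, 1, dest,  rank, 0,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);            /* afterwards at[j][i] == a[i][j] */
MPI_Type_free(&xpose);
MPI_Type_free(&dest);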
Example 14: gather_image
void gather_image(){
// MPI type for image gathering
MPI_Datatype image_gathering_t;
MPI_Type_vector(local_image_size[0],
local_image_size[1], local_image_size[1]+2*BORDER, MPI_UNSIGNED_CHAR, &image_gathering_t);
MPI_Type_commit(&image_gathering_t);
MPI_Request req[size];
// gather image data at rank 0
if(rank == 0){
// receive data from all ranks
for(int i = 0; i < size; i++){
// calc offset of these data
int thisCoords[2];
MPI_Cart_coords(cart_comm, i, 2, thisCoords ); // coords of this rank
int offset = thisCoords[0] * local_image_size[0] * image_size[1] + thisCoords[1] * local_image_size[1];
// receive data
MPI_Irecv(&image[offset], 1, image_t, i, 99, cart_comm, req+i);
}
}
// send image data to rank 0
MPI_Send(&F(ITERATIONS,0,0), 1, image_gathering_t, 0, 99, cart_comm);
// wait until all borders are received
if(rank == 0){
MPI_Waitall(size, req, MPI_STATUSES_IGNORE);
}
}
Example 15: main
int main(int argc, char **argv) {
int rank;
double a[SIZE][SIZE] = {{0}};
MPI_Datatype columntype;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Type_vector(SIZE, 1, SIZE, MPI_DOUBLE, &columntype);
MPI_Type_commit(&columntype);
if (rank == 0) {
for (int i = 0; i < SIZE; i++)
for (int j = 0; j < SIZE; j++)
a[i][j] = i*SIZE+j;
}
/* only one column is sent; this is an example of non-contiguous data */
MPI_Bcast(a, 1, columntype, 0, MPI_COMM_WORLD);
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
printf("rank= %d, a[%d][%d]=%f\n", rank, i, j, a[i][j]);
}
printf("\n");
}
MPI_Type_free(&columntype);
MPI_Finalize();
return 0;
}
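A closing note on example 15: the vector type describes offsets relative to whatever buffer address is passed in, so the same columntype can broadcast any column j by shifting the starting address. A small sketch (the column index j is illustrative, and the call would replace the MPI_Bcast above):

int j = 2;                                              /* any column index, 0 <= j < SIZE */
MPI_Bcast(&a[0][j], 1, columntype, 0, MPI_COMM_WORLD);  /* broadcasts a[0][j], a[1][j], ..., a[SIZE-1][j] */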