This article collects typical usage examples of the C++ function MPI_Cart_coords. If you have been wondering what MPI_Cart_coords does in C++, how to use it, or what real-world calls look like, the hand-picked code examples below may help.
In what follows, 15 code examples of MPI_Cart_coords are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
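For orientation, here is a minimal, self-contained sketch of the call itself (a hypothetical stand-alone program, not one of the examples below): MPI_Cart_coords translates a rank in a Cartesian communicator into its grid coordinates.
#include <mpi.h>
#include <stdio.h>
int main(int argc, char *argv[]) {
MPI_Init(&argc, &argv);
int size;
MPI_Comm_size(MPI_COMM_WORLD, &size);
// Let MPI choose a balanced 2-D process grid, then build the communicator.
int dims[2] = {0, 0}, periods[2] = {0, 0};
MPI_Dims_create(size, 2, dims);
MPI_Comm cart;
MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart);
// Translate this process's rank in cart into (row, col) coordinates.
int cart_rank, coords[2];
MPI_Comm_rank(cart, &cart_rank);
MPI_Cart_coords(cart, cart_rank, 2, coords);
printf("rank %d -> coords (%d, %d)\n", cart_rank, coords[0], coords[1]);
MPI_Finalize();
return 0;
}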
Example 1: MPI_Cart_create
/** Within CART_COMM, processes find out their new rank numbers, their Cartesian coordinates,
and their neighbors */
inline void VCtopology3D::setup_vctopology(MPI_Comm old_comm) {
// create the Cartesian topology, with ranks and neighbours, for fields
MPI_Cart_create(old_comm, 3, divisions, periods, reorder, &CART_COMM);
// create the Cartesian topology, with ranks and neighbours, for Particles
MPI_Cart_create(old_comm, 3, divisions, periods_P, reorder, &CART_COMM_P);
// field Communicator
if (CART_COMM != MPI_COMM_NULL) {
MPI_Comm_rank(CART_COMM, &cartesian_rank);
MPI_Comm_size(CART_COMM, &nproc);
MPI_Cart_coords(CART_COMM, cartesian_rank, 3, coordinates);
MPI_Cart_shift(CART_COMM, XDIR, RIGHT, &xleft_neighbor, &xright_neighbor);
MPI_Cart_shift(CART_COMM, YDIR, RIGHT, &yleft_neighbor, &yright_neighbor);
MPI_Cart_shift(CART_COMM, ZDIR, RIGHT, &zleft_neighbor, &zright_neighbor);
}
else {
// EXCEPTION
cout << "A process is trown away from the new topology for fields. VCtopology3D.h" << endl;
}
// Particles Communicator
if (CART_COMM_P != MPI_COMM_NULL) {
MPI_Comm_rank(CART_COMM_P, &cartesian_rank);
MPI_Comm_size(CART_COMM_P, &nproc);
MPI_Cart_coords(CART_COMM_P, cartesian_rank, 3, coordinates);
MPI_Cart_shift(CART_COMM_P, XDIR, RIGHT, &xleft_neighbor_P, &xright_neighbor_P);
MPI_Cart_shift(CART_COMM_P, YDIR, RIGHT, &yleft_neighbor_P, &yright_neighbor_P);
MPI_Cart_shift(CART_COMM_P, ZDIR, RIGHT, &zleft_neighbor_P, &zright_neighbor_P);
}
else {
// EXCEPTION
cout << "A process is trown away from the new topology for Particles. VCtopology3D.h" << endl;
}
}
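Once the shifts have produced the neighbor ranks, they typically feed straight into point-to-point halo exchanges. A minimal sketch of one such exchange along X, assuming hypothetical contiguous buffers edge_left/edge_right (data to send) and ghost_left/ghost_right (ghost layers to fill), each holding count doubles; only CART_COMM and the neighbor variables come from the example above:
MPI_Status stat;
// send our left edge to the left neighbor, receive our right ghost layer
MPI_Sendrecv(edge_left, count, MPI_DOUBLE, xleft_neighbor, 0,
             ghost_right, count, MPI_DOUBLE, xright_neighbor, 0,
             CART_COMM, &stat);
// send our right edge to the right neighbor, receive our left ghost layer
MPI_Sendrecv(edge_right, count, MPI_DOUBLE, xright_neighbor, 0,
             ghost_left, count, MPI_DOUBLE, xleft_neighbor, 0,
             CART_COMM, &stat);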
Example 2: create_MPI_cartesian_grid
//define the cartesian grid
void create_MPI_cartesian_grid()
{
#ifdef USE_MPI
coords periods;
for(int mu=0;mu<NDIM;mu++) periods[mu]=1;
MPI_Cart_create(MPI_COMM_WORLD,NDIM,nrank_dir,periods,1,&cart_comm);
//get the rank and coordinates of the local rank
MPI_Comm_rank(cart_comm,&cart_rank);
MPI_Cart_coords(cart_comm,cart_rank,NDIM,rank_coord);
//create a communicator in each plane orthogonal to direction mu
for(int mu=0;mu<NDIM;mu++)
{
coords split_plan;
coords proj_rank_coord;
for(int nu=0;nu<NDIM;nu++)
{
split_plan[nu]=(nu==mu) ? 0 : 1;
proj_rank_coord[nu]=(nu==mu) ? 0 : rank_coord[nu];
}
MPI_Cart_sub(cart_comm,split_plan,&(plan_comm[mu]));
MPI_Comm_rank(plan_comm[mu],&(plan_rank[mu]));
if(plan_rank[mu]!=rank_of_coord(proj_rank_coord))
crash("Plan communicator has messed up coord: %d and rank %d (implement reorder!)",
rank_of_coord(proj_rank_coord),plan_rank[mu]);
}
//create a communicator along each line (direction mu only)
for(int mu=0;mu<NDIM;mu++)
{
//split the communicator
coords split_line;
memset(split_line,0,sizeof(coords));
split_line[mu]=1;
MPI_Cart_sub(cart_comm,split_line,&(line_comm[mu]));
//get rank id
MPI_Comm_rank(line_comm[mu],&(line_rank[mu]));
//get rank coord along line comm
MPI_Cart_coords(line_comm[mu],line_rank[mu],1,&(line_coord_rank[mu]));
//check communicator
if(line_rank[mu]!=rank_coord[mu] || line_rank[mu]!=line_coord_rank[mu])
crash("Line communicator has messed up coord and rank (implement reorder!)");
}
#else
cart_rank=plan_rank=line_rank=0;
for(int mu=0;mu<NDIM;mu++) rank_coord[mu]=planline_coord[mu]=0;
#endif
}
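The per-direction communicators built above make axis-wise collectives one-liners. A short sketch, assuming the example's globals (NDIM, line_comm) are in scope, of summing one double across each line of ranks:
// sum local over all ranks sharing this rank's coordinates in every
// direction except mu (i.e. along the mu line)
double local=1.0,summed[NDIM];
for(int mu=0;mu<NDIM;mu++)
  MPI_Allreduce(&local,&summed[mu],1,MPI_DOUBLE,MPI_SUM,line_comm[mu]);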
Example 3: init_mpi
static void init_mpi(void)
{
MPI_Comm_size(MPI_COMM_WORLD, &nproc); //get the number of processes
int dim = 2; //number of dimensions
int procs[2] = {0,0}; //[0]: number of subdivisions in x, [1]: in y (filled in by MPI_Dims_create)
int period[2] = {0,0}; //boundary conditions: 0 means fixed (non-periodic) boundaries
MPI_Comm grid_comm;
int reorder = 1; //re-distribute rank flag
MPI_Dims_create(nproc, dim, procs); //automatically choose how many subdivisions per direction
MPI_Cart_create(MPI_COMM_WORLD, 2, procs, period, reorder, &grid_comm); //automatically partition the domain => procs and grid_comm are set
MPI_Cart_shift(grid_comm, 0, 1, &ltRank, &rtRank);
MPI_Cart_shift(grid_comm, 1, 1, &bmRank, &tpRank);
//find where this process sits in the process grid (which row and which column)
int coordinates[2];
MPI_Comm_rank(grid_comm, &rank);
MPI_Cart_coords(grid_comm, rank, 2, coordinates);
SUB_N_X = N_PX / procs[0];
SUB_N_Y = N_PY / procs[1];
SUB_N_PX = SUB_N_X + 2; //2 larger, for the ghost cells that hold values from the neighboring subdomains
SUB_N_PY = SUB_N_Y + 2;
SUB_N_CELL = SUB_N_PX*SUB_N_PY;
offsetX = coordinates[0] * SUB_N_X; //offset in cells, not in rank indices, hence a multiple of SUB_N_X
offsetY = coordinates[1] * SUB_N_Y;
/* this takes one element every SUB_N_PY entries (a gap of SUB_N_PY-1 between consecutive elements), repeated over SUB_N_X rows */
MPI_Type_vector(SUB_N_X, 1, SUB_N_PY, MPI_C_DOUBLE_COMPLEX, &X_DIRECTION_DOUBLE_COMPLEX);
MPI_Type_commit(&X_DIRECTION_DOUBLE_COMPLEX);
}
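The strided type committed at the end is what makes exchanging a non-contiguous boundary line cheap: one element of X_DIRECTION_DOUBLE_COMPLEX covers a whole x-line (fixed y, all SUB_N_X interior x values). A hedged sketch of a y-direction ghost exchange, assuming the field U (a double complex array of SUB_N_CELL entries, indexed as x*SUB_N_PY + y) and grid_comm are kept available rather than local to init_mpi:
MPI_Status st;
// send our lowest interior line (y = 1) to bmRank and receive the high-side
// ghost line (y = SUB_N_PY-1) from tpRank in a single call
MPI_Sendrecv(&U[1*SUB_N_PY + 1], 1, X_DIRECTION_DOUBLE_COMPLEX, bmRank, 0,
             &U[1*SUB_N_PY + (SUB_N_PY-1)], 1, X_DIRECTION_DOUBLE_COMPLEX, tpRank, 0,
             grid_comm, &st);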
Example 4: main
int
main(int argc, char *argv[])
{
int my_rank;
int comm_sz;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &comm_sz);
MPI_Comm grid;
int dim[2] = {4, 3};
int period[2] = {1, 0};
int reorder = 1;
int grid_rank = 0;
int coord[2];
MPI_Cart_create(MPI_COMM_WORLD, 2, dim, period, reorder, &grid);
MPI_Comm_rank(grid, &grid_rank); // rank in the (possibly reordered) grid communicator
MPI_Cart_coords(grid, grid_rank, 2, coord);
fprintf(stdout, "my grid rank is %02d, my grid coordinate is (%d, %d) -- my global rank is %d of %d; \n",
grid_rank, coord[0], coord[1], my_rank, comm_sz);
MPI_Finalize();
return 0;
}
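MPI_Cart_rank is the inverse mapping; a tiny sketch (same grid as above, assert from <assert.h> assumed) verifying the round trip:
// coordinates -> rank must give back the rank we started from
int back_rank;
MPI_Cart_rank(grid, coord, &back_rank);
assert(back_rank == grid_rank);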
Example 5: main
int main(int argc, char *argv[]) {
int SIZE = atoi(argv[1]);
MPI_Status status;
MPI_Comm comm_3d;
int rank, p;
int dims[3], periods[3], coords[3];
dims[0] = dims[1] = dims[2] = periods[0] = periods[1] = periods[2] = 0;
int i;
int *b;
int *my_b = (int *) malloc(SIZE * sizeof(int));
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
// Initialize the vector that is broadcast.
if (rank == 0){
b = (int *) malloc(SIZE * sizeof(int));
for (i = 0; i < SIZE; i++) my_b[i] = b[i] = i;
}
// Create the mesh.
MPI_Dims_create(p, 3, dims);
MPI_Cart_create(MPI_COMM_WORLD, 3, dims, periods, 0, &comm_3d);
// Load the coordinates.
MPI_Cart_coords(comm_3d, rank, 3, coords);
// The origin starts the broadcast; each zero-coordinate plane forwards it along the next dimension.
double start = MPI_Wtime();
int dim_0_succ, dim_0_pred, dim_1_succ, dim_1_pred, dim_2_succ, dim_2_pred;
dim_0_succ = dim_0_pred = dim_1_succ = dim_1_pred = dim_2_succ = dim_2_pred = 0;
MPI_Cart_shift(comm_3d, 0, 1, &dim_0_pred, &dim_0_succ);
MPI_Cart_shift(comm_3d, 1, 1, &dim_1_pred, &dim_1_succ);
MPI_Cart_shift(comm_3d, 2, 1, &dim_2_pred, &dim_2_succ);
if (coords[0] == 0 && coords[1] == 0 && coords[2] == 0) {
MPI_Send(b, SIZE, MPI_INT, dim_0_succ, 0, MPI_COMM_WORLD);
MPI_Send(b, SIZE, MPI_INT, dim_1_succ, 0, MPI_COMM_WORLD);
MPI_Send(b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
} else if (coords[1] == 0 && coords[2] == 0){
MPI_Recv(my_b, SIZE, MPI_INT, dim_0_pred, 0, MPI_COMM_WORLD, &status);
MPI_Send(my_b, SIZE, MPI_INT, dim_0_succ, 0, MPI_COMM_WORLD);
MPI_Send(my_b, SIZE, MPI_INT, dim_1_succ, 0, MPI_COMM_WORLD);
MPI_Send(my_b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
} else if (coords[2] == 0){
MPI_Recv(my_b, SIZE, MPI_INT, dim_1_pred, 0, MPI_COMM_WORLD, &status);
MPI_Send(my_b, SIZE, MPI_INT, dim_1_succ, 0, MPI_COMM_WORLD);
MPI_Send(my_b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
} else {
MPI_Recv(my_b, SIZE, MPI_INT, dim_2_pred, 0, MPI_COMM_WORLD, &status);
MPI_Send(my_b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
}
double end = MPI_Wtime();
if (rank == 0) printf("%d %f\n", SIZE, end - start);
MPI_Finalize();
}
Example 6: initial_send
void initial_send(MPI_Comm &Comm_Cart,int rank, float **A_tmp, float **A,
float **B_tmp,float **B, MPI_Status &status, int size, int n){
int mycoords[2] ={0,0};
MPI_Cart_coords(Comm_Cart,rank,2,mycoords);
int a_left_displ, a_right_displ, b_top_displ, b_bot_displ;
a_left_displ=a_right_displ=b_top_displ=b_bot_displ=rank;
// Skew the operands: A(i,j) moves i steps along dim 1 (to the left), B(i,j) moves j steps along dim 0 (to the top)
MPI_Cart_shift(Comm_Cart, 0, mycoords[1], &b_top_displ, &b_bot_displ);
MPI_Cart_shift(Comm_Cart, 1, mycoords[0], &a_left_displ, &a_right_displ);
float *sendptrA, *recvptrA,*sendptrB, *recvptrB;
sendptrA = &(A[0][0]);
recvptrA = &(A_tmp[0][0]);
sendptrB = &(B[0][0]);
recvptrB = &(B_tmp[0][0]);
// Sends initial values of A to the left
MPI_Sendrecv(sendptrA,n*n, MPI_FLOAT, a_left_displ, lr_tag,
recvptrA,n*n, MPI_FLOAT, a_right_displ, lr_tag,
Comm_Cart, &status);
// Sends initial values of B to the top
MPI_Sendrecv(sendptrB,n*n, MPI_FLOAT, b_top_displ, bt_tag,
recvptrB,n*n, MPI_FLOAT, b_bot_displ, bt_tag,
Comm_Cart, &status);
}
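After this initial skew, Cannon's algorithm continues with unit shifts in every iteration. A hedged sketch of one such step, reusing the example's communicator, tags, and status (MPI_Sendrecv_replace keeps each block in place):
// one Cannon iteration: shift A one step left (dim 1) and B one step up (dim 0)
int up, down, left, right;
MPI_Cart_shift(Comm_Cart, 0, 1, &up, &down);
MPI_Cart_shift(Comm_Cart, 1, 1, &left, &right);
MPI_Sendrecv_replace(&(A[0][0]), n*n, MPI_FLOAT, left, lr_tag, right, lr_tag,
                     Comm_Cart, &status);
MPI_Sendrecv_replace(&(B[0][0]), n*n, MPI_FLOAT, up, bt_tag, down, bt_tag,
                     Comm_Cart, &status);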
Example 7: topo_cartesian
void topo_cartesian(int rank, int *dims, int *coords, int *neigh)
{
int periods[3];
MPI_Comm commcart;
periods[0]=1;
periods[1]=1;
periods[2]=1;
// creation of cartesian communicator
MPI_Cart_create(MPI_COMM_WORLD,3,dims,periods,0,&commcart);
// getting the cartesian position
MPI_Cart_coords(commcart,rank,3,coords);
// getting the neighbors
MPI_Cart_shift(commcart,0,1,neigh+0,neigh+1); //X
MPI_Cart_shift(commcart,1,1,neigh+2,neigh+3); //Y
MPI_Cart_shift(commcart,2,1,neigh+4,neigh+5); //Z
printf(" proc #%d has coordinates %d %d %d and neighbors Xm=%d Xp=%d Ym=%d Yp=%d Zm=%d Zp=%d \n",rank,coords[0],coords[1],coords[2],neigh[0],neigh[1],neigh[2],neigh[3],neigh[4],neigh[5]);
// printf(" dims = %d %d %d\n",dims[0],dims[1],dims[2]);
}
Example 8: create_grid
void create_grid(int myrank, int gd,
MPI_Comm* comm_grid, MPI_Comm* comm_row, MPI_Comm* comm_col)
{
int dims[2] = {gd, gd};
int coords[2]; // coords[0] = i, coords[1] = j
int periods[2];
int reorder;
int grid_rank;
int subdivision[2];
periods[0] = 0 ;
periods[1] = 1 ;
reorder = 1 ;
MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, reorder, comm_grid);
MPI_Cart_coords(*comm_grid, myrank, 2, coords); //Outputs the i,j coordinates of the process
MPI_Cart_rank(*comm_grid, coords, &grid_rank); //Outputs the rank of the process
subdivision[0] = 1;
subdivision[1] = 0;
MPI_Cart_sub (*comm_grid,subdivision,comm_col); // column communicator: dimension 0 (i) kept
subdivision[0] = 0;
subdivision[1] = 1;
MPI_Cart_sub (*comm_grid,subdivision,comm_row); // row communicator: dimension 1 (j) kept
}
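These sub-communicators are what grid algorithms such as Fox's or SUMMA matrix multiplication use for per-row and per-column broadcasts. A minimal sketch of a caller, assuming myrank and gd are in scope and a hypothetical 64-double payload:
MPI_Comm grid, row, col;
create_grid(myrank, gd, &grid, &row, &col);
// broadcast a block along this process's row; every process takes part in
// exactly one row broadcast (root 0 is the first process of each row comm)
double block[64];
MPI_Bcast(block, 64, MPI_DOUBLE, 0, row);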
Example 9: fixBins
/** This splits up the particles by their x,y coords and sends them
to their appropriate processes */
void
fixBins(int n_local, particle_t* particles, MPI_Comm comm) {
for (int p = 0; p < P; p++ ) {
int coords[2];
MPI_Cart_coords(comm, p, 2, coords);
int fixBinSend = 50;
particle_t* keepList = (particle_t *)malloc (n_local * sizeof(particle_t));
int currentIndex=0;
for (int i = 0; i < n_local; i++) {
int particleBoxX = particles[i].x / regionWidth;
int particleBoxY = particles[i].y / regionWidth;
// keep the particle if its box matches process p's grid coordinates
if (particleBoxX == coords[0] && particleBoxY == coords[1]) {
keepList[currentIndex] = particles[i];
currentIndex++;
}
}
MPI_Send(keepList, currentIndex, MPI_PARTICLE_T, p, fixBinSend, comm);
free(keepList);
}
}
Example 10: grid_changed_n_nodes
void grid_changed_n_nodes()
{
int per[3] = { 1, 1, 1 };
GRID_TRACE(fprintf(stderr,"%d: grid_changed_n_nodes:\n",this_node));
MPI_Comm_free(&comm_cart);
MPI_Cart_create(MPI_COMM_WORLD, 3, node_grid, per, 0, &comm_cart);
MPI_Comm_rank(comm_cart, &this_node);
MPI_Cart_coords(comm_cart, this_node, 3, node_pos);
calc_node_neighbors(this_node);
#ifdef GRID_DEBUG
fprintf(stderr,"%d: node_pos=(%d,%d,%d)\n",this_node,node_pos[0],node_pos[1],node_pos[2]);
fprintf(stderr,"%d: node_neighbors=(%d,%d,%d,%d,%d,%d)\n",this_node,
node_neighbors[0],node_neighbors[1],node_neighbors[2],
node_neighbors[3],node_neighbors[4],node_neighbors[5]);
fprintf(stderr,"%d: boundary=(%d,%d,%d,%d,%d,%d)\n",this_node,
boundary[0],boundary[1],boundary[2],boundary[3],boundary[4],boundary[5]);
#endif
grid_changed_box_l();
}
Example 11: main
/* A two-dimensional torus of 12 processes in a 4x3 grid */
int main(int argc, char *argv[])
{
int rank, size;
MPI_Comm comm;
int dim[2], period[2], reorder;
int coord[2], id;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size != 12)
{
printf("Please run with 12 processes.\n");fflush(stdout);
MPI_Abort(MPI_COMM_WORLD, 1);
}
dim[0]=4; dim[1]=3;
period[0]=0; period[1]=1;
reorder=1;
MPI_Cart_create(MPI_COMM_WORLD, 2, dim, period, reorder, &comm);
if (rank == 5)
{
MPI_Cart_coords(comm, rank, 2, coord);
printf("Rank %d coordinates are %d %d\n", rank, coord[0], coord[1]);fflush(stdout);
}
if(rank==0)
{
coord[0]=3; coord[1]=1;
MPI_Cart_rank(comm, coord, &id);
printf("The processor at position (%d, %d) has rank %d\n", coord[0], coord[1], id);fflush(stdout);
}
MPI_Finalize();
return 0;
}
Example 12: grid_setup
void grid_setup(struct grid_info *grid) {
/* get global data */
MPI_Comm_size(MPI_COMM_WORLD, &(grid->nr_world_processes));
MPI_Comm_rank(MPI_COMM_WORLD, &(grid->my_world_rank));
/* compute how many processes per side the grid will have */
grid->ppside = intsqrt(grid->nr_world_processes);
/* create the communicator for the grid topology */
int dimensions[2] = {grid->ppside, grid->ppside};
int wrap_around[2] = {TRUE, TRUE};
int reorder = TRUE;
MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions, wrap_around, reorder, &(grid->comm));
MPI_Comm_rank(grid->comm, &(grid->my_rank));
/* get this process's grid coordinates */
int coordinates[2];
MPI_Cart_coords(grid->comm, grid->my_rank, 2, coordinates);
grid->my_row = coordinates[0];
grid->my_col = coordinates[1];
/* get communicators for this process's row and column */
int free_coords_for_rows[] = {FALSE, TRUE};
int free_coords_for_cols[] = {TRUE, FALSE};
MPI_Cart_sub(grid->comm, free_coords_for_rows, &(grid->row_comm));
MPI_Cart_sub(grid->comm, free_coords_for_cols, &(grid->col_comm));
}
Example 13: initMatrices
void initMatrices(double pha[], double phb[], double phc[], int m_ar, int m_br, MPI_Comm comm_cart){
int coords[2];
MPI_Cart_coords(comm_cart, rank, 2, coords);
int Nj = coords[0];
int Ni = coords[1];
long int i,j;
double f = 1.0;
for(i=0; i< m_ar; i++){
for(j=0; j< m_br; j++) {
//pha[i*m_br + j] = (Ni*m_ar + i) == (Nj*m_ar + j) ? 1.0 : 0.0;
pha[i*m_br + j] = f;
f++;
}
}
for(i=0; i< m_br; i++){
for(j=0; j< m_ar; j++) {
phb[i*m_ar + j] = (Ni*m_ar + i) == (Nj*m_ar + j) ? 1.0 : 0.0;
//phb[i*m_ar + j] = (double)(i+1);
}
}
for(i=0; i< m_ar; i++){
for(j=0; j< m_ar; j++) {
phc[i*m_ar + j] = 0;
}
}
}
Example 14: gather_image
void gather_image(){
// MPI type for image gathering
MPI_Datatype image_gathering_t;
MPI_Type_vector(local_image_size[0],
local_image_size[1], local_image_size[1]+2*BORDER, MPI_UNSIGNED_CHAR, &image_gathering_t);
MPI_Type_commit(&image_gathering_t);
MPI_Request req[size];
// gather image data at rank 0
if(rank == 0){
// receive data from all ranks
for(int i = 0; i < size; i++){
// calc offset of these data
int thisCoords[2];
MPI_Cart_coords(cart_comm, i, 2, thisCoords ); // coords of this rank
int offset = thisCoords[0] * local_image_size[0] * image_size[1] + thisCoords[1] * local_image_size[1];
// receive data
MPI_Irecv(&image[offset], 1, image_t, i, 99, cart_comm, req+i);
}
}
// send image data to rank 0
MPI_Send(&F(ITERATIONS,0,0), 1, image_gathering_t, 0, 99, cart_comm);
// wait until all image pieces have been received
if(rank == 0){
MPI_Waitall(size, req, MPI_STATUSES_IGNORE);
}
}
Example 15: neighbour_table
/* Function that creates a list of the neighbours of each process */
inline void neighbour_table(int* neighbours, MPI_Comm grid, int proc_rank){
int move, id;
int coord[2];
id = 1;
// shift by one step from one process to the next ==> move = 1
move = 1;
// get Left and Right
MPI_Cart_shift(grid, id, move, &neighbours[L], &neighbours[R]);
id = 0;
// get Up and Down
MPI_Cart_shift(grid, id, move, &neighbours[U], &neighbours[D]);
// get current proc coordinates
MPI_Cart_coords(grid, proc_rank, 2, coord);
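// NOTE: the diagonal lookups below assume a fully periodic grid;
// with a non-periodic dimension, MPI_Cart_rank would fail on out-of-range coordinates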
coord[0]--;
coord[1]--;
// determine Up-Left neighbour
MPI_Cart_rank(grid, coord, &neighbours[UL]);
coord[1]+=2;
// determine Up-Right neighbour
MPI_Cart_rank(grid, coord, &neighbours[UR]);
coord[0]+=2;
// determine Down-Right neighbour
MPI_Cart_rank(grid, coord, &neighbours[LR]);
coord[1]-=2;
// determine Down-Left neighbour
MPI_Cart_rank(grid, coord, &neighbours[LL]);
return;
}