This article collects typical usage examples of the C++ function MPI_Cart_shift. If you have been wondering what MPI_Cart_shift does, how it is called, and how it is used in real code, the hand-picked examples below should help.
The following 15 code examples of MPI_Cart_shift are shown, ordered by popularity.
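Before the collected excerpts, here is a minimal, self-contained sketch of the typical call pattern (our own illustration, not taken from any of the projects below): build a Cartesian communicator with MPI_Cart_create, then ask MPI_Cart_shift for the neighbouring ranks along each dimension.
#include <mpi.h>
#include <stdio.h>
int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int nprocs, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    int dims[2] = {0, 0};    // let MPI_Dims_create choose the factorization
    int periods[2] = {1, 1}; // periodic in both dimensions
    MPI_Dims_create(nprocs, 2, dims);
    MPI_Comm cart;
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart);
    MPI_Comm_rank(cart, &rank);
    int left, right, down, up;
    // With displacement +1 the "source" argument receives the rank at
    // coordinate-1 and the "dest" argument the rank at coordinate+1;
    // in non-periodic dimensions missing neighbours are MPI_PROC_NULL.
    MPI_Cart_shift(cart, 0, 1, &left, &right);
    MPI_Cart_shift(cart, 1, 1, &down, &up);
    printf("rank %d: left=%d right=%d down=%d up=%d\n", rank, left, right, down, up);
    MPI_Comm_free(&cart);
    MPI_Finalize();
    return 0;
}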
Example 1: ParallelCalc
void ParallelCalc()
{
double max, allmax;
while (true)
{
max = 0;
for (int i = 1; i < K + 1; i++)
for (int j = 1; j < N + 1; j++)
{
double u0 = u1[i * (N + 2) + j];
u1[i * (N + 2) + j] = 0.25 * (u1[(i - 1) * (N + 2) + j]
+ u1[(i + 1) * (N + 2) + j] + u1[i * (N + 2) + j - 1]
+ u1[i * (N + 2) + j + 1] - h * h * f1[(i - 1) * N + j - 1]);
double d = fabs(u1[i * (N + 2) + j] - u0);
if (d > max)
max = d;
}
MPI_Allreduce(&max, &allmax, 1, MPI_DOUBLE, MPI_MAX,
MPI_COMM_WORLD);
if (allmax <= eps)
break;
int RSrc, RDest;
MPI_Status s;
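/* Exchange ghost rows with the neighbouring bands of the 1-D decomposition:
   send the last interior row to the next band and receive the top ghost row
   from the previous one, then send the first interior row to the previous band
   and receive the bottom ghost row from the next one. Ranks at the ends of a
   non-periodic band communicator get MPI_PROC_NULL, so those transfers are no-ops. */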
MPI_Cart_shift(BAND_COMM, 0, 1, &RSrc, &RDest);
MPI_Sendrecv(&u1[K * (N + 2)], N + 2, MPI_DOUBLE, RDest, 0, u1, N + 2, MPI_DOUBLE, RSrc, 0, BAND_COMM, &s);
MPI_Cart_shift(BAND_COMM, 0, -1, &RSrc, &RDest);
MPI_Sendrecv(&u1[N + 2], N + 2, MPI_DOUBLE, RDest, 0, &u1[(K + 1) * (N + 2)], N + 2, MPI_DOUBLE, RSrc, 0, BAND_COMM,&s);
}
}
Example 2: init_mpi
static void init_mpi(void)
{
MPI_Comm_size(MPI_COMM_WORLD, &nproc); //get the number of processes
int dim = 2; //number of dimensions
int procs[2] = {0,0}; //[0]: number of subdivisions in x, [1]: number of subdivisions in y
int period[2] = {0,0};//boundary conditions, 0 = fixed (non-periodic) boundary
MPI_Comm grid_comm;
int reorder = 1; //re-distribute rank flag
MPI_Dims_create(nproc, dim, procs); //automatically compute how many pieces each direction is split into
MPI_Cart_create(MPI_COMM_WORLD, 2, procs, period, reorder, &grid_comm); //partition the domain automatically => procs and grid_comm are modified
MPI_Cart_shift(grid_comm, 0, 1, &ltRank, &rtRank);
MPI_Cart_shift(grid_comm, 1, 1, &bmRank, &tpRank);
//find this process's position in the process grid (which row and column it sits in)
int coordinates[2];
MPI_Comm_rank(grid_comm, &rank);
MPI_Cart_coords(grid_comm, rank, 2, coordinates);
SUB_N_X = N_PX / procs[0];
SUB_N_Y = N_PY / procs[1];
SUB_N_PX = SUB_N_X + 2; //two larger to hold the overlap (ghost cells that receive the neighbour's values)
SUB_N_PY = SUB_N_Y + 2;
SUB_N_CELL = SUB_N_PX*SUB_N_PY;
offsetX = coordinates[0] * SUB_N_X; //an offset in cells, not a rank index, so it advances in steps of SUB_N_X
offsetY = coordinates[1] * SUB_N_Y;
/* This picks one element every SUB_N_PY entries (a gap of SUB_N_PY-1 between consecutive elements), repeated SUB_N_X times */
MPI_Type_vector(SUB_N_X, 1, SUB_N_PY, MPI_C_DOUBLE_COMPLEX, &X_DIRECTION_DOUBLE_COMPLEX);
MPI_Type_commit(&X_DIRECTION_DOUBLE_COMPLEX);
}
Example 3: topo_cartesian
void topo_cartesian(int rank, int *dims, int *coords, int *neigh)
{
int periods[3];
MPI_Comm commcart;
periods[0]=1;
periods[1]=1;
periods[2]=1;
// creation of cartesian communicator
MPI_Cart_create(MPI_COMM_WORLD,3,dims,periods,0,&commcart);
// getting the cartesian position
MPI_Cart_coords(commcart,rank,3,coords);
// getting the neighbors
MPI_Cart_shift(commcart,0,1,neigh+0,neigh+1); //X
MPI_Cart_shift(commcart,1,1,neigh+2,neigh+3); //Y
MPI_Cart_shift(commcart,2,1,neigh+4,neigh+5); //Z
printf(" proc #%d has coordinates %d %d %d and neighbors Xm=%d Xp=%d Ym=%d Yp=%d Zm=%d Zp=%d \n",rank,coords[0],coords[1],coords[2],neigh[0],neigh[1],neigh[2],neigh[3],neigh[4],neigh[5]);
// printf(" dims = %d %d %d\n",dims[0],dims[1],dims[2]);
}
Example 4: neighbour_table
/* Function that creates a list of the neighbours of each process */
inline void neighbour_table(int* neighbours, MPI_Comm grid, int proc_rank){
int move, id;
int coord[2];
id = 1;
// shift by one process at a time ==> move = 1
move = 1;
// get Left and Right
MPI_Cart_shift(grid, id, move, &neighbours[L], &neighbours[R]);
id = 0;
// get Up and Down
MPI_Cart_shift(grid, id, move, &neighbours[U], &neighbours[D]);
// get current proc coordinates
MPI_Cart_coords(grid, proc_rank, 2, coord);
coord[0]--;
coord[1]--;
// determine Up-Left neighbour
MPI_Cart_rank(grid, coord, &neighbours[UL]);
coord[1]+=2;
// determine Up-Right neighbour
MPI_Cart_rank(grid, coord, &neighbours[UR]);
coord[0]+=2;
// determine Down-Right neighbour
MPI_Cart_rank(grid, coord, &neighbours[LR]);
coord[1]-=2;
// determine Down-Left neighbour
MPI_Cart_rank(grid, coord, &neighbours[LL]);
return;
}
Example 5: initial_send
void initial_send(MPI_Comm &Comm_Cart,int rank, float **A_tmp, float **A,
float **B_tmp,float **B, MPI_Status &status, int size, int n){
int mycoords[2] ={0,0};
MPI_Cart_coords(Comm_Cart,rank,2,mycoords);
int a_left_displ, a_right_displ, b_top_displ, b_bot_displ;
a_left_displ=a_right_displ=b_top_displ=b_bot_displ=rank;
// Initial alignment: A(i,j) is shifted i steps to the left (along j), B(i,j) j steps to the top (along i)
MPI_Cart_shift(Comm_Cart, 0, mycoords[1], &b_top_displ, &b_bot_displ);
MPI_Cart_shift(Comm_Cart, 1, mycoords[0], &a_left_displ, &a_right_displ);
float *sendptrA, *recvptrA,*sendptrB, *recvptrB;
sendptrA = &(A[0][0]);
recvptrA = &(A_tmp[0][0]);
sendptrB = &(B[0][0]);
recvptrB = &(B_tmp[0][0]);
// Sends initial values of A to the left
MPI_Sendrecv(sendptrA,n*n, MPI_FLOAT, a_left_displ, lr_tag,
recvptrA,n*n, MPI_FLOAT, a_right_displ, lr_tag,
Comm_Cart, &status);
// Sends initial values of B to the top
MPI_Sendrecv(sendptrB,n*n, MPI_FLOAT, b_top_displ, bt_tag,
recvptrB,n*n, MPI_FLOAT, b_bot_displ, bt_tag,
Comm_Cart, &status);
}
Example 6: boundary
void boundary(GRID_INFO_TYPE* grid, int spin[][LENGTH], int nbr1[] , int nbr2[])
{
int i,*U2,*D2,*R2,*L2,m,n,tag=50;
U2=(int *)malloc(LENGTH*sizeof(int));
D2=(int *)malloc(LENGTH*sizeof(int));
R2=(int *)malloc(LENGTH*sizeof(int));
L2=(int *)malloc(LENGTH*sizeof(int));
/* put the spins at the left, right, up and down boundaries into separate arrays */
for (i = 1 ; i < LENGTH-1 ; i++)
{ U2[i]=spin[i][1];
D2[i]=spin[i][LENGTH-2];
L2[i]=spin[1][i];
R2[i]=spin[LENGTH-2][i];
}
/*
send the boundary arrays to the appropriate neighbour process.
Remember: dimension 0 is up/down and dimension 1 is left/right.
*/
MPI_Cart_shift(grid->comm,0,-1,&m,&n); /* find the neighbor process down the current one */
MPI_Send(U2,LENGTH, MPI_INT,n,tag,MPI_COMM_WORLD); /* send U2 buffer with dimension LENGTH of type MPI_INT to process "n" with "tag" */
MPI_Cart_shift(grid->comm,0,1,&m,&n);
MPI_Send(D2,LENGTH, MPI_INT,n,tag,MPI_COMM_WORLD);
MPI_Cart_shift(grid->comm,1,1,&m,&n);
MPI_Send(R2,LENGTH, MPI_INT,n,tag,MPI_COMM_WORLD);
MPI_Cart_shift(grid->comm,1,-1,&m,&n);
MPI_Send(L2,LENGTH, MPI_INT,n,tag,MPI_COMM_WORLD);
free(U2);
free(D2);
free(L2);
free(R2);
}
Example 7: calc_node_neighbors
void calc_node_neighbors(int node)
{
int dir;
map_node_array(node,node_pos);
for(dir=0;dir<3;dir++) {
int buf;
MPI_Cart_shift(comm_cart, dir, -1, &buf, node_neighbors + 2*dir);
MPI_Cart_shift(comm_cart, dir, 1, &buf, node_neighbors + 2*dir + 1);
/* left boundary ? */
if (node_pos[dir] == 0) {
boundary[2*dir] = 1;
}
else {
boundary[2*dir] = 0;
}
/* right boundary ? */
if (node_pos[dir] == node_grid[dir]-1) {
boundary[2*dir+1] = -1;
}
else {
boundary[2*dir+1] = 0;
}
}
GRID_TRACE(printf("%d: node_grid %d %d %d, pos %d %d %d, node_neighbors ", this_node, node_grid[0], node_grid[1], node_grid[2], node_pos[0], node_pos[1], node_pos[2]));
}
Example 8: collectAfterPre
void collectAfterPre(Vector u, const Vector v)
{
int source, dest;
if (u->comm_rank == 0) {
int len=u->len-1;
dcopy(&len, v->data, &v->stride, u->data+1, &u->stride);
} else if (u->comm_rank == u->comm_size-1) {
int len=v->len-1;
dcopy(&len, v->data+1, &v->stride, u->data+1, &u->stride);
} else
copyVector(u, v);
// west
double recv;
MPI_Cart_shift(*u->comm, 0, -1, &source, &dest);
MPI_Sendrecv(v->data, 1, MPI_DOUBLE, dest, 0,
u->data, 1, MPI_DOUBLE, source, 0, *u->comm, MPI_STATUS_IGNORE);
if (source > -1)
u->data[u->len-2] += u->data[0];
// east
MPI_Cart_shift(*u->comm, 0, 1, &source, &dest);
MPI_Sendrecv(v->data+v->len-1, 1, MPI_DOUBLE, dest, 1,
u->data, 1, MPI_DOUBLE, source, 1, *u->comm, MPI_STATUS_IGNORE);
if (source > -1)
u->data[1] += u->data[0];
u->data[0] = u->data[u->len-1] = 0.0;
}
Example 9: main
int main(int argc, char *argv[]) {
int SIZE = atoi(argv[1]);
MPI_Status status;
MPI_Comm comm_3d;
int rank, p;
int dims[3], periods[3], coords[3];
dims[0] = dims[1] = dims[2] = periods[0] = periods[1] = periods[2] = 0;
int i;
int *b;
int *my_b = (int *) malloc(SIZE * sizeof(int));
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
// Start the vector that is broadcast.
if (rank == 0){
b = (int *) malloc(SIZE * sizeof(int));
for (i = 0; i < SIZE; i++) my_b[i] = b[i] = i;
}
// Create the mesh.
MPI_Dims_create(p, 3, dims);
MPI_Cart_create(MPI_COMM_WORLD, 3, dims, periods, 0, &comm_3d);
// Load the coordinates.
MPI_Cart_coords(comm_3d, rank, 3, coords);
// The first column will start the broadcast along the rows.
double start = MPI_Wtime();
int dim_0_succ, dim_0_pred, dim_1_succ, dim_1_pred, dim_2_succ, dim_2_pred;
dim_0_succ = dim_0_pred = dim_1_succ = dim_1_pred = dim_2_succ = dim_2_pred = 0;
MPI_Cart_shift(comm_3d, 0, 1, &dim_0_pred, &dim_0_succ);
MPI_Cart_shift(comm_3d, 1, 1, &dim_1_pred, &dim_1_succ);
MPI_Cart_shift(comm_3d, 2, 1, &dim_2_pred, &dim_2_succ);
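/* One-to-all broadcast over the 3-D mesh, one dimension at a time:
   the origin (0,0,0) injects the vector along x, y and z; ranks on the
   x-axis relay along x and fan out along y and z; ranks in the z=0 plane
   relay along y and fan out along z; all remaining ranks relay along z. */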
if (coords[0] == 0 && coords[1] == 0 && coords[2] == 0) {
MPI_Send(b, SIZE, MPI_INT, dim_0_succ, 0, MPI_COMM_WORLD);
MPI_Send(b, SIZE, MPI_INT, dim_1_succ, 0, MPI_COMM_WORLD);
MPI_Send(b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
} else if (coords[1] == 0 && coords[2] == 0){
MPI_Recv(my_b, SIZE, MPI_INT, dim_0_pred, 0, MPI_COMM_WORLD, &status);
MPI_Send(my_b, SIZE, MPI_INT, dim_0_succ, 0, MPI_COMM_WORLD);
MPI_Send(my_b, SIZE, MPI_INT, dim_1_succ, 0, MPI_COMM_WORLD);
MPI_Send(my_b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
} else if (coords[2] == 0){
MPI_Recv(my_b, SIZE, MPI_INT, dim_1_pred, 0, MPI_COMM_WORLD, &status);
MPI_Send(my_b, SIZE, MPI_INT, dim_1_succ, 0, MPI_COMM_WORLD);
MPI_Send(my_b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
} else {
MPI_Recv(my_b, SIZE, MPI_INT, dim_2_pred, 0, MPI_COMM_WORLD, &status);
MPI_Send(my_b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
}
double end = MPI_Wtime();
if (rank == 0) printf("%d %f\n", SIZE, end - start);
MPI_Finalize();
}
Example 10: MatrixMatrixMultiply
void MatrixMatrixMultiply(double ***a, double ***b, double ***c, int mra, int
mca, int mrb, int mcb, int *ra, int *ca, int *rb, int *cb, MPI_Comm
comm)
{
/*from the teaching book */
int i, j;
int num_procs, dims[2], periods[2];
int myrank, my2drank, mycoords[2];
int uprank, downrank, leftrank, rightrank, coords[2];
int shiftsource, shiftdest;
MPI_Status status;
MPI_Comm comm_2d;
MPI_Comm_size(comm, &num_procs);
MPI_Comm_rank(comm, &myrank);
dims[0] = dims[1] = 0;
MPI_Dims_create(num_procs, 2, dims);
periods[0]= periods[1] = 1;
MPI_Cart_create(comm, 2, dims, periods, 1, &comm_2d);
MPI_Comm_rank(comm_2d, &my2drank);
MPI_Cart_coords(comm_2d, my2drank, 2, mycoords);
MPI_Cart_shift(comm_2d, 1, -1, &rightrank, &leftrank);
MPI_Cart_shift(comm_2d, 0, -1, &downrank, &uprank);
int ia = my2drank;
int ib = my2drank;
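/* Initial alignment for Cannon's algorithm: block row i of A is shifted i
   positions to the left and block column j of B is shifted j positions up,
   so that each process starts with a pair of blocks it can multiply. */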
MPI_Cart_shift(comm_2d, 1, -mycoords[0], &shiftsource, &shiftdest);
MPI_Sendrecv_replace((*a)[0], mra*mca, MPI_DOUBLE, shiftdest, 1,
shiftsource, 1, comm_2d, &status);
MPI_Sendrecv_replace(&ia, 1, MPI_INT, shiftdest, 1, shiftsource, 1,
comm_2d, &status);
MPI_Cart_shift(comm_2d, 0, -mycoords[1], &shiftsource, &shiftdest);
MPI_Sendrecv_replace((*b)[0], mrb*mcb, MPI_DOUBLE, shiftdest, 1,
shiftsource, 1, comm_2d, &status);
MPI_Sendrecv_replace(&ib, 1, MPI_INT, shiftdest, 1, shiftsource, 1,
comm_2d, &status);
for (i=0; i<dims[0]; i++){
MatrixMultiply(ra[ia], ca[ia], rb[ib], cb[ib], *a, *b, c); /* c=c + a*b */
MPI_Sendrecv_replace((*a)[0], mra*mca, MPI_DOUBLE, leftrank, 1,
rightrank, 1, comm_2d, &status);
MPI_Sendrecv_replace((*b)[0], mrb*mcb, MPI_DOUBLE, uprank, 1, downrank,
1, comm_2d, &status);
MPI_Sendrecv_replace(&ia, 1, MPI_INT, leftrank, 1, rightrank, 1,
comm_2d, &status);
MPI_Sendrecv_replace(&ib, 1, MPI_INT, uprank, 1, downrank, 1,
comm_2d, &status);
}
MPI_Comm_free(&comm_2d);
}
Example 11: main
int main (int argc, char** argv)
{
int num_tasks;
char hostname[80];
int dims[DIM];
dims[0] = DIM_0;
dims[1] = DIM_1;
dims[2] = DIM_2;
int periods[DIM] = {false, false, false};
int reorder = true;
int my_rank;
int coords[DIM];
MPI_Comm cartcomm, y_comm;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &num_tasks);
if (num_tasks != SIZE) {
if (my_rank == 0) {
printf("We need %d proccesses, %d given. Exiting.\n", SIZE, num_tasks);
}
MPI_Finalize();
return 0;
}
gethostname(hostname, 79);
MPI_Cart_create(MPI_COMM_WORLD, DIM, dims, periods, reorder, &cartcomm);
MPI_Cart_coords(cartcomm, my_rank, DIM, coords);
printf("%-15.12s: MPI_COMM_WORLD rank %2d: (%d, %d, %d)\n", hostname, my_rank, coords[0], coords[1], coords[2]);
//neighbors
int src, dest;
for (int i = 0; i < 3; i++) {
MPI_Cart_shift(cartcomm, i, +1, &src, &dest);
printf("i am %d and my right neighbor in dim %d is %d\n", my_rank, i, dest);
MPI_Cart_shift(cartcomm, i, -1, &src, &dest);
printf("i am %d and my left neighbor in dim %d is %d\n", my_rank, i, dest);
}
int keep_dims[DIM] = {0, 1, 0}; /* keep only the y dimension */
MPI_Cart_sub(cartcomm, keep_dims, &y_comm);
printf("%d: my y rank is %d\n", my_rank, coords[1]);
MPI_Finalize();
return 0;
}
Example 12: send_to
/*
* int direction (0 horizontal, 1 vertical)
* int distance
*/
void send_to(MPI_Comm *comm, int direction, float *A, int size, int row, int column, int n) {
int prev_rank, next_rank;
int distance = 1;
MPI_Cart_shift(*comm, direction, distance, &prev_rank, &next_rank);
while(next_rank != MPI_PROC_NULL) {
MPI_Send(A + row * n + column, size, MPI_FLOAT, next_rank, 0, *comm);
MPI_Cart_shift(*comm, direction, ++distance, &prev_rank, &next_rank);
}
}
Example 13: main
int main(int argc, char *argv[])
{
int *matrix_a;
int *matrix_b;
int *matrix_c;
const char *matrix_a_filename = argv[1];
const char *matrix_b_filename = argv[2];
const char *matrix_c_filename = argv[3];
MPI_Comm matrix_comm;
MPI_Init(&argc, &argv);
create_matrix_comm(MPI_COMM_WORLD, &matrix_comm);
MPI_Comm_size(matrix_comm, &size);
MPI_Comm_rank(matrix_comm, &rank);
compute_matrixes_variables(matrix_a_filename, matrix_comm);
alloc_submatrix_buffer(&matrix_a);
alloc_submatrix_buffer(&matrix_b);
alloc_submatrix_buffer(&matrix_c);
distribute_matrix(matrix_a_filename, matrix_a, matrix_comm);
distribute_matrix(matrix_b_filename, matrix_b, matrix_comm);
/* The actual cannon algorithms */
int row_source, row_dst;
int col_source, col_dst;
MPI_Cart_shift(matrix_comm, 0, -1, &row_source, &row_dst);
MPI_Cart_shift(matrix_comm, 1, -1, &col_source, &col_dst);
int i;
for (i = 0; i < pp_dims; i++) {
compute_matrix_mul(matrix_a, matrix_b, matrix_c, N);
MPI_Sendrecv_replace(matrix_a, sub_n * sub_n, MPI_INT,
row_source, 0, row_dst, MPI_ANY_TAG,
matrix_comm, MPI_STATUS_IGNORE);
MPI_Sendrecv_replace(matrix_b, sub_n * sub_n, MPI_INT,
col_source, 0, col_dst, MPI_ANY_TAG,
matrix_comm, MPI_STATUS_IGNORE);
}
write_result(matrix_c_filename, matrix_c, matrix_comm);
free(matrix_a);
free(matrix_b);
free(matrix_c);
MPI_Comm_free(&matrix_comm);
MPI_Finalize();
return 0;
}
Example 14: heatMPISetup
/******************************************************
* Function to setup MPI data.
*
* (1) Initializes MPI
* (2) Creates a cartesian communicator for border exchange
* (3) Distributes the overall grid to the processes
* (4) Sets up helpful data-type and MPI buffer
*
******************************************************/
void heatMPISetup (int* pargc, char*** pargv, heatGrid *grid, dataMPI* configMPI)
{
int size,
dims[2] = {0,0},
periods[2] = {1,1},
coords[2];
int buf_size;
char *buf;
/* ==== (1) ==== */
/* Base init*/
MPI_Init (pargc, pargv);
MPI_Comm_rank (MPI_COMM_WORLD, &configMPI->rank);
MPI_Comm_size (MPI_COMM_WORLD, &size);
/* ==== (2) ==== */
/* Create cartesian communicator*/
MPI_Dims_create (size, 2, dims);
MPI_Cart_create (MPI_COMM_WORLD, 2, dims, periods, 0, &configMPI->cart);
/* Store neighbors in the grid */
MPI_Cart_shift (configMPI->cart, 0, 1, &configMPI->left, &configMPI->right);
MPI_Cart_shift (configMPI->cart, 1, 1, &configMPI->up, &configMPI->down);
/* ==== (3) ==== */
/* Distribute overall grid to processes */
MPI_Cart_coords (configMPI->cart, configMPI->rank, 2, coords); /*My coordinate*/
configMPI->start_x = 1 + (grid->xsize/dims[0])*coords[0];
if (coords[0]+1 != dims[0])
/* coords 0 to N-1 get an equal distribution*/
configMPI->num_cells_x = grid->xsize / (dims[0]);
else
/* last coord gets the rest */
configMPI->num_cells_x = grid->xsize - configMPI->start_x + 1;
configMPI->start_y = 1 + (grid->ysize/dims[1])*coords[1];
if (coords[1]+1 != dims[1])
/* coords 0 to N-1 get an equal distribution*/
configMPI->num_cells_y = grid->ysize / (dims[1]);
else
/* last coord gets the rest */
configMPI->num_cells_y = grid->ysize - configMPI->start_y + 1;
/* ==== (4) ==== */
/* Create datatype to communicate one column */
MPI_Type_vector (
configMPI->num_cells_y, /* #blocks */
1, /* #elements per block */
grid->xsize+2, /* #stride */
MPI_DOUBLE, /* old type */
&configMPI->columntype /* new type */ );
MPI_Type_commit (&configMPI->columntype);
}
Example 15: main_replaced
int main_replaced(int argc, char** argv){
//Initialization
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
//Reading image
if(rank == 0){
image = read_bmp("Lenna_blur.bmp");
}
//Creating cartesian communicator
MPI_Dims_create(size, 2, dims);
MPI_Cart_create( MPI_COMM_WORLD, 2, dims, periods, 0, &cart_comm );
MPI_Cart_coords( cart_comm, rank, 2, coords );
MPI_Cart_shift( cart_comm, 0, 1, &north, &south );
MPI_Cart_shift( cart_comm, 1, 1, &west, &east );
local_image_size[0] = image_size[0]/dims[0];
local_image_size[1] = image_size[1]/dims[1];
//Allocating buffers
int lsize = local_image_size[0]*local_image_size[1];
int lsize_border = (local_image_size[0] + 2*BORDER)*(local_image_size[1] + 2*BORDER);
local_image_orig = (unsigned char*)malloc(sizeof(unsigned char)*lsize);
local_image[0] = (unsigned char*)calloc(lsize_border, sizeof(unsigned char));
local_image[1] = (unsigned char*)calloc(lsize_border, sizeof(unsigned char));
create_types();
distribute_image();
initialilze_guess();
//Main loop
for(int i = 0; i < ITERATIONS; i++){
exchange_borders(i);
perform_convolution(i);
}
gather_image();
MPI_Finalize();
//Write image
if(rank==0){
write_bmp(image, image_size[0], image_size[1]);
}
exit(0);
}