This article collects typical usage examples of the MPI_Get function in C++. If you are unsure what exactly MPI_Get does, how to call it, or what real-world usage looks like, the curated code examples below should help.
The following presents 15 code examples of MPI_Get, sorted by popularity by default. Upvoting the examples you find useful helps surface better C++ code examples.
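Before the examples, a minimal, self-contained sketch of the canonical MPI_Get pattern may help orient readers. This sketch was written for this overview and is not one of the collected examples: create a window over local memory, open an access epoch with MPI_Win_fence, issue the get, and close the epoch so the data is guaranteed to have arrived.
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, value, remote = -1;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    value = rank * 100;                          /* data this rank exposes */

    MPI_Win_create(&value, sizeof(int), sizeof(int),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_fence(0, win);                       /* open the access epoch  */
    if (rank != 0)
        /* origin buf/count/type, target rank, target disp/count/type, window */
        MPI_Get(&remote, 1, MPI_INT, 0, 0, 1, MPI_INT, win);
    MPI_Win_fence(0, win);                       /* Get is complete here   */

    if (rank != 0)
        printf("rank %d read %d from rank 0\n", rank, remote);

    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}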
Example 1: main
int main(int argc, char *argv[])
{
int rank, nproc;
int errors = 0, all_errors = 0;
int buf, my_buf;
MPI_Win win;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nproc);
MPI_Win_create(&buf, sizeof(int), sizeof(int),
MPI_INFO_NULL, MPI_COMM_WORLD, &win);
MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);
MPI_Win_fence(0, win);
MPI_Win_lock(MPI_LOCK_SHARED, 0, MPI_MODE_NOCHECK, win);
MPI_Get(&my_buf, 1, MPI_INT, 0, 0, 1, MPI_INT, win);
MPI_Win_unlock(0, win);
/* This should fail because the window is no longer in a fence epoch */
CHECK_ERR(MPI_Get(&my_buf, 1, MPI_INT, 0, 0, 1, MPI_INT, win));
MPI_Win_fence(0, win);
MPI_Win_free(&win);
MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0 && all_errors == 0) printf(" No Errors\n");
MPI_Finalize();
return 0;
}
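The test above relies on a CHECK_ERR helper that is not shown. A plausible stand-in, consistent with the comment that the call "should fail" (this is an assumption, not necessarily the test suite's actual macro), is:
/* Assumed helper: the guarded call is expected to return an error, so an
 * unexpected MPI_SUCCESS is counted as a test failure. */
#define CHECK_ERR(stmt)                  \
    do {                                 \
        int check_err_ = (stmt);         \
        if (check_err_ == MPI_SUCCESS)   \
            errors++;                    \
    } while (0)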
Example 2: MPI_Comm_rank
void MPIMutex::lock(int proc) {
int rank, nproc, already_locked;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &nproc);
//std::cout << "trying to get lock" << std::endl;
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, proc, 0, win);
byte *buff = (byte*)malloc(sizeof(byte)*nproc);
buff[rank] = 1;
MPI_Put(&(buff[rank]), 1, MPI_BYTE, proc, rank, 1, MPI_BYTE, win);
/* Get data to the left of rank */
if (rank > 0) {
MPI_Get(buff, rank, MPI_BYTE, proc, 0, rank, MPI_BYTE, win);
}
/* Get data to the right of rank */
if (rank < nproc - 1) {
MPI_Get(&(buff[rank+1]), nproc-1-rank, MPI_BYTE, proc, rank+1, nproc-1-rank,
MPI_BYTE, win);
}
MPI_Win_unlock(proc, win);
/* check if anyone has the lock*/
for (int i = already_locked = 0; i < nproc; i++)
if (buff[i] && i != rank)
already_locked = 1;
/* Wait for notification */
if (already_locked) {
MPI_Status status;
//std::cout << "waiting for notification [proc = "<<proc<<"]" << std::endl;
MPI_Recv(NULL, 0, MPI_BYTE, MPI_ANY_SOURCE, MPI_MUTEX_TAG+id, comm, &status);
}
//std::cout << "lock acquired [proc = "<<proc<<"]" << std::endl;
free(buff);
};
Example 3: do_test
int do_test(int origin_count, MPI_Datatype origin_type, int result_count,
MPI_Datatype result_type, int target_count, MPI_Datatype target_type)
{
int errs = 0, ret, origin_type_size, result_type_size;
ret = MPI_Put(origin_buf, origin_count, origin_type, 1, 0, target_count, target_type, win);
if (ret)
errs++;
ret = MPI_Get(origin_buf, origin_count, origin_type, 1, 0, target_count, target_type, win);
if (ret)
errs++;
ret = MPI_Accumulate(origin_buf, origin_count, origin_type, 1, 0, target_count,
target_type, MPI_SUM, win);
if (ret)
errs++;
ret = MPI_Get_accumulate(origin_buf, origin_count, origin_type, result_buf, result_count,
result_type, 1, 0, target_count, target_type, MPI_SUM, win);
if (ret)
errs++;
MPI_Type_size(origin_type, &origin_type_size);
MPI_Type_size(result_type, &result_type_size);
if (origin_count == 0 || origin_type_size == 0) {
ret = MPI_Put(NULL, origin_count, origin_type, 1, 0, target_count, target_type, win);
if (ret)
errs++;
ret = MPI_Get(NULL, origin_count, origin_type, 1, 0, target_count, target_type, win);
if (ret)
errs++;
ret = MPI_Accumulate(NULL, origin_count, origin_type, 1, 0, target_count, target_type,
MPI_SUM, win);
if (ret)
errs++;
ret = MPI_Get_accumulate(NULL, origin_count, origin_type, result_buf, result_count,
result_type, 1, 0, target_count, target_type, MPI_SUM, win);
if (ret)
errs++;
if (result_count == 0 || result_type_size == 0) {
ret = MPI_Get_accumulate(NULL, origin_count, origin_type, NULL, result_count,
result_type, 1, 0, target_count, target_type, MPI_SUM, win);
if (ret)
errs++;
}
}
return errs;
}
Example 4: MPIX_Mutex_lock
/** Lock a mutex.
*
* @param[in] hdl Mutex group that the mutex belongs to
* @param[in] mutex Desired mutex number [0..count-1]
* @param[in] proc Rank of process where the mutex lives
* @return MPI status
*/
int MPIX_Mutex_lock(MPIX_Mutex hdl, int mutex, int proc)
{
int rank, nproc, already_locked, i;
uint8_t *buf;
assert(mutex >= 0 && mutex < hdl->max_count);
MPI_Comm_rank(hdl->comm, &rank);
MPI_Comm_size(hdl->comm, &nproc);
assert(proc >= 0 && proc < nproc);
buf = malloc(nproc * sizeof(uint8_t));
assert(buf != NULL);
buf[rank] = 1;
/* Get all data from the lock_buf, except the byte belonging to
* me. Set the byte belonging to me to 1. */
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, proc, 0, hdl->windows[mutex]);
MPI_Put(&buf[rank], 1, MPI_BYTE, proc, rank, 1, MPI_BYTE, hdl->windows[mutex]);
/* Get data to the left of rank */
if (rank > 0) {
MPI_Get(buf, rank, MPI_BYTE, proc, 0, rank, MPI_BYTE, hdl->windows[mutex]);
}
/* Get data to the right of rank */
if (rank < nproc - 1) {
MPI_Get(&buf[rank + 1], nproc - 1 - rank, MPI_BYTE, proc, rank + 1, nproc - 1 - rank,
MPI_BYTE, hdl->windows[mutex]);
}
MPI_Win_unlock(proc, hdl->windows[mutex]);
assert(buf[rank] == 1);
for (i = already_locked = 0; i < nproc; i++)
if (buf[i] && i != rank)
already_locked = 1;
/* Wait for notification */
if (already_locked) {
MPI_Status status;
debug_print("waiting for notification [proc = %d, mutex = %d]\n", proc, mutex);
MPI_Recv(NULL, 0, MPI_BYTE, MPI_ANY_SOURCE, MPIX_MUTEX_TAG + mutex, hdl->comm, &status);
}
debug_print("lock acquired [proc = %d, mutex = %d]\n", proc, mutex);
free(buf);
return MPI_SUCCESS;
}
Example 5: MPIX_Mutex_unlock
/** Unlock a mutex.
*
* @param[in] hdl Mutex group that the mutex belongs to.
* @param[in] mutex Desired mutex number [0..count-1]
* @param[in] proc Rank of process where the mutex lives
* @return MPI status
*/
int MPIX_Mutex_unlock(MPIX_Mutex hdl, int mutex, int proc)
{
int rank, nproc, i;
uint8_t *buf;
assert(mutex >= 0 && mutex < hdl->max_count);
MPI_Comm_rank(hdl->comm, &rank);
MPI_Comm_size(hdl->comm, &nproc);
assert(proc >= 0 && proc < nproc);
buf = malloc(nproc * sizeof(uint8_t));
assert(buf != NULL);
buf[rank] = 0;
/* Get all data from the lock_buf, except the byte belonging to
* me. Set the byte belonging to me to 0. */
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, proc, 0, hdl->windows[mutex]);
MPI_Put(&buf[rank], 1, MPI_BYTE, proc, rank, 1, MPI_BYTE, hdl->windows[mutex]);
/* Get data to the left of rank */
if (rank > 0) {
MPI_Get(buf, rank, MPI_BYTE, proc, 0, rank, MPI_BYTE, hdl->windows[mutex]);
}
/* Get data to the right of rank */
if (rank < nproc - 1) {
MPI_Get(&buf[rank + 1], nproc - 1 - rank, MPI_BYTE, proc, rank + 1, nproc - 1 - rank,
MPI_BYTE, hdl->windows[mutex]);
}
MPI_Win_unlock(proc, hdl->windows[mutex]);
assert(buf[rank] == 0);
/* Notify the next waiting process, starting to my right for fairness */
for (i = 1; i < nproc; i++) {
int p = (rank + i) % nproc;
if (buf[p] == 1) {
debug_print("notifying %d [proc = %d, mutex = %d]\n", p, proc, mutex);
MPI_Send(NULL, 0, MPI_BYTE, p, MPIX_MUTEX_TAG + mutex, hdl->comm);
break;
}
}
debug_print("lock released [proc = %d, mutex = %d]\n", proc, mutex);
free(buf);
return MPI_SUCCESS;
}
Example 6: ARMCIX_Lock_hdl
/** Lock a mutex.
*
* @param[in] hdl Mutex group that the mutex belongs to.
* @param[in] mutex Desired mutex number [0..count-1]
* @param[in] world_proc Absolute ID of process where the mutex lives
*/
void ARMCIX_Lock_hdl(armcix_mutex_hdl_t hdl, int mutex, int world_proc) {
int rank, nproc, already_locked, i, proc;
uint8_t *buf;
ARMCII_Assert(mutex >= 0 && mutex < hdl->max_count);
MPI_Comm_rank(hdl->grp.comm, &rank);
MPI_Comm_size(hdl->grp.comm, &nproc);
/* User gives us the absolute ID. Translate to the rank in the mutex's group. */
proc = ARMCII_Translate_absolute_to_group(&hdl->grp, world_proc);
ARMCII_Assert(proc >= 0);
buf = malloc(nproc*sizeof(uint8_t));
ARMCII_Assert(buf != NULL);
buf[rank] = 1;
/* Get all data from the lock_buf, except the byte belonging to
* me. Set the byte belonging to me to 1. */
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, proc, 0, hdl->windows[mutex]);
MPI_Put(&buf[rank], 1, MPI_BYTE, proc, rank, 1, MPI_BYTE, hdl->windows[mutex]);
/* Get data to the left of rank */
if (rank > 0) {
MPI_Get(buf, rank, MPI_BYTE, proc, 0, rank, MPI_BYTE, hdl->windows[mutex]);
}
/* Get data to the right of rank */
if (rank < nproc - 1) {
MPI_Get(&buf[rank+1], nproc-1-rank, MPI_BYTE, proc, rank + 1, nproc-1-rank, MPI_BYTE, hdl->windows[mutex]);
}
MPI_Win_unlock(proc, hdl->windows[mutex]);
ARMCII_Assert(buf[rank] == 1);
for (i = already_locked = 0; i < nproc; i++)
if (buf[i] && i != rank)
already_locked = 1;
/* Wait for notification */
if (already_locked) {
MPI_Status status;
ARMCII_Dbg_print(DEBUG_CAT_MUTEX, "waiting for notification [proc = %d, mutex = %d]\n", proc, mutex);
MPI_Recv(NULL, 0, MPI_BYTE, MPI_ANY_SOURCE, ARMCI_MUTEX_TAG+mutex, hdl->grp.comm, &status);
}
ARMCII_Dbg_print(DEBUG_CAT_MUTEX, "lock acquired [proc = %d, mutex = %d]\n", proc, mutex);
free(buf);
}
Example 7: ARMCIX_Unlock_hdl
/** Unlock a mutex.
*
* @param[in] hdl Mutex group that the mutex belongs to.
* @param[in] mutex Desired mutex number [0..count-1]
* @param[in] world_proc Absolute ID of process where the mutex lives
*/
void ARMCIX_Unlock_hdl(armcix_mutex_hdl_t hdl, int mutex, int world_proc) {
int rank, nproc, i, proc;
uint8_t *buf;
ARMCII_Assert(mutex >= 0 && mutex < hdl->max_count);
MPI_Comm_rank(hdl->grp.comm, &rank);
MPI_Comm_size(hdl->grp.comm, &nproc);
proc = ARMCII_Translate_absolute_to_group(&hdl->grp, world_proc);
ARMCII_Assert(proc >= 0);
buf = malloc(nproc*sizeof(uint8_t));
buf[rank] = 0;
/* Get all data from the lock_buf, except the byte belonging to
* me. Set the byte belonging to me to 0. */
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, proc, 0, hdl->windows[mutex]);
MPI_Put(&buf[rank], 1, MPI_BYTE, proc, rank, 1, MPI_BYTE, hdl->windows[mutex]);
/* Get data to the left of rank */
if (rank > 0) {
MPI_Get(buf, rank, MPI_BYTE, proc, 0, rank, MPI_BYTE, hdl->windows[mutex]);
}
/* Get data to the right of rank */
if (rank < nproc - 1) {
MPI_Get(&buf[rank+1], nproc-1-rank, MPI_BYTE, proc, rank + 1, nproc-1-rank, MPI_BYTE, hdl->windows[mutex]);
}
MPI_Win_unlock(proc, hdl->windows[mutex]);
ARMCII_Assert(buf[rank] == 0);
/* Notify the next waiting process, starting to my right for fairness */
for (i = 1; i < nproc; i++) {
int p = (rank + i) % nproc;
if (buf[p] == 1) {
ARMCII_Dbg_print(DEBUG_CAT_MUTEX, "notifying %d [proc = %d, mutex = %d]\n", p, proc, mutex);
MPI_Send(NULL, 0, MPI_BYTE, p, ARMCI_MUTEX_TAG+mutex, hdl->grp.comm);
break;
}
}
ARMCII_Dbg_print(DEBUG_CAT_MUTEX, "lock released [proc = %d, mutex = %d]\n", proc, mutex);
free(buf);
}
Example 8: main
int main(int argc, char **argv){
    MPI_Init(&argc, &argv);
    int rank, nproc;
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Win win;
    MPI_Aint remote, local;
    int a = 4;                       /* value exposed by rank 0 through the dynamic window */
    MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &win);
    if (rank == 0) {
        /* Attach local memory to the dynamic window and publish its address. */
        MPI_Win_attach(win, &a, sizeof(int));
        MPI_Get_address(&a, &local);
        MPI_Send(&local, 1, MPI_AINT, 1, 1, MPI_COMM_WORLD);
    }
    else if (rank == 1) {
        int val;
        MPI_Status reqstat;
        MPI_Recv(&remote, 1, MPI_AINT, 0, 1, MPI_COMM_WORLD, &reqstat);
        /* An access epoch is required before MPI_Get can be issued. */
        MPI_Win_lock(MPI_LOCK_SHARED, 0, 0, win);
        MPI_Get(&val, 1, MPI_INT, 0, remote, 1, MPI_INT, win);
        MPI_Win_unlock(0, win);
        printf("rank 1 read %d from rank 0\n", val);
    }
    /* Make sure rank 1 has finished before rank 0 detaches its memory. */
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0)
        MPI_Win_detach(win, &a);
    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}
Example 9: main
int main(int argc, char *argv[])
{
int errs = 0, err;
int rank, size;
int *buf, bufsize;
int *result;
int *rmabuf, rsize, rcount;
MPI_Comm comm;
MPI_Win win;
MPI_Request req;
MPI_Datatype derived_dtp;
MTest_Init(&argc, &argv);
bufsize = 256 * sizeof(int);
buf = (int *) malloc(bufsize);
if (!buf) {
fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
result = (int *) malloc(bufsize);
if (!result) {
fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
rcount = 16;
rsize = rcount * sizeof(int);
rmabuf = (int *) malloc(rsize);
if (!rmabuf) {
fprintf(stderr, "Unable to allocated %d bytes\n", rsize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Type_contiguous(2, MPI_INT, &derived_dtp);
MPI_Type_commit(&derived_dtp);
/* The following loop is used to run through a series of communicators
* that are subsets of MPI_COMM_WORLD, of size 1 or greater. */
while (MTestGetIntracommGeneral(&comm, 1, 1)) {
int count = 0;
if (comm == MPI_COMM_NULL)
continue;
/* Determine the sender and receiver */
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
MPI_Win_create(buf, bufsize, 2 * sizeof(int), MPI_INFO_NULL, comm, &win);
/* To improve reporting of problems about operations, we
* change the error handler to errors return */
MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);
/** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
MPI_Win_fence(0, win);
TEST_FENCE_OP("Put", MPI_Put(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
);
TEST_FENCE_OP("Get", MPI_Get(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
);
Example 10: DO_OP_LOOP
void DO_OP_LOOP(int dst, int iter)
{
int i, x;
switch (OP_TYPE) {
case OP_ACC:
for (x = 0; x < iter; x++) {
for (i = 0; i < NOP; i++)
MPI_Accumulate(&locbuf[0], OP_SIZE, MPI_DOUBLE, dst, 0, OP_SIZE, MPI_DOUBLE,
MPI_SUM, win);
MPI_Win_flush(dst, win);
}
break;
case OP_PUT:
for (x = 0; x < iter; x++) {
for (i = 0; i < NOP; i++)
MPI_Put(&locbuf[0], OP_SIZE, MPI_DOUBLE, dst, 0, OP_SIZE, MPI_DOUBLE, win);
MPI_Win_flush(dst, win);
}
break;
case OP_GET:
for (x = 0; x < iter; x++) {
for (i = 0; i < NOP; i++)
MPI_Get(&locbuf[0], OP_SIZE, MPI_DOUBLE, dst, 0, OP_SIZE, MPI_DOUBLE, win);
MPI_Win_flush(dst, win);
}
break;
}
}
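MPI_Win_flush is only valid inside a passive-target access epoch, so DO_OP_LOOP above assumes the caller has already opened one on win. Below is a minimal, self-contained sketch of that pattern, written for this article as an illustration (the benchmark's locbuf, NOP, and OP_SIZE are replaced by local names), not taken from the benchmark itself.
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, nproc, dst, i;
    double local = 0.0, *base;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    /* Every rank exposes one double through the window. */
    MPI_Win_allocate(sizeof(double), sizeof(double), MPI_INFO_NULL,
                     MPI_COMM_WORLD, &base, &win);
    *base = (double) rank;
    MPI_Barrier(MPI_COMM_WORLD);              /* all windows are filled           */

    dst = (rank + 1) % nproc;
    MPI_Win_lock_all(0, win);                 /* open a passive-target epoch      */
    for (i = 0; i < 10; i++) {
        MPI_Get(&local, 1, MPI_DOUBLE, dst, 0, 1, MPI_DOUBLE, win);
        MPI_Win_flush(dst, win);              /* the Get is complete after this   */
    }
    MPI_Win_unlock_all(win);                  /* close the epoch                  */

    printf("rank %d repeatedly read %.1f from rank %d\n", rank, local, dst);

    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}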
Example 11: main
int main(int argc, char *argv[])
{
int n, myid, numprocs, i, ierr;
double PI25DT = 3.141592653589793238462643;
double mypi, pi, h, sum, x;
MPI_Win nwin, piwin;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
if (myid == 0) {
MPI_Win_create(&n, sizeof(int), 1, MPI_INFO_NULL,
MPI_COMM_WORLD, &nwin);
MPI_Win_create(&pi, sizeof(double), 1, MPI_INFO_NULL,
MPI_COMM_WORLD, &piwin);
}
else {
MPI_Win_create(MPI_BOTTOM, 0, 1, MPI_INFO_NULL,
MPI_COMM_WORLD, &nwin);
MPI_Win_create(MPI_BOTTOM, 0, 1, MPI_INFO_NULL,
MPI_COMM_WORLD, &piwin);
}
while (1) {
if (myid == 0) {
fprintf(stdout, "Enter the number of intervals: (0 quits) ");
fflush(stdout);
ierr=scanf("%d",&n);
pi = 0.0;
}
MPI_Win_fence(0, nwin);
if (myid != 0)
MPI_Get(&n, 1, MPI_INT, 0, 0, 1, MPI_INT, nwin);
MPI_Win_fence(0, nwin);
if (n == 0)
break;
else {
h = 1.0 / (double) n;
sum = 0.0;
for (i = myid + 1; i <= n; i += numprocs) {
x = h * ((double)i - 0.5);
sum += (4.0 / (1.0 + x*x));
}
mypi = h * sum;
MPI_Win_fence( 0, piwin);
MPI_Accumulate(&mypi, 1, MPI_DOUBLE, 0, 0, 1, MPI_DOUBLE,
MPI_SUM, piwin);
MPI_Win_fence(0, piwin);
if (myid == 0) {
fprintf(stdout, "pi is approximately %.16f, Error is %.16f\n",
pi, fabs(pi - PI25DT));
fflush(stdout);
}
}
}
MPI_Win_free(&nwin);
MPI_Win_free(&piwin);
MPI_Finalize();
return 0;
}
Example 12: main
int main(int argc, char *argv[])
{
int rank, nprocs, A[SIZE2], B[SIZE2], i;
MPI_Win win;
int errs = 0;
MTest_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
if (nprocs != 2) {
printf("Run this program with 2 processes\n");
MPI_Abort(MPI_COMM_WORLD,1);
}
if (rank == 0) {
for (i=0; i<SIZE2; i++) A[i] = B[i] = i;
MPI_Win_create(NULL, 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &win);
for (i=0; i<SIZE1; i++) {
MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win);
MPI_Put(A+i, 1, MPI_INT, 1, i, 1, MPI_INT, win);
MPI_Win_unlock(1, win);
}
for (i=0; i<SIZE1; i++) {
MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win);
MPI_Get(B+i, 1, MPI_INT, 1, SIZE1+i, 1, MPI_INT, win);
MPI_Win_unlock(1, win);
}
MPI_Win_free(&win);
for (i=0; i<SIZE1; i++)
if (B[i] != (-4)*(i+SIZE1)) {
printf("Get Error: B[%d] is %d, should be %d\n", i, B[i], (-4)*(i+SIZE1));
errs++;
}
}
else { /* rank=1 */
for (i=0; i<SIZE2; i++) B[i] = (-4)*i;
MPI_Win_create(B, SIZE2*sizeof(int), sizeof(int), MPI_INFO_NULL,
MPI_COMM_WORLD, &win);
MPI_Win_free(&win);
for (i=0; i<SIZE1; i++) {
if (B[i] != i) {
printf("Put Error: B[%d] is %d, should be %d\n", i, B[i], i);
errs++;
}
}
}
/* if (rank==0) printf("Done\n");*/
MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
Example 13: gmr_get_typed
/** One-sided get operation with type arguments. Destination buffer must be private.
*
* @param[in] mreg Memory region
* @param[in] src Address of source data
* @param[in] src_count Number of elements of the given type at the source
* @param[in] src_type MPI datatype of the source elements
* @param[in] dst Address of destination buffer
* @param[in] dst_count Number of elements of the given type at the destination
* @param[in] dst_type MPI datatype of the destination elements
* @param[in] proc Absolute process id of target process
* @return 0 on success, non-zero on failure
*/
int gmr_get_typed(gmr_t *mreg, void *src, int src_count, MPI_Datatype src_type,
void *dst, int dst_count, MPI_Datatype dst_type, int proc) {
int grp_proc;
gmr_size_t disp;
MPI_Aint lb, extent;
grp_proc = ARMCII_Translate_absolute_to_group(&mreg->group, proc);
ARMCII_Assert(grp_proc >= 0);
// Calculate displacement from beginning of the window
if (src == MPI_BOTTOM)
disp = 0;
else
disp = (gmr_size_t) ((uint8_t*)src - (uint8_t*)mreg->slices[proc].base);
// Perform checks
MPI_Type_get_true_extent(src_type, &lb, &extent);
ARMCII_Assert(mreg->lock_state != GMR_LOCK_UNLOCKED);
ARMCII_Assert_msg(disp >= 0 && disp < mreg->slices[proc].size, "Invalid remote address");
ARMCII_Assert_msg(disp + src_count*extent <= mreg->slices[proc].size, "Transfer is out of range");
MPI_Get(dst, dst_count, dst_type, grp_proc, (MPI_Aint) disp, src_count, src_type, mreg->window);
return 0;
}
Example 14: add_gather_request
void add_gather_request(gather * g, size_t local_idx, int remote_rank,
size_t remote_idx, size_t req_id)
{
assert(g->valid);
#pragma omp critical
MPI_Get(g->output + local_idx * g->elt_size, 1, g->datatype,
remote_rank, remote_idx, 1, g->datatype, g->win);
}
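Because add_gather_request issues MPI_Get from inside an OpenMP critical section, the MPI library must have been initialized with a suitable thread level. The surrounding setup is not shown; the following is a hedged sketch of what it presumably requires (MPI_THREAD_MULTIPLE is the safe choice when other threads may also call MPI), not the library's actual initialization code.
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int provided;

    /* Request full thread support; the omp critical section in
     * add_gather_request only serializes those particular MPI_Get calls. */
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided < MPI_THREAD_MULTIPLE) {
        fprintf(stderr, "MPI_THREAD_MULTIPLE not available (got %d)\n", provided);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* ... create the window, build the gather handle, spawn OpenMP threads
     * that call add_gather_request, then complete the epoch and clean up ... */

    MPI_Finalize();
    return 0;
}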
Example 15: main
int main(int argc, char *argv[])
{
int errs = 0, err;
int rank, size;
int *buf, bufsize;
int *result;
int *rmabuf, rsize, rcount;
MPI_Comm comm;
MPI_Win win;
MPI_Request req;
MTest_Init(&argc, &argv);
bufsize = 256 * sizeof(int);
buf = (int *) malloc(bufsize);
if (!buf) {
fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
result = (int *) malloc(bufsize);
if (!result) {
fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
rcount = 16;
rsize = rcount * sizeof(int);
rmabuf = (int *) malloc(rsize);
if (!rmabuf) {
fprintf(stderr, "Unable to allocated %d bytes\n", rsize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
/* The following illustrates the use of the routines to
* run through a selection of communicators and datatypes.
* Use subsets of these for tests that do not involve combinations
* of communicators, datatypes, and counts of datatypes */
while (MTestGetIntracommGeneral(&comm, 1, 1)) {
if (comm == MPI_COMM_NULL)
continue;
/* Determine the sender and receiver */
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
MPI_Win_create(buf, bufsize, sizeof(int), MPI_INFO_NULL, comm, &win);
/* To improve reporting of problems about operations, we
* change the error handler to errors return */
MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);
/** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
MPI_Win_fence(0, win);
TEST_FENCE_OP("Put",
MPI_Put(rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0, rcount, MPI_INT, win);
);
TEST_FENCE_OP("Get",
MPI_Get(rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0, rcount, MPI_INT, win);
);