This article collects and organizes typical usage examples of the MPI_Waitany function in C++. If you have been wondering what exactly MPI_Waitany does, how to call it, or what real uses of MPI_Waitany look like, the hand-picked code examples below should help.
The sections that follow present 15 code examples of the MPI_Waitany function, drawn from real projects and ordered roughly by popularity.
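Before the project examples, here is a minimal, self-contained sketch of the canonical MPI_Waitany pattern: post several non-blocking receives, then handle each one in whatever order it completes. This sketch is illustrative only and is not taken from any of the projects below; the message layout (one integer exchanged with every other rank, tag 0) is an arbitrary assumption, and the program is meant to be run with at least two ranks.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank, nprocs;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    int npeers = nprocs - 1;                  // one message from every other rank
    std::vector<MPI_Request> reqs(npeers);
    std::vector<int>         recvbuf(npeers);

    // Post one non-blocking receive per peer.
    for (int i = 0; i < npeers; ++i) {
        int src = (rank + i + 1) % nprocs;
        MPI_Irecv(&recvbuf[i], 1, MPI_INT, src, 0, MPI_COMM_WORLD, &reqs[i]);
    }

    // Send our rank to every peer; the matching receives are already posted.
    for (int i = 0; i < npeers; ++i) {
        int dst = (rank + i + 1) % nprocs;
        MPI_Send(&rank, 1, MPI_INT, dst, 0, MPI_COMM_WORLD);
    }

    // Handle completions in arrival order.
    while (true) {
        int index;
        MPI_Status status;
        MPI_Waitany(npeers, reqs.data(), &index, &status);
        if (index == MPI_UNDEFINED) {
            break;                            // every request has been consumed
        }
        std::printf("rank %d: slot %d completed, payload %d from rank %d\n",
                    rank, index, recvbuf[index], status.MPI_SOURCE);
    }

    MPI_Finalize();
    return 0;
}

MPI_Waitany deallocates each completed (non-persistent) request and sets it to MPI_REQUEST_NULL, so the same array can be passed again on the next iteration; once every entry is inactive it returns index == MPI_UNDEFINED, which ends the loop. The project examples below all build on this pattern, with application-specific buffers, request arrays, and neighbor maps.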
Example 1: MEX_post_recv
void NEKTAR_MEX::MEX_max_fabs(double *val){
    double a,b;
    double *dp;
    int *map;
    int i, j, partner, index;

    MEX_post_recv();

    for (partner = 0; partner < Npartners; ++partner){
        dp = send_buffer[partner];
        map = message_send_map[partner];
        for (i = 0; i < message_size[partner]; ++i)
            dp[i] = val[map[i]];
    }

    MEX_post_send();

    for (partner = 0; partner < Npartners; partner++){
        MPI_Waitany(Npartners, request_recv, &index, MPI_STATUS_IGNORE);
        dp = recv_buffer[index];
        map = message_recv_map[index];
        for (i = 0; i < message_size[index]; ++i){
            j = map[i];
            a = fabs(val[j]);
            b = fabs(dp[i]);
            if (b > a)
                val[j] = dp[i];
        }
    }
    /* Wait for all outgoing sends to complete before returning. */
    MPI_Waitall(Npartners, request_send, MPI_STATUSES_IGNORE);
}
Example 2: trace_begin
//......... part of the code omitted here .........
    if (!(rank >= n_proc - column)){
        dbg++;
        printf("rank[%d], iter[%d] ------- send [%d] fish to neig[6], \n", rank, iter, n_send_fish[6]);
        // receive from neighbor 6
        MPI_Irecv(receive_fish[6], n_receive_fish, fishtype, rankNeighbor[6], MPI_ANY_TAG, comm, &recvReqArray[6]);
        // send to neighbor 6
        MPI_Isend(send_fish[6], n_send_fish[6], fishtype, rankNeighbor[6], mesTag, comm, &sendReqArray[6]);
    } else {
        recvReqArray[6] = MPI_REQUEST_NULL;
    }

    // sendrecv from neighbors 2 and 5
    if (!(rank < column) && ((rank + 1) % column != 0)) {
        dbg++;
        printf("rank[%d], iter[%d] ------- send [%d] fish to neig[2], \n", rank, iter, n_send_fish[2]);
        // send to neighbor 2
        MPI_Isend(send_fish[2], n_send_fish[2], fishtype, rankNeighbor[2], mesTag, comm, &sendReqArray[2]);
        // receive from neighbor 2
        MPI_Irecv(receive_fish[2], n_receive_fish, fishtype, rankNeighbor[2], MPI_ANY_TAG, comm, &recvReqArray[2]);
    } else {
        recvReqArray[2] = MPI_REQUEST_NULL;
    }

    if (!(rank >= n_proc - column) && (rank % column != 0)) {
        dbg++;
        printf("rank[%d], iter[%d] ------- send [%d] fish to neig[5], \n", rank, iter, n_send_fish[5]);
        // receive from neighbor 5
        MPI_Irecv(receive_fish[5], n_receive_fish, fishtype, rankNeighbor[5], MPI_ANY_TAG, comm, &recvReqArray[5]);
        // send to neighbor 5
        MPI_Isend(send_fish[5], n_send_fish[5], fishtype, rankNeighbor[5], mesTag, comm, &sendReqArray[5]);
    } else {
        recvReqArray[5] = MPI_REQUEST_NULL;
    }

    //j++;
    // sendrecv from neighbors 3 and 4
    if (rank % column != 0) {
        dbg++;
        printf("rank[%d], iter[%d] ------- send [%d] fish to neig[3], \n", rank, iter, n_send_fish[3]);
        // send to neighbor 3
        MPI_Isend(send_fish[3], n_send_fish[3], fishtype, rankNeighbor[3], mesTag, comm, &sendReqArray[3]);
        // receive from neighbor 3
        MPI_Irecv(receive_fish[3], n_receive_fish, fishtype, rankNeighbor[3], MPI_ANY_TAG, comm, &recvReqArray[3]);
    } else {
        recvReqArray[3] = MPI_REQUEST_NULL;
    }

    if ((rank + 1) % column != 0){
        dbg++;
        printf("rank[%d], iter[%d] ------- send [%d] fish to neig[4], \n", rank, iter, n_send_fish[4]);
        // receive from neighbor 4
        MPI_Irecv(receive_fish[4], n_receive_fish, fishtype, rankNeighbor[4], MPI_ANY_TAG, comm, &recvReqArray[4]);
        // send to neighbor 4
        MPI_Isend(send_fish[4], n_send_fish[4], fishtype, rankNeighbor[4], mesTag, comm, &sendReqArray[4]);
    } else {
        recvReqArray[4] = MPI_REQUEST_NULL;
    }
}
//*************************************************************************************/
////////////////////////////////////////////////////////////////////////////////////////
// wait_for_fish()
////////////////////////////////////////////////////////////////////////////////////////
//*************************************************************************************/
void wait_for_fish(MPI_Request* recvReqArray, int* n_fish) {
    // Now wait for any recv to come back.
    int k;
    int arrayIndex;
    int numNeighbor = NUM_NEIGHBOR;

    if ((rank < column) || ((rank + 1) % column == 0) || (rank >= n_proc - column) || (rank % column == 0))
        numNeighbor = 5;
    if (rank == 0 || rank == column - 1 || rank == n_proc - 1 || rank == n_proc - column)
        numNeighbor = 3;

    MPI_Status statusArray[numNeighbor];

    // Zero the count array.
    for (k = 0; k < NUM_NEIGHBOR; k++) {
        n_fish[k] = 0;
    }

    for (k = 0; k < numNeighbor; ++k) {
        MPI_Waitany(NUM_NEIGHBOR, recvReqArray, &arrayIndex, &statusArray[k]);
        //recvReqArray[arrayIndex] = MPI_REQUEST_NULL;
        assert(arrayIndex >= 0 && arrayIndex <= 7);
        if (arrayIndex != MPI_UNDEFINED) {
            dbg--;
            MPI_Get_count(&statusArray[k], fishtype, &n_fish[arrayIndex]);
        }
    }
}
Example 3: spmd_waitany
SEXP spmd_waitany(SEXP R_count, SEXP R_status){
    int index;

    spmd_errhandler(
        MPI_Waitany(INTEGER(R_count)[0], request, &index,
                    &status[INTEGER(R_status)[0]]));

    return(AsInt(index));
} /* End of spmd_waitany(). */
Example 4: event_loop
static void event_loop(event_queue_t queue,int block){
    while(queue->pending){
        Debug("MPI waiting for %d events",queue->pending);
        int index[queue->pending];
        int completed;
        MPI_Status status[queue->pending];
        if (block) {
            Debug("MPI_Waitsome");
            //int res = MPI_Waitsome(queue->pending,queue->request,&completed,index,status);
            int res = MPI_Waitany(queue->pending,queue->request,index,status);
            completed=1;
            Debug("MPI_Waitsome : %d",res);
            if (res != MPI_SUCCESS) Abort("MPI_Waitsome");
            queue->wait_some_calls++;
            if (completed>1) queue->wait_some_multi++;
            block=0;
        } else {
            Debug("MPI_Testsome");
            //int res = MPI_Testsome(queue->pending,queue->request,&completed,index,status);
            int flag;
            int res = MPI_Testany(queue->pending,queue->request,index,&flag,status);
            completed=flag?1:0;
            Debug("MPI_Testsome : %d",res);
            if (res != MPI_SUCCESS) Abort("MPI_Testsome");
            queue->test_some_calls++;
            if (completed==0) {
                queue->test_some_none++;
                Debug("MPI exit event loop");
                return;
            }
            if (completed>1) queue->test_some_multi++;
        }
        Debug("MPI completion of %d events",completed);
        event_callback cb[completed];
        void *ctx[completed];
        for(int i=0;i<completed;i++){
            cb[i]=queue->cb[index[i]];
            queue->cb[index[i]]=NULL;
            ctx[i]=queue->context[index[i]];
        }
        int k=0;
        for(int i=0;i<queue->pending;i++){
            if (queue->cb[i]) {
                if (k<i) {
                    queue->request[k]=queue->request[i];
                    queue->cb[k]=queue->cb[i];
                    queue->context[k]=queue->context[i];
                }
                k++;
            }
        }
        queue->pending=k;
        for(int i=0;i<completed;i++) {
            Debug("MPI call back");
            cb[i](ctx[i],&status[i]);
            Debug("MPI call back done");
        }
    }
    Debug("MPI exit loop");
}
Example 5: Zoltan_Comm_Do_Wait
int Zoltan_Comm_Do_Wait(
    ZOLTAN_COMM_OBJ * plan,     /* communication data structure */
    int tag,                    /* message tag for communicating */
    char *send_data,            /* array of data I currently own */
    int nbytes,                 /* multiplier for sizes */
    char *recv_data)            /* array of data I'll own after comm */
{
    MPI_Status status;          /* return from Waitany */
    int my_proc;                /* processor ID */
    int self_num;               /* where in send list my_proc appears */
    int i, j, k, jj;            /* loop counters */

    MPI_Comm_rank(plan->comm, &my_proc);

    /* Wait for messages to arrive & unpack them if necessary. */
    /* Note: since request is in plan, could wait in later routine. */

    if (plan->indices_from == NULL) {   /* No copying required */
        if (plan->nrecvs > 0) {
            MPI_Waitall(plan->nrecvs, plan->request, plan->status);
        }
    }
    else {                              /* Need to copy into recv_data. */
        if (plan->self_msg) {           /* Unpack own data before waiting */
            for (self_num = 0; self_num < plan->nrecvs + plan->self_msg; self_num++)
                if (plan->procs_from[self_num] == my_proc) break;
            k = plan->starts_from[self_num];
            if (!plan->sizes_from || plan->sizes_from[self_num]) {
                for (j = plan->lengths_from[self_num]; j; j--) {
                    memcpy(&recv_data[plan->indices_from[k] * nbytes],
                           &plan->recv_buff[k * nbytes], nbytes);
                    k++;
                }
            }
        }
        else
            self_num = plan->nrecvs;

        for (jj = 0; jj < plan->nrecvs; jj++) {
            MPI_Waitany(plan->nrecvs, plan->request, &i, &status);

            if (i == MPI_UNDEFINED) break;  /* No more receives */

            if (i >= self_num) i++;

            k = plan->starts_from[i];
            for (j = plan->lengths_from[i]; j; j--) {
                memcpy(&recv_data[plan->indices_from[k] * nbytes],
                       &plan->recv_buff[k * nbytes], nbytes);
                k++;
            }
        }

        ZOLTAN_FREE(&plan->recv_buff);
    }

    return (ZOLTAN_OK);
}
Example 6: MPI_Waitany
/*!
    Waits for any receive to complete and returns the associated rank.
    If there are no active receives, the call returns MPI_UNDEFINED.

    \result The rank of the completed receive or MPI_UNDEFINED if there were
    no active receives.
*/
int DataCommunicator::waitAnyRecv()
{
    // Wait for a receive to complete
    int id;
    MPI_Waitany(m_recvRequests.size(), m_recvRequests.data(), &id, MPI_STATUS_IGNORE);
    if (id == MPI_UNDEFINED) {
        return MPI_UNDEFINED;
    }

    // If the buffer is a double buffer, swap it
    RecvBuffer &recvBuffer = m_recvBuffers[id];
    if (recvBuffer.isDouble()) {
        recvBuffer.swap();
    }

    // Rank of the request
    int rank = m_recvRanks[id];

    // Restart the receive
    if (areRecvsContinuous()) {
        startRecv(rank);
    }

    // Return the rank associated to the completed receive
    return rank;
}
Example 7: main
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  char processor_name[128];
  int namelen = 128;
  int buf0[buf_size];
  int buf1[buf_size];
  int buf2[buf_size];
  int i, flipbit, done;
  MPI_Status status;

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Barrier (MPI_COMM_WORLD);

  if (nprocs < 2)
    {
      printf ("not enough tasks\n");
    }
  else if (rank == 0)
    {
      MPI_Request reqs[3];

      MPI_Irecv (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &reqs[0]);
      MPI_Irecv (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &reqs[1]);
      MPI_Irecv (buf2, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &reqs[2]);

      for (i = 3; i > 0; i--) {
        MPI_Waitany (i, reqs, &done, &status);
        assert (done == (i - 1));
        /* don't let next one start until after waitany call... */
        MPI_Send (&flipbit, 1, MPI_INT, 1, i, MPI_COMM_WORLD);
      }
    }
  else if (rank == 1)
    {
      memset (buf0, 1, buf_size*sizeof(int));

      for (i = 3; i > 0; i--) {
        MPI_Recv (&flipbit, 1, MPI_INT, 0, i, MPI_COMM_WORLD, &status);
        MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
      }
    }

  MPI_Barrier (MPI_COMM_WORLD);

  MPI_Finalize ();
  printf ("(%d) Finished normally\n", rank);
}
Example 8: MPI_Waitany_Wrapper
int MPI_Waitany_Wrapper(int count, MPI_Request *array_of_requests, int *index, MPI_Status *status)
{
#ifdef COMMPI
  char *me = ft_mpi_routine_names[MPI_Waitany_cntr];
  int ierr;

  FT_INITIALIZE(me, ft_global_ht)
  ft_mpi_cntrs[MPI_Total_cntr]++;
  ft_mpi_cntrs[MPI_Waitany_cntr]++;

#ifdef TERRY_TRACE
  if (terry_trace_flag == TRUE) {
    TERRY_MPI_Waitany_cntr++;
    TRCHKGT(BEFORE_MPI_Waitany, cycle, TERRY_MPI_Waitany_cntr, 0, 0, 0);
  }
#endif

  ierr = MPI_Waitany(count, array_of_requests, index, status);

#ifdef TERRY_TRACE
  if (terry_trace_flag == TRUE) {
    TRCHKGT(AFTER_MPI_Waitany, cycle, TERRY_MPI_Waitany_cntr, 0, 0, 0);
  }
#endif

  FT_FINALIZE(me, ft_global_ht, 1)
  return(ierr);
#else
  return(0);
#endif
}
Example 9: mpi_waitany
void mpi_waitany (int *count, int *request, int *index, int *status, int *ierr)
{
    int c_index;

    *ierr = MPI_Waitany(*count, request, &c_index, (MPI_Status *)status);
    *index = c_index + 1;   /* Fortran counts from one not from zero */

    return;
}
Example 10: main
#include <mpi.h>
int main(int argc, char **argv) {
    int a;
    MPI_Request reqs[2];
    MPI_Init(&argc, &argv);
    /* null requests make the call well defined: it returns a == MPI_UNDEFINED */
    reqs[0] = MPI_REQUEST_NULL;
    reqs[1] = MPI_REQUEST_NULL;
    MPI_Waitany(2, reqs, &a, MPI_STATUS_IGNORE);
    MPI_Finalize();
    return 0;
}
Example 11: MatStashScatterGetMesg_Ref
static PetscErrorCode MatStashScatterGetMesg_Ref(MatStash *stash,PetscMPIInt *nvals,PetscInt **rows,PetscInt **cols,PetscScalar **vals,PetscInt *flg)
{
  PetscErrorCode ierr;
  PetscMPIInt    i,*flg_v = stash->flg_v,i1,i2;
  PetscInt       bs2;
  MPI_Status     recv_status;
  PetscBool      match_found = PETSC_FALSE;

  PetscFunctionBegin;
  *flg = 0; /* When a message is discovered this is reset to 1 */
  /* Return if no more messages to process */
  if (stash->nprocessed == stash->nrecvs) PetscFunctionReturn(0);

  bs2 = stash->bs*stash->bs;
  /* If a matching pair of receives are found, process them, and return the data to
     the calling function. Until then keep receiving messages */
  while (!match_found) {
    if (stash->reproduce) {
      i    = stash->reproduce_count++;
      ierr = MPI_Wait(stash->recv_waits+i,&recv_status);CHKERRQ(ierr);
    } else {
      ierr = MPI_Waitany(2*stash->nrecvs,stash->recv_waits,&i,&recv_status);CHKERRQ(ierr);
    }
    if (recv_status.MPI_SOURCE < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Negative MPI source!");

    /* Now pack the received message into a structure which is usable by others */
    if (i % 2) {
      ierr = MPI_Get_count(&recv_status,MPIU_SCALAR,nvals);CHKERRQ(ierr);
      flg_v[2*recv_status.MPI_SOURCE] = i/2;
      *nvals = *nvals/bs2;
    } else {
      ierr = MPI_Get_count(&recv_status,MPIU_INT,nvals);CHKERRQ(ierr);
      flg_v[2*recv_status.MPI_SOURCE+1] = i/2;
      *nvals = *nvals/2; /* This message has both row indices and col indices */
    }

    /* Check if we have both messages from this proc */
    i1 = flg_v[2*recv_status.MPI_SOURCE];
    i2 = flg_v[2*recv_status.MPI_SOURCE+1];
    if (i1 != -1 && i2 != -1) {
      *rows = stash->rindices[i2];
      *cols = *rows + *nvals;
      *vals = stash->rvalues[i1];
      *flg  = 1;
      stash->nprocessed++;
      match_found = PETSC_TRUE;
    }
  }
  PetscFunctionReturn(0);
}
Example 12: mpi_waitany_
void mpi_waitany_(int* count, int* requests, int* index, MPI_Status* status, int* ierr) {
  MPI_Request* reqs;
  int i;

  reqs = xbt_new(MPI_Request, *count);
  for(i = 0; i < *count; i++) {
    reqs[i] = find_request(requests[i]);
  }
  *ierr = MPI_Waitany(*count, reqs, index, status);
  free(reqs);
}
Example 13: MPI_Waitany
/**
 * \brief Waits for any socket to complete an operation (\b irecv or \b isend). Used to process data in arrival order.
 * <b>Unlocking of the socket must be done by the client to free the socket</b>. For performance reasons, tests of the ID may be omitted.
 */
socket_t *socket_seekWait(const channel_t * ch, int direction)
{
    int num;
    MPI_Status status;

    MPI_Waitany(ch->socketsN[direction], ch->requests[direction], &num, &status);
    if(num != MPI_UNDEFINED) {
        socket_t *s = ch->sockets[direction] + num;
        if(!s->locked) error("socket_seekWait: MPI_Waitany pointed to the unlocked socket (cpu = %d, direction = %s).", s->cpu, (s->direction) ? "outcome" : "income");
        return s;
    }
    return NULL;
}
Example 14: sync_cells_direct
void sync_cells_direct(void (*copy_func)(int, int, int, int, int, int, vektor),
                       void (*pack_func)(msgbuf*, int, int, int, vektor),
                       void (*unpack_func)(msgbuf*, int, int, int), int all) {
    int i,k;

    int sendCells;
    int recvCells;
    int totalOperations;

    if (all){
        sendCells = lb_nTotalComms;
        recvCells = lb_nTotalComms;
    } else {
        sendCells = lb_nTotalComms-lb_nForceComms;
        recvCells = lb_nForceComms;
    }

    totalOperations = sendCells + recvCells;

    MPI_Status stat;

    empty_mpi_buffers();

    for (i = 0; i<sendCells;++i){
        /*Send data away*/
        lb_copyCellDataToSend(&lb_send_buf[i], lb_sendCells[i], lb_nSendCells[i], pack_func, lb_commIndexToCpu[i]);
        isend_buf(&lb_send_buf[i], lb_commIndexToCpu[i], &lb_req_send[i]);
        lb_requests[i] = lb_req_send[i];
        lb_request_indices[i] = -1; /* Indicates no processing required */
    }

    for (i = 0; i<recvCells;++i){
        /*Start receiving data*/
        k = (lb_nTotalComms-1)-i;
        irecv_buf(&lb_recv_buf[k], lb_commIndexToCpu[k], &lb_req_recv[k]);
        lb_requests[i+sendCells] = lb_req_recv[k];
        lb_request_indices[i+sendCells] = k;
    }

    /*Receive and process data as soon as something is available*/
    for (i = totalOperations; i>0; i--){
        int finished;
        MPI_Waitany(i, lb_requests, &finished, &stat);
        int ind = lb_request_indices[finished];
        if (ind != -1){
            MPI_Get_count(&stat, REAL, &lb_recv_buf[ind].n);
            lb_unpackCellDataFromBuffer(&lb_recv_buf[ind], lb_commIndexToCpu[ind], (*unpack_func));
        }
        lb_requests[finished] = lb_requests[i-1];
        lb_request_indices[finished] = lb_request_indices[i-1];
    }
}
Example 15: Java_mpi_Request_waitAny
JNIEXPORT jint JNICALL Java_mpi_Request_waitAny(
        JNIEnv *env, jclass clazz, jlongArray requests)
{
    int count = (*env)->GetArrayLength(env, requests);
    jlong* jReq;
    MPI_Request *cReq;
    ompi_java_getPtrArray(env, requests, &jReq, (void***)&cReq);

    int index;
    int rc = MPI_Waitany(count, cReq, &index, MPI_STATUS_IGNORE);
    ompi_java_exceptionCheck(env, rc);

    ompi_java_releasePtrArray(env, requests, jReq, (void**)cReq);
    return index;
}