This page collects typical usage examples of MPIU_ERR_POP in C++. If you are wondering what MPIU_ERR_POP does, how it is used, or want to see it in real code, the examples below should help.
The following 15 code examples show MPIU_ERR_POP in use; by default they are sorted by popularity.
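All of the examples share the same MPICH error-handling convention: every call that can fail stores its result in mpi_errno, and MPIU_ERR_POP adds the failure to the error stack and jumps to the fn_fail label, which falls through to the common cleanup path at fn_exit. Below is a minimal sketch of that pattern; do_something() is a hypothetical helper used only for illustration and is not part of MPICH.

/* Minimal sketch of the error-handling pattern shared by the examples below.
 * do_something() is a hypothetical helper; on failure, MPIU_ERR_POP records
 * the error and jumps to fn_fail, so the cleanup under fn_exit still runs. */
int example_function(void)
{
    int mpi_errno = MPI_SUCCESS;

    mpi_errno = do_something();             /* any call that can fail */
    if (mpi_errno) MPIU_ERR_POP(mpi_errno); /* record error, goto fn_fail */

 fn_exit:
    /* cleanup that must run on both success and failure goes here */
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}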
Example 1: MPID_nem_gm_finalize
int
MPID_nem_gm_finalize()
{
    int mpi_errno = MPI_SUCCESS;
    int max_send_tokens;
    MPID_nem_gm_send_queue_t *e;

    max_send_tokens = gm_num_send_tokens (MPID_nem_module_gm_port);
    while (MPID_nem_module_gm_num_send_tokens < max_send_tokens && !MPID_nem_gm_queue_empty (send))
    {
        mpi_errno = MPID_nem_gm_recv_poll();
        if (mpi_errno) MPIU_ERR_POP (mpi_errno);
    }

    while (MPID_nem_gm_send_free_queue)
    {
        e = MPID_nem_gm_send_free_queue;
        MPID_nem_gm_send_free_queue = e->next;
        MPIU_Free (e);
    }

    mpi_errno = MPID_nem_gm_lmt_finalize();
    if (mpi_errno) MPIU_ERR_POP (mpi_errno);

    gm_finalize();

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
Example 2: MPIR_Ibsend_cancel
PMPI_LOCAL int MPIR_Ibsend_cancel( void *extra, int complete )
{
    int mpi_errno = MPI_SUCCESS;
    ibsend_req_info *ibsend_info = (ibsend_req_info *)extra;
    MPI_Status status;
    MPID_Request *req = ibsend_info->req;

    /* FIXME: There should be no unreferenced args! */
    /* Note that this value should always be 1 because
       Grequest_complete is called on this request when it is
       created */
    MPIU_UNREFERENCED_ARG(complete);

    /* Try to cancel the underlying request */
    mpi_errno = MPIR_Cancel_impl(req);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    mpi_errno = MPIR_Wait_impl( &req->handle, &status );
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    MPIR_Test_cancelled_impl( &status, &ibsend_info->cancelled );

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
Example 3: split_type
static int split_type(MPID_Comm * comm_ptr, int stype, int key,
                      MPID_Info *info_ptr, MPID_Comm ** newcomm_ptr)
{
    MPID_Node_id_t id;
    MPIR_Rank_t nid;
    int mpi_errno = MPI_SUCCESS;

    if (MPIDI_CH3I_Shm_supported()) {
        mpi_errno = MPID_Get_node_id(comm_ptr, comm_ptr->rank, &id);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }
    else
        id = comm_ptr->rank;

    nid = (stype == MPI_COMM_TYPE_SHARED) ? id : MPI_UNDEFINED;

    mpi_errno = MPIR_Comm_split_impl(comm_ptr, nid, key, newcomm_ptr);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

 fn_exit:
    return mpi_errno;
    /* --BEGIN ERROR HANDLING-- */
 fn_fail:
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
Example 4: MPID_nem_ptl_init_id
int MPID_nem_ptl_init_id(MPIDI_VC_t *vc)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_nem_ptl_vc_area *const vc_ptl = VC_PTL(vc);
    char *bc;
    int pmi_errno;
    int val_max_sz;
    MPIU_CHKLMEM_DECL(1);
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_PTL_INIT_ID);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_PTL_INIT_ID);

    pmi_errno = PMI_KVS_Get_value_length_max(&val_max_sz);
    MPIU_ERR_CHKANDJUMP1(pmi_errno, mpi_errno, MPI_ERR_OTHER, "**fail", "**fail %d", pmi_errno);
    MPIU_CHKLMEM_MALLOC(bc, char *, val_max_sz, mpi_errno, "bc");

    mpi_errno = vc->pg->getConnInfo(vc->pg_rank, bc, val_max_sz, vc->pg);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    mpi_errno = MPID_nem_ptl_get_id_from_bc(bc, &vc_ptl->id, &vc_ptl->pt, &vc_ptl->ptg, &vc_ptl->ptc, &vc_ptl->ptr, &vc_ptl->ptrg, &vc_ptl->ptrc);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    vc_ptl->id_initialized = TRUE;

    MPIDI_CHANGE_VC_STATE(vc, ACTIVE);

 fn_exit:
    MPIU_CHKLMEM_FREEALL();
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_PTL_INIT_ID);
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
Example 5: MPIDI_CH3_SHM_Win_free
int MPIDI_CH3_SHM_Win_free(MPID_Win **win_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_SHM_WIN_FREE);

    MPIDI_RMA_FUNC_ENTER(MPID_STATE_MPIDI_CH3_SHM_WIN_FREE);

    /* Free shared memory region */
    if ((*win_ptr)->shm_allocated) {
        /* free shm_base_addrs that's only used for shared memory windows */
        MPIU_Free((*win_ptr)->shm_base_addrs);

        /* detach from shared memory segment */
        mpi_errno = MPIU_SHMW_Seg_detach((*win_ptr)->shm_segment_handle, (char **)&(*win_ptr)->shm_base_addr,
                                         (*win_ptr)->shm_segment_len);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);

        MPIU_SHMW_Hnd_finalize(&(*win_ptr)->shm_segment_handle);
    }

    mpi_errno = MPIDI_Win_free(win_ptr);
    if (mpi_errno != MPI_SUCCESS) { MPIU_ERR_POP(mpi_errno); }

 fn_exit:
    MPIDI_RMA_FUNC_EXIT(MPID_STATE_MPIDI_CH3_SHM_WIN_FREE);
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
Example 6: MPID_Nem_nd_init
int MPID_Nem_nd_init(MPIDI_PG_t *pg_p, int pg_rank, char **bc_val_p, int *val_max_sz_p)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_ND_INIT);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_ND_INIT);

    /* Initialize Executive */
    mpi_errno = MPIU_ExInitialize();
    if(mpi_errno != MPI_SUCCESS) MPIU_ERR_POP(mpi_errno);

    /* Initialize ND state machine */
    mpi_errno = MPID_Nem_nd_sm_init();
    if(mpi_errno != MPI_SUCCESS) MPIU_ERR_POP(mpi_errno);

    /* Listen for conns & create the business card */
    mpi_errno = MPID_Nem_nd_listen_for_conn(pg_rank, bc_val_p, val_max_sz_p);
    if(mpi_errno != MPI_SUCCESS) MPIU_ERR_POP(mpi_errno);

 fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_ND_INIT);
    return mpi_errno;
 fn_fail:
    MPIU_DBG_MSG_D(CH3_CHANNEL, VERBOSE, "failed, mpi_errno = %d", mpi_errno);
    goto fn_exit;
}
Example 7: MPIR_Igatherv_impl
int MPIR_Igatherv_impl(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
                       const int recvcounts[], const int displs[], MPI_Datatype recvtype,
                       int root, MPID_Comm *comm_ptr, MPI_Request *request)
{
    int mpi_errno = MPI_SUCCESS;
    int tag = -1;
    MPID_Request *reqp = NULL;
    MPID_Sched_t s = MPID_SCHED_NULL;

    *request = MPI_REQUEST_NULL;

    mpi_errno = MPID_Sched_next_tag(comm_ptr, &tag);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    mpi_errno = MPID_Sched_create(&s);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    MPIU_Assert(comm_ptr->coll_fns != NULL);
    MPIU_Assert(comm_ptr->coll_fns->Igatherv != NULL);
    mpi_errno = comm_ptr->coll_fns->Igatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm_ptr, s);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    mpi_errno = MPID_Sched_start(&s, comm_ptr, tag, &reqp);
    if (reqp)
        *request = reqp->handle;
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
Example 8: get_addr
static int get_addr(MPIDI_VC_t * vc, struct scif_portID *addr)
{
    int mpi_errno = MPI_SUCCESS;
    char *bc;
    int pmi_errno;
    int val_max_sz;
    MPIU_CHKLMEM_DECL(1);

    /* Allocate space for the business card */
    pmi_errno = PMI_KVS_Get_value_length_max(&val_max_sz);
    MPIU_ERR_CHKANDJUMP1(pmi_errno, mpi_errno, MPI_ERR_OTHER, "**fail",
                         "**fail %d", pmi_errno);
    MPIU_CHKLMEM_MALLOC(bc, char *, val_max_sz, mpi_errno, "bc");

    mpi_errno = vc->pg->getConnInfo(vc->pg_rank, bc, val_max_sz, vc->pg);
    if (mpi_errno)
        MPIU_ERR_POP(mpi_errno);

    mpi_errno = scif_addr_from_bc(bc, &addr->node, &addr->port);
    if (mpi_errno)
        MPIU_ERR_POP(mpi_errno);

 fn_exit:
    MPIU_CHKLMEM_FREEALL();
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
Example 9: MPIDI_CH3I_Comm_init
int MPIDI_CH3I_Comm_init(void)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_COMM_INIT);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_COMM_INIT);

    MPIR_Add_finalize(register_hook_finalize, NULL, MPIR_FINALIZE_CALLBACK_PRIO-1);

    /* register hooks for keeping track of communicators */
    mpi_errno = MPIDI_CH3U_Comm_register_create_hook(comm_created, NULL);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

#if defined HAVE_LIBHCOLL
    if (MPIR_CVAR_CH3_ENABLE_HCOLL) {
        mpi_errno = MPIDI_CH3U_Comm_register_create_hook(hcoll_comm_create, NULL);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
        mpi_errno = MPIDI_CH3U_Comm_register_destroy_hook(hcoll_comm_destroy, NULL);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }
#endif

    mpi_errno = MPIDI_CH3U_Comm_register_destroy_hook(comm_destroyed, NULL);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

 fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_COMM_INIT);
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
Example 10: send_sreq_data
static int send_sreq_data(MPIDI_VC_t *vc, MPID_Request *sreq, knem_cookie_t *s_cookiep)
{
    int mpi_errno = MPI_SUCCESS;
    int dt_contig;
    MPI_Aint dt_true_lb;
    MPIDI_msg_sz_t data_sz;
    MPID_Datatype * dt_ptr;

    /* MT: this code assumes only one thread can be at this point at a time */
    if (knem_fd < 0) {
        mpi_errno = open_knem_dev();
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }

    /* find out contig/noncontig, size, and lb for the datatype */
    MPIDI_Datatype_get_info(sreq->dev.user_count, sreq->dev.datatype,
                            dt_contig, data_sz, dt_ptr, dt_true_lb);

    if (dt_contig) {
        /* handle the iov creation ourselves */
        sreq->dev.iov[0].MPID_IOV_BUF = (char *)sreq->dev.user_buf + dt_true_lb;
        sreq->dev.iov[0].MPID_IOV_LEN = data_sz;
        sreq->dev.iov_count = 1;
    }
    else {
        /* use the segment routines to handle the iovec creation */
        if (sreq->dev.segment_ptr == NULL) {
            sreq->dev.iov_count = MPID_IOV_LIMIT;
            sreq->dev.iov_offset = 0;

            /* segment_ptr may be non-null when this is a continuation of a
               many-part message that we couldn't fit in one single flight of
               iovs. */
            sreq->dev.segment_ptr = MPID_Segment_alloc();
            MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno,
                                 MPI_ERR_OTHER, "**nomem",
                                 "**nomem %s", "MPID_Segment_alloc");

            MPID_Segment_init(sreq->dev.user_buf, sreq->dev.user_count,
                              sreq->dev.datatype, sreq->dev.segment_ptr, 0);
            sreq->dev.segment_first = 0;
            sreq->dev.segment_size = data_sz;

            /* FIXME we should write our own function that isn't dependent on
               the in-request iov array.  This will let us use IOVs that are
               larger than MPID_IOV_LIMIT. */
            mpi_errno = MPIDI_CH3U_Request_load_send_iov(sreq, &sreq->dev.iov[0],
                                                         &sreq->dev.iov_count);
            if (mpi_errno) MPIU_ERR_POP(mpi_errno);
        }
    }

    mpi_errno = do_dma_send(vc, sreq, sreq->dev.iov_count, sreq->dev.iov, s_cookiep);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
Example 11: MPIR_Comm_dup_impl
int MPIR_Comm_dup_impl(MPID_Comm *comm_ptr, MPID_Comm **newcomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Attribute *new_attributes = 0;

    /* Copy attributes, executing the attribute copy functions */
    /* This accesses the attribute dup function through the perprocess
       structure to prevent comm_dup from forcing the linking of the
       attribute functions.  The actual function is (by default)
       MPIR_Attr_dup_list
    */
    if (MPIR_Process.attr_dup) {
        mpi_errno = MPIR_Process.attr_dup( comm_ptr->handle,
                                           comm_ptr->attributes,
                                           &new_attributes );
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }

    /* Generate a new context value and a new communicator structure */
    /* We must use the local size, because this is compared to the
       rank of the process in the communicator.  For intercomms,
       this must be the local size */
    mpi_errno = MPIR_Comm_copy( comm_ptr, comm_ptr->local_size, newcomm_ptr );
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    (*newcomm_ptr)->attributes = new_attributes;

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
Example 12: MPID_Nem_nd_vc_terminate
int MPID_Nem_nd_vc_terminate (MPIDI_VC_t *vc)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_CH3I_VC *vc_ch = (MPIDI_CH3I_VC *)vc->channel_private;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_ND_VC_TERMINATE);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_ND_VC_TERMINATE);

    /* Poll till no more pending/posted sends */
    while(!MPID_NEM_ND_VCCH_NETMOD_POSTED_SENDQ_EMPTY(vc)
          || !MPID_NEM_ND_VCCH_NETMOD_PENDING_SENDQ_EMPTY(vc)){
        mpi_errno = MPID_Nem_nd_sm_poll(1);
        if(mpi_errno != MPI_SUCCESS) MPIU_ERR_POP(mpi_errno);
    }

    vc_ch->next = NULL;
    vc_ch->prev = NULL;

    MPID_NEM_ND_VCCH_NETMOD_STATE_SET(vc, MPID_NEM_ND_VC_STATE_DISCONNECTED);

    mpi_errno = MPID_Nem_nd_conn_disc(MPID_NEM_ND_VCCH_NETMOD_CONN_HND_GET(vc));
    if(mpi_errno != MPI_SUCCESS) MPIU_ERR_POP(mpi_errno);

    MPID_NEM_ND_VCCH_NETMOD_CONN_HND_SET(vc, MPID_NEM_ND_CONN_HND_INVALID);

 fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_ND_VC_TERMINATE);
    return mpi_errno;
 fn_fail:
    MPIU_DBG_MSG_D(CH3_CHANNEL, VERBOSE, "failed, mpi_errno = %d", mpi_errno);
    goto fn_exit;
}
Example 13: MPIR_Alltoallv
int MPIR_Alltoallv(const void *sendbuf, const int *sendcounts, const int *sdispls,
                   MPI_Datatype sendtype, void *recvbuf, const int *recvcounts, const int *rdispls,
                   MPI_Datatype recvtype, MPID_Comm *comm_ptr, int *errflag)
{
    int mpi_errno = MPI_SUCCESS;

    if (comm_ptr->comm_kind == MPID_INTRACOMM) {
        /* intracommunicator */
        mpi_errno = MPIR_Alltoallv_intra(sendbuf, sendcounts, sdispls,
                                         sendtype, recvbuf, recvcounts,
                                         rdispls, recvtype, comm_ptr, errflag);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    } else {
        /* intercommunicator */
        mpi_errno = MPIR_Alltoallv_inter(sendbuf, sendcounts, sdispls,
                                         sendtype, recvbuf, recvcounts,
                                         rdispls, recvtype, comm_ptr, errflag);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
Example 14: MPIR_Iallreduce_naive
int MPIR_Iallreduce_naive(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPID_Comm *comm_ptr, MPID_Sched_t s)
{
    int mpi_errno = MPI_SUCCESS;
    int rank;

    rank = comm_ptr->rank;

    if ((sendbuf == MPI_IN_PLACE) && (rank != 0)) {
        mpi_errno = MPIR_Ireduce_intra(recvbuf, NULL, count, datatype, op, 0, comm_ptr, s);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }
    else {
        mpi_errno = MPIR_Ireduce_intra(sendbuf, recvbuf, count, datatype, op, 0, comm_ptr, s);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }

    MPID_SCHED_BARRIER(s);

    mpi_errno = MPIR_Ibcast_intra(recvbuf, count, datatype, 0, comm_ptr, s);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
Example 15: MPIR_Allreduce_impl
int MPIR_Allreduce_impl(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPID_Comm *comm_ptr,
                        int *errflag)
{
    int mpi_errno = MPI_SUCCESS;

    if (comm_ptr->coll_fns != NULL && comm_ptr->coll_fns->Allreduce != NULL)
    {
        mpi_errno = comm_ptr->coll_fns->Allreduce(sendbuf, recvbuf, count, datatype, op, comm_ptr, errflag);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }
    else
    {
        if (comm_ptr->comm_kind == MPID_INTRACOMM) {
            /* intracommunicator */
            mpi_errno = MPIR_Allreduce_intra(sendbuf, recvbuf, count, datatype, op, comm_ptr, errflag);
            if (mpi_errno) MPIU_ERR_POP(mpi_errno);
        }
        else {
            /* intercommunicator */
            mpi_errno = MPIR_Allreduce_inter(sendbuf, recvbuf, count, datatype, op, comm_ptr, errflag);
            if (mpi_errno) MPIU_ERR_POP(mpi_errno);
        }
    }

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}