This article collects typical usage examples of the C/C++ function MPIU_DBG_MSG_FMT (a debug-logging macro used throughout MPICH). If you are unsure what exactly MPIU_DBG_MSG_FMT does or how to call it, the curated code examples below should help.
The following sections present 15 code examples of MPIU_DBG_MSG_FMT, ordered by popularity.
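Every call in the examples shares the same three-part shape: a debug category, a verbosity level, and a parenthesized argument list beginning with MPIU_DBG_FDEST followed by a printf-style format and its arguments. In skeleton form (category and level values are taken from the examples below; the surrounding variables are illustrative):

/* general call shape used throughout the examples below */
MPIU_DBG_MSG_FMT(CH3_OTHER, VERBOSE,
                 (MPIU_DBG_FDEST, "rank=%d, tag=%d", rank, tag));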
Example 1: MPIR_Assert_fail_fmt
int MPIR_Assert_fail_fmt(const char *cond, const char *file_name, int line_num, const char *fmt, ...)
{
    char msg[MPIU_ASSERT_FMT_MSG_MAX_SIZE] = {'\0'};
    va_list vl;

    va_start(vl, fmt);
    vsnprintf(msg, sizeof(msg), fmt, vl); /* don't check rc, can't handle it anyway */
    va_end(vl);

    MPL_VG_PRINTF_BACKTRACE("Assertion failed in file %s at line %d: %s\n",
                            file_name, line_num, cond);
    MPL_VG_PRINTF_BACKTRACE("%s\n", msg);

    MPIU_Internal_error_printf("Assertion failed in file %s at line %d: %s\n",
                               file_name, line_num, cond);
    MPIU_Internal_error_printf("%s\n", msg);

    MPIU_DBG_MSG_FMT(ALL, TERSE,
                     (MPIU_DBG_FDEST,
                      "Assertion failed in file %s at line %d: %s",
                      file_name, line_num, cond));
    MPIU_DBG_MSG_FMT(ALL, TERSE, (MPIU_DBG_FDEST, "%s", msg));

    MPID_Abort(NULL, MPI_SUCCESS, 1, NULL);
    return MPI_ERR_INTERN; /* never get here, abort should kill us */
}
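Callers do not invoke MPIR_Assert_fail_fmt directly; an assertion macro captures the failing condition text and source location. Below is a minimal sketch of such a wrapper, assuming C99 variadic macros; the name MY_ASSERT_FMT is hypothetical, and the real MPICH macro (MPIU_Assert_fmt_msg) is shaped differently for portability:

/* hypothetical wrapper; the real MPICH macro differs in detail */
#define MY_ASSERT_FMT(cond_, ...)                                          \
    do {                                                                   \
        if (!(cond_))                                                      \
            MPIR_Assert_fail_fmt(#cond_, __FILE__, __LINE__, __VA_ARGS__); \
    } while (0)

/* usage: MY_ASSERT_FMT(len <= max, "len=%d exceeds max=%d", len, max); */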
Example 2: MPID_nem_tcp_check_sock_status
MPID_NEM_TCP_SOCK_STATUS_t
MPID_nem_tcp_check_sock_status(const struct pollfd *const plfd)
{
    int rc = MPID_NEM_TCP_SOCK_NOEVENT;

    if (plfd->revents & POLLERR)
    {
        rc = MPID_NEM_TCP_SOCK_ERROR_EOF;
        MPIU_DBG_MSG_FMT(NEM_SOCK_DET, VERBOSE, (MPIU_DBG_FDEST, "POLLERR on socket"));
        goto fn_exit;
    }
    if ((plfd->revents & POLLIN) || (plfd->revents & POLLOUT))
    {
        int error = 0;
        socklen_t n = sizeof(error);

        if (getsockopt(plfd->fd, SOL_SOCKET, SO_ERROR, &error, &n) < 0 || error != 0)
        {
            rc = MPID_NEM_TCP_SOCK_ERROR_EOF; /* (N1) */
            MPIU_DBG_MSG_FMT(NEM_SOCK_DET, VERBOSE, (MPIU_DBG_FDEST, "getsockopt failure. error=%d:%s", error, MPIU_Strerror(error)));
            goto fn_exit;
        }
        rc = MPID_NEM_TCP_SOCK_CONNECTED;
    }
 fn_exit:
    return rc;
}
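For context, a hedged sketch of how a poll(2) loop might drive this check; the socket descriptor sock_fd and the error handling below are illustrative, not the actual netmod event loop:

struct pollfd plfd;
plfd.fd = sock_fd;               /* sock_fd: an assumed connected socket */
plfd.events = POLLIN | POLLOUT;
plfd.revents = 0;

if (poll(&plfd, 1, 0) > 0) {
    MPID_NEM_TCP_SOCK_STATUS_t st = MPID_nem_tcp_check_sock_status(&plfd);
    if (st == MPID_NEM_TCP_SOCK_ERROR_EOF) {
        /* connection is dead: initiate cleanup */
    }
}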
Example 3: scif_addr_from_bc
static int scif_addr_from_bc(const char *business_card, uint16_t * addr, uint16_t * port)
{
    int mpi_errno = MPI_SUCCESS;
    int ret;
    int tmp;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_SCIF_GET_ADDR_PORT_FROM_BC);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_SCIF_GET_ADDR_PORT_FROM_BC);

    ret = MPIU_Str_get_int_arg(business_card, MPIDI_CH3I_PORT_KEY, &tmp);
    *port = (uint16_t) tmp;
    /* MPIU_STR_FAIL is not a valid MPI error code so we store the
     * result in ret instead of mpi_errno. */
    MPIU_ERR_CHKANDJUMP(ret != MPIU_STR_SUCCESS, mpi_errno, MPI_ERR_OTHER,
                        "**argstr_missingport");

    ret = MPIU_Str_get_int_arg(business_card, MPIDI_CH3I_NODE_KEY, &tmp);
    *addr = (uint16_t) tmp;
    MPIU_ERR_CHKANDJUMP(ret != MPIU_STR_SUCCESS, mpi_errno, MPI_ERR_OTHER,
                        "**argstr_missingnode");

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_SCIF_GET_ADDR_PORT_FROM_BC);
    return mpi_errno;
  fn_fail:
    MPIU_DBG_MSG_FMT(NEM_SOCK_DET, VERBOSE,
                     (MPIU_DBG_FDEST, "failure. mpi_errno = %d", mpi_errno));
    goto fn_exit;
}
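The business card parsed here is a flat key=value string. A minimal sketch of producing one with the matching writer, assuming MPIU_Str_add_int_arg takes (string cursor, remaining length, key, value) as it does elsewhere in MPICH; the buffer size and values are illustrative:

char bc[256];
char *bc_ptr = bc;
int maxlen = sizeof(bc);
uint16_t addr, port;

/* write the keys that scif_addr_from_bc looks up */
MPIU_Str_add_int_arg(&bc_ptr, &maxlen, MPIDI_CH3I_NODE_KEY, 3);
MPIU_Str_add_int_arg(&bc_ptr, &maxlen, MPIDI_CH3I_PORT_KEY, 4242);

scif_addr_from_bc(bc, &addr, &port);   /* addr == 3, port == 4242 */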
Example 4: dequeue_and_set_error
/* This dequeues req from the posted recv queue, sets req's error code to comm_fail, and updates the req pointer.
   Note that this creates a new error code if one hasn't already been created (i.e., if *error is MPI_SUCCESS). */
static inline void dequeue_and_set_error(MPID_Request **req, MPID_Request *prev_req, int *error, int rank)
{
    MPID_Request *next = (*req)->dev.next;

    if (*error == MPI_SUCCESS) {
        if (rank == MPI_PROC_NULL)
            MPIU_ERR_SET(*error, MPIX_ERR_PROC_FAIL_STOP, "**comm_fail");
        else
            MPIU_ERR_SET1(*error, MPIX_ERR_PROC_FAIL_STOP, "**comm_fail", "**comm_fail %d", rank);
    }

    /* remove from queue */
    if (recvq_posted_head == *req)
        recvq_posted_head = (*req)->dev.next;
    else
        prev_req->dev.next = (*req)->dev.next;
    if (recvq_posted_tail == *req)
        recvq_posted_tail = prev_req;
    MPIR_T_DEC(RECVQ_STATISTICS, posted_qlen);

    /* set error and complete */
    (*req)->status.MPI_ERROR = *error;
    MPIDI_CH3U_Request_complete(*req);
    MPIU_DBG_MSG_FMT(CH3_OTHER, VERBOSE,
                     (MPIU_DBG_FDEST, "set error of req %p (%#08x) to %#x and completing.",
                      *req, (*req)->handle, *error));
    *req = next;
}
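Note that the helper advances *req itself, so the surrounding queue walk must not. A sketch of that walk, scanning the posted queue for requests matching a failed rank; failed_rank and the dev.match field access are assumptions based on the CH3 request layout:

MPID_Request *req = recvq_posted_head;
MPID_Request *prev = NULL;
int error = MPI_SUCCESS;

while (req) {
    if (req->dev.match.parts.rank == failed_rank) {  /* failed_rank: assumed input */
        /* completes *req with an error and steps req to the next element */
        dequeue_and_set_error(&req, prev, &error, failed_rank);
    } else {
        prev = req;
        req = req->dev.next;
    }
}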
Example 5: MPIDI_CH3U_Buffer_allocate
void MPIDI_CH3U_Buffer_allocate(
    const void * const sbuf, MPI_Aint scount, MPI_Datatype sdt, int * smpi_errno,
    void ** rbuf_handle, MPI_Aint rcount, MPI_Datatype rdt, MPIDI_msg_sz_t * rsz,
    int * rmpi_errno)
{
    int sdt_contig;
    int rdt_contig;
    MPI_Aint sdt_true_lb, rdt_true_lb;
    MPIDI_msg_sz_t sdata_sz;
    MPIDI_msg_sz_t rdata_sz;
    MPID_Datatype * sdt_ptr;
    MPID_Datatype * rdt_ptr;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_BUFFER_ALLOCATE);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_BUFFER_ALLOCATE);
    *smpi_errno = MPI_SUCCESS;
    *rmpi_errno = MPI_SUCCESS;

    MPIDI_Datatype_get_info(scount, sdt, sdt_contig, sdata_sz, sdt_ptr, sdt_true_lb);
    MPIDI_Datatype_get_info(rcount, rdt, rdt_contig, rdata_sz, rdt_ptr, rdt_true_lb);

    /* --BEGIN ERROR HANDLING-- */
    if (sdata_sz > rdata_sz)
    {
        MPIU_DBG_MSG_FMT(CH3_OTHER, TYPICAL, (MPIU_DBG_FDEST,
            "message truncated, sdata_sz=" MPIDI_MSG_SZ_FMT " rdata_sz=" MPIDI_MSG_SZ_FMT,
            sdata_sz, rdata_sz));
        sdata_sz = rdata_sz;
        *rmpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__,
                                           MPI_ERR_TRUNCATE, "**truncate", "**truncate %d %d",
                                           sdata_sz, rdata_sz);
    }
    /* --END ERROR HANDLING-- */

    if (sdata_sz == 0)
    {
        *rsz = 0;
        goto fn_exit;
    }

    if (sdt_contig && rdt_contig)
    {
        *rbuf_handle = (void *) MPIU_Malloc(sdata_sz);
        MPIU_Assert(*rbuf_handle);
        *rsz = sdata_sz;
    }
    else
    {
        /* --BEGIN ERROR HANDLING-- */
        MPIU_DBG_MSG(CH3_OTHER, TYPICAL, "Sender and receiver datatypes are not contiguous");
        *smpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL, FCNAME, __LINE__,
                                           MPI_ERR_OTHER, "**zcopybufalloc", "**zcopybufalloc %d %d",
                                           scount, rcount);
        *rmpi_errno = *smpi_errno;
        *rsz = 0;
        goto fn_exit;
        /* --END ERROR HANDLING-- */
    }

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_BUFFER_ALLOCATE);
}
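A hedged usage sketch for the contiguous case: both error outputs must be checked, and on success the caller owns the returned buffer (freed here with MPIU_Free). Everything except the function's own signature is illustrative:

void *rbuf = NULL;
MPIDI_msg_sz_t rsz = 0;
int serr = MPI_SUCCESS, rerr = MPI_SUCCESS;
double sbuf[8] = { 0 };

MPIDI_CH3U_Buffer_allocate(sbuf, 8, MPI_DOUBLE, &serr,
                           &rbuf, 8, MPI_DOUBLE, &rsz, &rerr);
if (serr == MPI_SUCCESS && rerr == MPI_SUCCESS && rsz > 0) {
    /* ... fill up to rsz bytes of rbuf ... */
    MPIU_Free(rbuf);
}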
Example 6: MPIDI_CH3U_VC_SendClose
/*@
  MPIDI_CH3U_VC_SendClose - Initiate a close on a virtual connection

  Input Parameters:
+ vc - Virtual connection to close
- rank - rank of the virtual connection within a process group (used for debugging)

  Notes:
  The current state of this connection must be either 'MPIDI_VC_STATE_ACTIVE'
  or 'MPIDI_VC_STATE_REMOTE_CLOSE'.
@*/
int MPIDI_CH3U_VC_SendClose( MPIDI_VC_t *vc, int rank )
{
    MPIDI_CH3_Pkt_t upkt;
    MPIDI_CH3_Pkt_close_t * close_pkt = &upkt.close;
    MPID_Request * sreq;
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_VC_SENDCLOSE);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_VC_SENDCLOSE);
    MPID_THREAD_CS_ENTER(POBJ, vc->pobj_mutex);

    MPIU_Assert( vc->state == MPIDI_VC_STATE_ACTIVE ||
                 vc->state == MPIDI_VC_STATE_REMOTE_CLOSE );

    MPIDI_Pkt_init(close_pkt, MPIDI_CH3_PKT_CLOSE);
    close_pkt->ack = (vc->state == MPIDI_VC_STATE_ACTIVE) ? FALSE : TRUE;

    /* MT: this is not thread safe, the POBJ CS is scoped to the vc and
     * doesn't protect this global correctly */
    MPIDI_Outstanding_close_ops += 1;
    MPIU_DBG_MSG_FMT(CH3_DISCONNECT, TYPICAL, (MPIU_DBG_FDEST,
                     "sending close(%s) on vc (pg=%p) %p to rank %d, ops = %d",
                     close_pkt->ack ? "TRUE" : "FALSE", vc->pg, vc,
                     rank, MPIDI_Outstanding_close_ops));

    /*
     * A close packet acknowledging this close request could be
     * received during iStartMsg, therefore the state must
     * be changed before the close packet is sent.
     */
    if (vc->state == MPIDI_VC_STATE_ACTIVE) {
        MPIDI_CHANGE_VC_STATE(vc, LOCAL_CLOSE);
    }
    else {
        MPIU_Assert( vc->state == MPIDI_VC_STATE_REMOTE_CLOSE );
        MPIDI_CHANGE_VC_STATE(vc, CLOSE_ACKED);
    }

    mpi_errno = MPIDI_CH3_iStartMsg(vc, close_pkt, sizeof(*close_pkt), &sreq);
    MPIR_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER, "**ch3|send_close_ack");

    if (sreq != NULL) {
        /* There is still another reference being held by the channel.  It
           will not be released until the pkt is actually sent. */
        MPID_Request_release(sreq);
    }

  fn_exit:
    MPID_THREAD_CS_EXIT(POBJ, vc->pobj_mutex);
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_VC_SENDCLOSE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
Example 7: MPID_nem_mxm_vc_init
int MPID_nem_mxm_vc_init(MPIDI_VC_t * vc)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_CH3I_VC *vc_ch = &vc->ch;
    MPID_nem_mxm_vc_area *vc_area = VC_BASE(vc);
    MPIDI_STATE_DECL(MPID_STATE_MXM_VC_INIT);

    MPIDI_FUNC_ENTER(MPID_STATE_MXM_VC_INIT);

    /* local connection is used for any source communication */
    MPIU_Assert(MPID_nem_mem_region.rank != vc->lpid);
    MPIU_DBG_MSG_FMT(CH3_CHANNEL, VERBOSE,
                     (MPIU_DBG_FDEST,
                      "[%i]=== connecting to %i \n", MPID_nem_mem_region.rank, vc->lpid));
    {
        char *business_card;
        int val_max_sz;
#ifdef USE_PMI2_API
        val_max_sz = PMI2_MAX_VALLEN;
#else
        mpi_errno = PMI_KVS_Get_value_length_max(&val_max_sz);
        if (mpi_errno)
            MPIU_ERR_POP(mpi_errno);
#endif
        business_card = (char *) MPIU_Malloc(val_max_sz);

        mpi_errno = vc->pg->getConnInfo(vc->pg_rank, business_card, val_max_sz, vc->pg);
        if (mpi_errno)
            MPIU_ERR_POP(mpi_errno);

        vc_area->ctx = vc;
        vc_area->mxm_ep = &_mxm_obj.endpoint[vc->pg_rank];
        mpi_errno = _mxm_connect(&_mxm_obj.endpoint[vc->pg_rank], business_card, vc_area);
        if (mpi_errno)
            MPIU_ERR_POP(mpi_errno);

        MPIU_Free(business_card);
    }

    MPIDI_CHANGE_VC_STATE(vc, ACTIVE);

    vc_area->pending_sends = 0;

    vc->rndvSend_fn = NULL;
    vc->rndvRecv_fn = NULL;
    vc->sendNoncontig_fn = MPID_nem_mxm_SendNoncontig;
    vc->comm_ops = &comm_ops;

    vc_ch->iStartContigMsg = MPID_nem_mxm_iStartContigMsg;
    vc_ch->iSendContig = MPID_nem_mxm_iSendContig;

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MXM_VC_INIT);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
Example 8: MPIDI_VCRT_Add_ref
int MPIDI_VCRT_Add_ref(struct MPIDI_VCRT *vcrt)
{
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_VCRT_ADD_REF);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_VCRT_ADD_REF);
    MPIU_Object_add_ref(vcrt);
    MPIU_DBG_MSG_FMT(REFCOUNT, TYPICAL, (MPIU_DBG_FDEST, "Incr VCRT %p ref count", vcrt));
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_VCRT_ADD_REF);
    return MPI_SUCCESS;
}
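The increment above is half of the usual pattern: every Add_ref is paired with a release that frees the table when the last reference drops. A minimal sketch of the matching release, assuming MPIU_Object_release_ref follows the common (object, &in_use) shape; the function name is hypothetical:

/* hypothetical counterpart to MPIDI_VCRT_Add_ref, for illustration */
int MPIDI_VCRT_Release_sketch(struct MPIDI_VCRT *vcrt)
{
    int in_use;

    MPIU_Object_release_ref(vcrt, &in_use);
    MPIU_DBG_MSG_FMT(REFCOUNT, TYPICAL, (MPIU_DBG_FDEST, "Decr VCRT %p ref count", vcrt));
    if (!in_use) {
        /* last reference: the real code also releases each contained VC first */
        MPIU_Free(vcrt);
    }
    return MPI_SUCCESS;
}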
Example 9: MPIDI_CH3_EagerNoncontigSend
/* MPIDI_CH3_EagerNoncontigSend - Eagerly send noncontiguous data */
int MPIDI_CH3_EagerNoncontigSend( MPID_Request **sreq_p,
                                  MPIDI_CH3_Pkt_type_t reqtype,
                                  const void * buf, MPI_Aint count,
                                  MPI_Datatype datatype, MPIDI_msg_sz_t data_sz,
                                  int rank,
                                  int tag, MPID_Comm * comm,
                                  int context_offset )
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_VC_t * vc;
    MPID_Request *sreq = *sreq_p;
    MPIDI_CH3_Pkt_t upkt;
    MPIDI_CH3_Pkt_eager_send_t * const eager_pkt = &upkt.eager_send;

    MPIU_DBG_MSG_FMT(CH3_OTHER, VERBOSE, (MPIU_DBG_FDEST,
                     "sending non-contiguous eager message, data_sz=" MPIDI_MSG_SZ_FMT,
                     data_sz));
    sreq->dev.OnDataAvail = 0;
    sreq->dev.OnFinal = 0;

    MPIDI_Pkt_init(eager_pkt, reqtype);
    eager_pkt->match.parts.rank = comm->rank;
    eager_pkt->match.parts.tag = tag;
    eager_pkt->match.parts.context_id = comm->context_id + context_offset;
    eager_pkt->sender_req_id = MPI_REQUEST_NULL;
    eager_pkt->data_sz = data_sz;

    MPIDI_Comm_get_vc_set_active(comm, rank, &vc);

    MPIDI_VC_FAI_send_seqnum(vc, seqnum);
    MPIDI_Pkt_set_seqnum(eager_pkt, seqnum);
    MPIDI_Request_set_seqnum(sreq, seqnum);

    MPIU_DBG_MSGPKT(vc, tag, eager_pkt->match.parts.context_id, rank, data_sz, "Eager");

    sreq->dev.segment_ptr = MPID_Segment_alloc( );
    MPIR_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER,
                         "**nomem", "**nomem %s", "MPID_Segment_alloc");
    MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
    sreq->dev.segment_first = 0;
    sreq->dev.segment_size = data_sz;

    MPID_THREAD_CS_ENTER(POBJ, vc->pobj_mutex);
    mpi_errno = vc->sendNoncontig_fn(vc, sreq, eager_pkt,
                                     sizeof(MPIDI_CH3_Pkt_eager_send_t));
    MPID_THREAD_CS_EXIT(POBJ, vc->pobj_mutex);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

  fn_exit:
    return mpi_errno;
  fn_fail:
    *sreq_p = NULL;
    goto fn_exit;
}
Example 10: MPIR_Setup_intercomm_localcomm
int MPIR_Setup_intercomm_localcomm(MPID_Comm * intercomm_ptr)
{
    MPID_Comm *localcomm_ptr;
    int mpi_errno = MPI_SUCCESS;
    MPID_MPI_STATE_DECL(MPID_STATE_MPIR_SETUP_INTERCOMM_LOCALCOMM);

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPIR_SETUP_INTERCOMM_LOCALCOMM);

    localcomm_ptr = (MPID_Comm *) MPIU_Handle_obj_alloc(&MPID_Comm_mem);
    MPIR_ERR_CHKANDJUMP(!localcomm_ptr, mpi_errno, MPI_ERR_OTHER, "**nomem");

    /* get sensible default values for most fields (usually zeros) */
    mpi_errno = MPIR_Comm_init(localcomm_ptr);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* use the parent intercomm's recv ctx as the basis for our ctx */
    localcomm_ptr->recvcontext_id =
        MPID_CONTEXT_SET_FIELD(IS_LOCALCOMM, intercomm_ptr->recvcontext_id, 1);
    localcomm_ptr->context_id = localcomm_ptr->recvcontext_id;

    MPIU_DBG_MSG_FMT(COMM, TYPICAL,
                     (MPIU_DBG_FDEST,
                      "setup_intercomm_localcomm ic=%p ic->context_id=%d ic->recvcontext_id=%d lc->recvcontext_id=%d",
                      intercomm_ptr, intercomm_ptr->context_id, intercomm_ptr->recvcontext_id,
                      localcomm_ptr->recvcontext_id));

    /* Save the kind of the communicator */
    localcomm_ptr->comm_kind = MPID_INTRACOMM;

    /* Set the sizes and ranks */
    localcomm_ptr->remote_size = intercomm_ptr->local_size;
    localcomm_ptr->local_size = intercomm_ptr->local_size;
    localcomm_ptr->rank = intercomm_ptr->rank;

    MPIR_Comm_map_dup(localcomm_ptr, intercomm_ptr, MPIR_COMM_MAP_DIR_L2L);

    /* TODO More advanced version: if the group is available, dup it by
     * increasing the reference count instead of recreating it later */

    /* FIXME: No coll_fns functions for the collectives */
    /* FIXME: No local functions for the topology routines */

    intercomm_ptr->local_comm = localcomm_ptr;

    /* sets up the SMP-aware sub-communicators and tables */
    mpi_errno = MPIR_Comm_commit(localcomm_ptr);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

  fn_fail:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPIR_SETUP_INTERCOMM_LOCALCOMM);
    return mpi_errno;
}
Example 11: MPIR_Assert_fail
int MPIR_Assert_fail(const char *cond, const char *file_name, int line_num)
{
    MPL_VG_PRINTF_BACKTRACE("Assertion failed in file %s at line %d: %s\n",
                            file_name, line_num, cond);
    MPIU_Internal_error_printf("Assertion failed in file %s at line %d: %s\n",
                               file_name, line_num, cond);
    MPIU_DBG_MSG_FMT(ALL, TERSE,
                     (MPIU_DBG_FDEST,
                      "Assertion failed in file %s at line %d: %s",
                      file_name, line_num, cond));
    MPID_Abort(NULL, MPI_SUCCESS, 1, NULL);
    return MPI_ERR_INTERN; /* never get here, abort should kill us */
}
Example 12: MPIDI_CH3_EagerContigSend
int MPIDI_CH3_EagerContigSend( MPID_Request **sreq_p,
                               MPIDI_CH3_Pkt_type_t reqtype,
                               const void * buf, MPIDI_msg_sz_t data_sz, int rank,
                               int tag, MPID_Comm * comm, int context_offset )
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_VC_t * vc;
    MPIDI_CH3_Pkt_t upkt;
    MPIDI_CH3_Pkt_eager_send_t * const eager_pkt = &upkt.eager_send;
    MPID_Request *sreq = *sreq_p;
    MPL_IOV iov[2];

    MPIDI_Pkt_init(eager_pkt, reqtype);
    eager_pkt->match.parts.rank = comm->rank;
    eager_pkt->match.parts.tag = tag;
    eager_pkt->match.parts.context_id = comm->context_id + context_offset;
    eager_pkt->sender_req_id = MPI_REQUEST_NULL;
    eager_pkt->data_sz = data_sz;

    iov[0].MPL_IOV_BUF = (MPL_IOV_BUF_CAST) eager_pkt;
    iov[0].MPL_IOV_LEN = sizeof(*eager_pkt);

    MPIU_DBG_MSG_FMT(CH3_OTHER, VERBOSE, (MPIU_DBG_FDEST,
                     "sending contiguous eager message, data_sz=" MPIDI_MSG_SZ_FMT,
                     data_sz));

    iov[1].MPL_IOV_BUF = (MPL_IOV_BUF_CAST) buf;
    iov[1].MPL_IOV_LEN = data_sz;

    MPIDI_Comm_get_vc_set_active(comm, rank, &vc);
    MPIDI_VC_FAI_send_seqnum(vc, seqnum);
    MPIDI_Pkt_set_seqnum(eager_pkt, seqnum);

    MPIU_DBG_MSGPKT(vc, tag, eager_pkt->match.parts.context_id, rank, data_sz, "EagerContig");

    MPID_THREAD_CS_ENTER(POBJ, vc->pobj_mutex);
    mpi_errno = MPIDI_CH3_iStartMsgv(vc, iov, 2, sreq_p);
    MPID_THREAD_CS_EXIT(POBJ, vc->pobj_mutex);
    if (mpi_errno != MPI_SUCCESS) {
        MPIR_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**ch3|eagermsg");
    }

    sreq = *sreq_p;
    if (sreq != NULL)
    {
        MPIDI_Request_set_seqnum(sreq, seqnum);
        MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
    }

  fn_fail:
    return mpi_errno;
}
Example 13: allocate_context_bit
/* Allocates a context ID from the given mask by clearing the bit
 * corresponding to the given id.  Returns 0 on failure, id on
 * success. */
static int allocate_context_bit(uint32_t mask[], MPIU_Context_id_t id)
{
    int raw_prefix, idx, bitpos;

    raw_prefix = MPID_CONTEXT_READ_FIELD(PREFIX, id);
    idx = raw_prefix / MPIR_CONTEXT_INT_BITS;
    bitpos = raw_prefix % MPIR_CONTEXT_INT_BITS;

    /* the bit should not already be cleared (allocated) */
    MPIU_Assert(mask[idx] & (1 << bitpos));

    /* clear the bit */
    mask[idx] &= ~(1 << bitpos);

    MPIU_DBG_MSG_FMT(COMM, VERBOSE, (MPIU_DBG_FDEST,
                     "allocating contextid = %d, (mask=%p, mask[%d], bit %d)",
                     id, mask, idx, bitpos));
    return id;
}
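The inverse operation just sets the bit back. A sketch of a matching free, derived directly from the encoding above; free_context_bit is a hypothetical name for illustration:

/* hypothetical inverse of allocate_context_bit, for illustration */
static void free_context_bit(uint32_t mask[], MPIU_Context_id_t id)
{
    int raw_prefix = MPID_CONTEXT_READ_FIELD(PREFIX, id);
    int idx = raw_prefix / MPIR_CONTEXT_INT_BITS;
    int bitpos = raw_prefix % MPIR_CONTEXT_INT_BITS;

    /* the bit must currently be cleared (i.e., allocated) */
    MPIU_Assert(!(mask[idx] & (1 << bitpos)));
    mask[idx] |= (1 << bitpos);   /* set the bit: the id is free again */
}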
Example 14: MPID_Recv
int MPID_Recv(void * buf, int count, MPI_Datatype datatype, int rank, int tag,
              MPID_Comm * comm, int context_offset,
              MPI_Status * status, MPID_Request ** request)
{
    /* FIXME: in the common case, we want to simply complete the message
       and make as few updates as possible.
       Note in addition that this routine is used only by MPI_Recv (a
       blocking routine); the intent of the interface (which returns
       a request) was to simplify the handling of the case where the
       message was not found in the unexpected queue. */
    int mpi_errno = MPI_SUCCESS;
    MPID_Request * rreq;
    int found;
    MPIDI_STATE_DECL(MPID_STATE_MPID_RECV);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_RECV);

    MPIU_DBG_MSG_FMT(CH3_OTHER, VERBOSE, (MPIU_DBG_FDEST,
                     "rank=%d, tag=%d, context=%d", rank, tag,
                     comm->recvcontext_id + context_offset));

    if (rank == MPI_PROC_NULL)
    {
        MPIR_Status_set_procnull(status);
        rreq = NULL;
        goto fn_exit;
    }

    /* Check to make sure the communicator hasn't already been revoked */
    if (comm->revoked &&
        MPIR_AGREE_TAG != MPIR_TAG_MASK_ERROR_BIT(tag & ~MPIR_Process.tagged_coll_mask) &&
        MPIR_SHRINK_TAG != MPIR_TAG_MASK_ERROR_BIT(tag & ~MPIR_Process.tagged_coll_mask)) {
        MPIU_ERR_SETANDJUMP(mpi_errno, MPIX_ERR_REVOKED, "**revoked");
    }

    MPIU_THREAD_CS_ENTER(MSGQUEUE,);
    rreq = MPIDI_CH3U_Recvq_FDU_or_AEP(rank, tag,
                                       comm->recvcontext_id + context_offset,
                                       comm, buf, count, datatype, &found);
    if (rreq == NULL) {
        MPIU_THREAD_CS_EXIT(MSGQUEUE,);
        MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**nomemreq");
    }
    /* ... the excerpt ends here; the rest of the function is not shown ... */
Example 15: MPID_nem_ptl_vc_terminated
int MPID_nem_ptl_vc_terminated(MPIDI_VC_t *vc)
{
    /* This is called when the VC is to be terminated once all queued
       sends have been sent. */
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_NEM_PTL_VC_TERMINATED);

    MPIDI_FUNC_ENTER(MPID_NEM_PTL_VC_TERMINATED);

    mpi_errno = MPIDI_CH3U_Handle_connection(vc, MPIDI_VC_EVENT_TERMINATED);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_NEM_PTL_VC_TERMINATED);
    return mpi_errno;
  fn_fail:
    MPIU_DBG_MSG_FMT(NEM_SOCK_DET, VERBOSE, (MPIU_DBG_FDEST, "failure. mpi_errno = %d", mpi_errno));
    goto fn_exit;
}