本文整理汇总了C++中MPIR_FUNC_VERBOSE_STATE_DECL函数的典型用法代码示例。如果您正苦于以下问题:C++ MPIR_FUNC_VERBOSE_STATE_DECL函数的具体用法?C++ MPIR_FUNC_VERBOSE_STATE_DECL怎么用?C++ MPIR_FUNC_VERBOSE_STATE_DECL使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了MPIR_FUNC_VERBOSE_STATE_DECL函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: MPID_nem_mxm_vc_destroy
/* MPID_nem_mxm_vc_destroy - destroy hook for an MXM-netmod virtual connection.
 *
 * Parameters:
 *   vc - the virtual connection being destroyed (unused in the active code path)
 *
 * Returns MPI_SUCCESS always: the function is intentionally a no-op because,
 * as the comment below explains, finalize runs before vc destroy, so the
 * MXM endpoint can no longer be torn down here.
 */
int MPID_nem_mxm_vc_destroy(MPIDI_VC_t * vc)
{
int mpi_errno = MPI_SUCCESS;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MXM_VC_DESTROY);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MXM_VC_DESTROY);
/* Do nothing because
 * finalize is called before vc destroy as result it is not possible
 * to destroy endpoint here
 */
#if 0
/* Disabled teardown kept for reference: would disconnect the MXM endpoint
 * associated with this VC and jump to fn_fail on error. */
MPID_nem_mxm_vc_area *vc_area = VC_BASE(vc);
if (vc_area->ctx == vc) {
mpi_errno = _mxm_disconnect(vc_area->mxm_ep);
if (mpi_errno)
MPIR_ERR_POP(mpi_errno);
}
#endif
fn_exit:
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MXM_VC_DESTROY);
return mpi_errno;
fn_fail:
goto fn_exit;
}
示例2: MPID_Close_port
/*@
MPID_Close_port - Close port
Input Parameters:
. port_name - Name of MPI port to close
Notes:
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_OTHER
@*/
int MPID_Close_port(const char *port_name)
{
int mpi_errno=MPI_SUCCESS;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_CLOSE_PORT);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_CLOSE_PORT);
/* Check to see if we need to setup channel-specific functions
for handling the port operations */
/* NOTE(review): setupPortFunctions and portFns are file-scope state not
 * visible in this chunk; this lazy one-time init is not itself
 * thread-safe -- presumably serialized by a higher-level lock. Confirm. */
if (setupPortFunctions) {
MPIDI_CH3_PortFnsInit( &portFns );
setupPortFunctions = 0;
}
/* The default for this function is 0 (no function).
A channel may define its own function and set it in the
init check above; such a function may be named MPIDI_CH3_Close_port */
if (portFns.ClosePort) {
mpi_errno = portFns.ClosePort( port_name );
if (mpi_errno != MPI_SUCCESS) {
MPIR_ERR_POP(mpi_errno);
}
}
else {
/* No channel implementation available: report "not implemented". */
MPIR_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**notimpl" );
}
/* fn_fail doubles as the normal exit here; MPIR_ERR_POP jumps to it. */
fn_fail:
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_CLOSE_PORT);
return mpi_errno;
}
示例3: MPIDI_CH3U_Handle_send_req
/* MPIDI_CH3U_Handle_send_req - advance a send request after data was sent.
 *
 * Parameters:
 *   vc       - virtual connection the data was sent on
 *   sreq     - the send request to advance
 *   complete - out: set nonzero when the request has finished
 *
 * Dispatches to the request's OnDataAvail callback when one is attached;
 * otherwise the send is finished outright via MPID_Request_complete.
 * Returns MPI_SUCCESS or the error code from the callback/completion.
 */
int MPIDI_CH3U_Handle_send_req(MPIDI_VC_t * vc, MPIR_Request * sreq, int *complete)
{
    int mpi_errno = MPI_SUCCESS;
    int (*on_data_avail) (MPIDI_VC_t *, MPIR_Request *, int *);
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3U_HANDLE_SEND_REQ);
    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CH3U_HANDLE_SEND_REQ);

    /* The attached function (if any) replaces the old "ca" switch. */
    on_data_avail = sreq->dev.OnDataAvail;
    if (on_data_avail != NULL) {
        mpi_errno = on_data_avail(vc, sreq, complete);
    } else {
        /* GET_RESP requests must always carry a completion callback. */
        MPIR_Assert(MPIDI_Request_get_type(sreq) != MPIDI_REQUEST_TYPE_GET_RESP);
        mpi_errno = MPID_Request_complete(sreq);
        *complete = 1;
    }
    if (mpi_errno != MPI_SUCCESS) {
        MPIR_ERR_POP(mpi_errno);
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3U_HANDLE_SEND_REQ);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
示例4: MPID_Comm_connect
/* MPID_Comm_connect - connect to a named port opened by another MPI job (CH3).
 *
 * Parameters:
 *   port_name   - name of the port to connect to
 *   info        - info hints (passed through to the channel)
 *   root        - rank performing the connect
 *   comm        - local communicator
 *   newcomm_ptr - out: the new intercommunicator
 *
 * Lazily initializes the channel's port-function table, then delegates to
 * the channel's CommConnect hook; reports "**notimpl" when the channel
 * provides none.
 */
int MPID_Comm_connect(const char * port_name, MPIR_Info * info, int root,
MPIR_Comm * comm, MPIR_Comm ** newcomm_ptr)
{
int mpi_errno=MPI_SUCCESS;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_COMM_CONNECT);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_COMM_CONNECT);
/* Check to see if we need to setup channel-specific functions
for handling the port operations */
/* NOTE(review): setupPortFunctions/portFns are file-scope state defined
 * outside this chunk; same lazy-init pattern as MPID_Close_port. */
if (setupPortFunctions) {
MPIDI_CH3_PortFnsInit( &portFns );
setupPortFunctions = 0;
}
/* A channel may define its own function and set it in the
init check above; such a function may be named MPIDI_CH3_Comm_connect.
If the function is null, we signal a not-implemented error */
if (portFns.CommConnect) {
mpi_errno = portFns.CommConnect( port_name, info, root, comm,
newcomm_ptr );
if (mpi_errno != MPI_SUCCESS) {
MPIR_ERR_POP(mpi_errno);
}
}
else {
MPIR_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**notimpl" );
}
/* fn_fail doubles as the normal exit; MPIR_ERR_POP jumps to it. */
fn_fail:
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_COMM_CONNECT);
return mpi_errno;
}
示例5: MPID_Rget_accumulate
/* MPID_Rget_accumulate - request-based get-accumulate RMA operation.
 *
 * Creates a user-visible RMA request, then either issues the
 * get-accumulate through CH3 or, when the operation is a no-op
 * (PROC_NULL target, or zero-size data in both directions),
 * completes the request immediately.
 *
 * On success *request holds the new request; the user owns one of its
 * two references (the other belongs to ch3). Returns MPI_SUCCESS or an
 * MPI error code; only valid inside a passive-target access epoch.
 */
int MPID_Rget_accumulate(const void *origin_addr, int origin_count,
                         MPI_Datatype origin_datatype, void *result_addr, int result_count,
                         MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
                         int target_count, MPI_Datatype target_datatype, MPI_Op op,
                         MPIR_Win * win_ptr, MPIR_Request ** request)
{
    int mpi_errno = MPI_SUCCESS;
    int dt_contig ATTRIBUTE((unused));
    MPIR_Datatype *dtp;
    MPI_Aint dt_true_lb ATTRIBUTE((unused));
    intptr_t data_sz, trg_data_sz;
    MPIR_Request *ureq;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_RGET_ACCUMULATE);
    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_RGET_ACCUMULATE);

    /* request-based RMA operations are only valid within a passive epoch */
    MPIR_ERR_CHKANDJUMP(win_ptr->states.access_state != MPIDI_RMA_PER_TARGET &&
                        win_ptr->states.access_state != MPIDI_RMA_LOCK_ALL_CALLED &&
                        win_ptr->states.access_state != MPIDI_RMA_LOCK_ALL_ISSUED &&
                        win_ptr->states.access_state != MPIDI_RMA_LOCK_ALL_GRANTED,
                        mpi_errno, MPI_ERR_RMA_SYNC, "**rmasync");

    /* Create user request, initially cc=1, ref=1 */
    ureq = MPIR_Request_create(MPIR_REQUEST_KIND__RMA);
    MPIR_ERR_CHKANDJUMP(ureq == NULL, mpi_errno, MPI_ERR_OTHER, "**nomemreq");

    /* This request is referenced by user and ch3 by default. */
    MPIR_Object_set_ref(ureq, 2);

    /* Note that GACC is only a no-op if no data goes in both directions */
    MPIDI_Datatype_get_info(origin_count, origin_datatype, dt_contig, data_sz, dtp, dt_true_lb);
    /* FIX: the target-side size must come from the *target* count/datatype.
     * The original queried (origin_count, origin_datatype) a second time,
     * so trg_data_sz always mirrored data_sz and a zero-origin but
     * nonzero-target transfer was wrongly treated as a no-op. */
    MPIDI_Datatype_get_info(target_count, target_datatype, dt_contig, trg_data_sz, dtp, dt_true_lb);

    /* Enqueue or perform the RMA operation */
    if (target_rank != MPI_PROC_NULL && (data_sz != 0 || trg_data_sz != 0)) {
        mpi_errno = MPIDI_CH3I_Get_accumulate(origin_addr, origin_count,
                                              origin_datatype, result_addr,
                                              result_count, result_datatype,
                                              target_rank, target_disp,
                                              target_count, target_datatype, op, win_ptr, ureq);
        if (mpi_errno != MPI_SUCCESS) {
            MPIR_ERR_POP(mpi_errno);
        }
    }
    else {
        /* No-op: nothing to transfer, complete the user request at once. */
        mpi_errno = MPID_Request_complete(ureq);
        if (mpi_errno != MPI_SUCCESS) {
            MPIR_ERR_POP(mpi_errno);
        }
    }

    *request = ureq;

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_RGET_ACCUMULATE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
示例6: MPIDI_CH3I_Win_init
/* MPIDI_CH3I_Win_init - channel-level initialization of a freshly created
 * window object.
 *
 * Resets all shared-memory bookkeeping fields of *win_ptr to their
 * "no shared memory attached" state. The size/disp_unit/flavor/model
 * parameters are accepted for interface uniformity but not used here.
 * Always returns MPI_SUCCESS.
 */
static int MPIDI_CH3I_Win_init(MPI_Aint size, int disp_unit, int create_flavor, int model,
                               MPIR_Info * info, MPIR_Comm * comm_ptr, MPIR_Win ** win_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Win *win = *win_ptr;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3I_WIN_INIT);
    MPIR_FUNC_VERBOSE_RMA_ENTER(MPID_STATE_MPIDI_CH3I_WIN_INIT);

    /* Window data segment: not mapped yet. */
    win->shm_base_addr = NULL;
    win->shm_segment_len = 0;
    win->shm_segment_handle = 0;
    /* Interprocess mutex protecting the shared segment: not created yet. */
    win->shm_mutex = NULL;
    win->shm_mutex_segment_handle = 0;
    /* Shared window-info segment: not mapped yet. */
    win->info_shm_base_addr = NULL;
    win->info_shm_segment_len = 0;
    win->info_shm_segment_handle = 0;

  fn_exit:
    MPIR_FUNC_VERBOSE_RMA_EXIT(MPID_STATE_MPIDI_CH3I_WIN_INIT);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
示例7: MPIDI_CH3_Req_handler_rma_op_complete
/* MPIDI_CH3_Req_handler_rma_op_complete - bookkeeping run when the local
 * send side of an RMA operation has completed.
 *
 * Decrements the per-target local-completion packet counter (if any),
 * decrements the global count of active RMA requests, and, if a user
 * request is attached to this send request, completes it.
 *
 * Returns MPI_SUCCESS or the error from MPID_Request_complete.
 */
int MPIDI_CH3_Req_handler_rma_op_complete(MPIR_Request * sreq)
{
int mpi_errno = MPI_SUCCESS;
MPIR_Request *ureq = NULL;
MPIR_Win *win_ptr = NULL;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3_REQ_HANDLER_RMA_OP_COMPLETE);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CH3_REQ_HANDLER_RMA_OP_COMPLETE);
/* One fewer packet outstanding toward this RMA target. */
if (sreq->dev.rma_target_ptr != NULL) {
(sreq->dev.rma_target_ptr)->num_pkts_wait_for_local_completion--;
}
/* get window, decrement active request cnt on window */
/* NOTE(review): win_ptr is looked up and asserted non-NULL but the counter
 * decremented below is the file-scope MPIDI_CH3I_RMA_Active_req_cnt, not a
 * window field -- the lookup appears to serve only as a validity check. */
MPIR_Win_get_ptr(sreq->dev.source_win_handle, win_ptr);
MPIR_Assert(win_ptr != NULL);
MPIDI_CH3I_RMA_Active_req_cnt--;
MPIR_Assert(MPIDI_CH3I_RMA_Active_req_cnt >= 0);
if (sreq->dev.request_handle != MPI_REQUEST_NULL) {
/* get user request */
MPIR_Request_get_ptr(sreq->dev.request_handle, ureq);
/* Completing the user request releases ch3's reference / signals the user. */
mpi_errno = MPID_Request_complete(ureq);
if (mpi_errno != MPI_SUCCESS) {
MPIR_ERR_POP(mpi_errno);
}
}
fn_exit:
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3_REQ_HANDLER_RMA_OP_COMPLETE);
return mpi_errno;
fn_fail:
goto fn_exit;
}
示例8: MPIDU_shm_barrier
/* FIXME: this is not a scalable algorithm because everyone is polling on the same cacheline */
/* MPIDU_shm_barrier - sense-reversing shared-memory barrier.
 *
 * Parameters:
 *   barrier   - shared barrier object (val = arrival counter, wait = sense flag)
 *   num_local - number of local processes participating
 *
 * The last arriver resets the counter and flips the shared sense flag;
 * everyone else spins until the flag leaves the current sense.
 * NOTE(review): `sense` and `barrier_init` are file-scope variables defined
 * outside this chunk; `sense` is presumably per-process state. Confirm.
 */
int MPIDU_shm_barrier(MPIDU_shm_barrier_t * barrier, int num_local)
{
int mpi_errno = MPI_SUCCESS;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDU_SHM_BARRIER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDU_SHM_BARRIER);
/* A single local process never needs to synchronize. */
if (num_local == 1)
goto fn_exit;
MPIR_ERR_CHKINTERNAL(!barrier_init, mpi_errno, "barrier not initialized");
if (OPA_fetch_and_incr_int(&barrier->val) == num_local - 1) {
/* Last to arrive: reset counter, release waiters by flipping the sense. */
OPA_store_int(&barrier->val, 0);
OPA_store_int(&barrier->wait, 1 - sense);
OPA_write_barrier();
} else {
/* wait */
while (OPA_load_int(&barrier->wait) == sense)
MPL_sched_yield(); /* skip */
}
/* Flip the local sense for the next barrier round. */
sense = 1 - sense;
fn_fail:
fn_exit:
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDU_SHM_BARRIER);
return mpi_errno;
}
示例9: MPIR_FUNC_VERBOSE_STATE_DECL
/* create_request - build a send request from a partially-sent iov.
 *
 * Parameters:
 *   iov        - the caller's iov array describing the message
 *   iov_count  - number of valid entries in iov
 *   iov_offset - index of the entry currently being transmitted
 *   nb         - bytes of that entry already sent; skipped in the copy
 *
 * Returns the new request (ref count 2: caller + progress engine), or
 * NULL on allocation failure.
 */
static MPIR_Request *create_request(MPL_IOV * iov, int iov_count, int iov_offset, size_t nb)
{
    MPIR_Request *sreq;
    int idx;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_CREATE_REQUEST);
    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_CREATE_REQUEST);

    sreq = MPIR_Request_create(MPIR_REQUEST_KIND__SEND);
    /* --BEGIN ERROR HANDLING-- */
    if (sreq == NULL)
        return NULL;
    /* --END ERROR HANDLING-- */
    MPIR_Object_set_ref(sreq, 2);

    /* Copy the caller's iov into the request so it outlives this call. */
    for (idx = 0; idx < iov_count; idx++)
        sreq->dev.iov[idx] = iov[idx];

    if (iov_offset == 0) {
        /* Entry 0 is the packet header: keep a private copy inside the
         * request and repoint the iov at it, presumably because the
         * caller's header buffer may not persist -- TODO confirm. */
        MPIR_Assert(iov[0].MPL_IOV_LEN == sizeof(MPIDI_CH3_Pkt_t));
        sreq->dev.pending_pkt = *(MPIDI_CH3_Pkt_t *) iov[0].MPL_IOV_BUF;
        sreq->dev.iov[0].MPL_IOV_BUF = (MPL_IOV_BUF_CAST) & sreq->dev.pending_pkt;
    }

    /* Account for the nb bytes of the current entry already on the wire. */
    sreq->dev.iov[iov_offset].MPL_IOV_BUF =
        (MPL_IOV_BUF_CAST) ((char *) sreq->dev.iov[iov_offset].MPL_IOV_BUF + nb);
    sreq->dev.iov[iov_offset].MPL_IOV_LEN -= nb;
    sreq->dev.iov_count = iov_count;
    sreq->dev.OnDataAvail = 0;

    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_CREATE_REQUEST);
    return sreq;
}
示例10: MPID_Win_create_dynamic
/* MPID_Win_create_dynamic - create an RMA window with no initially attached
 * memory (MPI_WIN_FLAVOR_DYNAMIC).
 *
 * Per the MPI spec, a dynamic window has size 0, disp_unit 1, and base
 * MPI_BOTTOM; memory is attached later with MPI_Win_attach. Delegates
 * channel-specific setup to MPIDI_CH3U_Win_fns.create_dynamic.
 */
int MPID_Win_create_dynamic(MPIR_Info * info, MPIR_Comm * comm_ptr, MPIR_Win ** win_ptr)
{
int mpi_errno = MPI_SUCCESS;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_WIN_CREATE_DYNAMIC);
MPIR_FUNC_VERBOSE_RMA_ENTER(MPID_STATE_MPID_WIN_CREATE_DYNAMIC);
mpi_errno = win_init(0 /* spec defines size to be 0 */ ,
1 /* spec defines disp_unit to be 1 */ ,
MPI_WIN_FLAVOR_DYNAMIC, MPI_WIN_UNIFIED, info, comm_ptr, win_ptr);
if (mpi_errno)
MPIR_ERR_POP(mpi_errno);
(*win_ptr)->base = MPI_BOTTOM;
/* Channel hook for flavor-specific setup. */
mpi_errno = MPIDI_CH3U_Win_fns.create_dynamic(info, comm_ptr, win_ptr);
if (mpi_errno != MPI_SUCCESS) {
MPIR_ERR_POP(mpi_errno);
}
/* fn_fail doubles as the normal exit; MPIR_ERR_POP jumps to it. */
fn_fail:
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_WIN_CREATE_DYNAMIC);
return mpi_errno;
}
示例11: MPIDI_CH3_Win_pkt_orderings_init
/* MPIDI_CH3_Win_pkt_orderings_init - determine which RMA packet-ordering
 * guarantees the device can provide.
 *
 * Queries the netmod (when it exposes a get_ordering hook) and sets
 * win_pkt_orderings->am_flush_ordered accordingly; defaults to unordered.
 * Returns MPI_SUCCESS or the error from the netmod query.
 */
int MPIDI_CH3_Win_pkt_orderings_init(MPIDI_CH3U_Win_pkt_ordering_t * win_pkt_orderings)
{
    int mpi_errno = MPI_SUCCESS;
    int ordering = 0;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3_WIN_PKT_ORDERINGS_INIT);
    MPIR_FUNC_VERBOSE_RMA_ENTER(MPID_STATE_MPIDI_CH3_WIN_PKT_ORDERINGS_INIT);

    /* Default: no ordering guarantee for AM-based flush. */
    win_pkt_orderings->am_flush_ordered = 0;

    if (MPID_nem_netmod_func != NULL && MPID_nem_netmod_func->get_ordering != NULL) {
        mpi_errno = MPID_nem_netmod_func->get_ordering(&ordering);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);
    }

    /* Guarantees ordered AM flush only on ordered network.
     * In other words, it is ordered only when both intra-node and inter-node
     * connections are ordered. Otherwise we have to maintain the ordering per
     * connection, which causes expensive O(P) structure or per-OP function calls.*/
    if (ordering > 0) {
        win_pkt_orderings->am_flush_ordered = 1;
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3_WIN_PKT_ORDERINGS_INIT);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
示例12: MPID_Win_create
/* MPID_Win_create - create an RMA window over caller-supplied memory
 * (MPI_WIN_FLAVOR_CREATE, unified memory model).
 *
 * Rejects revoked communicators, performs the common window
 * initialization, records the caller's base address, then delegates
 * channel-specific setup to MPIDI_CH3U_Win_fns.create.
 */
int MPID_Win_create(void *base, MPI_Aint size, int disp_unit, MPIR_Info * info,
MPIR_Comm * comm_ptr, MPIR_Win ** win_ptr)
{
int mpi_errno = MPI_SUCCESS;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_WIN_CREATE);
MPIR_FUNC_VERBOSE_RMA_ENTER(MPID_STATE_MPID_WIN_CREATE);
/* Check to make sure the communicator hasn't already been revoked */
if (comm_ptr->revoked) {
MPIR_ERR_SETANDJUMP(mpi_errno, MPIX_ERR_REVOKED, "**revoked");
}
mpi_errno =
win_init(size, disp_unit, MPI_WIN_FLAVOR_CREATE, MPI_WIN_UNIFIED, info, comm_ptr, win_ptr);
if (mpi_errno)
MPIR_ERR_POP(mpi_errno);
(*win_ptr)->base = base;
/* Channel hook for flavor-specific setup. */
mpi_errno = MPIDI_CH3U_Win_fns.create(base, size, disp_unit, info, comm_ptr, win_ptr);
if (mpi_errno)
MPIR_ERR_POP(mpi_errno);
/* fn_fail doubles as the normal exit; error macros jump to it. */
fn_fail:
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_WIN_CREATE);
return mpi_errno;
}
示例13: vc_terminate
/* vc_terminate - terminate a Portals netmod virtual connection.
 *
 * If the VC is not cleanly CLOSED, the termination is fault-driven.
 * If it is CLOSED but sends are still queued, termination is deferred
 * to the send-queue drain path. Otherwise the VC is terminated now.
 *
 * NOTE(review): req_errno is set on the fault path but never used in the
 * visible code -- presumably consumed by code outside this chunk (e.g.
 * completing outstanding sends with an error). Confirm; otherwise it is
 * dead.
 */
int vc_terminate(MPIDI_VC_t *vc)
{
int mpi_errno = MPI_SUCCESS;
int req_errno = MPI_SUCCESS;
MPID_nem_ptl_vc_area *const vc_ptl = VC_PTL(vc);
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_VC_TERMINATE);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_VC_TERMINATE);
if (vc->state != MPIDI_VC_STATE_CLOSED) {
/* VC is terminated as a result of a fault. Complete
outstanding sends with an error and terminate
connection immediately. */
MPIR_ERR_SET1(req_errno, MPIX_ERR_PROC_FAILED, "**comm_fail", "**comm_fail %d", vc->pg_rank);
mpi_errno = MPID_nem_ptl_vc_terminated(vc);
if (mpi_errno) MPIR_ERR_POP(mpi_errno);
} else if (vc_ptl->num_queued_sends == 0) {
/* Clean close with nothing pending: terminate immediately. */
mpi_errno = MPID_nem_ptl_vc_terminated(vc);
if (mpi_errno) MPIR_ERR_POP(mpi_errno);
} else {
/* the send_queued function will call vc_terminated if vc->state is
CLOSED and the last queued send has been sent*/
}
fn_exit:
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_VC_TERMINATE);
return mpi_errno;
fn_fail:
goto fn_exit;
}
示例14: MPID_Comm_connect
/* MPID_Comm_connect - connect to a named port (CH4 device).
 *
 * Reads an optional "timeout" info key (seconds, defaulting to the
 * MPIR_CVAR_CH4_COMM_CONNECT_TIMEOUT control variable) and delegates
 * the connect to the netmod.
 *
 * NOTE(review): atoi() performs no validation -- a malformed "timeout"
 * value silently becomes 0. Consider strtol with error checking.
 */
int MPID_Comm_connect(const char *port_name, MPIR_Info * info, int root, MPIR_Comm * comm,
MPIR_Comm ** newcomm_ptr)
{
int mpi_errno = MPI_SUCCESS;
int timeout = MPIR_CVAR_CH4_COMM_CONNECT_TIMEOUT;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_COMM_CONNECT);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_COMM_CONNECT);
/* Allow the user to override the connect timeout via the info object. */
if (info != NULL) {
int info_flag = 0;
char info_value[MPI_MAX_INFO_VAL + 1];
MPIR_Info_get_impl(info, "timeout", MPI_MAX_INFO_VAL, info_value, &info_flag);
if (info_flag) {
timeout = atoi(info_value);
}
}
mpi_errno = MPIDI_NM_mpi_comm_connect(port_name, info, root, timeout, comm, newcomm_ptr);
if (mpi_errno != MPI_SUCCESS) {
MPIR_ERR_POP(mpi_errno);
}
fn_exit:
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_COMM_CONNECT);
return mpi_errno;
fn_fail:
goto fn_exit;
}
示例15: MPID_nem_ptl_init_id
/* MPID_nem_ptl_init_id - resolve a VC's Portals addressing information.
 *
 * Fetches the peer's business card via the process-group connection-info
 * callback (backed by the PMI KVS), parses the Portals id and portal-table
 * indices out of it into the VC's netmod area, marks the id initialized,
 * and transitions the VC to ACTIVE.
 *
 * Returns MPI_SUCCESS or an MPI error code; the temporary business-card
 * buffer is always released via MPIR_CHKLMEM_FREEALL on exit.
 */
int MPID_nem_ptl_init_id(MPIDI_VC_t *vc)
{
int mpi_errno = MPI_SUCCESS;
MPID_nem_ptl_vc_area *const vc_ptl = VC_PTL(vc);
char *bc;
int pmi_errno;
int val_max_sz;
MPIR_CHKLMEM_DECL(1);
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_NEM_PTL_INIT_ID);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_NEM_PTL_INIT_ID);
/* Size the business-card buffer to the PMI KVS value limit. */
pmi_errno = PMI_KVS_Get_value_length_max(&val_max_sz);
MPIR_ERR_CHKANDJUMP1(pmi_errno, mpi_errno, MPI_ERR_OTHER, "**fail", "**fail %d", pmi_errno);
MPIR_CHKLMEM_MALLOC(bc, char *, val_max_sz, mpi_errno, "bc");
/* Fetch the peer's business card for this rank. */
mpi_errno = vc->pg->getConnInfo(vc->pg_rank, bc, val_max_sz, vc->pg);
if (mpi_errno) MPIR_ERR_POP(mpi_errno);
/* Decode the Portals process id and all portal-table indices. */
mpi_errno = MPID_nem_ptl_get_id_from_bc(bc, &vc_ptl->id, &vc_ptl->pt, &vc_ptl->ptg, &vc_ptl->ptc, &vc_ptl->ptr, &vc_ptl->ptrg, &vc_ptl->ptrc);
if (mpi_errno) MPIR_ERR_POP(mpi_errno);
vc_ptl->id_initialized = TRUE;
MPIDI_CHANGE_VC_STATE(vc, ACTIVE);
fn_exit:
MPIR_CHKLMEM_FREEALL();
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_NEM_PTL_INIT_ID);
return mpi_errno;
fn_fail:
goto fn_exit;
}