This article collects typical usage examples of the C/C++ function OPAL_THREAD_ADD32 (from the Open MPI code base). If you have been wondering what OPAL_THREAD_ADD32 does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
A total of 15 code examples of OPAL_THREAD_ADD32 are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better C++ code examples.
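Before the examples, a quick note on the pattern they all share: OPAL_THREAD_ADD32 atomically adds a signed value to a 32-bit counter when Open MPI runs with threads enabled (falling back to a plain add otherwise) and returns the updated value. Most call sites use it to claim a limited resource by decrementing a counter and rolling the decrement back if the result goes negative. The sketch below is only an illustration of that idiom with made-up names (send_credits, try_acquire_send_credit); it is not code from Open MPI itself, and the header path can differ between versions:

#include "opal/threads/thread_usage.h"   /* declares OPAL_THREAD_ADD32; location varies by Open MPI version */
#include <stdbool.h>
#include <stdint.h>

static volatile int32_t send_credits = 16;   /* illustrative shared resource counter */

/* Try to claim one send credit; undo the decrement when none are available. */
static bool try_acquire_send_credit(void)
{
    if (OPAL_THREAD_ADD32(&send_credits, -1) < 0) {
        /* went negative: no credit was really available, so give it back */
        OPAL_THREAD_ADD32(&send_credits, 1);
        return false;   /* caller queues the fragment or calls progress and retries */
    }
    return true;        /* credit held; release it later with OPAL_THREAD_ADD32(&send_credits, 1) */
}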
Example 1: read_msg
static int
read_msg(void *start, ptl_size_t length, ptl_process_t target,
         ptl_match_bits_t match_bits, ptl_size_t remote_offset,
         ompi_mtl_portals4_recv_request_t *request)
{
    int ret;

#if OMPI_MTL_PORTALS4_FLOW_CONTROL
    while (OPAL_UNLIKELY(OPAL_THREAD_ADD32(&ompi_mtl_portals4.flowctl.send_slots, -1) < 0)) {
        OPAL_THREAD_ADD32(&ompi_mtl_portals4.flowctl.send_slots, 1);
        ompi_mtl_portals4_progress();
    }
#endif

    ret = PtlGet(ompi_mtl_portals4.send_md_h,
                 (ptl_size_t) start,
                 length,
                 target,
                 ompi_mtl_portals4.read_idx,
                 match_bits,
                 remote_offset,
                 request);
    if (OPAL_UNLIKELY(PTL_OK != ret)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlGet failed: %d",
                            __FILE__, __LINE__, ret);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    return OMPI_SUCCESS;
}
Example 2: mca_btl_openib_get_internal
int mca_btl_openib_get_internal (mca_btl_base_module_t *btl, struct mca_btl_base_endpoint_t *ep,
                                 mca_btl_openib_get_frag_t *frag)
{
    int qp = to_base_frag(frag)->base.order;
    struct ibv_send_wr *bad_wr;

    /* check for a send wqe */
    if (qp_get_wqe(ep, qp) < 0) {
        qp_put_wqe(ep, qp);
        return OPAL_ERR_OUT_OF_RESOURCE;
    }

    /* check for a get token */
    if (OPAL_THREAD_ADD32(&ep->get_tokens,-1) < 0) {
        qp_put_wqe(ep, qp);
        OPAL_THREAD_ADD32(&ep->get_tokens,1);
        return OPAL_ERR_OUT_OF_RESOURCE;
    }

    qp_inflight_wqe_to_frag(ep, qp, to_com_frag(frag));
    qp_reset_signal_count(ep, qp);

    if (ibv_post_send(ep->qps[qp].qp->lcl_qp, &frag->sr_desc, &bad_wr)) {
        qp_put_wqe(ep, qp);
        OPAL_THREAD_ADD32(&ep->get_tokens,1);
        return OPAL_ERROR;
    }

    return OPAL_SUCCESS;
}
Example 3: acquire_send_credit
static int acquire_send_credit(mca_btl_wv_endpoint_t *endpoint,
                               mca_btl_wv_send_frag_t *frag)
{
    mca_btl_wv_module_t *wv_btl = endpoint->endpoint_btl;
    int qp = to_base_frag(frag)->base.order;
    int prio = !(to_base_frag(frag)->base.des_flags & MCA_BTL_DES_FLAGS_PRIORITY);

    if(BTL_WV_QP_TYPE_PP(qp)) {
        if(OPAL_THREAD_ADD32(&endpoint->qps[qp].u.pp_qp.sd_credits, -1) < 0) {
            OPAL_THREAD_ADD32(&endpoint->qps[qp].u.pp_qp.sd_credits, 1);
            opal_list_append(&endpoint->qps[qp].no_credits_pending_frags[prio],
                             (opal_list_item_t *)frag);
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
    } else {
        if(OPAL_THREAD_ADD32(&wv_btl->qps[qp].u.srq_qp.sd_credits, -1) < 0) {
            OPAL_THREAD_ADD32(&wv_btl->qps[qp].u.srq_qp.sd_credits, 1);
            OPAL_THREAD_LOCK(&wv_btl->ib_lock);
            opal_list_append(&wv_btl->qps[qp].u.srq_qp.pending_frags[prio],
                             (opal_list_item_t *)frag);
            OPAL_THREAD_UNLOCK(&wv_btl->ib_lock);
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
    }

    return OMPI_SUCCESS;
}
Example 4: mca_btl_openib_get
int mca_btl_openib_get( mca_btl_base_module_t* btl,
                        mca_btl_base_endpoint_t* endpoint,
                        mca_btl_base_descriptor_t* descriptor)
{
    int rc;
    struct ibv_send_wr* bad_wr;
    mca_btl_openib_frag_t* frag = (mca_btl_openib_frag_t*) descriptor;
    mca_btl_openib_module_t* openib_btl = (mca_btl_openib_module_t*) btl;

    frag->endpoint = endpoint;
    frag->wr_desc.sr_desc.opcode = IBV_WR_RDMA_READ;

    /* check for a send wqe */
    if (OPAL_THREAD_ADD32(&endpoint->sd_wqe[BTL_OPENIB_LP_QP],-1) < 0) {
        OPAL_THREAD_ADD32(&endpoint->sd_wqe[BTL_OPENIB_LP_QP],1);
        OPAL_THREAD_LOCK(&endpoint->endpoint_lock);
        opal_list_append(&endpoint->pending_get_frags, (opal_list_item_t*)frag);
        OPAL_THREAD_UNLOCK(&endpoint->endpoint_lock);
        return OMPI_SUCCESS;

    /* check for a get token */
    } else if(OPAL_THREAD_ADD32(&endpoint->get_tokens,-1) < 0) {
        OPAL_THREAD_ADD32(&endpoint->sd_wqe[BTL_OPENIB_LP_QP],1);
        OPAL_THREAD_ADD32(&endpoint->get_tokens,1);
        OPAL_THREAD_LOCK(&endpoint->endpoint_lock);
        opal_list_append(&endpoint->pending_get_frags, (opal_list_item_t*)frag);
        OPAL_THREAD_UNLOCK(&endpoint->endpoint_lock);
        return OMPI_SUCCESS;

    } else {
        frag->wr_desc.sr_desc.send_flags = IBV_SEND_SIGNALED;
        frag->wr_desc.sr_desc.wr.rdma.remote_addr = frag->base.des_src->seg_addr.lval;
        frag->wr_desc.sr_desc.wr.rdma.rkey = frag->base.des_src->seg_key.key32[0];
        frag->sg_entry.addr = (unsigned long) frag->base.des_dst->seg_addr.pval;
        frag->sg_entry.length = frag->base.des_dst->seg_len;

        if(ibv_post_send(endpoint->lcl_qp[BTL_OPENIB_LP_QP],
                         &frag->wr_desc.sr_desc,
                         &bad_wr)){
            BTL_ERROR(("error posting send request errno (%d) says %s", errno, strerror(errno)));
            rc = ORTE_ERROR;
        } else {
            rc = ORTE_SUCCESS;
        }

        if(mca_btl_openib_component.use_srq) {
            mca_btl_openib_post_srr(openib_btl, 1, BTL_OPENIB_HP_QP);
            mca_btl_openib_post_srr(openib_btl, 1, BTL_OPENIB_LP_QP);
        } else {
            btl_openib_endpoint_post_rr(endpoint, 1, BTL_OPENIB_HP_QP);
            btl_openib_endpoint_post_rr(endpoint, 1, BTL_OPENIB_LP_QP);
        }
    }
    return rc;
}
Example 5: read_msg
static int
read_msg(void *start, ptl_size_t length, ptl_process_t target,
         ptl_match_bits_t match_bits, ptl_size_t remote_offset,
         ompi_mtl_portals4_recv_request_t *request)
{
    ptl_md_t md;
    int ret;

    /* FIX ME: This needs to be on the send eq... */
    md.start = start;
    md.length = length;
    md.options = 0;
    md.eq_handle = ompi_mtl_portals4.send_eq_h;
    md.ct_handle = PTL_CT_NONE;

    ret = PtlMDBind(ompi_mtl_portals4.ni_h,
                    &md,
                    &request->md_h);
    if (OPAL_UNLIKELY(PTL_OK != ret)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlMDBind failed: %d",
                            __FILE__, __LINE__, ret);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

#if OMPI_MTL_PORTALS4_FLOW_CONTROL
    while (OPAL_UNLIKELY(OPAL_THREAD_ADD32(&ompi_mtl_portals4.flowctl.send_slots, -1) < 0)) {
        OPAL_THREAD_ADD32(&ompi_mtl_portals4.flowctl.send_slots, 1);
        ompi_mtl_portals4_progress();
    }
#endif

    ret = PtlGet(request->md_h,
                 0,
                 md.length,
                 target,
                 ompi_mtl_portals4.read_idx,
                 match_bits,
                 remote_offset,
                 request);
    if (OPAL_UNLIKELY(PTL_OK != ret)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlGet failed: %d",
                            __FILE__, __LINE__, ret);
        PtlMDRelease(request->md_h);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    return OMPI_SUCCESS;
}
Example 6: mca_spml_ikrit_mxm_fence
static int mca_spml_ikrit_mxm_fence(int dst)
{
    mca_spml_ikrit_get_request_t *fence_req;

    fence_req = alloc_get_req();
    if (NULL == fence_req) {
        SPML_ERROR("out of get requests - aborting");
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

    fence_req->mxm_req.base.mq = mca_spml_ikrit.mxm_mq;
    fence_req->mxm_req.base.conn = mca_spml_ikrit.mxm_peers[dst]->mxm_conn;
#if MXM_API < MXM_VERSION(2,0)
    fence_req->mxm_req.opcode = MXM_REQ_OP_FENCE;
    fence_req->mxm_req.base.flags = MXM_REQ_FLAG_SEND_SYNC;
#else
    fence_req->mxm_req.opcode = MXM_REQ_OP_PUT_SYNC;
    fence_req->mxm_req.flags = MXM_REQ_SEND_FLAG_FENCE;
    fence_req->mxm_req.op.mem.remote_vaddr = 0;
    fence_req->mxm_req.op.mem.remote_mkey = &mxm_empty_mem_key;
    fence_req->mxm_req.base.data_type = MXM_REQ_DATA_BUFFER;
    fence_req->mxm_req.base.data.buffer.ptr = 0;
    fence_req->mxm_req.base.data.buffer.length = 0;
#endif
    fence_req->mxm_req.base.state = MXM_REQ_NEW;
    fence_req->mxm_req.base.completed_cb = fence_completion_cb;
    fence_req->mxm_req.base.context = fence_req;

    OPAL_THREAD_ADD32(&mca_spml_ikrit.n_mxm_fences, 1);

    SPML_IKRIT_MXM_POST_SEND(fence_req->mxm_req);

    return OSHMEM_SUCCESS;
}
Example 7: bcol_basesmuma_free_payload_buff
/* gvm
 * A collective operation calls this routine to release the payload buffer.
 * All processes in the shared memory sub-group of a bcol should call the non-blocking
 * barrier on the last payload buffer of a memory bank. On the completion
 * of the non-blocking barrier, the ML callback is called which is responsible
 * for recycling the memory bank.
 */
int bcol_basesmuma_free_payload_buff(
    struct mca_bcol_base_memory_block_desc_t *block,
    sm_buffer_mgmt *ctl_mgmt,
    uint64_t buff_id)
{
    /* local variables */
    int ret = OMPI_SUCCESS;
    uint32_t memory_bank;

    memory_bank = BANK_FROM_BUFFER_IDX(buff_id);

    /* count this buffer as freed (atomic when threads are enabled) */
    OPAL_THREAD_ADD32(&(ctl_mgmt->ctl_buffs_mgmt[memory_bank].n_buffs_freed),1);

    if (ctl_mgmt->ctl_buffs_mgmt[memory_bank].n_buffs_freed == block->size_buffers_bank){
        /* start non-blocking barrier */
        bcol_basesmuma_rd_nb_barrier_init_admin(
            &(ctl_mgmt->ctl_buffs_mgmt[memory_bank].nb_barrier_desc));

        if (NB_BARRIER_DONE !=
            ctl_mgmt->ctl_buffs_mgmt[memory_bank].
            nb_barrier_desc.collective_phase){
            /* progress the barrier */
            opal_progress();
        } else {
            /* free the buffer - i.e. initiate callback to ml level */
            block->ml_release_cb(block,memory_bank);
        }
    }

    return ret;
}
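Example 7 also shows a second common use of OPAL_THREAD_ADD32: counting completions and triggering an action once the count reaches a known total. Since the macro returns the updated value, that return value can be compared against the total directly, so exactly one caller observes the final increment. Below is a minimal sketch of that idea with illustrative names (freed_count, BANK_SIZE, recycle_bank) that are not part of the bcol code; it assumes the same includes as the first sketch above:

static void recycle_bank(void) { /* e.g. start the non-blocking barrier / ML release callback */ }

static volatile int32_t freed_count = 0;   /* buffers returned so far (illustrative) */
#define BANK_SIZE 64                       /* illustrative number of buffers per bank */

static void release_one_buffer(void)
{
    /* the atomic add returns the new count, so only the caller that frees
       the last buffer of the bank observes BANK_SIZE and recycles it */
    if (BANK_SIZE == OPAL_THREAD_ADD32(&freed_count, 1)) {
        recycle_bank();
    }
}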
Example 8: mca_btl_portals_free
int
mca_btl_portals_free(struct mca_btl_base_module_t* btl_base,
                     mca_btl_base_descriptor_t* des)
{
    mca_btl_portals_frag_t* frag = (mca_btl_portals_frag_t*) des;

    assert(&mca_btl_portals_module == (mca_btl_portals_module_t*) btl_base);

    if (BTL_PORTALS_FRAG_TYPE_EAGER == frag->type) {
        /* don't ever unlink eager frags */
        OMPI_BTL_PORTALS_FRAG_RETURN_EAGER(&mca_btl_portals_module.super, frag);

    } else if (BTL_PORTALS_FRAG_TYPE_MAX == frag->type) {
        if (frag->md_h != PTL_INVALID_HANDLE) {
            PtlMDUnlink(frag->md_h);
            frag->md_h = PTL_INVALID_HANDLE;
        }
        OMPI_BTL_PORTALS_FRAG_RETURN_MAX(&mca_btl_portals_module.super, frag);

    } else if (BTL_PORTALS_FRAG_TYPE_USER == frag->type) {
        if (frag->md_h != PTL_INVALID_HANDLE) {
            PtlMDUnlink(frag->md_h);
            frag->md_h = PTL_INVALID_HANDLE;
        }
        OPAL_THREAD_ADD32(&mca_btl_portals_module.portals_outstanding_ops, -1);
        OMPI_BTL_PORTALS_FRAG_RETURN_USER(&mca_btl_portals_module.super, frag);

    } else {
        return OMPI_ERR_BAD_PARAM;
    }

    return OMPI_SUCCESS;
}
Example 9: mca_btl_portals4_free
int
mca_btl_portals4_free(struct mca_btl_base_module_t* btl_base,
                      mca_btl_base_descriptor_t* des)
{
    struct mca_btl_portals4_module_t* portals4_btl = (struct mca_btl_portals4_module_t*) btl_base;
    mca_btl_portals4_frag_t* frag = (mca_btl_portals4_frag_t*) des;

    if (BTL_PORTALS4_FRAG_TYPE_EAGER == frag->type) {
        /* don't ever unlink eager frags */
        OPAL_BTL_PORTALS4_FRAG_RETURN_EAGER(portals4_btl, frag);

    } else if (BTL_PORTALS4_FRAG_TYPE_MAX == frag->type) {
        if (frag->me_h != PTL_INVALID_HANDLE) {
            frag->me_h = PTL_INVALID_HANDLE;
        }
        OPAL_BTL_PORTALS4_FRAG_RETURN_MAX(portals4_btl, frag);

    } else if (BTL_PORTALS4_FRAG_TYPE_USER == frag->type) {
        if (frag->me_h != PTL_INVALID_HANDLE) {
            frag->me_h = PTL_INVALID_HANDLE;
        }
        OPAL_THREAD_ADD32(&portals4_btl->portals_outstanding_ops, -1);
        OPAL_OUTPUT_VERBOSE((90, opal_btl_base_framework.framework_output,
                             "mca_btl_portals4_free: Decrementing portals_outstanding_ops=%d\n",
                             portals4_btl->portals_outstanding_ops));
        OPAL_BTL_PORTALS4_FRAG_RETURN_USER(portals4_btl, frag);

    } else {
        return OPAL_ERR_BAD_PARAM;
    }

    return OPAL_SUCCESS;
}
Example 10: mca_btl_portals4_del_procs
int
mca_btl_portals4_del_procs(struct mca_btl_base_module_t *btl,
                           size_t nprocs,
                           struct opal_proc_t **procs,
                           struct mca_btl_base_endpoint_t **btl_peer_data)
{
    struct mca_btl_portals4_module_t* portals4_btl = (struct mca_btl_portals4_module_t*) btl;
    size_t i;

    opal_output_verbose(50, opal_btl_base_framework.framework_output,
                        "mca_btl_portals4_del_procs: Removing %d procs (%d)", (int) nprocs,
                        (int) portals4_btl->portals_num_procs);

    /* See comment in btl_portals4_endpoint.h about why we look at the
       portals4 entry in proc_endpoints instead of the peer_data */
    for (i = 0 ; i < nprocs ; ++i) {
        free(btl_peer_data[i]);
        OPAL_THREAD_ADD32(&portals4_btl->portals_num_procs, -1);
    }

    if (0 == portals4_btl->portals_num_procs)
        mca_btl_portals4_free_module(portals4_btl);

    return OPAL_SUCCESS;
}
Example 11: ompi_osc_pt2pt_flush_active_frag
static int ompi_osc_pt2pt_flush_active_frag (ompi_osc_pt2pt_module_t *module, ompi_osc_pt2pt_peer_t *peer)
{
    ompi_osc_pt2pt_frag_t *active_frag = peer->active_frag;
    int ret = OMPI_SUCCESS;

    if (NULL == active_frag) {
        /* nothing to do */
        return OMPI_SUCCESS;
    }

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "osc pt2pt: flushing active fragment to target %d. pending: %d",
                         active_frag->target, active_frag->pending));

    if (opal_atomic_cmpset (&peer->active_frag, active_frag, NULL)) {
        if (0 != OPAL_THREAD_ADD32(&active_frag->pending, -1)) {
            /* communication going on while synchronizing; this is an rma usage bug */
            return OMPI_ERR_RMA_SYNC;
        }

        ompi_osc_signal_outgoing (module, active_frag->target, 1);
        ret = frag_send (module, active_frag);
    }

    return ret;
}
Example 12: mca_pml_base_bsend_init
/*
 * One time initialization at startup
 */
int mca_pml_base_bsend_init(bool thread_safe)
{
    size_t tmp;

    if(OPAL_THREAD_ADD32(&mca_pml_bsend_init, 1) > 1)
        return OMPI_SUCCESS;

    /* initialize static objects */
    OBJ_CONSTRUCT(&mca_pml_bsend_mutex, opal_mutex_t);
    OBJ_CONSTRUCT(&mca_pml_bsend_condition, opal_condition_t);

    /* lookup name of the allocator to use for buffered sends */
    if(NULL == (mca_pml_bsend_allocator_component = mca_allocator_component_lookup(ompi_pml_base_bsend_allocator_name))) {
        return OMPI_ERR_BUFFER;
    }

    /* determine page size */
    tmp = mca_pml_bsend_pagesz = sysconf(_SC_PAGESIZE);
    mca_pml_bsend_pagebits = 0;
    while( tmp != 0 ) {
        tmp >>= 1;
        mca_pml_bsend_pagebits++;
    }
    return OMPI_SUCCESS;
}
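Example 12 uses OPAL_THREAD_ADD32 as a reference-counted initialization guard: every caller increments mca_pml_bsend_init, but only the first one (for which the add returns 1) performs the one-time setup. A symmetric guard is typically used on teardown so that only the last caller releases the shared objects. The following sketch is a hypothetical illustration with generic names (my_module_init_count, my_module_init, my_module_fini), not the actual mca_pml_base_bsend code:

static volatile int32_t my_module_init_count = 0;   /* illustrative reference count */

int my_module_init(void)
{
    if (OPAL_THREAD_ADD32(&my_module_init_count, 1) > 1) {
        return 0;   /* someone else already did the one-time setup */
    }
    /* ... one-time construction of shared objects ... */
    return 0;
}

int my_module_fini(void)
{
    if (OPAL_THREAD_ADD32(&my_module_init_count, -1) > 0) {
        return 0;   /* other users remain; leave shared objects alone */
    }
    /* ... tear down shared objects; only the last caller gets here ... */
    return 0;
}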
Example 13: ompi_mtl_portals4_pending_list_progress
void
ompi_mtl_portals4_pending_list_progress()
{
    int ret, val;
    opal_list_item_t *item;
    ompi_mtl_portals4_pending_request_t *pending;

    while ((!ompi_mtl_portals4.flowctl.flowctl_active) &&
           (0 != opal_list_get_size(&ompi_mtl_portals4.flowctl.pending_sends))) {

        val = OPAL_THREAD_ADD32(&ompi_mtl_portals4.flowctl.send_slots, -1);
        if (val < 0) {
            OPAL_THREAD_ADD32(&ompi_mtl_portals4.flowctl.send_slots, 1);
            return;
        }

        item = opal_list_remove_first(&ompi_mtl_portals4.flowctl.pending_sends);
        if (OPAL_UNLIKELY(NULL == item)) {
            OPAL_THREAD_ADD32(&ompi_mtl_portals4.flowctl.send_slots, 1);
            return;
        }
        pending = (ompi_mtl_portals4_pending_request_t*) item;

        if (pending->length <= ompi_mtl_portals4.eager_limit) {
            ret = ompi_mtl_portals4_short_isend(pending->mode,
                                                pending->start,
                                                pending->length,
                                                pending->contextid,
                                                pending->tag,
                                                pending->my_rank,
                                                pending->ptl_proc,
                                                pending->ptl_request);
        } else {
            ret = ompi_mtl_portals4_long_isend(pending->start,
                                               pending->length,
                                               pending->contextid,
                                               pending->tag,
                                               pending->my_rank,
                                               pending->ptl_proc,
                                               pending->ptl_request);
        }

        if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
            opal_list_prepend(&ompi_mtl_portals4.flowctl.pending_sends,
                              &pending->super.super);
            OPAL_THREAD_ADD32(&ompi_mtl_portals4.flowctl.send_slots, 1);
        }
    }
}
Example 14: ompi_osc_pt2pt_sendreq_send_cb
static void
ompi_osc_pt2pt_sendreq_send_cb(ompi_osc_pt2pt_buffer_t *buffer)
{
    ompi_osc_pt2pt_sendreq_t *sendreq =
        (ompi_osc_pt2pt_sendreq_t*) buffer->cbdata;
    ompi_osc_pt2pt_send_header_t *header =
        (ompi_osc_pt2pt_send_header_t*) buffer->payload;

    /* have to look at header, and not the sendreq because in the case
       of get, it's possible that the sendreq has been freed already
       (if the remote side replies before we get our send completion
       callback) and already allocated to another request.  We don't
       wait for this completion before exiting a synchronization point
       in the case of get, as we really don't care when it completes -
       only when the data arrives. */
    if (OMPI_OSC_PT2PT_HDR_GET != header->hdr_base.hdr_type) {
#if !defined(WORDS_BIGENDIAN) && OMPI_ENABLE_HETEROGENEOUS_SUPPORT
        if (header->hdr_base.hdr_flags & OMPI_OSC_PT2PT_HDR_FLAG_NBO) {
            OMPI_OSC_PT2PT_SEND_HDR_NTOH(*header);
        }
#endif
        /* do we need to post a send? */
        if (header->hdr_msg_length != 0) {
            /* sendreq is done.  Mark it as so and get out of here */
            OPAL_THREAD_ADD32(&(sendreq->req_module->p2p_num_pending_out), -1);
            ompi_osc_pt2pt_sendreq_free(sendreq);
        } else {
            ompi_osc_pt2pt_longreq_t *longreq;
            ompi_osc_pt2pt_longreq_alloc(&longreq);

            longreq->req_comp_cb = ompi_osc_pt2pt_sendreq_send_long_cb;
            longreq->req_comp_cbdata = sendreq;
            opal_output_verbose(50, ompi_osc_base_output,
                                "%d starting long sendreq to %d (%d)",
                                sendreq->req_module->p2p_comm->c_my_rank,
                                sendreq->req_target_rank,
                                header->hdr_origin_tag);

            mca_pml.pml_isend(sendreq->req_origin_convertor.pBaseBuf,
                              sendreq->req_origin_convertor.count,
                              sendreq->req_origin_datatype,
                              sendreq->req_target_rank,
                              header->hdr_origin_tag,
                              MCA_PML_BASE_SEND_STANDARD,
                              sendreq->req_module->p2p_comm,
                              &(longreq->req_pml_req));

            /* put the send request in the waiting list */
            OPAL_THREAD_LOCK(&(sendreq->req_module->p2p_lock));
            opal_list_append(&(sendreq->req_module->p2p_long_msgs),
                             &(longreq->super.super));
            OPAL_THREAD_UNLOCK(&(sendreq->req_module->p2p_lock));
        }
    }

    /* release the buffer */
    OPAL_FREE_LIST_RETURN(&mca_osc_pt2pt_component.p2p_c_buffers,
                          &buffer->super);
}
Example 15: mca_btl_mvapi_endpoint_send_credits_hp
void mca_btl_mvapi_endpoint_send_credits_hp(
    mca_btl_mvapi_endpoint_t* endpoint)
{
    mca_btl_mvapi_module_t* mvapi_btl = endpoint->endpoint_btl;
    mca_btl_mvapi_frag_t* frag;
    int ret;

    MCA_BTL_IB_FRAG_ALLOC_EAGER(mvapi_btl, frag, ret);
    if(NULL == frag) {
        BTL_ERROR(("error allocating fragment"));
        return;
    }

    frag->base.des_cbfunc = mca_btl_mvapi_endpoint_credits_hp;
    frag->base.des_cbdata = NULL;
    frag->endpoint = endpoint;
    frag->hdr->tag = MCA_BTL_TAG_BTL;

    /* advertise the receive credits available on the high priority channel */
    frag->hdr->credits =
        (endpoint->rd_credits_hp > 0) ? endpoint->rd_credits_hp : 0;
    OPAL_THREAD_ADD32(&endpoint->rd_credits_hp, -frag->hdr->credits);
    frag->hdr->rdma_credits = endpoint->eager_rdma_local.credits;
    OPAL_THREAD_ADD32(&endpoint->eager_rdma_local.credits,
                      -frag->hdr->rdma_credits);

    ((mca_btl_mvapi_control_header_t *)frag->segment.seg_addr.pval)->type = MCA_BTL_MVAPI_CONTROL_NOOP;

    frag->desc.sr_desc.opcode = VAPI_SEND;
    frag->sg_entry.addr = (VAPI_virt_addr_t) (MT_virt_addr_t) frag->hdr;
    frag->sg_entry.len = sizeof(mca_btl_mvapi_header_t) +
        sizeof(mca_btl_mvapi_control_header_t);

    if(sizeof(mca_btl_mvapi_header_t) <= mvapi_btl->ib_inline_max) {
        ret = EVAPI_post_inline_sr(mvapi_btl->nic, endpoint->lcl_qp_hndl_hp, &frag->desc.sr_desc);
    } else {
        ret = VAPI_post_sr(mvapi_btl->nic, endpoint->lcl_qp_hndl_hp, &frag->desc.sr_desc);
    }

    if(ret != VAPI_SUCCESS) {
        OPAL_THREAD_ADD32(&endpoint->sd_credits_lp, -1);
        OPAL_THREAD_ADD32(&endpoint->rd_credits_lp, frag->hdr->credits);
        MCA_BTL_IB_FRAG_RETURN(mvapi_btl, frag);
        BTL_ERROR(("error posting send request errno %d says %s", errno, strerror(errno)));
        return;
    }
}