本文整理汇总了C++中ib_dereg_mr函数的典型用法代码示例。如果您正苦于以下问题：C++ ib_dereg_mr函数的具体用法？C++ ib_dereg_mr怎么用？C++ ib_dereg_mr使用的例子？那么恭喜您，这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了ib_dereg_mr函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: krping_free_buffers
/*
 * krping_free_buffers - release every buffer and MR owned by a krping
 * control block.
 *
 * The dma_unmap_single() calls are compiled out (#if 0) in this port;
 * the data buffers are returned with contigfree() instead.  Memory
 * regions are deregistered according to how they were registered: one
 * DMA MR when cb->use_dmamr is set, otherwise individual send/recv/rdma
 * MRs (plus start_mr on the non-server side).
 */
static void krping_free_buffers(struct krping_cb *cb)
{
	DEBUG_LOG(PFX "krping_free_buffers called on cb %p\n", cb);

#if 0
	/* Dead code kept from the original Linux driver: DMA unmapping. */
	dma_unmap_single(cb->pd->device->dma_device,
			 pci_unmap_addr(cb, recv_mapping),
			 sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
	dma_unmap_single(cb->pd->device->dma_device,
			 pci_unmap_addr(cb, send_mapping),
			 sizeof(cb->send_buf), DMA_BIDIRECTIONAL);
	dma_unmap_single(cb->pd->device->dma_device,
			 pci_unmap_addr(cb, rdma_mapping),
			 cb->size, DMA_BIDIRECTIONAL);
#endif
	contigfree(cb->rdma_buf, cb->size, M_DEVBUF);

	/*
	 * start_buf exists for non-servers and for servers running the
	 * wlat/rlat/bw test modes — mirror the allocation condition here.
	 */
	if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
#if 0
		dma_unmap_single(cb->pd->device->dma_device,
				 pci_unmap_addr(cb, start_mapping),
				 cb->size, DMA_BIDIRECTIONAL);
#endif
		contigfree(cb->start_buf, cb->size, M_DEVBUF);
	}

	if (cb->use_dmamr)
		ib_dereg_mr(cb->dma_mr);
	else {
		ib_dereg_mr(cb->send_mr);
		ib_dereg_mr(cb->recv_mr);
		ib_dereg_mr(cb->rdma_mr);
		/* start_mr is only registered on the non-server side. */
		if (!cb->server)
			ib_dereg_mr(cb->start_mr);
	}
}
示例2: __frwr_reset_mr
/*
 * Recover a flushed FRMR by deregistering the stale MR and allocating a
 * fresh one in its place.  Returns 0 on success or a negative errno; on
 * failure the MW is left without a usable MR ("orphaned").
 */
static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	struct rpcrdma_frmr *frmr = &r->frmr;
	int status;

	status = ib_dereg_mr(frmr->fr_mr);
	if (status) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			status, r);
		return status;
	}

	frmr->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
				  ia->ri_max_frmr_depth);
	if (IS_ERR(frmr->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(frmr->fr_mr), r);
		return PTR_ERR(frmr->fr_mr);
	}

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, frmr);
	frmr->fr_state = FRMR_IS_INVALID;
	return 0;
}
示例3: rpcrdma_ep_destroy
/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC: %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	/* Stop the deferred connect worker before tearing anything down. */
	cancel_delayed_work_sync(&ep->rep_connect_worker);

	/* Disconnect and destroy the QP if one is still attached to the id. */
	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	/* CQs go after the QP that fed them. */
	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);

	/* Deregister the DMA MR if one was set up; failure is only logged. */
	if (ia->ri_dma_mr) {
		rc = ib_dereg_mr(ia->ri_dma_mr);
		dprintk("RPC: %s: ib_dereg_mr returned %i\n",
			__func__, rc);
	}
}
示例4: kmalloc
/*
 * Allocate a fast-register MR tracking structure for a server transport.
 *
 * Returns the new svc_rdma_fastreg_mr on success, or an ERR_PTR on
 * failure.  Fix over the previous version: an ib_alloc_mr() failure now
 * propagates the provider's actual error code instead of collapsing
 * every failure path to -ENOMEM (kmalloc/kcalloc failures still are).
 */
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct scatterlist *sg;
	struct svc_rdma_fastreg_mr *frmr;
	u32 num_sg;
	int rc = -ENOMEM;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	/* Cap the SG depth at what both the stack and the device support. */
	num_sg = min_t(u32, RPCSVC_MAXPAGES, xprt->sc_frmr_pg_list_len);
	mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg);
	if (IS_ERR(mr)) {
		rc = PTR_ERR(mr);
		goto err_free_frmr;
	}

	sg = kcalloc(RPCSVC_MAXPAGES, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto err_free_mr;
	sg_init_table(sg, RPCSVC_MAXPAGES);

	frmr->mr = mr;
	frmr->sg = sg;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

err_free_mr:
	ib_dereg_mr(mr);
err_free_frmr:
	kfree(frmr);
err:
	return ERR_PTR(rc);
}
示例5: __frwr_mr_reset
/*
 * Replace a defunct FRWR MR: deregister the old MR and allocate a new
 * one of the same type and depth.  Returns 0 on success or a negative
 * errno; on failure the rpcrdma_mr is left "orphaned" without an MR.
 */
static int
__frwr_mr_reset(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	struct rpcrdma_frwr *frwr = &mr->frwr;
	int status = ib_dereg_mr(frwr->fr_mr);

	if (status) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			status, mr);
		return status;
	}

	frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
				  ia->ri_max_frwr_depth);
	if (IS_ERR(frwr->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(frwr->fr_mr), mr);
		return PTR_ERR(frwr->fr_mr);
	}

	dprintk("RPC: %s: recovered FRWR %p\n", __func__, frwr);
	frwr->fr_state = FRWR_IS_INVALID;
	return 0;
}
示例6: kmalloc
/*
 * Allocate a fast-register MR tracking structure (legacy fast-reg API).
 * Returns the populated svc_rdma_fastreg_mr, or ERR_PTR(-ENOMEM) if any
 * allocation step fails; partial allocations are unwound before return.
 */
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;
	struct ib_fast_reg_page_list *pages;
	struct ib_mr *map;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto out_fail;

	map = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
	if (IS_ERR(map))
		goto out_free_frmr;

	pages = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
					    RPCSVC_MAXPAGES);
	if (IS_ERR(pages))
		goto out_free_mr;

	frmr->mr = map;
	frmr->page_list = pages;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

out_free_mr:
	ib_dereg_mr(map);
out_free_frmr:
	kfree(frmr);
out_fail:
	return ERR_PTR(-ENOMEM);
}
示例7: frwr_op_init_mr
/*
 * Set up a fresh FRMR and scatterlist for an rpcrdma_mw.  Returns 0 on
 * success; on failure returns a negative errno with any partially
 * allocated resources released.
 */
static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	struct rpcrdma_frmr *f = &r->frmr;
	unsigned int depth = ia->ri_max_frmr_depth;
	int err;

	f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(f->fr_mr)) {
		err = PTR_ERR(f->fr_mr);
		dprintk("RPC: %s: ib_alloc_mr status %i\n",
			__func__, err);
		return err;
	}

	r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
	if (!r->mw_sg) {
		dprintk("RPC: %s: sg allocation failure\n",
			__func__);
		/* Undo the MR allocation before bailing out. */
		ib_dereg_mr(f->fr_mr);
		return -ENOMEM;
	}

	sg_init_table(r->mw_sg, depth);
	init_completion(&f->fr_linv_done);
	return 0;
}
示例8: ib_sock_mem_fini_common
static void ib_sock_mem_fini_common(struct IB_SOCK *sock)
{
if (sock->is_mem.ism_mr)
ib_dereg_mr(sock->is_mem.ism_mr);
if (sock->is_mem.ism_pd)
ib_dealloc_pd(sock->is_mem.ism_pd);
}
示例9: iser_create_device_ib_res
/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 *
 * Resources are acquired in order (PD, RX CQ, TX CQ, tasklet, DMA MR,
 * event handler); the goto ladder at the bottom unwinds them in reverse
 * on any failure.
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	device->rx_cq = ib_create_cq(device->ib_device,
				     iser_cq_callback,
				     iser_cq_event_callback,
				     (void *)device,
				     ISER_MAX_RX_CQ_LEN, 0);
	if (IS_ERR(device->rx_cq))
		goto rx_cq_err;

	device->tx_cq = ib_create_cq(device->ib_device,
				     NULL, iser_cq_event_callback,
				     (void *)device,
				     ISER_MAX_TX_CQ_LEN, 0);
	if (IS_ERR(device->tx_cq))
		goto tx_cq_err;

	/* Arm the RX CQ so the first completion raises an interrupt. */
	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
		goto cq_arm_err;

	tasklet_init(&device->cq_tasklet,
		     iser_cq_tasklet_fn,
		     (unsigned long)device);

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
			      iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

	/* Error unwind: release everything acquired so far, newest first. */
handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	tasklet_kill(&device->cq_tasklet);
cq_arm_err:
	ib_destroy_cq(device->tx_cq);
tx_cq_err:
	ib_destroy_cq(device->rx_cq);
rx_cq_err:
	ib_dealloc_pd(device->pd);
pd_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}
示例10: ib_dereg_mr
void IBMemBlock::close()
{
if ( _memoryRegion )
{
ib_api_status_t status = ib_dereg_mr( _memoryRegion );
_memoryRegion = 0;
}
_localKey = 0;
_remoteKey = 0;
}
示例11: fi_ib_mr_close
/*
 * fid close hook for a memory-registration descriptor: deregister the
 * underlying MR.  Returns ib_dereg_mr()'s status (0 on success); a
 * failure is logged before being passed back to the caller.
 */
static int fi_ib_mr_close(struct fid *fid)
{
	struct fi_ib_mem_desc *desc = (struct fi_ib_mem_desc *) fid;
	int rc;

	print_trace("in\n");

	rc = ib_dereg_mr(desc->mr);
	if (rc)
		print_err("ib_dereg_mr returned %d\n", rc);

	return rc;
}
示例12: rdma_dealloc_frmr_q
/*
 * Drain the transport's free fast-register MR queue, releasing each
 * entry's MR, page list, and tracking structure.
 */
static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	while (!list_empty(&xprt->sc_frmr_q)) {
		struct svc_rdma_fastreg_mr *entry =
			list_entry(xprt->sc_frmr_q.next,
				   struct svc_rdma_fastreg_mr, frmr_list);

		list_del_init(&entry->frmr_list);
		ib_dereg_mr(entry->mr);
		ib_free_fast_reg_page_list(entry->page_list);
		kfree(entry);
	}
}
示例13: frwr_op_release_mr
/*
 * Final teardown of an rpcrdma_mw: deregister its FRMR and free the
 * scatterlist and the MW itself.  A deregistration failure is logged
 * but does not stop the release.
 */
static void
frwr_op_release_mr(struct rpcrdma_mw *r)
{
	int status;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&r->mw_list))
		list_del(&r->mw_list);

	status = ib_dereg_mr(r->frmr.fr_mr);
	if (status)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       r, status);

	kfree(r->mw_sg);
	kfree(r);
}
示例14: isert_device_release
/*
 * Release all IB resources owned by an isert_device: per-CQ workqueues
 * and CQs, then the device-wide MR and PD, then the bookkeeping arrays
 * and the device structure itself.  Caller must hold dev_list_mutex.
 */
static void isert_device_release(struct isert_device *isert_dev)
{
	int err, i;

	TRACE_ENTRY();

	lockdep_assert_held(&dev_list_mutex);

	isert_dev_list_remove(isert_dev); /* remove from global list */

	for (i = 0; i < isert_dev->num_cqs; ++i) {
		struct isert_cq *cq_desc = &isert_dev->cq_desc[i];

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)
		/*
		 * cancel_work_sync() was introduced in 2.6.22. We can
		 * only wait until all scheduled work is done.
		 */
		flush_workqueue(cq_desc->cq_workqueue);
#else
		cancel_work_sync(&cq_desc->cq_comp_work);
#endif
		/* Completion work is quiesced; now the CQ can go. */
		err = ib_destroy_cq(cq_desc->cq);
		if (unlikely(err))
			pr_err("Failed to destroy cq, err:%d\n", err);

		destroy_workqueue(cq_desc->cq_workqueue);
	}

	/* MR before the PD it was registered against. */
	err = ib_dereg_mr(isert_dev->mr);
	if (unlikely(err))
		pr_err("Failed to destroy mr, err:%d\n", err);
	err = ib_dealloc_pd(isert_dev->pd);
	if (unlikely(err))
		pr_err("Failed to destroy pd, err:%d\n", err);

	/* NULL the freed pointers to guard against stale use. */
	vfree(isert_dev->cq_desc);
	isert_dev->cq_desc = NULL;

	kfree(isert_dev->cq_qps);
	isert_dev->cq_qps = NULL;

	kfree(isert_dev);

	TRACE_EXIT();
}
示例15: verbs_remove_device
/*
 * Device-removal callback: tear down the module's IB resources.
 *
 * NOTE(review): ah/qp/send_cq/recv_cq/mr/pd appear to be file-scope
 * globals defined outside this excerpt — confirm.  Teardown follows
 * dependency order (AH, QP, CQs, MR, PD last); the freed pointers are
 * not reset to NULL here, presumably because the module is going away.
 */
static void verbs_remove_device (struct ib_device *dev)
{
	printk (KERN_INFO "IB remove device called. Name = %s\n", dev->name);
	if (ah)
		ib_destroy_ah (ah);
	if (qp)
		ib_destroy_qp (qp);
	if (send_cq)
		ib_destroy_cq (send_cq);
	if (recv_cq)
		ib_destroy_cq (recv_cq);
	if (mr)
		ib_dereg_mr (mr);
	if (pd)
		ib_dealloc_pd (pd);
}