This article collects typical usage examples of the C/C++ function ib_destroy_cq, the InfiniBand/RDMA verb that destroys a completion queue (CQ). If you are wondering what ib_destroy_cq does, how to call it, or where it is used, the curated examples below should help.
The following shows 15 code examples of ib_destroy_cq, sorted by popularity by default.
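Before the examples, here is a minimal sketch of the lifecycle these snippets all assume: every successful ib_create_cq() is eventually balanced by an ib_destroy_cq(). The function and handler names are hypothetical, and the six-argument ib_create_cq() form matches the older Linux kernels most of these examples are drawn from (newer kernels take a struct ib_cq_init_attr instead):

#include <rdma/ib_verbs.h>

/* Hypothetical no-op handlers, only here to keep the sketch self-contained. */
static void my_comp_handler(struct ib_cq *cq, void *cq_context) { }
static void my_event_handler(struct ib_event *event, void *cq_context) { }

static int example_cq_lifecycle(struct ib_device *ibdev)
{
	struct ib_cq *cq;
	int rc;

	/* ib_create_cq() returns an ERR_PTR() value on failure, never NULL. */
	cq = ib_create_cq(ibdev, my_comp_handler, my_event_handler,
			  NULL /* cq_context */, 256 /* cqe */, 0 /* comp_vector */);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* ... attach the CQ to a QP, post work requests, poll completions ... */

	rc = ib_destroy_cq(cq);	/* fails, e.g. -EBUSY, if a QP still uses the CQ */
	if (rc)
		pr_err("ib_destroy_cq failed: %d\n", rc);
	return rc;
}

The recurring patterns in the examples (destroy QPs before CQs, drain pending completions first, log rather than act on a failed destroy) all follow from this pairing.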
Example 1: roq_eth_cleanup_ofa
static void
roq_eth_cleanup_ofa(struct roq_eth_priv *vdev)
{
int i;
if (vdev->send_cq)
ib_destroy_cq(vdev->send_cq);
if (vdev->recv_cq && vdev->recv_cq != vdev->send_cq)
ib_destroy_cq(vdev->recv_cq);
if (vdev->qps) {
for (i = 0; i < vdev->part_size; i++)
if (vdev->qps[i])
ib_destroy_qp(vdev->qps[i]);
kfree(vdev->qps);
}
if (vdev->qps_rem) {
for (i = 0; i < vdev->rem_part_size; i++)
if (vdev->qps_rem[i])
ib_destroy_qp(vdev->qps_rem[i]);
kfree(vdev->qps_rem);
}
if (vdev->kpd)
ib_dealloc_pd(vdev->kpd);
vdev->qps = vdev->qps_rem = NULL;
vdev->recv_cq = vdev->send_cq = NULL;
vdev->kpd = NULL;
return;
}
Example 2: rpcrdma_ep_destroy
/*
* rpcrdma_ep_destroy
*
* Disconnect and destroy endpoint. After this, the only
* valid operations on the ep are to free it (if dynamically
* allocated) or re-create it.
*/
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
int rc;
dprintk("RPC: %s: entering, connected is %d\n",
__func__, ep->rep_connected);
cancel_delayed_work_sync(&ep->rep_connect_worker);
if (ia->ri_id->qp) {
rpcrdma_ep_disconnect(ep, ia);
rdma_destroy_qp(ia->ri_id);
ia->ri_id->qp = NULL;
}
rpcrdma_free_regbuf(ia, ep->rep_padbuf);
rpcrdma_clean_cq(ep->rep_attr.recv_cq);
rc = ib_destroy_cq(ep->rep_attr.recv_cq);
if (rc)
dprintk("RPC: %s: ib_destroy_cq returned %i\n",
__func__, rc);
rpcrdma_clean_cq(ep->rep_attr.send_cq);
rc = ib_destroy_cq(ep->rep_attr.send_cq);
if (rc)
dprintk("RPC: %s: ib_destroy_cq returned %i\n",
__func__, rc);
}
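Two details of Example 2 are worth copying: rpcrdma_clean_cq() drains any completions still pending on a CQ before it is destroyed, and the return code of ib_destroy_cq() is only logged, since there is no sensible recovery on a teardown path.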
Example 3: ib_destroy_cq
void IBCompletionQueue::close()
{
if( _cqR )
{
ib_destroy_cq( _cqR, 0 );
_cqR = 0;
}
if( _cqW )
{
ib_destroy_cq( _cqW, 0 );
_cqW = 0;
}
if ( _cqWaitobjR )
{
CloseHandle( _cqWaitobjR );
_cqWaitobjR = 0;
}
if ( _cqWaitobjW )
{
CloseHandle( _cqWaitobjW );
_cqWaitobjW = 0;
}
}
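Note that Example 3 is not the Linux kernel verb: it is the Windows IBAL (WinOF) API, where ib_destroy_cq() takes the CQ handle plus an optional asynchronous destroy callback, passed as 0 here. Roughly, and as an assumption about the IBAL prototype rather than a quote from its headers:

ib_api_status_t
ib_destroy_cq(
        IN const ib_cq_handle_t         h_cq,
        IN const ib_pfn_destroy_cb_t    pfn_destroy_cb OPTIONAL );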
Example 4: iser_create_device_ib_res
/**
* iser_create_device_ib_res - creates the Protection Domain (PD),
* Completion Queues (CQs) and DMA Memory Region (DMA MR) for the device
* associated with the adapter.
*
* returns 0 on success, -1 on failure
*/
static int iser_create_device_ib_res(struct iser_device *device)
{
device->pd = ib_alloc_pd(device->ib_device);
if (IS_ERR(device->pd))
goto pd_err;
device->rx_cq = ib_create_cq(device->ib_device,
iser_cq_callback,
iser_cq_event_callback,
(void *)device,
ISER_MAX_RX_CQ_LEN, 0);
if (IS_ERR(device->rx_cq))
goto rx_cq_err;
device->tx_cq = ib_create_cq(device->ib_device,
NULL, iser_cq_event_callback,
(void *)device,
ISER_MAX_TX_CQ_LEN, 0);
if (IS_ERR(device->tx_cq))
goto tx_cq_err;
if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
goto cq_arm_err;
tasklet_init(&device->cq_tasklet,
iser_cq_tasklet_fn,
(unsigned long)device);
device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ);
if (IS_ERR(device->mr))
goto dma_mr_err;
INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
iser_event_handler);
if (ib_register_event_handler(&device->event_handler))
goto handler_err;
return 0;
handler_err:
ib_dereg_mr(device->mr);
dma_mr_err:
tasklet_kill(&device->cq_tasklet);
cq_arm_err:
ib_destroy_cq(device->tx_cq);
tx_cq_err:
ib_destroy_cq(device->rx_cq);
rx_cq_err:
ib_dealloc_pd(device->pd);
pd_err:
iser_err("failed to allocate an IB resource\n");
return -1;
}
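Example 4 shows the canonical goto-unwind pattern for these resources: each error label releases exactly what was acquired before the failing step, in reverse order of creation, so the ib_destroy_cq() calls are only ever reached for CQs that were actually created.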
Example 5: sdp_tx_ring_destroy
void
sdp_tx_ring_destroy(struct sdp_sock *ssk)
{
sdp_dbg(ssk->socket, "tx ring destroy\n");
SDP_WLOCK(ssk);
callout_stop(&ssk->tx_ring.timer);
callout_stop(&ssk->nagle_timer);
SDP_WUNLOCK(ssk);
callout_drain(&ssk->tx_ring.timer);
callout_drain(&ssk->nagle_timer);
if (ssk->tx_ring.buffer) {
sdp_tx_ring_purge(ssk);
kfree(ssk->tx_ring.buffer);
ssk->tx_ring.buffer = NULL;
}
if (ssk->tx_ring.cq) {
if (ib_destroy_cq(ssk->tx_ring.cq)) {
sdp_warn(ssk->socket, "destroy cq(%p) failed\n",
ssk->tx_ring.cq);
} else {
ssk->tx_ring.cq = NULL;
}
}
WARN_ON(ring_head(ssk->tx_ring) != ring_tail(ssk->tx_ring));
}
Example 6: sdp_tx_ring_destroy
void sdp_tx_ring_destroy(struct sdp_sock *ssk)
{
del_timer_sync(&ssk->tx_ring.timer);
if (ssk->nagle_timer.function)
del_timer_sync(&ssk->nagle_timer);
if (ssk->tx_ring.buffer) {
sdp_tx_ring_purge(ssk);
kfree(ssk->tx_ring.buffer);
ssk->tx_ring.buffer = NULL;
}
if (ssk->tx_ring.cq) {
if (ib_destroy_cq(ssk->tx_ring.cq)) {
sdp_warn(sk_ssk(ssk), "destroy cq(%p) failed\n",
ssk->tx_ring.cq);
} else {
ssk->tx_ring.cq = NULL;
}
}
tasklet_kill(&ssk->tx_ring.tasklet);
/* tx_cq is destroyed, so no more tx_irq, so no one will schedule this
* tasklet. */
SDP_WARN_ON(ring_head(ssk->tx_ring) != ring_tail(ssk->tx_ring));
}
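Examples 5 and 6 are the FreeBSD and Linux variants of the same SDP teardown. Both treat a failed ib_destroy_cq() as non-fatal: they warn, and only reset ssk->tx_ring.cq to NULL when the destroy actually succeeded.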
Example 7: rpcrdma_ep_destroy
/*
* rpcrdma_ep_destroy
*
* Disconnect and destroy endpoint. After this, the only
* valid operations on the ep are to free it (if dynamically
* allocated) or re-create it.
*
* If this function fails, the caller's error handling must take care
* not to leak the endpoint.
*/
int
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
int rc;
dprintk("RPC: %s: entering, connected is %d\n",
__func__, ep->rep_connected);
if (ia->ri_id->qp) {
rc = rpcrdma_ep_disconnect(ep, ia);
if (rc)
dprintk("RPC: %s: rpcrdma_ep_disconnect"
" returned %i\n", __func__, rc);
rdma_destroy_qp(ia->ri_id);
ia->ri_id->qp = NULL;
}
/* padding - could be done in rpcrdma_buffer_destroy... */
if (ep->rep_pad_mr) {
rpcrdma_deregister_internal(ia, ep->rep_pad_mr, &ep->rep_pad);
ep->rep_pad_mr = NULL;
}
rpcrdma_clean_cq(ep->rep_cq);
rc = ib_destroy_cq(ep->rep_cq);
if (rc)
dprintk("RPC: %s: ib_destroy_cq returned %i\n",
__func__, rc);
return rc;
}
Example 8: sdp_rx_ring_destroy
void
sdp_rx_ring_destroy(struct sdp_sock *ssk)
{
cancel_work_sync(&ssk->rx_comp_work);
rx_ring_destroy_lock(&ssk->rx_ring);
if (ssk->rx_ring.buffer) {
sdp_rx_ring_purge(ssk);
kfree(ssk->rx_ring.buffer);
ssk->rx_ring.buffer = NULL;
}
if (ssk->rx_ring.cq) {
if (ib_destroy_cq(ssk->rx_ring.cq)) {
sdp_warn(ssk->socket, "destroy cq(%p) failed\n",
ssk->rx_ring.cq);
} else {
ssk->rx_ring.cq = NULL;
}
}
WARN_ON(ring_head(ssk->rx_ring) != ring_tail(ssk->rx_ring));
}
Example 9: iser_free_device_ib_res
/**
* iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
* CQs and PD created with the device associated with the adapter.
*/
static void iser_free_device_ib_res(struct iser_device *device)
{
BUG_ON(device->mr == NULL);
tasklet_kill(&device->cq_tasklet);
(void)ib_unregister_event_handler(&device->event_handler);
(void)ib_dereg_mr(device->mr);
(void)ib_destroy_cq(device->tx_cq);
(void)ib_destroy_cq(device->rx_cq);
(void)ib_dealloc_pd(device->pd);
device->mr = NULL;
device->tx_cq = NULL;
device->rx_cq = NULL;
device->pd = NULL;
}
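The (void) casts in Example 9 make it explicit that the destroy/dereg/dealloc return codes are deliberately ignored: this is the unconditional release path, and the BUG_ON() above documents the invariant that the resources must exist by the time it runs.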
示例10: verbs_remove_device
static void verbs_remove_device (struct ib_device *dev)
{
printk (KERN_INFO "IB remove device called. Name = %s\n", dev->name);
if (ah)
ib_destroy_ah (ah);
if (qp)
ib_destroy_qp (qp);
if (send_cq)
ib_destroy_cq (send_cq);
if (recv_cq)
ib_destroy_cq (recv_cq);
if (mr)
ib_dereg_mr (mr);
if (pd)
ib_dealloc_pd (pd);
}
Example 11: get_port_caps
static int get_port_caps(struct mlx5_ib_dev *dev)
{
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
int err = -ENOMEM;
int port;
struct ib_udata uhw = {.inlen = 0, .outlen = 0};
pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
if (!pprops)
goto out;
dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
if (!dprops)
goto out;
err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
if (err) {
mlx5_ib_warn(dev, "query_device failed %d\n", err);
goto out;
}
for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
if (err) {
mlx5_ib_warn(dev, "query_port %d failed %d\n",
port, err);
break;
}
dev->mdev->port_caps[port - 1].pkey_table_len =
dprops->max_pkeys;
dev->mdev->port_caps[port - 1].gid_table_len =
pprops->gid_tbl_len;
mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
dprops->max_pkeys, pprops->gid_tbl_len);
}
out:
kfree(pprops);
kfree(dprops);
return err;
}
static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
int err;
err = mlx5_mr_cache_cleanup(dev);
if (err)
mlx5_ib_warn(dev, "mr cache cleanup failed\n");
mlx5_ib_destroy_qp(dev->umrc.qp);
ib_destroy_cq(dev->umrc.cq);
ib_dealloc_pd(dev->umrc.pd);
}
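The teardown order in destroy_umrc_res() is not arbitrary: the UMR QP must be destroyed before the CQ it is attached to, and the PD can only be deallocated once nothing references it, mirroring the reverse-of-creation rule seen in Example 4.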
Example 12: ib_uverbs_destroy_cq
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_destroy_cq cmd;
struct ib_uverbs_destroy_cq_resp resp;
struct ib_cq *cq;
struct ib_ucq_object *uobj;
struct ib_uverbs_event_file *ev_file;
u64 user_handle;
int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
memset(&resp, 0, sizeof resp);
mutex_lock(&ib_uverbs_idr_mutex);
cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
if (!cq || cq->uobject->context != file->ucontext)
goto out;
user_handle = cq->uobject->user_handle;
uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
ev_file = cq->cq_context;
ret = ib_destroy_cq(cq);
if (ret)
goto out;
idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
mutex_lock(&file->mutex);
list_del(&uobj->uobject.list);
mutex_unlock(&file->mutex);
ib_uverbs_release_ucq(file, ev_file, uobj);
resp.comp_events_reported = uobj->comp_events_reported;
resp.async_events_reported = uobj->async_events_reported;
kfree(uobj);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
ret = -EFAULT;
out:
mutex_unlock(&ib_uverbs_idr_mutex);
return ret ? ret : in_len;
}
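In Example 12, note that the uobject bookkeeping (idr_remove(), list_del(), kfree()) only happens after ib_destroy_cq() succeeds; on failure the user-space handle stays valid and the error is returned to the caller.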
Example 13: ib_cmrc_shutdown
/**
* Shut down CMRC connection gracefully
*
* @v cmrc Communication-Managed Reliable Connection
*
* The Infiniband data structures are not reference-counted or
* guarded. It is therefore unsafe to shut them down while we may be
* in the middle of a callback from the Infiniband stack (e.g. in a
* receive completion handler).
*
* This shutdown process will run some time after the call to
* ib_cmrc_close(), after control has returned out of the Infiniband
* core, and will shut down the Infiniband interfaces cleanly.
*
* The shutdown process holds an implicit reference on the CMRC
* connection, ensuring that the structure is not freed before the
* shutdown process has run.
*/
static void ib_cmrc_shutdown ( struct ib_cmrc_connection *cmrc ) {
DBGC ( cmrc, "CMRC %p shutting down\n", cmrc );
/* Shut down Infiniband interface */
ib_destroy_conn ( cmrc->ibdev, cmrc->qp, cmrc->conn );
ib_destroy_qp ( cmrc->ibdev, cmrc->qp );
ib_destroy_cq ( cmrc->ibdev, cmrc->cq );
ib_close ( cmrc->ibdev );
/* Drop the remaining reference */
ref_put ( &cmrc->refcnt );
}
Example 14: isert_device_release
static void isert_device_release(struct isert_device *isert_dev)
{
int err, i;
TRACE_ENTRY();
lockdep_assert_held(&dev_list_mutex);
isert_dev_list_remove(isert_dev); /* remove from global list */
for (i = 0; i < isert_dev->num_cqs; ++i) {
struct isert_cq *cq_desc = &isert_dev->cq_desc[i];
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)
/*
* cancel_work_sync() was introduced in 2.6.22. We can
* only wait until all scheduled work is done.
*/
flush_workqueue(cq_desc->cq_workqueue);
#else
cancel_work_sync(&cq_desc->cq_comp_work);
#endif
err = ib_destroy_cq(cq_desc->cq);
if (unlikely(err))
pr_err("Failed to destroy cq, err:%d\n", err);
destroy_workqueue(cq_desc->cq_workqueue);
}
err = ib_dereg_mr(isert_dev->mr);
if (unlikely(err))
pr_err("Failed to destroy mr, err:%d\n", err);
err = ib_dealloc_pd(isert_dev->pd);
if (unlikely(err))
pr_err("Failed to destroy pd, err:%d\n", err);
vfree(isert_dev->cq_desc);
isert_dev->cq_desc = NULL;
kfree(isert_dev->cq_qps);
isert_dev->cq_qps = NULL;
kfree(isert_dev);
TRACE_EXIT();
}
Example 15: rdma_destroy_trans
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
if (!rdma)
return;
if (rdma->qp && !IS_ERR(rdma->qp))
ib_destroy_qp(rdma->qp);
if (rdma->pd && !IS_ERR(rdma->pd))
ib_dealloc_pd(rdma->pd);
if (rdma->cq && !IS_ERR(rdma->cq))
ib_destroy_cq(rdma->cq);
if (rdma->cm_id && !IS_ERR(rdma->cm_id))
rdma_destroy_id(rdma->cm_id);
kfree(rdma);
}
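Example 15 guards every release call twice, with a NULL test and an IS_ERR() test: during connection setup these fields may legitimately hold either NULL (the object was never created) or an ERR_PTR() cookie (creation failed), and neither value may be handed to the destroy verbs.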