本文整理汇总了C++中ib_req_notify_cq函数的典型用法代码示例。如果您正苦于以下问题:C++ ib_req_notify_cq函数的具体用法?C++ ib_req_notify_cq怎么用?C++ ib_req_notify_cq使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了ib_req_notify_cq函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: rpcrdma_recvcq_upcall
/*
 * Handle receive completions.
 *
 * It is reentrant but processes single events in order to maintain
 * ordering of receives to keep server credits.
 *
 * It is the responsibility of the scheduled tasklet to return
 * recv buffers to the pool. NOTE: this affects synchronization of
 * connection shutdown. That is, the structures required for
 * the completion of the reply handler must remain intact until
 * all memory has been reclaimed.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	/* Drain whatever completions were queued before this upcall fired. */
	rc = rpcrdma_recvcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	/*
	 * Re-arm the CQ.  With IB_CQ_REPORT_MISSED_EVENTS, a return of 0
	 * means the CQ was empty when armed, so a new upcall will cover
	 * any later completion and we are done here.
	 */
	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	/*
	 * rc > 0: completions slipped in between the poll above and the
	 * re-arm; drain them now so none are stranded until the next event.
	 */
	rpcrdma_recvcq_poll(cq, ep);
}
示例2: iser_cq_tasklet_fn
/*
 * Tasklet body that drains the device CQ one work completion at a time,
 * dispatching each to the receive, send, or error path, then re-arms
 * the CQ for the next interrupt.
 */
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_device *device = (struct iser_device *)data;
	struct ib_cq *cq = device->cq;
	struct ib_wc wc;
	struct iser_desc *desc;
	unsigned long xfer_len;

	/* One completion per iteration; wr_id carries the descriptor. */
	while (ib_poll_cq(cq, 1, &wc) == 1) {
		desc = (struct iser_desc *) (unsigned long) wc.wr_id;
		BUG_ON(desc == NULL);

		if (wc.status == IB_WC_SUCCESS) {
			if (desc->type == ISCSI_RX) {
				xfer_len = (unsigned long)wc.byte_len;
				iser_rcv_completion(desc, xfer_len);
			} else /* type == ISCSI_TX_CONTROL/SCSI_CMD/DOUT */
				iser_snd_completion(desc);
		} else {
			iser_err("comp w. error op %d status %d\n",desc->type,wc.status);
			iser_handle_comp_error(desc);
		}
	}
	/* #warning "it is assumed here that arming CQ only once its empty" *
	 * " would not cause interrupts to be missed" */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}
示例3: rds_ib_tasklet_fn_recv
/*
 * Receive-side CQ tasklet: drain the recv CQ, re-arm it, then drain
 * again to catch completions that raced with the re-arm.  Accumulated
 * ACK state is applied once after polling completes.
 */
static void rds_ib_tasklet_fn_recv(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
	struct rds_ib_ack_state state;

	/* Device went away underneath us; drop the connection but still
	 * run the poll loop below to reap outstanding completions. */
	if (!rds_ibdev)
		rds_conn_drop(conn);

	rds_ib_stats_inc(s_ib_tasklet_call);

	memset(&state, 0, sizeof(state));

	/* poll / arm / poll: the second pass closes the race window
	 * between emptying the CQ and requesting notification. */
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}

	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);
}
示例4: comp_handler_send
/*
 * Send-CQ completion upcall: drain all completions, decrementing the
 * context's outstanding-request count for each success, and loop until
 * the CQ can be re-armed with no missed events
 * (ib_req_notify_cq with IB_CQ_REPORT_MISSED_EVENTS returns 0).
 */
static void comp_handler_send(struct ib_cq* cq, void* cq_context)
{
	struct ib_wc wc;
	rdma_ctx_t ctx = (rdma_ctx_t)cq_context;

	LOG_KERN(LOG_INFO, ("COMP HANDLER\n"));

	do {
		while (ib_poll_cq(cq, 1, &wc)> 0) {
			if (wc.status == IB_WC_SUCCESS) {
				LOG_KERN(LOG_INFO, ("IB_WC_SUCCESS\n"));
				LOG_KERN(LOG_INFO, ("OP: %s\n",
					wc.opcode == IB_WC_RDMA_READ ? "IB_WC_RDMA_READ" :
					wc.opcode == IB_WC_RDMA_WRITE ? "IB_WC_RDMA_WRITE" :
					"other"));
				LOG_KERN(LOG_INFO, ("byte_len: %d\n", wc.byte_len));
				LOG_KERN(LOG_INFO, ("Decrementing outstanding requests...\n"));
				/* NOTE(review): decrement is not atomic; presumably
				 * completions for this ctx are serialized — confirm. */
				ctx->outstanding_requests--;
			} else {
				LOG_KERN(LOG_INFO, ("FAILURE %d\n", wc.status));
			}
		}
		/* > 0 means completions arrived while arming; poll again. */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
示例5: cq_comp_handler
/*
 * 9P RDMA transport CQ upcall: re-arm the CQ first, then drain it,
 * dispatching each completion to the send or recv handler and releasing
 * the corresponding queue-depth semaphore.  Arming before draining means
 * completions that arrive during the poll loop raise a fresh upcall
 * instead of being lost.
 */
static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct p9_client *client = cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	int ret;
	struct ib_wc wc;

	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		/* wr_id carries the per-request context, owned here. */
		struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;

		switch (c->wc_op) {
		case IB_WC_RECV:
			handle_recv(client, rdma, c, wc.status, wc.byte_len);
			/* free a slot on the receive queue */
			up(&rdma->rq_sem);
			break;

		case IB_WC_SEND:
			handle_send(client, rdma, c, wc.status, wc.byte_len);
			/* free a slot on the send queue */
			up(&rdma->sq_sem);
			break;

		default:
			pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n",
			       c->wc_op, wc.opcode, wc.status);
			break;
		}
		/* context is single-use; handlers are done with it */
		kfree(c);
	}
}
示例6: isert_cq_comp_work_cb
/* callback function for isert_dev->[cq]->cq_comp_work */
/*
 * Workqueue body: drain the CQ, re-arm it, then drain once more to
 * close the race between the last poll and the re-arm.
 */
static void isert_cq_comp_work_cb(struct work_struct *work)
{
	struct isert_cq *cq_desc;
	int ret;

	TRACE_ENTRY();
	cq_desc = container_of(work, struct isert_cq, cq_comp_work);
	ret = isert_poll_cq(cq_desc);
	if (unlikely(ret < 0)) { /* poll error */
		pr_err("ib_poll_cq failed\n");
		goto out;
	}

	ib_req_notify_cq(cq_desc->cq,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	/*
	 * not all HCAs support IB_CQ_REPORT_MISSED_EVENTS,
	 * so we need to make sure we don't miss any events between
	 * last call to ib_poll_cq() and ib_req_notify_cq()
	 */
	isert_poll_cq(cq_desc);

out:
	TRACE_EXIT();
	return;
}
示例7: timer_func
/*
 * Periodic demo/test timer: post one UD send to the remote peer, arm and
 * poll the recv CQ (replenishing the recv queue on completion), poll the
 * send CQ, then re-schedule itself.  Relies on file-scope state
 * (have_path, remote_info, ah, qp, mr, send_key, buf_size, recv_cq,
 * send_cq, verbs_timer) set up elsewhere in the module.
 */
static void timer_func (unsigned long dummy)
{
	struct ib_send_wr wr, *bad_wr;
	struct ib_sge sge;
	int ret;
	struct ib_wc wc;
	static int id = 1;	/* monotonically increasing wr_id for tracing */

	/* Nothing to do until path resolution and peer info exchange finish. */
	if (!have_path)
		return;
	if (!have_remote_info)
		return;

	printk (KERN_INFO "verbs_timer: sending datagram to LID = %u, qpn = %x\n", remote_info.lid, remote_info.qp_num);

	/* Build a single-SGE UD send work request. */
	memset (&wr, 0, sizeof (wr));
	wr.wr_id = id++;
	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = 1;
	wr.wr.ud.remote_qkey = remote_info.qkey;
	wr.wr.ud.remote_qpn = remote_info.qp_num;
	wr.opcode = IB_WR_SEND;
	wr.sg_list = &sge;
	wr.send_flags = 0;
	wr.num_sge = 1;

	/* sge */
	sge.addr = send_key;
	sge.length = buf_size;
	sge.lkey = mr->lkey;

	ret = ib_post_send (qp, &wr, &bad_wr);
	if (ret)
		printk (KERN_INFO "post_send failed: %d\n", ret);
	else
		printk (KERN_INFO "post_send succeeded\n");

	ret = ib_req_notify_cq (recv_cq, IB_CQ_NEXT_COMP);
	printk (KERN_INFO "notify_cq return %d for recv_cq\n", ret);
	/* ret = ib_req_notify_cq (send_cq, IB_CQ_NEXT_COMP); */
	/* printk (KERN_INFO "notify_cq return %d for send_cq\n", ret); */

	ret = ib_poll_cq (recv_cq, 1, &wc);
	printk (KERN_INFO "poll_cq returned %d for recv_cq\n", ret);
	if (ret) {
		printk (KERN_INFO "ID: %llu, status: %d, opcode: %d, len: %u\n",
			wc.wr_id, (int)wc.status, (int)wc.opcode, wc.byte_len);
		/* consumed one recv buffer; post a replacement */
		verbs_post_recv_req ();
	}

	ret = ib_poll_cq (send_cq, 1, &wc);
	printk (KERN_INFO "poll_cq returned %d for send_cq\n", ret);

	mod_timer (&verbs_timer, NEXTJIFF(SEND_INTERVAL));
}
示例8: rpcrdma_recvcq_upcall
/* Handle provider receive completion upcalls.
 *
 * Drain the CQ and re-arm it; a positive return from ib_req_notify_cq
 * with IB_CQ_REPORT_MISSED_EVENTS means completions arrived while
 * arming, so drain again before waiting for the next upcall.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
	for (;;) {
		rpcrdma_recvcq_poll(cq);
		if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				     IB_CQ_REPORT_MISSED_EVENTS) <= 0)
			break;
	}
}
示例9: iser_create_device_ib_res
/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapator.
 *
 * Resources are torn down in reverse order via the goto ladder below,
 * so each failure label undoes only what was set up before it.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	/* RX CQ gets the completion callback; events fan out from it. */
	device->rx_cq = ib_create_cq(device->ib_device,
				  iser_cq_callback,
				  iser_cq_event_callback,
				  (void *)device,
				  ISER_MAX_RX_CQ_LEN, 0);
	if (IS_ERR(device->rx_cq))
		goto rx_cq_err;

	/* TX CQ is polled only, no completion callback. */
	device->tx_cq = ib_create_cq(device->ib_device,
				  NULL, iser_cq_event_callback,
				  (void *)device,
				  ISER_MAX_TX_CQ_LEN, 0);
	if (IS_ERR(device->tx_cq))
		goto tx_cq_err;

	/* Arm the RX CQ before any traffic can arrive. */
	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
		goto cq_arm_err;

	tasklet_init(&device->cq_tasklet,
		     iser_cq_tasklet_fn,
		     (unsigned long)device);

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
				iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	tasklet_kill(&device->cq_tasklet);
cq_arm_err:
	ib_destroy_cq(device->tx_cq);
tx_cq_err:
	ib_destroy_cq(device->rx_cq);
rx_cq_err:
	ib_dealloc_pd(device->pd);
pd_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}
示例10: rds_ib_tasklet_fn_send
/*
 * Send-side CQ tasklet: drain the send CQ, re-arm, drain again to catch
 * completions that raced with the re-arm, then kick the transmit path
 * if the connection is up and has queued work.
 */
static void rds_ib_tasklet_fn_send(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* poll / arm / poll closes the missed-event window */
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);

	/* send completions freed SQ space; resume transmission if needed */
	if (rds_conn_up(conn) &&
	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued)))
		rds_send_xmit(ic->conn);
}
示例11: sq_cq_reap
/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc_a[6];
	struct ib_wc *wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	memset(wc_a, 0, sizeof(wc_a));

	/* Only one reaper runs per pending event; others bail out. */
	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	/* Re-arm before polling so events during the drain raise a new upcall. */
	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	/* Reap in batches of up to ARRAY_SIZE(wc_a) completions. */
	while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
		int i;

		for (i = 0; i < ret; i++) {
			wc = &wc_a[i];
			if (wc->status != IB_WC_SUCCESS) {
				dprintk("svcrdma: sq wc err status %s (%d)\n",
					ib_wc_status_msg(wc->status),
					wc->status);

				/* Close the transport */
				set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			}

			/* Decrement used SQ WR count */
			atomic_dec(&xprt->sc_sq_count);
			wake_up(&xprt->sc_send_wait);

			ctxt = (struct svc_rdma_op_ctxt *)
				(unsigned long)wc->wr_id;
			if (ctxt)
				process_context(xprt, ctxt);

			/* Each posted WR held a transport reference. */
			svc_xprt_put(&xprt->sc_xprt);
		}
	}

	/* ctxt != NULL means at least one completion was processed. */
	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}
示例12: rpcrdma_cq_event_upcall
/*
 * rpcrdma_cq_event_upcall
 *
 * This upcall handles recv, send, bind and unbind events.
 * It is reentrant but processes single events in order to maintain
 * ordering of receives to keep server credits.
 *
 * It is the responsibility of the scheduled tasklet to return
 * recv buffers to the pool. NOTE: this affects synchronization of
 * connection shutdown. That is, the structures required for
 * the completion of the reply handler must remain intact until
 * all memory has been reclaimed.
 *
 * Note that send events are suppressed and do not result in an upcall.
 */
static void
rpcrdma_cq_event_upcall(struct ib_cq *cq, void *context)
{
	int rc;

	/* Drain completions queued before this upcall. */
	rc = rpcrdma_cq_poll(cq);
	if (rc)
		return;
	/* Re-arm for the next event. */
	rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC:       %s: ib_req_notify_cq failed %i\n",
			__func__, rc);
		return;
	}
	/* Final poll catches completions that raced with the re-arm. */
	rpcrdma_cq_poll(cq);
}
示例13: rq_cq_reap
/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	/* Only one reaper runs per pending event; others bail out. */
	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	/* Re-arm before polling so events during the drain raise a new upcall. */
	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);
	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		/* Queue the context for the data-transfer-op consumer. */
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	/* ctxt != NULL means at least one completion was reaped. */
	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}
示例14: rds_ib_tasklet_fn_send
/*
 * Send-side CQ tasklet (quiesce-aware variant): bail out if the CQ has
 * already been reaped, otherwise drain / re-arm / drain the send CQ and
 * kick the transmit path if the connection has queued work.
 */
static void rds_ib_tasklet_fn_send(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if cq has been already reaped, ignore incoming cq event */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

	/* poll / arm / poll closes the missed-event window */
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);

	/* send completions freed SQ space; resume transmission if needed */
	if (rds_conn_up(conn) &&
	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued)))
		rds_send_xmit(&ic->conn->c_path[0]);
}
示例15: krping_setup_qp
/*
 * krping_setup_qp - allocate the PD, CQ and QP for a krping session.
 *
 * Resources are released in reverse order through the goto ladder on
 * failure.  The CQ is armed only in ping mode (no wlat/rlat/bw latency
 * or bandwidth test selected), since those modes poll the CQ directly.
 *
 * Returns 0 on success or a negative errno.
 */
static int krping_setup_qp(struct krping_cb *cb, struct rdma_cm_id *cm_id)
{
	int ret;

	cb->pd = ib_alloc_pd(cm_id->device);
	if (IS_ERR(cb->pd)) {
		log(LOG_ERR, "ib_alloc_pd failed\n");
		return PTR_ERR(cb->pd);
	}
	DEBUG_LOG(PFX "created pd %p\n", cb->pd);

	/* One CQ shared by send and recv; depth covers both directions. */
	cb->cq = ib_create_cq(cm_id->device, krping_cq_event_handler, NULL,
			      cb, cb->txdepth * 2, 0);
	if (IS_ERR(cb->cq)) {
		log(LOG_ERR, "ib_create_cq failed\n");
		ret = PTR_ERR(cb->cq);
		goto err1;
	}
	DEBUG_LOG(PFX "created cq %p\n", cb->cq);

	if (!cb->wlat && !cb->rlat && !cb->bw) {
		ret = ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
		if (ret) {
			/* Fixed: previously mis-reported as "ib_create_cq failed" */
			log(LOG_ERR, "ib_req_notify_cq failed\n");
			goto err2;
		}
	}

	ret = krping_create_qp(cb);
	if (ret) {
		log(LOG_ERR, "krping_create_qp failed: %d\n", ret);
		goto err2;
	}
	DEBUG_LOG(PFX "created qp %p\n", cb->qp);
	return 0;

err2:
	ib_destroy_cq(cb->cq);
err1:
	ib_dealloc_pd(cb->pd);
	return ret;
}