This article collects typical usage examples of the ib_dma_mapping_error function from the Linux kernel's RDMA (InfiniBand verbs) API. If you are unsure what ib_dma_mapping_error does, how to call it, or what real callers look like, the curated examples below should help.
The following shows 15 code examples of ib_dma_mapping_error, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better code examples.
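Before diving into the examples, it helps to see the contract they all share: every handle returned by ib_dma_map_single() or ib_dma_map_page() must be checked with ib_dma_mapping_error() before the buffer is handed to the device, and every successfully mapped buffer must later be released with the matching ib_dma_unmap_single()/ib_dma_unmap_page() call. The sketch below is a minimal illustration of that pattern and is not taken from any of the examples; the helper name and the -ENOMEM error code are our own choices.

#include <rdma/ib_verbs.h>

/* Minimal sketch: map one CPU buffer for device access and verify the mapping. */
static int example_map_buffer(struct ib_device *ibdev, void *buf, size_t len,
                              u64 *dma_out)
{
        u64 dma;

        dma = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);

        /* The returned value may encode an error; never post it unchecked. */
        if (ib_dma_mapping_error(ibdev, dma))
                return -ENOMEM;

        *dma_out = dma;
        return 0;
}

/* Once the device is done with the buffer:
 *         ib_dma_unmap_single(ibdev, dma, len, DMA_TO_DEVICE);
 */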
Example 1: netdev_priv
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
                                             struct ipoib_cm_rx_buf *rx_ring,
                                             int id, int frags,
                                             u64 mapping[IPOIB_CM_RX_SG])
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int i;

        skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
        if (unlikely(!skb))
                return NULL;

        /*
         * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
         * IP header to a multiple of 16.
         */
        skb_reserve(skb, 12);

        mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        for (i = 0; i < frags; i++) {
                struct page *page = alloc_page(GFP_ATOMIC);

                if (!page)
                        goto partial_error;
                skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

                mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
                                                 0, PAGE_SIZE, DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
                        goto partial_error;
        }

        rx_ring[id].skb = skb;
        return skb;

partial_error:
        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

        for (; i > 0; --i)
                ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

        dev_kfree_skb_any(skb);
        return NULL;
}
Example 2: iser_alloc_rx_descriptors
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
                              struct iscsi_session *session)
{
        int i, j;
        u64 dma_addr;
        struct iser_rx_desc *rx_desc;
        struct ib_sge *rx_sg;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;

        iser_conn->qp_max_recv_dtos = session->cmds_max;
        iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
        iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

        if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
                goto create_rdma_reg_res_failed;

        if (iser_alloc_login_buf(iser_conn))
                goto alloc_login_buf_fail;

        iser_conn->num_rx_descs = session->cmds_max;
        iser_conn->rx_descs = kmalloc(iser_conn->num_rx_descs *
                                      sizeof(struct iser_rx_desc), GFP_KERNEL);
        if (!iser_conn->rx_descs)
                goto rx_desc_alloc_fail;

        rx_desc = iser_conn->rx_descs;

        for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
                dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
                                             ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(device->ib_device, dma_addr))
                        goto rx_desc_dma_map_failed;

                rx_desc->dma_addr = dma_addr;

                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr = rx_desc->dma_addr;
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey = device->mr->lkey;
        }

        iser_conn->rx_desc_head = 0;
        return 0;

rx_desc_dma_map_failed:
        rx_desc = iser_conn->rx_descs;
        for (j = 0; j < i; j++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(iser_conn->rx_descs);
        iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
        iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
        device->iser_free_rdma_reg_res(ib_conn);
create_rdma_reg_res_failed:
        iser_err("failed allocating rx descriptors / data buffers\n");
        return -ENOMEM;
}
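Examples 1 and 2 both show the unwind-on-partial-failure idiom: when mapping the Nth buffer fails, every buffer mapped before it is unmapped before the error is returned. The standalone sketch below distills that idiom; the struct and function names (rx_buf, map_rx_array) are hypothetical and only illustrate the loop-and-unwind shape used above.

#include <rdma/ib_verbs.h>

struct rx_buf {
        void *data;
        u64   dma;
};

/* Map an array of receive buffers; on a partial failure, unmap what succeeded. */
static int map_rx_array(struct ib_device *ibdev, struct rx_buf *bufs,
                        int n, size_t len)
{
        int i;

        for (i = 0; i < n; i++) {
                bufs[i].dma = ib_dma_map_single(ibdev, bufs[i].data, len,
                                                DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(ibdev, bufs[i].dma))
                        goto unwind;
        }
        return 0;

unwind:
        /* Release only the mappings that succeeded before the failure. */
        while (--i >= 0)
                ib_dma_unmap_single(ibdev, bufs[i].dma, len, DMA_FROM_DEVICE);
        return -ENOMEM;
}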
Example 3: rdma_set_ctxt_sge
static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
                             struct svc_rdma_op_ctxt *ctxt,
                             struct svc_rdma_fastreg_mr *frmr,
                             struct kvec *vec,
                             u64 *sgl_offset,
                             int count)
{
        int i;

        ctxt->count = count;
        ctxt->direction = DMA_FROM_DEVICE;
        for (i = 0; i < count; i++) {
                ctxt->sge[i].length = 0; /* in case map fails */
                if (!frmr) {
                        ctxt->sge[i].addr =
                                ib_dma_map_single(xprt->sc_cm_id->device,
                                                  vec[i].iov_base,
                                                  vec[i].iov_len,
                                                  DMA_FROM_DEVICE);
                        if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                                 ctxt->sge[i].addr))
                                return -EINVAL;
                        ctxt->sge[i].lkey = xprt->sc_dma_lkey;
                        atomic_inc(&xprt->sc_dma_used);
                } else {
                        ctxt->sge[i].addr = (unsigned long)vec[i].iov_base;
                        ctxt->sge[i].lkey = frmr->mr->lkey;
                }
                ctxt->sge[i].length = vec[i].iov_len;
                *sgl_offset = *sgl_offset + vec[i].iov_len;
        }
        return 0;
}
Example 4: VNIC_BUF_SIZE
struct sk_buff *vnic_alloc_rx_skb(struct vnic_rx_ring *ring, int buf_ind,
                                  gfp_t gfp_flag)
{
        struct ib_device *ca = ring->port->dev->ca;
        struct sk_buff *skb;
        u64 mapping;
        int buf_size = VNIC_BUF_SIZE(ring->port);

        skb = alloc_skb(buf_size, gfp_flag);
        if (!skb) {
                vnic_dbg_data(ring->port->name,
                              "alloc_skb for size %d failed\n", buf_size);
                goto err_alloc;
        }

        mapping = ib_dma_map_single(ca, skb->data, buf_size, DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(ca, mapping))) {
                vnic_dbg_data(ring->port->name,
                              "ib_dma_map_single len %d failed\n", buf_size);
                goto err_map;
        }

        ring->rx_info[buf_ind].skb = skb;
        ring->rx_info[buf_ind].dma_addr[0] = mapping;

        return skb;

err_map:
        dev_kfree_skb_any(skb);
err_alloc:
        return NULL;
}
Example 5: post_recv
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
        struct p9_trans_rdma *rdma = client->trans;
        struct ib_recv_wr wr;
        struct ib_sge sge;

        c->busa = ib_dma_map_single(rdma->cm_id->device,
                                    c->rc.sdata, client->msize,
                                    DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
                goto error;

        c->cqe.done = recv_done;

        sge.addr = c->busa;
        sge.length = client->msize;
        sge.lkey = rdma->pd->local_dma_lkey;

        wr.next = NULL;
        wr.wr_cqe = &c->cqe;
        wr.sg_list = &sge;
        wr.num_sge = 1;
        return ib_post_recv(rdma->qp, &wr, NULL);

error:
        p9_debug(P9_DEBUG_ERROR, "EIO\n");
        return -EIO;
}
Example 6: post_recv
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
        struct p9_trans_rdma *rdma = client->trans;
        struct ib_recv_wr wr, *bad_wr;
        struct ib_sge sge;

        c->busa = ib_dma_map_single(rdma->cm_id->device,
                                    c->rc->sdata, client->msize,
                                    DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
                goto error;

        sge.addr = c->busa;
        sge.length = client->msize;
        sge.lkey = rdma->lkey;

        wr.next = NULL;
        c->wc_op = IB_WC_RECV;
        wr.wr_id = (unsigned long) c;
        wr.sg_list = &sge;
        wr.num_sge = 1;
        return ib_post_recv(rdma->qp, &wr, &bad_wr);

error:
        p9_debug(P9_DEBUG_ERROR, "EIO\n");
        return -EIO;
}
Example 7: iser_alloc_login_buf
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
        struct iser_device *device = iser_conn->ib_conn.device;
        int req_err, resp_err;

        BUG_ON(device == NULL);

        iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                       ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!iser_conn->login_buf)
                goto out_err;

        iser_conn->login_req_buf = iser_conn->login_buf;
        iser_conn->login_resp_buf = iser_conn->login_buf +
                                    ISCSI_DEF_MAX_RECV_SEG_LEN;

        iser_conn->login_req_dma = ib_dma_map_single(device->ib_device,
                                                     iser_conn->login_req_buf,
                                                     ISCSI_DEF_MAX_RECV_SEG_LEN,
                                                     DMA_TO_DEVICE);

        iser_conn->login_resp_dma = ib_dma_map_single(device->ib_device,
                                                      iser_conn->login_resp_buf,
                                                      ISER_RX_LOGIN_SIZE,
                                                      DMA_FROM_DEVICE);

        req_err = ib_dma_mapping_error(device->ib_device,
                                       iser_conn->login_req_dma);
        resp_err = ib_dma_mapping_error(device->ib_device,
                                        iser_conn->login_resp_dma);

        if (req_err || resp_err) {
                if (req_err)
                        iser_conn->login_req_dma = 0;
                if (resp_err)
                        iser_conn->login_resp_dma = 0;
                goto free_login_buf;
        }
        return 0;

free_login_buf:
        iser_free_login_buf(iser_conn);
out_err:
        iser_err("unable to alloc or map login buf\n");
        return -ENOMEM;
}
Example 8: svc_rdma_bc_sendto
/* Send a backwards direction RPC call.
 *
 * Caller holds the connection's mutex and has already marshaled
 * the RPC/RDMA request.
 *
 * This is similar to svc_rdma_reply, but takes an rpc_rqst
 * instead, does not support chunks, and avoids blocking memory
 * allocation.
 *
 * XXX: There is still an opportunity to block in svc_rdma_send()
 * if there are no SQ entries to post the Send. This may occur if
 * the adapter has a small maximum SQ depth.
 */
static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
                              struct rpc_rqst *rqst)
{
        struct xdr_buf *sndbuf = &rqst->rq_snd_buf;
        struct svc_rdma_op_ctxt *ctxt;
        struct svc_rdma_req_map *vec;
        struct ib_send_wr send_wr;
        int ret;

        vec = svc_rdma_get_req_map(rdma);
        ret = svc_rdma_map_xdr(rdma, sndbuf, vec, false);
        if (ret)
                goto out_err;

        ret = svc_rdma_repost_recv(rdma, GFP_NOIO);
        if (ret)
                goto out_err;

        ctxt = svc_rdma_get_context(rdma);
        ctxt->pages[0] = virt_to_page(rqst->rq_buffer);
        ctxt->count = 1;

        ctxt->direction = DMA_TO_DEVICE;
        ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
        ctxt->sge[0].length = sndbuf->len;
        ctxt->sge[0].addr =
                ib_dma_map_page(rdma->sc_cm_id->device, ctxt->pages[0], 0,
                                sndbuf->len, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) {
                ret = -EIO;
                goto out_unmap;
        }
        svc_rdma_count_mappings(rdma, ctxt);

        memset(&send_wr, 0, sizeof(send_wr));
        ctxt->cqe.done = svc_rdma_wc_send;
        send_wr.wr_cqe = &ctxt->cqe;
        send_wr.sg_list = ctxt->sge;
        send_wr.num_sge = 1;
        send_wr.opcode = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        ret = svc_rdma_send(rdma, &send_wr);
        if (ret) {
                ret = -EIO;
                goto out_unmap;
        }

out_err:
        svc_rdma_put_req_map(rdma, vec);
        dprintk("svcrdma: %s returns %d\n", __func__, ret);
        return ret;

out_unmap:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
        goto out_err;
}
Example 9: vnic_alloc_frag
/*
 * allocate a single fragment on a single ring entry and map it
 * to HW address.
 */
static int vnic_alloc_frag(struct vnic_rx_ring *ring,
                           struct vnic_frag_data *frags_data, int i)
{
        struct vnic_frag_info *frag_info = &ring->frag_info[i];
        struct vnic_rx_alloc *page_alloc = &ring->page_alloc[i];
        struct skb_frag_struct *skb_frags = &frags_data->frags[i];
        struct skb_frag_struct skbf = *skb_frags;
        struct page *page;
        struct ib_device *ib_device = ring->port->dev->ca;
        u64 dma;
        int decision;

        if (vnic_rx_linear)
                return 0;

        if (page_alloc->offset >= frag_info->last_offset) {
                decision = 0;
                /* Allocate new page */
                page = alloc_pages(GFP_ATOMIC | __GFP_COMP, VNIC_ALLOC_ORDER);
                if (!page) {
                        /* frags_data->dma_addr[i] = NULL;
                           ring->rx_info[wr_id].info = VNIC_FRAG_ALLOC_FAIL;
                           ring->need_refill = 1; */
                        return -ENOMEM;
                }
                skbf.page.p = page_alloc->page;
                skbf.page_offset = page_alloc->offset;
        } else {
                decision = 1;
                page = page_alloc->page;
                get_page(page);
                skbf.page.p = page;
                skbf.page_offset = page_alloc->offset;
        }

        skbf.size = frag_info->frag_size;

        dma = ib_dma_map_single(ib_device, page_address(skbf.page.p) +
                                skbf.page_offset, frag_info->frag_size,
                                PCI_DMA_FROMDEVICE);
        if (unlikely(ib_dma_mapping_error(ib_device, dma))) {
                vnic_dbg_data(ring->port->name,
                              "ib_dma_map_single len %d failed\n",
                              frag_info->frag_size);
                put_page(page);
                return -ENOMEM;
        }

        if (!decision) {
                page_alloc->page = page;
                page_alloc->offset = frag_info->frag_align;
        } else
                page_alloc->offset += frag_info->frag_stride;

        *skb_frags = skbf;
        frags_data->dma_addr[i] = dma;

        return 0;
}
Example 10: ib_umem_odp_map_dma_single_page
/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
                struct ib_umem *umem,
                int page_index,
                struct page *page,
                u64 access_mask,
                unsigned long current_seq,
                enum ib_odp_dma_map_flags flags)
{
        struct ib_device *dev = umem->context->device;
        dma_addr_t dma_addr;
        int stored_page = 0;
        int ret = 0;

        mutex_lock(&umem->odp_data->umem_mutex);
        /*
         * Note: we avoid writing if seq is different from the initial seq, to
         * handle case of a racing notifier. This check also allows us to bail
         * early if we have a notifier running in parallel with us.
         */
        if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
                ret = -EAGAIN;
                goto out;
        }
        if (!(umem->odp_data->dma_list[page_index])) {
                dma_addr = ib_dma_map_page(dev,
                                           page,
                                           0, PAGE_SIZE,
                                           DMA_BIDIRECTIONAL);
                if (ib_dma_mapping_error(dev, dma_addr)) {
                        ret = -EFAULT;
                        goto out;
                }
                umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
                umem->odp_data->page_list[page_index] = page;
                if (flags & IB_ODP_DMA_MAP_FOR_PREFETCH)
                        atomic_inc(&dev->odp_statistics.num_prefetch_pages);
                else
                        atomic_inc(&dev->odp_statistics.num_page_fault_pages);
                stored_page = 1;
        } else if (umem->odp_data->page_list[page_index] == page) {
                umem->odp_data->dma_list[page_index] |= access_mask;
        } else {
                pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
                       umem->odp_data->page_list[page_index], page);
        }

out:
        mutex_unlock(&umem->odp_data->umem_mutex);

        /* On Demand Paging - avoid pinning the page */
        if (umem->context->invalidate_range || !stored_page)
                put_page(page);

        return ret;
}
Example 11: svc_rdma_post_recv
static int
svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct svc_rdma_op_ctxt *ctxt;
        struct page *page;
        dma_addr_t pa;
        int sge_no;
        int buflen;
        int ret;

        ctxt = svc_rdma_get_context(xprt);
        buflen = 0;
        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->cqe.done = svc_rdma_wc_receive;
        for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
                if (sge_no >= xprt->sc_max_sge) {
                        pr_err("svcrdma: Too many sges (%d)\n", sge_no);
                        goto err_put_ctxt;
                }
                page = alloc_page(GFP_KERNEL);
                if (!page)
                        goto err_put_ctxt;
                ctxt->pages[sge_no] = page;
                pa = ib_dma_map_page(xprt->sc_cm_id->device,
                                     page, 0, PAGE_SIZE,
                                     DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
                        goto err_put_ctxt;
                svc_rdma_count_mappings(xprt, ctxt);
                ctxt->sge[sge_no].addr = pa;
                ctxt->sge[sge_no].length = PAGE_SIZE;
                ctxt->sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
                ctxt->count = sge_no + 1;
                buflen += PAGE_SIZE;
        }
        recv_wr.next = NULL;
        recv_wr.sg_list = &ctxt->sge[0];
        recv_wr.num_sge = ctxt->count;
        recv_wr.wr_cqe = &ctxt->cqe;

        svc_xprt_get(&xprt->sc_xprt);
        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
        if (ret) {
                svc_rdma_unmap_dma(ctxt);
                svc_rdma_put_context(ctxt, 1);
                svc_xprt_put(&xprt->sc_xprt);
        }
        return ret;

err_put_ctxt:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
        return -ENOMEM;
}
Example 12: iser_alloc_login_buf
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
        struct iser_device *device = iser_conn->ib_conn.device;
        struct iser_login_desc *desc = &iser_conn->login_desc;

        desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
        if (!desc->req)
                return -ENOMEM;

        desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
                                          ISCSI_DEF_MAX_RECV_SEG_LEN,
                                          DMA_TO_DEVICE);
        if (ib_dma_mapping_error(device->ib_device,
                                 desc->req_dma))
                goto free_req;

        desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!desc->rsp)
                goto unmap_req;

        desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
                                          ISER_RX_LOGIN_SIZE,
                                          DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(device->ib_device,
                                 desc->rsp_dma))
                goto free_rsp;

        return 0;

free_rsp:
        kfree(desc->rsp);
unmap_req:
        ib_dma_unmap_single(device->ib_device, desc->req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN,
                            DMA_TO_DEVICE);
free_req:
        kfree(desc->req);

        return -ENOMEM;
}
Example 13: rdma_map_address
u64 rdma_map_address(void *addr, int length)
{
        u64 dma_addr;

        LOG_KERN(LOG_INFO, ("Mapping addr\n"));

        dma_addr = ib_dma_map_single(rdma_ib_device.dev, addr, length, DMA_BIDIRECTIONAL);

        if (ib_dma_mapping_error(rdma_ib_device.dev, dma_addr) != 0) {
                LOG_KERN(LOG_INFO, ("Error mapping myaddr"));
                return 0; /* error */
        }

        return dma_addr;
}
Example 14: svc_rdma_post_recv
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct svc_rdma_op_ctxt *ctxt;
        struct page *page;
        dma_addr_t pa;
        int sge_no;
        int buflen;
        int ret;

        ctxt = svc_rdma_get_context(xprt);
        buflen = 0;
        ctxt->direction = DMA_FROM_DEVICE;
        for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
                BUG_ON(sge_no >= xprt->sc_max_sge);
                page = svc_rdma_get_page();
                ctxt->pages[sge_no] = page;
                pa = ib_dma_map_page(xprt->sc_cm_id->device,
                                     page, 0, PAGE_SIZE,
                                     DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
                        goto err_put_ctxt;
                atomic_inc(&xprt->sc_dma_used);
                ctxt->sge[sge_no].addr = pa;
                ctxt->sge[sge_no].length = PAGE_SIZE;
                ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
                ctxt->count = sge_no + 1;
                buflen += PAGE_SIZE;
        }
        recv_wr.next = NULL;
        recv_wr.sg_list = &ctxt->sge[0];
        recv_wr.num_sge = ctxt->count;
        recv_wr.wr_id = (u64)(unsigned long)ctxt;

        svc_xprt_get(&xprt->sc_xprt);
        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
        if (ret) {
                svc_rdma_unmap_dma(ctxt);
                svc_rdma_put_context(ctxt, 1);
                svc_xprt_put(&xprt->sc_xprt);
        }
        return ret;

err_put_ctxt:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
        return -ENOMEM;
}
Example 15: iser_reg_single
/**
 * iser_reg_single - fills registered buffer descriptor with
 *                   registration information
 */
void iser_reg_single(struct iser_device *device,
                     struct iser_regd_buf *regd_buf,
                     enum dma_data_direction direction)
{
        u64 dma_addr;

        dma_addr = ib_dma_map_single(device->ib_device,
                                     regd_buf->virt_addr,
                                     regd_buf->data_size, direction);
        BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));

        regd_buf->reg.lkey = device->mr->lkey;
        regd_buf->reg.len = regd_buf->data_size;
        regd_buf->reg.va = dma_addr;
        regd_buf->reg.is_fmr = 0;

        regd_buf->dma_addr = dma_addr;
        regd_buf->direction = direction;
}
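Example 15 asserts on a mapping failure with BUG_ON(), which halts the kernel on what is usually a recoverable condition; most of the other examples propagate an error to the caller instead. Purely as an illustration (this is not the in-tree iSER code, and the _checked name is our own), the same function could report the failure like this:

/* Hypothetical rework of iser_reg_single that returns an error instead of BUG_ON. */
static int iser_reg_single_checked(struct iser_device *device,
                                   struct iser_regd_buf *regd_buf,
                                   enum dma_data_direction direction)
{
        u64 dma_addr;

        dma_addr = ib_dma_map_single(device->ib_device,
                                     regd_buf->virt_addr,
                                     regd_buf->data_size, direction);
        if (ib_dma_mapping_error(device->ib_device, dma_addr))
                return -ENOMEM; /* let the caller fail the I/O gracefully */

        regd_buf->reg.lkey = device->mr->lkey;
        regd_buf->reg.len = regd_buf->data_size;
        regd_buf->reg.va = dma_addr;
        regd_buf->reg.is_fmr = 0;

        regd_buf->dma_addr = dma_addr;
        regd_buf->direction = direction;
        return 0;
}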