This page collects typical usage examples of the C function ib_copy_to_udata() from the Linux kernel's RDMA (InfiniBand verbs) API. If you have been wondering what ib_copy_to_udata() does, how it is called, or where it is used in practice, the curated examples below should help.
Fifteen code examples are shown, drawn from several upstream RDMA drivers and ordered roughly by popularity.
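For orientation, ib_copy_to_udata() is the kernel helper that copies a driver-specific response structure back to the userspace buffer described by struct ib_udata. Its definition is roughly the following (a sketch based on include/rdma/ib_verbs.h; newer kernels additionally clamp len to udata->outlen with min_t()):

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
        /* udata->outbuf is the userspace response buffer supplied by the
         * uverbs layer; a failed copy_to_user() is reported as -EFAULT. */
        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

As the examples show, the recurring pattern is: allocate the kernel object, fill a response struct, copy it out with ib_copy_to_udata(), and unwind the allocation if the copy fails.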
Example 1: to_mdev
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
                                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_ucontext *context;
        struct mlx4_ib_alloc_ucontext_resp resp;
        int err;

        resp.qp_tab_size      = dev->dev->caps.num_qps;
        resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
        resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
        if (err) {
                kfree(context);
                return ERR_PTR(err);
        }

        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);

        err = ib_copy_to_udata(udata, &resp, sizeof resp);
        if (err) {
                mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
                kfree(context);
                return ERR_PTR(-EFAULT);
        }

        return &context->ibucontext;
}
Example 2: hns_roce_alloc_pd
int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
                      struct ib_udata *udata)
{
        struct ib_device *ib_dev = ibpd->device;
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
        struct device *dev = hr_dev->dev;
        struct hns_roce_pd *pd = to_hr_pd(ibpd);
        int ret;

        ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
        if (ret) {
                dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
                return ret;
        }

        if (context) {
                struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn};

                if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                        hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
                        dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
                        return -EFAULT;
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);

void hns_roce_dealloc_pd(struct ib_pd *pd)
{
        hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
}
Example 3: kmalloc
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct mlx4_ib_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context)
                if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
                        mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }

        return &pd->ibpd;
}
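Example 3 copies a bare __u32 rather than a named response struct; most later drivers (see Examples 2 and 5) wrap the handle in an explicit response struct. Below is a minimal sketch of that pattern, with hypothetical my_* names standing in for a real driver's types and hardware calls:

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct my_dev;                                        /* hypothetical device state */
struct my_pd { u32 pdn; };                            /* hypothetical PD object */
int my_hw_alloc_pd(struct my_dev *mdev, u32 *pdn);    /* hypothetical HW call */
void my_hw_free_pd(struct my_dev *mdev, u32 pdn);     /* hypothetical HW call */

/* Hypothetical response layout; the reserved field makes the padding
 * explicit so the ABI size is identical for 32- and 64-bit userspace. */
struct my_alloc_pd_resp {
        __u32 pdn;
        __u32 reserved;
};

static int my_alloc_pd(struct my_dev *mdev, struct my_pd *pd,
                       struct ib_udata *udata)
{
        struct my_alloc_pd_resp resp = {};
        int err;

        err = my_hw_alloc_pd(mdev, &pd->pdn);
        if (err)
                return err;

        if (udata) {
                resp.pdn = pd->pdn;
                if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
                        my_hw_free_pd(mdev, pd->pdn);  /* unwind on -EFAULT */
                        return -EFAULT;
                }
        }
        return 0;
}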
Example 4: to_mdev
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
                                struct ib_udata *udata)
{
        struct mlx5_ib_ah *ah;
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        enum rdma_ah_attr_type ah_type = ah_attr->type;

        if ((ah_type == RDMA_AH_ATTR_TYPE_ROCE) &&
            !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
                return ERR_PTR(-EINVAL);

        if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && udata) {
                int err;
                struct mlx5_ib_create_ah_resp resp = {};
                u32 min_resp_len = offsetof(typeof(resp), dmac) +
                                   sizeof(resp.dmac);

                if (udata->outlen < min_resp_len)
                        return ERR_PTR(-EINVAL);

                resp.response_length = min_resp_len;
                memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
                err = ib_copy_to_udata(udata, &resp, resp.response_length);
                if (err)
                        return ERR_PTR(err);
        }

        ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        return create_ib_ah(dev, ah, ah_attr); /* never fails */
}
Example 5: kmalloc
static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct mlx5_ib_alloc_pd_resp resp;
        struct mlx5_ib_pd *pd;
        int err;

        pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context) {
                resp.pdn = pd->pdn;
                if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
                        mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        }

        return &pd->ibpd;
}
Example 6: to_vdev
/**
 * pvrdma_alloc_pd - allocate protection domain
 * @ibdev: the IB device
 * @context: user context
 * @udata: user data
 *
 * @return: the ib_pd protection domain pointer on success, otherwise errno.
 */
struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
{
        struct pvrdma_pd *pd;
        struct pvrdma_dev *dev = to_vdev(ibdev);
        union pvrdma_cmd_req req;
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
        struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
        struct pvrdma_alloc_pd_resp pd_resp = {0};
        int ret;
        void *ptr;

        /* Check allowed max pds */
        if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
                return ERR_PTR(-ENOMEM);

        pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd) {
                ptr = ERR_PTR(-ENOMEM);
                goto err;
        }

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
        cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0;
        ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "failed to allocate protection domain, error: %d\n",
                         ret);
                ptr = ERR_PTR(ret);
                goto freepd;
        }

        pd->privileged = !context;
        pd->pd_handle = resp->pd_handle;
        pd->pdn = resp->pd_handle;
        pd_resp.pdn = resp->pd_handle;

        if (context) {
                if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
                        dev_warn(&dev->pdev->dev,
                                 "failed to copy back protection domain\n");
                        pvrdma_dealloc_pd(&pd->ibpd);
                        return ERR_PTR(-EFAULT);
                }
        }

        /* u32 pd handle */
        return &pd->ibpd;

freepd:
        kfree(pd);
err:
        atomic_dec(&dev->num_pds);
        return ptr;
}
Example 7: siw_dev_ofa2siw
struct ib_cq *siw_create_cq(struct ib_device *ofa_dev,
                            const struct ib_cq_init_attr *attr,
                            // int size,
                            // int vec /* unused */,
                            struct ib_ucontext *ib_context,
                            struct ib_udata *udata)
{
        struct siw_cq *cq = NULL;
        struct siw_dev *sdev = siw_dev_ofa2siw(ofa_dev);
        struct siw_uresp_create_cq uresp;
        int rv;

        if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
                dprint(DBG_ON, ": Out of CQ's\n");
                rv = -ENOMEM;
                goto err_out;
        }
        if (attr->cqe < 1 || attr->cqe > SIW_MAX_CQE) {
                dprint(DBG_ON, ": CQE: %d\n", attr->cqe);
                rv = -EINVAL;
                goto err_out;
        }
        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq) {
                dprint(DBG_ON, ": kmalloc\n");
                rv = -ENOMEM;
                goto err_out;
        }
        // cq->ofa_cq.cqe = size - 1;
        cq->ofa_cq.cqe = attr->cqe - 1;

        rv = siw_cq_add(sdev, cq);
        if (rv)
                goto err_out_idr;

        INIT_LIST_HEAD(&cq->queue);
        spin_lock_init(&cq->lock);
        atomic_set(&cq->qlen, 0);

        if (ib_context) {
                uresp.cq_id = OBJ_ID(cq);
                rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
                if (rv)
                        goto err_out_idr;
        }
        return &cq->ofa_cq;

err_out_idr:
        siw_remove_obj(&sdev->idr_lock, &sdev->cq_idr, &cq->hdr);
err_out:
        dprint(DBG_OBJ, ": CQ creation failed\n");
        kfree(cq);
        atomic_dec(&sdev->num_cq);
        return ERR_PTR(rv);
}
Example 8: to_c4iw_dev
static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
                                               struct ib_udata *udata)
{
        struct c4iw_ucontext *context;
        struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
        static int warned;
        struct c4iw_alloc_ucontext_resp uresp;
        int ret = 0;
        struct c4iw_mm_entry *mm = NULL;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context) {
                ret = -ENOMEM;
                goto err;
        }

        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);

        if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
                if (!warned++)
                        pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
                rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
        } else {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err_free;
                }

                uresp.status_page_size = PAGE_SIZE;

                spin_lock(&context->mmap_lock);
                uresp.status_page_key = context->key;
                context->key += PAGE_SIZE;
                spin_unlock(&context->mmap_lock);

                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err_mm;

                mm->key = uresp.status_page_key;
                mm->addr = virt_to_phys(rhp->rdev.status_page);
                mm->len = PAGE_SIZE;
                insert_mmap(context, mm);
        }
        return &context->ibucontext;
err_mm:
        kfree(mm);
err_free:
        kfree(context);
err:
        return ERR_PTR(ret);
}
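Examples 8 and 10 (and Example 15 below) also show the backward-compatibility idiom: when a response struct grows over time, the driver compares udata->outlen with the old layout's size and copies only what the userspace library can receive. A condensed sketch of the idea, with hypothetical struct names:

#include <rdma/ib_verbs.h>

struct my_uresp_v0 {                 /* original ABI layout */
        __u32 base_field;
};

struct my_uresp {                    /* extended ABI layout */
        __u32 base_field;
        __u32 new_field;             /* added in a later revision */
};

static int my_copy_uresp(struct ib_udata *udata, struct my_uresp *uresp)
{
        size_t len = sizeof(*uresp);

        /* A downlevel library allocated only the v0 response buffer,
         * so copy no more than it can hold. */
        if (udata->outlen < sizeof(*uresp))
                len = sizeof(struct my_uresp_v0);

        return ib_copy_to_udata(udata, uresp, len);
}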
Example 9: siw_dev_ofa2siw
struct ib_ucontext *siw_alloc_ucontext(struct ib_device *ofa_dev,
                                       struct ib_udata *udata)
{
        struct siw_ucontext *ctx = NULL;
        struct siw_dev *sdev = siw_dev_ofa2siw(ofa_dev);
        int rv;

        pr_debug(DBG_CM "(device=%s)\n", ofa_dev->name);

        if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
                pr_debug(": Out of CONTEXT's\n");
                rv = -ENOMEM;
                goto err_out;
        }
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                rv = -ENOMEM;
                goto err_out;
        }
        ctx->sdev = sdev;

        if (udata) {
                struct urdma_uresp_alloc_ctx uresp;
                struct file *filp;

                memset(&uresp, 0, sizeof uresp);
                uresp.dev_id = sdev->attrs.vendor_part_id;

                filp = siw_event_file_new(ctx, &uresp.event_fd);
                if (IS_ERR(filp)) {
                        rv = PTR_ERR(filp);
                        goto err_out;
                }

                rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
                if (rv) {
                        fput(filp);
                        kfree(ctx->event_file);
                        put_unused_fd(uresp.event_fd);
                        goto err_out;
                }
                fd_install(uresp.event_fd, filp);
        }
        return &ctx->ib_ucontext;

err_out:
        kfree(ctx);
        atomic_dec(&sdev->num_ctx);
        return ERR_PTR(rv);
}
Example 10: c4iw_alloc_ucontext
static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
                               struct ib_udata *udata)
{
        struct ib_device *ibdev = ucontext->device;
        struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
        struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
        struct c4iw_alloc_ucontext_resp uresp;
        int ret = 0;
        struct c4iw_mm_entry *mm = NULL;

        pr_debug("ibdev %p\n", ibdev);
        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);

        if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
                pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
                rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
        } else {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err;
                }

                uresp.status_page_size = PAGE_SIZE;

                spin_lock(&context->mmap_lock);
                uresp.status_page_key = context->key;
                context->key += PAGE_SIZE;
                spin_unlock(&context->mmap_lock);

                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err_mm;

                mm->key = uresp.status_page_key;
                mm->addr = virt_to_phys(rhp->rdev.status_page);
                mm->len = PAGE_SIZE;
                insert_mmap(context, mm);
        }
        return 0;
err_mm:
        kfree(mm);
err:
        return ret;
}
Example 11: to_mdev
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
                                struct ib_udata *udata)
{
        struct mlx5_ib_ah *ah;
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        enum rdma_link_layer ll;

        ll = pd->device->get_link_layer(pd->device, ah_attr->port_num);

        if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
                return ERR_PTR(-EINVAL);

        if (ll == IB_LINK_LAYER_ETHERNET && udata) {
                int err;
                struct mlx5_ib_create_ah_resp resp = {};
                u32 min_resp_len = offsetof(typeof(resp), dmac) +
                                   sizeof(resp.dmac);

                if (udata->outlen < min_resp_len)
                        return ERR_PTR(-EINVAL);

                resp.response_length = min_resp_len;

                err = ib_resolve_eth_dmac(pd->device, ah_attr);
                if (err)
                        return ERR_PTR(err);

                memcpy(resp.dmac, ah_attr->dmac, ETH_ALEN);
                err = ib_copy_to_udata(udata, &resp, resp.response_length);
                if (err)
                        return ERR_PTR(err);
        }

        ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        return create_ib_ah(dev, ah, ah_attr, ll); /* never fails */
}
Example 12: PDBG
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
                                    int vector,
                                    struct ib_ucontext *ib_context,
                                    struct ib_udata *udata)
{
        struct iwch_dev *rhp;
        struct iwch_cq *chp;
        struct iwch_create_cq_resp uresp;
        struct iwch_create_cq_req ureq;
        struct iwch_ucontext *ucontext = NULL;

        PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
        rhp = to_iwch_dev(ibdev);
        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        if (ib_context) {
                ucontext = to_iwch_ucontext(ib_context);
                if (!t3a_device(rhp)) {
                        if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
                                kfree(chp);
                                return ERR_PTR(-EFAULT);
                        }
                        chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
                }
        }

        if (t3a_device(rhp)) {
                /*
                 * T3A: Add some fluff to handle extra CQEs inserted
                 * for various errors.
                 * Additional CQE possibilities:
                 *      TERMINATE,
                 *      incoming RDMA WRITE Failures
                 *      incoming RDMA READ REQUEST FAILUREs
                 * NOTE: We cannot ensure the CQ won't overflow.
                 */
                entries += 16;
        }
        entries = roundup_pow_of_two(entries);
        chp->cq.size_log2 = ilog2(entries);

        if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
                kfree(chp);
                return ERR_PTR(-ENOMEM);
        }
        chp->rhp = rhp;
        chp->ibcq.cqe = 1 << chp->cq.size_log2;
        spin_lock_init(&chp->lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

        if (ucontext) {
                struct iwch_mm_entry *mm;

                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm) {
                        iwch_destroy_cq(&chp->ibcq);
                        return ERR_PTR(-ENOMEM);
                }
                uresp.cqid = chp->cq.cqid;
                uresp.size_log2 = chp->cq.size_log2;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                        kfree(mm);
                        iwch_destroy_cq(&chp->ibcq);
                        return ERR_PTR(-EFAULT);
                }
                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
                                     sizeof(struct t3_cqe));
                insert_mmap(ucontext, mm);
        }
        PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
             chp->cq.cqid, chp, (1 << chp->cq.size_log2),
             (unsigned long long)chp->cq.dma_addr);
        return &chp->ibcq;
}
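The iwch/c4iw examples pair ib_copy_to_udata() with a small mmap bookkeeping scheme: the kernel hands userspace a per-context "key" in the response, records which buffer that key refers to, and later matches the key against the offset passed to the driver's mmap handler. A sketch of the bookkeeping half, using hypothetical my_* types rather than any real driver's:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct my_mm_entry {
        struct list_head entry;
        u32 key;               /* pseudo mmap offset reported via udata */
        unsigned long addr;    /* physical address backing the mapping */
        unsigned long len;
};

struct my_ucontext {
        spinlock_t mmap_lock;
        struct list_head mmaps;
        u32 key;               /* last pseudo offset handed out */
};

/* Reserve a key for one buffer; returns 0 on allocation failure
 * (0 is never handed out as a valid key in this sketch). */
static u32 my_reserve_mmap_key(struct my_ucontext *uctx,
                               unsigned long addr, unsigned long len)
{
        struct my_mm_entry *mm = kmalloc(sizeof(*mm), GFP_KERNEL);

        if (!mm)
                return 0;

        spin_lock(&uctx->mmap_lock);
        uctx->key += PAGE_SIZE;
        mm->key = uctx->key;
        mm->addr = addr;
        mm->len = len;
        list_add(&mm->entry, &uctx->mmaps);
        spin_unlock(&uctx->mmap_lock);

        return mm->key;
}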
Example 13: qib_modify_srq
/**
 * qib_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 */
int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                   enum ib_srq_attr_mask attr_mask,
                   struct ib_udata *udata)
{
        struct qib_srq *srq = to_isrq(ibsrq);
        struct qib_rwq *wq;
        int ret = 0;

        if (attr_mask & IB_SRQ_MAX_WR) {
                struct qib_rwq *owq;
                struct qib_rwqe *p;
                u32 sz, size, n, head, tail;

                /* Check that the requested sizes are below the limits. */
                if ((attr->max_wr > ib_qib_max_srq_wrs) ||
                    ((attr_mask & IB_SRQ_LIMIT) ?
                     attr->srq_limit : srq->limit) > attr->max_wr) {
                        ret = -EINVAL;
                        goto bail;
                }

                sz = sizeof(struct qib_rwqe) +
                     srq->rq.max_sge * sizeof(struct ib_sge);
                size = attr->max_wr + 1;
                wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
                if (!wq) {
                        ret = -ENOMEM;
                        goto bail;
                }

                /* Check that we can write the offset to mmap. */
                if (udata && udata->inlen >= sizeof(__u64)) {
                        __u64 offset_addr;
                        __u64 offset = 0;

                        ret = ib_copy_from_udata(&offset_addr, udata,
                                                 sizeof(offset_addr));
                        if (ret)
                                goto bail_free;
                        udata->outbuf =
                                (void __user *)(unsigned long)offset_addr;
                        ret = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (ret)
                                goto bail_free;
                }

                spin_lock_irq(&srq->rq.lock);
                /*
                 * validate head and tail pointer values and compute
                 * the number of remaining WQEs.
                 */
                owq = srq->rq.wq;
                head = owq->head;
                tail = owq->tail;
                if (head >= srq->rq.size || tail >= srq->rq.size) {
                        ret = -EINVAL;
                        goto bail_unlock;
                }
                n = head;
                if (n < tail)
                        n += srq->rq.size - tail;
                else
                        n -= tail;
                if (size <= n) {
                        ret = -EINVAL;
                        goto bail_unlock;
                }
                n = 0;
                p = wq->wq;
                while (tail != head) {
                        struct qib_rwqe *wqe;
                        int i;

                        wqe = get_rwqe_ptr(&srq->rq, tail);
                        p->wr_id = wqe->wr_id;
                        p->num_sge = wqe->num_sge;
                        for (i = 0; i < wqe->num_sge; i++)
                                p->sg_list[i] = wqe->sg_list[i];
                        n++;
                        p = (struct qib_rwqe *)((char *)p + sz);
                        if (++tail >= srq->rq.size)
                                tail = 0;
                }
                srq->rq.wq = wq;
                srq->rq.size = size;
                wq->head = n;
                wq->tail = 0;
                if (attr_mask & IB_SRQ_LIMIT)
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.lock);

                vfree(owq);
//......... remainder of this function omitted .........
Example 14: to_idev
/**
 * qib_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 */
struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
                              struct ib_srq_init_attr *srq_init_attr,
                              struct ib_udata *udata)
{
        struct qib_ibdev *dev = to_idev(ibpd->device);
        struct qib_srq *srq;
        u32 sz;
        struct ib_srq *ret;

        if (srq_init_attr->attr.max_sge == 0 ||
            srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
            srq_init_attr->attr.max_wr == 0 ||
            srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
                ret = ERR_PTR(-EINVAL);
                goto done;
        }

        srq = kmalloc(sizeof(*srq), GFP_KERNEL);
        if (!srq) {
                ret = ERR_PTR(-ENOMEM);
                goto done;
        }

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        srq->rq.size = srq_init_attr->attr.max_wr + 1;
        srq->rq.max_sge = srq_init_attr->attr.max_sge;
        sz = sizeof(struct ib_sge) * srq->rq.max_sge +
             sizeof(struct qib_rwqe);
        srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
        if (!srq->rq.wq) {
                ret = ERR_PTR(-ENOMEM);
                goto bail_srq;
        }

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See qib_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                int err;
                u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;

                srq->ip =
                        qib_create_mmap_info(dev, s, ibpd->uobject->context,
                                             srq->rq.wq);
                if (!srq->ip) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_wq;
                }

                err = ib_copy_to_udata(udata, &srq->ip->offset,
                                       sizeof(srq->ip->offset));
                if (err) {
                        ret = ERR_PTR(err);
                        goto bail_ip;
                }
        } else
                srq->ip = NULL;

        /*
         * ib_create_srq() will initialize srq->ibsrq.
         */
        spin_lock_init(&srq->rq.lock);
        srq->rq.wq->head = 0;
        srq->rq.wq->tail = 0;
        srq->limit = srq_init_attr->attr.srq_limit;

        spin_lock(&dev->n_srqs_lock);
        if (dev->n_srqs_allocated == ib_qib_max_srqs) {
                spin_unlock(&dev->n_srqs_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }
        dev->n_srqs_allocated++;
        spin_unlock(&dev->n_srqs_lock);

        if (srq->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        ret = &srq->ibsrq;
        goto done;

bail_ip:
        kfree(srq->ip);
bail_wq:
        vfree(srq->rq.wq);
bail_srq:
        kfree(srq);
//......... remainder of this function omitted .........
Example 15: PDBG
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
                                    const struct ib_cq_init_attr *attr,
                                    struct ib_ucontext *ib_context,
                                    struct ib_udata *udata)
{
        int entries = attr->cqe;
        struct iwch_dev *rhp;
        struct iwch_cq *chp;
        struct iwch_create_cq_resp uresp;
        struct iwch_create_cq_req ureq;
        struct iwch_ucontext *ucontext = NULL;
        static int warned;
        size_t resplen;

        PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
        if (attr->flags)
                return ERR_PTR(-EINVAL);

        rhp = to_iwch_dev(ibdev);
        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        if (ib_context) {
                ucontext = to_iwch_ucontext(ib_context);
                if (!t3a_device(rhp)) {
                        if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
                                kfree(chp);
                                return ERR_PTR(-EFAULT);
                        }
                        chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
                }
        }

        if (t3a_device(rhp)) {
                /*
                 * T3A: Add some fluff to handle extra CQEs inserted
                 * for various errors.
                 * Additional CQE possibilities:
                 *      TERMINATE,
                 *      incoming RDMA WRITE Failures
                 *      incoming RDMA READ REQUEST FAILUREs
                 * NOTE: We cannot ensure the CQ won't overflow.
                 */
                entries += 16;
        }
        entries = roundup_pow_of_two(entries);
        chp->cq.size_log2 = ilog2(entries);

        if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
                kfree(chp);
                return ERR_PTR(-ENOMEM);
        }
        chp->rhp = rhp;
        chp->ibcq.cqe = 1 << chp->cq.size_log2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
                cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
                kfree(chp);
                return ERR_PTR(-ENOMEM);
        }

        if (ucontext) {
                struct iwch_mm_entry *mm;

                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm) {
                        iwch_destroy_cq(&chp->ibcq);
                        return ERR_PTR(-ENOMEM);
                }
                uresp.cqid = chp->cq.cqid;
                uresp.size_log2 = chp->cq.size_log2;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                if (udata->outlen < sizeof uresp) {
                        if (!warned++)
                                printk(KERN_WARNING MOD "Warning - "
                                       "downlevel libcxgb3 (non-fatal).\n");
                        mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
                                             sizeof(struct t3_cqe));
                        resplen = sizeof(struct iwch_create_cq_resp_v0);
                } else {
                        mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
                                             sizeof(struct t3_cqe));
                        uresp.memsize = mm->len;
                        uresp.reserved = 0;
                        resplen = sizeof uresp;
                }
                if (ib_copy_to_udata(udata, &uresp, resplen)) {
                        kfree(mm);
                        iwch_destroy_cq(&chp->ibcq);
                        return ERR_PTR(-EFAULT);
//......... remainder of this function omitted .........