本文整理汇总了C++中sg_next函数的典型用法代码示例。如果您正苦于以下问题:C++ sg_next函数的具体用法?C++ sg_next怎么用?C++ sg_next使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了sg_next函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: __get_userbuf
/*
 * Fetch the user pages that [addr, addr+len) resides in into pg[] and
 * initialise the scatterlist sg with them.
 *
 * @addr:    userspace start address
 * @len:     length in bytes of the user buffer
 * @write:   non-zero if the pages will be written to
 * @pgcount: number of pages the caller sized pg[] and sg[] for
 * @pg:      output array of pinned pages
 * @sg:      scatterlist (pgcount entries) to initialise
 * @task/@mm: task context whose address space is pinned
 *
 * Returns 0 on success, -EINVAL if the pages could not all be pinned.
 */
int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
		unsigned int pgcount, struct page **pg, struct scatterlist *sg,
		struct task_struct *task, struct mm_struct *mm)
{
	int ret, pglen, i = 0;
	struct scatterlist *sgp;

	/* Nothing to pin: mark the list empty and report success. */
	if (unlikely(!pgcount || !len || !addr)) {
		sg_mark_end(sg);
		return 0;
	}

	down_read(&mm->mmap_sem);
	ret = get_user_pages(task, mm,
			(unsigned long)addr, pgcount, write, 0, pg, NULL);
	up_read(&mm->mmap_sem);
	if (ret != pgcount) {
		/*
		 * Partial pin (0 < ret < pgcount) used to leak the pages
		 * already gotten; release them before failing.  A negative
		 * ret skips the loop entirely.
		 */
		int j;

		for (j = 0; j < ret; j++)
			put_page(pg[j]);
		return -EINVAL;
	}

	sg_init_table(sg, pgcount);

	/* First entry may start at a non-zero offset within its page. */
	pglen = min((ptrdiff_t)(PAGE_SIZE - PAGEOFFSET(addr)), (ptrdiff_t)len);
	sg_set_page(sg, pg[i++], pglen, PAGEOFFSET(addr));
	len -= pglen;

	/* Subsequent entries are page-aligned. */
	for (sgp = sg_next(sg); len; sgp = sg_next(sgp)) {
		pglen = min((uint32_t)PAGE_SIZE, len);
		sg_set_page(sgp, pg[i++], pglen, 0);
		len -= pglen;
	}
	sg_mark_end(sg_last(sg, pgcount));
	return 0;
}
示例2: xio_tbl_set_nents
/*---------------------------------------------------------------------------*/
/*
 * Set the logical entry count (nents) of @tbl to @nents without
 * reallocating the table.  The end marker is moved from the current
 * last entry to the new one so chain walks terminate correctly.
 * No-op when @tbl is NULL or was allocated with fewer than @nents
 * entries.
 */
static inline void xio_tbl_set_nents(struct sg_table *tbl, uint32_t nents)
{
	struct scatterlist *sg;
	int i;

#ifdef XIO_DEBUG_SG
	/* NOTE(review): verify_tbl() runs before the NULL check below —
	 * confirm it tolerates a NULL table pointer. */
	verify_tbl(tbl);
#endif
	if (!tbl || tbl->orig_nents < nents)
		return;

	sg = tbl->sgl;
	/* tbl->nents is unsigned so if tbl->nents is ZERO then tbl->nents - 1
	 * is a huge number, so check this.
	 */
	if (tbl->nents && (tbl->nents < tbl->orig_nents)) {
		/* Walk to the current last entry and clear its end mark. */
		for (i = 0; i < tbl->nents - 1; i++)
			sg = sg_next(sg);
		sg_unmark_end(sg);
	}

	/* Zero entries: nothing to mark, just record the new count. */
	if (!nents) {
		tbl->nents = nents;
		return;
	}

	/* Walk to the new last entry and mark it as the end. */
	sg = tbl->sgl;
	for (i = 0; i < nents - 1; i++)
		sg = sg_next(sg);
	sg_mark_end(sg);
	tbl->nents = nents;
}
示例3: flexrm_spu_estimate_nonheader_desc_count
/*
 * Estimate how many non-header descriptors an SPU message will need:
 * one per source entry, plus the destination entries that consume each
 * source's output; once sources run out, any remaining destination
 * entries are counted as well.
 */
static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	struct scatterlist *src = msg->spu.src;
	struct scatterlist *dst = msg->spu.dst;
	unsigned int remaining = 0;
	u32 count = 0;

	while (src || dst) {
		if (src) {
			count++;
			remaining = src->length;
			src = sg_next(src);
		} else {
			/* No more sources: drain all remaining dst entries. */
			remaining = UINT_MAX;
		}

		while (remaining && dst) {
			count++;
			remaining = (dst->length < remaining) ?
				    remaining - dst->length : 0;
			dst = sg_next(dst);
		}
	}

	return count;
}
示例4: flexrm_spu_sanity_check
static bool flexrm_spu_sanity_check(struct brcm_message *msg)
{
struct scatterlist *sg;
if (!msg->spu.src || !msg->spu.dst)
return false;
for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
if (sg->length & 0xf) {
if (sg->length > SRC_LENGTH_MASK)
return false;
} else {
if (sg->length > (MSRC_LENGTH_MASK * 16))
return false;
}
}
for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
if (sg->length & 0xf) {
if (sg->length > DST_LENGTH_MASK)
return false;
} else {
if (sg->length > (MDST_LENGTH_MASK * 16))
return false;
}
}
return true;
}
示例5: cc_dma_map_sg
/*
 * DMA-map up to @nents scatterlist entries one at a time so that a
 * failure partway through can be unwound.  Returns @nents on success
 * (also when the list ends early) and 0 on failure, with every entry
 * mapped so far unmapped again.
 */
static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
			 enum dma_data_direction direction)
{
	u32 mapped, k;
	struct scatterlist *cur = sg;

	for (mapped = 0; mapped < nents && cur; mapped++) {
		if (dma_map_sg(dev, cur, 1, direction) != 1) {
			dev_err(dev, "dma_map_page() sg buffer failed\n");
			goto unwind;
		}
		cur = sg_next(cur);
	}
	return nents;

unwind:
	/* Restore mapped parts */
	for (k = 0; k < mapped && sg; k++) {
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}
	return 0;
}
示例6: ss_sg_table_init
/*
 * Make a sg_table based on sg[] of crypto request.
 *
 * Builds a new table of the same shape as @sg (one entry per page,
 * entry lengths copied from the source's DMA lengths) but pointing at
 * the contiguous buffer @vbase.  Returns 0 on success or -ENOMEM when
 * the table cannot be allocated.
 */
static int ss_sg_table_init(struct sg_table *sgt, struct scatterlist *sg,
			int len, char *vbase, dma_addr_t pbase)
{
	int i;
	int npages = 0;
	int offset = 0;
	struct scatterlist *src_sg = sg;
	struct scatterlist *dst_sg = NULL;

	npages = ss_sg_cnt(sg, len);
	WARN_ON(npages == 0);

	if (sg_alloc_table(sgt, npages, GFP_KERNEL)) {
		SS_ERR("sg_alloc_table(%d) failed!\n", npages);
		WARN_ON(1);
		/* Was falling through and dereferencing sgt->sgl (NULL). */
		return -ENOMEM;
	}

	dst_sg = sgt->sgl;
	for (i = 0; i < npages; i++) {
		/* Mirror the source entry's length at vbase + offset. */
		sg_set_buf(dst_sg, vbase + offset, sg_dma_len(src_sg));
		offset += sg_dma_len(src_sg);
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

	return 0;
}
示例7: while
/*
 * Write the non-header FlexRM descriptors for an SPU request into the
 * ring.  Each source entry is emitted, then destination entries are
 * emitted until they cover that source's DMA length; once sources run
 * out, remaining destination entries are drained.  DMA lengths that are
 * 16-byte multiples use the "multi" encodings carrying length/16.
 * Finishes with a null descriptor (invalid toggle) and flips the header
 * toggle so hardware only sees the completed sequence.  Returns the
 * descriptor pointer past the last written descriptor.
 */
static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
				    u32 reqid, void *desc_ptr, u32 toggle,
				    void *start_desc, void *end_desc)
{
	u64 d;
	u32 nhpos = 0;
	void *orig_desc_ptr = desc_ptr;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			/* Pick plain vs "multi" encoding by 16B alignment. */
			if (sg_dma_len(src_sg) & 0xf)
				d = flexrm_src_desc(sg_dma_address(src_sg),
						    sg_dma_len(src_sg));
			else
				d = flexrm_msrc_desc(sg_dma_address(src_sg),
						     sg_dma_len(src_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
			/* Destination entries must cover this many bytes. */
			dst_target = sg_dma_len(src_sg);
			src_sg = sg_next(src_sg);
		} else
			/* No sources left: drain remaining dst entries. */
			dst_target = UINT_MAX;
		while (dst_target && dst_sg) {
			if (sg_dma_len(dst_sg) & 0xf)
				d = flexrm_dst_desc(sg_dma_address(dst_sg),
						    sg_dma_len(dst_sg));
			else
				d = flexrm_mdst_desc(sg_dma_address(dst_sg),
						     sg_dma_len(dst_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
			if (sg_dma_len(dst_sg) < dst_target)
				dst_target -= sg_dma_len(dst_sg);
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	/* Null descriptor with invalid toggle bit */
	flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));

	/* Ensure that descriptors have been written to memory */
	wmb();

	/* Flip toggle bit in header */
	flexrm_flip_header_toogle(orig_desc_ptr);

	return desc_ptr;
}
示例8: bfa_ioim_sgpg_setup
/*
 * Program the chained scatter/gather pages (SGPGs) for an I/O request.
 * The first BFI_SGE_INLINE entries are carried inline with the request;
 * this routine fills the SG pages queued on ioim->sgpg_q with the
 * remaining entries.  Each page is terminated with either a LINK element
 * pointing at the next page or a PGDLEN element on the final page.
 */
static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int sgeid, nsges, i;
	struct bfi_sge_s *sge;
	struct bfa_sgpg_s *sgpg;
	u32 pgcumsz;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

	sg = scsi_sglist(cmnd);
	/* Skip the entry already consumed by the inline SGE.
	 * NOTE(review): a single sg_next() despite sgeid starting at
	 * BFI_SGE_INLINE — presumably BFI_SGE_INLINE == 1; confirm. */
	sg = sg_next(sg);

	do {
		sge = sgpg->sgpg->sges;
		/* Entries still to program, capped at one page's worth. */
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			sge->sg_len = sg_dma_len(sg);
			pgcumsz += sge->sg_len;

			/*
			 * set flags: DATA within a page, DATA_CPL at a page
			 * boundary with more data following, DATA_LAST on
			 * the final entry of the request.
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/*
		 * set the link element of each page: PGDLEN terminates the
		 * chain when all entries are programmed, LINK points at the
		 * next SG page otherwise.  sg_len carries the cumulative
		 * byte count of this page either way.
		 */
		if (sgeid == ioim->nsges) {
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		sge->sg_len = pgcumsz;
	} while (sgeid < ioim->nsges);
}
示例9: dma_buf_to_obj
/*
 * dma-buf exporter map callback: duplicates the object's page list into
 * a freshly allocated sg_table and DMA-maps it for the importing device,
 * so the importer holds an independent mapping.  The object's pages are
 * pinned on success.  Returns the mapped table or ERR_PTR on failure.
 */
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	/* Mirror each entry (page + length) into the new table. */
	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

	/* Error unwind: release resources in reverse acquisition order. */
err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}
示例10: INMSG
/*
 * dma-buf map callback for tee_shm buffers.  If this attachment is
 * already mapped with the requested direction, the cached table is
 * returned.  Otherwise the shm's page list is cloned into the
 * attachment's own sg_table and, unless @dir is DMA_NONE, DMA-mapped
 * for the attaching device.  Returns the table or ERR_PTR on failure.
 */
static struct sg_table *_tee_shm_dma_buf_map_dma_buf(
	struct dma_buf_attachment *attach, enum dma_data_direction dir)
{
	struct tee_shm_attach *tee_shm_attach = attach->priv;
	struct tee_shm *tee_shm = attach->dmabuf->priv;
	struct sg_table *sgt = NULL;
	struct scatterlist *rd, *wr;
	unsigned int i;
	int nents, ret;
	struct tee *tee;

	tee = tee_shm->tee;
	INMSG();

	/* just return current sgt if already requested. */
	if (tee_shm_attach->dir == dir && tee_shm_attach->is_mapped) {
		OUTMSGX(&tee_shm_attach->sgt);
		return &tee_shm_attach->sgt;
	}

	sgt = &tee_shm_attach->sgt;
	ret = sg_alloc_table(sgt, tee_shm->sgt.orig_nents, GFP_KERNEL);
	if (ret) {
		dev_err(_DEV(tee), "failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Clone each entry (page, length, offset) from the shm's table. */
	rd = tee_shm->sgt.sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			dev_err(_DEV(tee), "failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	tee_shm_attach->is_mapped = true;
	tee_shm_attach->dir = dir;
	attach->priv = tee_shm_attach;

	/* NOTE(review): the success path falls through this label too; the
	 * goto above only skips the is_mapped/dir bookkeeping on failure. */
err_unlock:
	OUTMSGX(sgt);
	return sgt;
}
示例11: sahara_sha_hw_links_create
/*
 * Create SAHARA hardware DMA link-table entries for the request's input
 * scatterlist, starting at link index @start.  A chained scatterlist is
 * DMA-mapped one entry at a time; a flat one in a single dma_map_sg()
 * call.  Each link records the entry's length and DMA address and points
 * at the next physical link; the final link's next pointer is zeroed.
 * Returns the index one past the last link used, or -EINVAL when there
 * are not enough hardware links, or -EFAULT on a mapping failure.
 */
static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total);
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	if (rctx->in_sg_chained) {
		i = start;
		sg = dev->in_sg;
		while (sg) {
			/* Map each chained entry individually. */
			ret = dma_map_sg(dev->device, sg, 1,
					 DMA_TO_DEVICE);
			if (!ret)
				return -EFAULT;
			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
			i += 1;
		}
		/* Terminate the chain at the last link written. */
		dev->hw_link[i-1]->next = 0;
	} else {
		sg = dev->in_sg;
		ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
				 DMA_TO_DEVICE);
		if (!ret)
			return -EFAULT;
		for (i = start; i < dev->nb_in_sg + start; i++) {
			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			if (i == (dev->nb_in_sg + start - 1)) {
				/* Last link: terminate, keep sg in place. */
				dev->hw_link[i]->next = 0;
			} else {
				dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
				sg = sg_next(sg);
			}
		}
	}

	return i;
}
示例12: cxgbit_set_one_ppod
/*
 * Fill one page-pod (ppod) with DMA page addresses taken from the
 * scatterlist cursor (*sg_pp, *sg_off), advancing the cursor as entries
 * are consumed.  The extra (PPOD_PAGES_MAX-th) address slot is
 * deliberately duplicated into the following ppod, so the cursor is not
 * moved past it here.
 */
static void
cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
		    struct cxgbi_task_tag_info *ttinfo,
		    struct scatterlist **sg_pp, unsigned int *sg_off)
{
	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
	unsigned int offset = sg_off ? *sg_off : 0;
	dma_addr_t addr = 0UL;
	unsigned int len = 0;
	int i;

	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

	if (sg) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	/* First PPOD_PAGES_MAX slots: one page-sized step per slot. */
	for (i = 0; i < PPOD_PAGES_MAX; i++) {
		if (sg) {
			ppod->addr[i] = cpu_to_be64(addr + offset);
			offset += PAGE_SIZE;
			if (offset == (len + sg->offset)) {
				/* Entry exhausted: advance to the next. */
				offset = 0;
				sg = sg_next(sg);
				if (sg) {
					addr = sg_dma_address(sg);
					len = sg_dma_len(sg);
				}
			}
		} else {
			ppod->addr[i] = 0ULL;
		}
	}

	/*
	 * the fifth address needs to be repeated in the next ppod, so do
	 * not move sg
	 */
	if (sg_pp) {
		*sg_pp = sg;
		*sg_off = offset;
	}

	/* NOTE(review): this exhaustion check compares against len alone,
	 * while the loop above used len + sg->offset — confirm intended. */
	if (offset == len) {
		offset = 0;
		if (sg) {
			sg = sg_next(sg);
			if (sg)
				addr = sg_dma_address(sg);
		}
	}
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}
示例13: i915_mutex_lock_interruptible
/*
 * dma-buf exporter map callback (older variant): copies the object's
 * page list into a new sg_table and DMA-maps it for the importer,
 * pinning the object's pages on success.  Returns the mapped table or
 * an ERR_PTR on failure.
 */
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		st = ERR_PTR(ret);
		goto out;
	}

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret) {
		kfree(st);
		st = ERR_PTR(ret);
		goto out;
	}

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		/* NOTE(review): copies a full PAGE_SIZE per entry rather
		 * than src->length — assumes every entry is exactly one
		 * page; confirm against how obj->pages is built. */
		sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		sg_free_table(st);
		kfree(st);
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	i915_gem_object_pin_pages(obj);
out:
	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;
}
示例14: sg_copy
/**
 * sg_copy - copy the contents of one SG vector into another
 * @dst_sg:        destination SG vector
 * @src_sg:        source SG vector
 * @nents_to_copy: cap on source entries consumed; 0 means no cap
 * @copy_len:      cap on bytes copied; 0 means copy everything
 * @d_km_type:     kmap_atomic slot for destination pages
 * @s_km_type:     kmap_atomic slot for source pages
 *
 * Description:
 *    Walks both vectors, copying element by element, until a limit is
 *    hit or sg_next() returns NULL on either side.  Returns the number
 *    of bytes copied.
 */
int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
	int nents_to_copy, size_t copy_len,
	enum km_type d_km_type, enum km_type s_km_type)
{
	size_t dst_len = dst_sg->length;
	size_t dst_offs = dst_sg->offset;
	int total = 0;

	/* Zero means "unbounded" for both limits. */
	if (copy_len == 0)
		copy_len = 0x7FFFFFFF;
	if (nents_to_copy == 0)
		nents_to_copy = 0x7FFFFFFF;

	for (;;) {
		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
				src_sg, copy_len, d_km_type, s_km_type);

		total += copied;
		copy_len -= copied;

		/* Stop when the byte budget or the destination runs out. */
		if ((copy_len == 0) || (dst_sg == NULL))
			break;

		/* Stop when the entry budget or the source runs out. */
		if (--nents_to_copy == 0)
			break;
		src_sg = sg_next(src_sg);
		if (src_sg == NULL)
			break;
	}

	return total;
}
示例15: sg_miter_next
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been
 *   started using sg_miter_start(). On successful return,
 *   @miter->page, @miter->addr and @miter->length point to the
 *   current mapping.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
 *   @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary. __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = VMM_PAGE_NTH(sg_page(miter->__sg), off >> VMM_PAGE_SHIFT);
	off &= ~VMM_PAGE_MASK;
	miter->length = min_t(unsigned int, len, VMM_PAGE_SIZE - off);
	miter->consumed = miter->length;
	/* NOTE(review): addr is derived from the page pointer plus offset
	 * with no kmap — presumably pages are directly addressable in this
	 * (VMM) environment; confirm. */
	miter->addr = (void *)(miter->page + off);

	return true;
}