本文整理汇总了C++中sg_dma_len函数的典型用法代码示例。如果您正苦于以下问题:C++ sg_dma_len函数的具体用法?C++ sg_dma_len怎么用?C++ sg_dma_len使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了sg_dma_len函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: mv_cesa_req_dma_iter_next_transfer
/*
 * Advance the SG DMA iterator by @len bytes.
 *
 * Returns true while more data remains to be transferred for the current
 * operation, false once the scatterlist or the operation length is
 * exhausted.
 */
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
					struct mv_cesa_sg_dma_iter *sgiter,
					unsigned int len)
{
	/* Nothing left to walk. */
	if (!sgiter->sg)
		return false;

	/* Account for the chunk that was just consumed. */
	sgiter->op_offset += len;
	sgiter->offset += len;

	/* Current SG entry fully consumed: step to the next one. */
	if (sg_dma_len(sgiter->sg) == sgiter->offset) {
		if (sg_is_last(sgiter->sg))
			return false;

		sgiter->sg = sg_next(sgiter->sg);
		sgiter->offset = 0;
	}

	/* Keep going until the whole operation has been covered. */
	return sgiter->op_offset != iter->op_len;
}
示例2: viafb_dma_copy_out_sg
/*
 * Do a scatter/gather DMA copy from FB memory. You must have done
 * a successful call to viafb_request_dma() first.
 *
 * NOTE(review): this excerpt is truncated -- the descriptor chain is
 * built here, but the code that starts the engine, waits for completion,
 * drops viafb_dma_lock and frees the descriptor page lies beyond the
 * visible lines.
 */
int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
{
	struct viafb_vx855_dma_descr *descr;
	void *descrpages;
	dma_addr_t descr_handle;
	unsigned long flags;
	int i;
	struct scatterlist *sgentry;
	dma_addr_t nextdesc;

	/*
	 * Get a place to put the descriptors: one coherent allocation
	 * holding one hardware descriptor per scatterlist entry.
	 */
	descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
			nsg*sizeof(struct viafb_vx855_dma_descr),
			&descr_handle, GFP_KERNEL);
	if (descrpages == NULL) {
		dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n");
		return -ENOMEM;
	}
	mutex_lock(&viafb_dma_lock);
	/*
	 * Fill them in.  Each descriptor carries the DMA address/length of
	 * one SG entry and chains to the next descriptor in the page.
	 */
	descr = descrpages;
	nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
	for_each_sg(sg, sgentry, nsg, i) {
		dma_addr_t paddr = sg_dma_address(sgentry);
		/* Address split low/high; low 4 bits masked off (16-byte aligned). */
		descr->addr_low = paddr & 0xfffffff0;
		descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
		descr->fb_offset = offset;
		/* seg_size is expressed in 16-byte units (hence >> 4). */
		descr->seg_size = sg_dma_len(sgentry) >> 4;
		descr->tile_mode = 0;
		/* Chain pointer gets the magic cookie OR-ed into its low word. */
		descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
		descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
		descr->pad = 0xffffffff; /* VIA driver does this */
		offset += sg_dma_len(sgentry);
		nextdesc += sizeof(struct viafb_vx855_dma_descr);
		descr++;
	}
示例3: videobuf_dma_map
/*
 * Build a scatterlist for whichever backing store this buffer uses
 * (user pages, vmalloc area or a raw bus address) and, unless it is a
 * raw bus address, hand it to the DMA mapping layer.
 */
int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
{
	MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
	BUG_ON(dma->nr_pages == 0);

	if (dma->pages)
		dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
						   dma->offset);

	if (dma->vmalloc)
		dma->sglist = videobuf_vmalloc_to_sg(dma->vmalloc,
						     dma->nr_pages);

	if (dma->bus_addr) {
		/* Pre-mapped bus address: fake a one-entry scatterlist. */
		dma->sglist = kmalloc(sizeof(struct scatterlist), GFP_KERNEL);
		if (dma->sglist != NULL) {
			dma->sglen = 1;
			sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK;
			dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
			sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
		}
	}

	if (dma->sglist == NULL) {
		dprintk(1,"scatterlist is NULL\n");
		return -ENOMEM;
	}

	/* A raw bus address needs no IOMMU mapping. */
	if (!dma->bus_addr) {
		dma->sglen = dma_map_sg(q->dev, dma->sglist,
					dma->nr_pages, dma->direction);
		if (dma->sglen == 0) {
			printk(KERN_WARNING
			       "%s: videobuf_map_sg failed\n",__func__);
			kfree(dma->sglist);
			dma->sglist = NULL;
			dma->sglen = 0;
			return -EIO;
		}
	}

	return 0;
}
示例4: dma_start
/*
 * Configure and kick off a DMA transfer for the current MMC request.
 *
 * Maps the request's scatterlist, configures the DMA channel for the
 * transfer direction, enqueues every mapped segment and starts the
 * channel.  Returns 0 on success or a negative errno.
 */
static int dma_start(struct rk_mmc *host)
{
	struct mmc_data *data = host->data;
	enum rk29_dmasrc src;
	int i, res, sg_len;

	BUG_ON(!data);

	host->dma_xfer_size = 0;

	/*
	 * Pick the DMA source from the request direction.  (The old code
	 * also computed a dma_data_direction here, but never used it.)
	 */
	if (data->flags & MMC_DATA_READ)
		src = RK29_DMASRC_HW;
	else
		src = RK29_DMASRC_MEM;

	sg_len = rk_mmc_pre_dma_transfer(host, host->data, 0);
	if (sg_len < 0) {
		host->ops->stop(host);
		return sg_len;
	}

	res = rk29_dma_devconfig(MMC_DMA_CHN, src, host->dma_addr);
	if (unlikely(res < 0))
		return res;

	/* Queue every mapped segment on the DMA channel. */
	for (i = 0; i < sg_len; i++) {
		res = rk29_dma_enqueue(MMC_DMA_CHN, host,
				       sg_dma_address(&data->sg[i]),
				       sg_dma_len(&data->sg[i]));
		if (unlikely(res < 0))
			return res;
	}

	return rk29_dma_ctrl(MMC_DMA_CHN, RK29_DMAOP_START);
}
示例5: mmc_dma_rx_start
/*
 * Supports scatter/gather.
 *
 * Map the request's receive buffers for DMA and queue linked-list
 * entries for each mapped segment, splitting segments into chunks the
 * DMA controller can handle.
 */
static void mmc_dma_rx_start(struct mmci_host *host)
{
	/* Hardware limit on bytes per linked-list DMA entry. */
	const u32 max_seg = 15872;
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *reqdata = mrq->data;
	struct scatterlist *sg = reqdata->sg;
	void *dmaaddr;
	u32 dmalen, dmaxferlen;
	int i, dma_len;

	dma_len = dma_map_sg(
		mmc_dev(host->mmc), reqdata->sg, reqdata->sg_len,
		DMA_FROM_DEVICE);
	if (dma_len == 0)
		return;

	/*
	 * Setup transfer.  Iterate over the number of entries returned by
	 * dma_map_sg(): the mapping may coalesce entries, and sg_dma_len()
	 * of entries beyond that count is undefined.  (The old code looped
	 * over the pre-map sg_len.)
	 */
	for (i = 0; i < dma_len; i++) {
		dmalen = (u32) sg_dma_len(&sg[i]);
		dmaaddr = (void *) sg_dma_address(&sg[i]);

		/* Build a list with at most max_seg bytes per entry. */
		while (dmalen > 0) {
			dmaxferlen = dmalen;
			if (dmaxferlen > max_seg)
				dmaxferlen = max_seg;

			lpc178x_dma_queue_llist_entry(dmac_drvdat.lastch,
				(void *) SD_FIFO((u32)host->base),
				dmaaddr, dmaxferlen);

			dmaaddr += dmaxferlen;
			dmalen -= dmaxferlen;
		}
	}
}
示例6: tegra_gem_prime_map_dma_buf
/*
 * Export a tegra_bo as an sg_table for a dma-buf attachment.
 *
 * A paged BO gets one entry per page, mapped for @dir; a contiguous BO
 * is described by a single pre-filled entry.  Returns NULL on failure.
 */
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (!bo->pages) {
		/* Contiguous buffer: a single entry covers the whole BO. */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	} else {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}
示例7: tegra_gem_prime_map_dma_buf
/*
 * Export a (contiguous) tegra_bo as a one-entry sg_table for a dma-buf
 * attachment.  Returns NULL on allocation failure.
 */
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (sgt == NULL)
		return NULL;

	if (sg_alloc_table(sgt, 1, GFP_KERNEL) != 0) {
		kfree(sgt);
		return NULL;
	}

	/* The BO is physically contiguous: one entry describes it all. */
	sg_dma_address(sgt->sgl) = bo->paddr;
	sg_dma_len(sgt->sgl) = gem->size;

	return sgt;
}
示例8: fill_xfer_opecodes
/*
 * Emit one transfer opcode per scatterlist entry into @op_ptr.
 *
 * xfer_first is applied only to the first generated opcode; xfer_last is
 * applied only to the opcode for the final SG entry.  Returns the number
 * of opcodes written, or 0 on invalid arguments.
 *
 * NOTE(review): this excerpt is truncated -- the `return op_count;` and
 * the function's closing brace lie beyond the visible lines.
 */
static unsigned int fill_xfer_opecodes(
	struct opecode* op_ptr ,
	struct scatterlist* sg_list ,
	unsigned int sg_nums ,
	bool xfer_first,
	bool xfer_last ,
	unsigned int xfer_mode
)
{
	struct scatterlist* curr_sg;
	int sg_index;
	unsigned int op_count;
	dma_addr_t dma_address;
	unsigned int dma_length;
	bool dma_last;

	/* Reject NULL/error pointers and empty lists up front. */
	if (IS_ERR_OR_NULL(op_ptr) || IS_ERR_OR_NULL(sg_list) || (sg_nums < 1)) {
		return 0;
	}

	op_count = 0;
	for_each_sg(sg_list, curr_sg, sg_nums, sg_index) {
		dma_address = sg_dma_address(curr_sg);
		dma_length = sg_dma_len(curr_sg);
		/* Only the very last SG entry may carry the xfer_last flag. */
		dma_last = (sg_index >= sg_nums-1) ? xfer_last : 0;
		set_xfer_opecode(
			op_ptr , /* struct opecode* op_ptr */
			0 , /* bool fetch */
			0 , /* bool done */
			xfer_first , /* bool xfer_first */
			dma_last , /* bool xfer_last */
			dma_address, /* dma_addr_t addr */
			dma_length , /* unsigned int size */
			xfer_mode /* unsigned int mode */
		);
		op_count++;
		op_ptr++;
		/* Only the first opcode is marked xfer_first. */
		xfer_first = 0;
	}
示例9: samsung_dmadev_prepare
/*
 * Prepare a DMA descriptor on channel @ch according to @info.
 *
 * DMA_SLAVE builds a one-entry scatterlist around info->buf/len;
 * DMA_CYCLIC prepares a cyclic transfer.  The completion callback
 * (info->fp) is attached and the descriptor is submitted.  Returns 0 on
 * success, -EFAULT on an unsupported capability or preparation failure.
 */
static int samsung_dmadev_prepare(unsigned ch,
			struct samsung_dma_prep_info *info)
{
	struct scatterlist sg;
	struct dma_chan *chan = (struct dma_chan *)ch;
	struct dma_async_tx_descriptor *desc;

	switch (info->cap) {
	case DMA_SLAVE:
		sg_init_table(&sg, 1);
		sg_dma_len(&sg) = info->len;
		sg_set_page(&sg, pfn_to_page(PFN_DOWN(info->buf)),
			    info->len, offset_in_page(info->buf));
		sg_dma_address(&sg) = info->buf;

		desc = chan->device->device_prep_slave_sg(chan,
			&sg, 1, info->direction, DMA_PREP_INTERRUPT);
		break;
	case DMA_CYCLIC:
		desc = chan->device->device_prep_dma_cyclic(chan,
			info->buf, info->len, info->period, info->direction);
		break;
	default:
		dev_err(&chan->dev->device, "unsupported format\n");
		return -EFAULT;
	}

	if (!desc) {
		/*
		 * Fix: the old message said "cannot prepare cyclic dma" even
		 * when the DMA_SLAVE preparation was the one that failed.
		 */
		dev_err(&chan->dev->device, "cannot prepare dma transfer\n");
		return -EFAULT;
	}

	desc->callback = info->fp;
	desc->callback_param = info->fp_param;

	dmaengine_submit((struct dma_async_tx_descriptor *)desc);

	return 0;
}
示例10: usdhi6_sg_unmap
/* Unmap the current page: common for multiple and single block IO */
static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
{
	struct mmc_data *data = host->mrq->data;
	struct page *page = host->head_pg.page;

	if (page) {
		/* Previous block was cross-page boundary */
		struct scatterlist *sg = data->sg_len > 1 ?
			host->sg : data->sg;
		size_t blk_head = host->head_len;

		/*
		 * On a successful read, copy the bounce buffer back: the
		 * first blk_head bytes go to the tail of the head page, the
		 * remainder of the block to the start of the current page.
		 */
		if (!data->error && data->flags & MMC_DATA_READ) {
			memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
			       host->bounce_buf, blk_head);
			memcpy(host->pg.mapped, host->bounce_buf + blk_head,
			       data->blksz - blk_head);
		}

		flush_dcache_page(page);
		kunmap(page);

		host->head_pg.page = NULL;

		/*
		 * Unless forced, keep the current page mapped if this SG
		 * entry still has blocks left in it.
		 */
		if (!force && sg_dma_len(sg) + sg->offset >
		    (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
			/* More blocks in this SG, don't unmap the next page */
			return;
	}

	page = host->pg.page;
	if (!page)
		return;

	flush_dcache_page(page);
	kunmap(page);

	host->pg.page = NULL;
}
示例11: camera_core_start_overlay
/*
 * Start (or top up) overlay DMA: point the single-entry overlay
 * scatterlist at the framebuffer and keep up to two transfers queued.
 * No-op when preview is not active.
 */
static void
camera_core_start_overlay(struct camera_device *cam)
{
	unsigned long irqflags;
	int err;

	if (!cam->previewing)
		return;

	spin_lock_irqsave(&cam->overlay_lock, irqflags);

	sg_dma_address(&cam->overlay_sglist) = cam->overlay_base_phys;
	sg_dma_len(&cam->overlay_sglist) = cam->pix.sizeimage;

	/* Keep two overlay transfers in flight; stop on the first error. */
	while (cam->overlay_cnt < 2) {
		err = camera_core_sgdma_queue(cam, &cam->overlay_sglist, 1,
					      camera_core_overlay_callback,
					      NULL);
		if (err)
			break;
		cam->overlay_cnt++;
	}

	spin_unlock_irqrestore(&cam->overlay_lock, irqflags);
}
示例12: hptiop_buildsgl
/*
 * Translate a SCSI command's mapped scatterlist into the controller's
 * SG descriptor format (@psg), marking the final entry with eot.
 *
 * NOTE(review): this excerpt is truncated -- the function's return
 * statement (presumably the segment count) and closing brace lie beyond
 * the visible lines.
 */
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		/* eot flags the last descriptor in the chain. */
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
示例13: vb2_dma_contig_map_dmabuf
/*
 * Map an attached dma-buf and record its DMA address/size in the buffer.
 *
 * For dma-contig the imported buffer is assumed to map to a single
 * scatterlist entry; a warning is printed otherwise and only the first
 * entry is used.  The sg_table is stashed in dmabuf->priv so the unmap
 * path can retrieve it.
 */
static void vb2_dma_contig_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct dma_buf *dmabuf;
	struct sg_table *sg;
	enum dma_data_direction dir;

	if (!buf || !buf->db_attach)
		return;

	WARN_ON(buf->dma_addr);

	dmabuf = buf->db_attach->dmabuf;

	/* TODO need a way to know if we are camera or display, etc.. */
	dir = DMA_BIDIRECTIONAL;

	/* get the associated sg for this buffer */
	sg = dma_buf_map_attachment(buf->db_attach, dir);
	/*
	 * Fix: dma_buf_map_attachment() reports failure with ERR_PTR(),
	 * not NULL -- the old `!sg` test let error pointers through and
	 * they were then dereferenced below.
	 */
	if (IS_ERR_OR_NULL(sg))
		return;

	/*
	 * convert sglist to paddr:
	 * Assumption: for dma-contig, dmabuf would map to single entry
	 * Will print a warning if it has more than one.
	 */
	if (sg->nents > 1)
		printk(KERN_WARNING
		       "dmabuf scatterlist has more than 1 entry\n");

	buf->dma_addr = sg_dma_address(sg->sgl);
	buf->size = sg_dma_len(sg->sgl);

	/* save this sg in dmabuf for put_scatterlist */
	dmabuf->priv = sg;
}
示例14: kzalloc
/*
 * Export an OMAP GEM object as a one-entry sg_table for a dma-buf
 * attachment.  The object is pinned (physically contiguous DMA address)
 * for the lifetime of the mapping.  Returns ERR_PTR on failure.
 */
static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t dma_addr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* camera, etc, need physically contiguous.. but we need a
	 * better way to know this..
	 */
	ret = omap_gem_pin(obj, &dma_addr);
	if (ret)
		goto out;

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto out_unpin;

	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
	sg_dma_address(sg->sgl) = dma_addr;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	return sg;

out_unpin:
	/* Fix: the old code leaked the pin when sg_alloc_table() failed. */
	omap_gem_unpin(obj);
out:
	kfree(sg);
	return ERR_PTR(ret);
}
示例15: omap2_mcspi_tx_dma
/*
 * Kick off a DMA transmit for @xfer on the SPI device's TX channel.
 *
 * Builds a one-entry scatterlist around the pre-mapped xfer->tx_dma
 * buffer, prepares and submits the descriptor with a completion
 * callback, then issues the pending work and raises the DMA request.
 */
static void omap2_mcspi_tx_dma(struct spi_device *spi,
				struct spi_transfer *xfer,
				struct dma_slave_config cfg)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	/* (The old code also cached xfer->len in a `count` local that was
	 * never used; it has been removed.) */

	if (mcspi_dma->dma_tx) {
		struct dma_async_tx_descriptor *tx;
		struct scatterlist sg;

		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);

		/* Describe the TX buffer with a one-entry scatterlist. */
		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = xfer->tx_dma;
		sg_dma_len(&sg) = xfer->len;

		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (tx) {
			tx->callback = omap2_mcspi_tx_callback;
			tx->callback_param = spi;
			dmaengine_submit(tx);
		} else {
			/* FIXME: fall back to PIO? */
		}
	}

	dma_async_issue_pending(mcspi_dma->dma_tx);
	omap2_mcspi_set_dma_req(spi, 0, 1);
}