This article collects typical usage examples of the dma_sync_single_for_device function (part of the Linux DMA API) in C. If you have been wondering what dma_sync_single_for_device does, how to call it, or what real callers look like, the hand-picked examples below should help.
The following shows 15 code examples of dma_sync_single_for_device, sorted by popularity.
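Before the examples, it helps to see the buffer-ownership handshake they all follow. Below is a minimal sketch, not taken from any driver: my_xfer(), dev, buf and len are illustrative names, while dma_map_single(), dma_sync_single_for_cpu(), dma_sync_single_for_device() and dma_unmap_single() are the standard Linux streaming DMA API calls.
#include <linux/dma-mapping.h>

/* Minimal sketch of the streaming-DMA ownership handshake; my_xfer is a made-up name. */
static int my_xfer(struct device *dev, void *buf, size_t len)
{
    dma_addr_t handle;

    /* Map the buffer; the direction must match the later sync calls. */
    handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, handle))
        return -ENOMEM;

    /* Give the buffer back to the CPU before touching it... */
    dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);
    memset(buf, 0, len);        /* ...CPU fills or updates the data here... */

    /* ...then hand ownership to the device before starting the transfer. */
    dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);

    /* Device-specific part: program the controller and wait for completion. */

    dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
    return 0;
}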
Example 1: sgdma_async_read
/* If the hardware is busy, don't restart the async read.
 * If the status register is 0 (the initial state), restart the async read,
 * probably for the first time when populating a receive buffer.
 * If the read status indicates not busy and some status bits are set,
 * restart the async DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
struct sgdma_descrip *descbase =
(struct sgdma_descrip *)priv->rx_dma_desc;
struct sgdma_descrip *cdesc = &descbase[0];
struct sgdma_descrip *ndesc = &descbase[1];
struct tse_buffer *rxbuffer = NULL;
if (!sgdma_rxbusy(priv)) {
rxbuffer = queue_rx_peekhead(priv);
if (rxbuffer == NULL) {
netdev_err(priv->dev, "no rx buffers available\n");
return 0;
}
sgdma_setup_descrip(cdesc, /* current descriptor */
ndesc, /* next descriptor */
sgdma_rxphysaddr(priv, ndesc),
0, /* read addr 0 for rx dma */
rxbuffer->dma_addr, /* write addr for rx dma */
0, /* read 'til EOP */
0, /* EOP: NA for rx dma */
0, /* read fixed: NA for rx dma */
0); /* SOP: NA for rx DMA */
dma_sync_single_for_device(priv->device,
priv->rxdescphys,
priv->sgdmadesclen,
DMA_TO_DEVICE);
iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
&csr->next_descrip);
iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
&csr->control);
return 1;
}
return 0;
}
Example 2: lsdma_rxqbuf
/**
* lsdma_rxqbuf - Queue a read buffer
* @dma: DMA information structure
* @bufnum: buffer number
*
* Do everything which normally follows a copy from a driver buffer
* to a user buffer.
**/
static ssize_t
lsdma_rxqbuf (struct master_dma *dma, size_t bufnum)
{
unsigned int i;
struct lsdma_desc *desc;
if (bufnum != dma->cpu_buffer) {
return -EINVAL;
}
for (i = 0; i < dma->pointers_per_buf; i++) {
desc = dma->desc[dma->cpu_buffer * dma->pointers_per_buf + i];
dma_sync_single_for_device (dma->dev,
mdma_desc_to_dma (desc->dest_addr, desc->dest_addr_h),
(desc->csr & LSDMA_DESC_CSR_TOTALXFERSIZE),
DMA_FROM_DEVICE);
}
dma->cpu_buffer = (dma->cpu_buffer + 1) % dma->buffers;
dma->cpu_offset = 0;
return dma->bufsize;
}
Example 3: tegra_start_dma_tx
static void tegra_start_dma_tx(struct tegra_uart_port *t, unsigned long bytes)
{
struct circ_buf *xmit;
xmit = &t->uport.state->xmit;
dma_sync_single_for_device(t->uport.dev, t->xmit_dma_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
t->fcr_shadow &= ~UART_FCR_T_TRIG_11;
t->fcr_shadow |= TEGRA_UART_TX_TRIG_4B;
uart_writeb(t, t->fcr_shadow, UART_FCR);
t->tx_bytes = bytes & ~(sizeof(u32)-1);
t->tx_dma_req.source_addr = t->xmit_dma_addr + xmit->tail;
t->tx_dma_req.size = t->tx_bytes;
t->tx_in_progress = TEGRA_TX_DMA;
tegra_dma_enqueue_req(t->tx_dma, &t->tx_dma_req);
}
Example 4: tegra_uart_copy_rx_to_tty
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
struct tty_port *tty, int count)
{
int copied;
tup->uport.icount.rx += count;
if (!tty) {
dev_err(tup->uport.dev, "No tty port\n");
return;
}
dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
copied = tty_insert_flip_string_lock(tty,
((unsigned char *)(tup->rx_dma_buf_virt)), count);
if (copied != count)
dev_err(tup->uport.dev, "RxData DMA copy to tty layer failed\n");
dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
}
Example 5: tegra_uart_start_rx_dma
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!tup->rx_dma_desc) {
dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
return -EIO;
}
tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
tup->rx_dma_desc->callback_param = tup;
dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
count, DMA_TO_DEVICE);
tup->rx_bytes_requested = count;
tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
dma_async_issue_pending(tup->rx_dma_chan);
return 0;
}
Example 6: alloc_pages
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
struct page *page = alloc_pages(pool->gfp_mask, pool->order);
if (!page)
return NULL;
/* this is only being used to flush the page for DMA;
   this API is not really suitable for calling from a driver,
   but no better way to flush a page for DMA exists at this time */
#ifdef CONFIG_64BIT
dma_sync_single_for_device(NULL, (dma_addr_t)page_to_phys(page),
PAGE_SIZE << pool->order,
DMA_BIDIRECTIONAL);
#else
arm_dma_ops.sync_single_for_device(NULL,
pfn_to_dma(NULL, page_to_pfn(page)),
PAGE_SIZE << pool->order,
DMA_BIDIRECTIONAL);
#endif
return page;
}
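Note on the example above: the NULL device argument is a workaround that the comment itself acknowledges. Where a real struct device is available, a more conventional way to get the same cache maintenance is to map and immediately unmap the page. The helper below is only a sketch under that assumption (flush_page_for_dma is a hypothetical name, not part of the ION code); dma_map_page() and dma_unmap_page() are standard DMA API calls.
/* Hypothetical helper; assumes the caller has a real struct device. */
static int flush_page_for_dma(struct device *dev, struct page *page,
                              unsigned int order)
{
    size_t size = PAGE_SIZE << order;
    dma_addr_t handle;

    /* dma_map_page() performs the cache maintenance needed before device access. */
    handle = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, handle))
        return -ENOMEM;

    /* The mapping itself was the point here, so release it again right away. */
    dma_unmap_page(dev, handle, size, DMA_BIDIRECTIONAL);
    return 0;
}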
Example 7: tegra_rx_dma_complete_callback
/*
 * It is expected that the callers take the UART lock when this API is called.
 *
 * There are 2 contexts in which this function is called:
 *
 * 1. DMA ISR - the DMA ISR triggers the threshold-complete callback, which
 * calls the dequeue API, which in turn calls this callback. The UART lock is
 * taken during the call to the threshold callback.
 *
 * 2. UART ISR - the UART calls the dequeue API, which in turn calls this API.
 * In this case, the UART ISR takes the UART lock.
 */
static void tegra_rx_dma_complete_callback(struct tegra_dma_req *req)
{
struct tegra_uart_port *t = req->dev;
struct uart_port *u = &t->uport;
struct tty_struct *tty = u->state->port.tty;
int copied;
/* If we are here, DMA is stopped */
dev_dbg(t->uport.dev, "%s: %d %d\n", __func__, req->bytes_transferred,
req->status);
if (req->bytes_transferred) {
t->uport.icount.rx += req->bytes_transferred;
dma_sync_single_for_cpu(t->uport.dev, req->dest_addr,
req->size, DMA_FROM_DEVICE);
copied = tty_insert_flip_string(tty,
((unsigned char *)(req->virt_addr)),
req->bytes_transferred);
if (copied != req->bytes_transferred) {
WARN_ON(1);
dev_err(t->uport.dev, "Not able to copy uart data "
"to tty layer: req %d, copied %d\n",
req->bytes_transferred, copied);
}
dma_sync_single_for_device(t->uport.dev, req->dest_addr,
req->size, DMA_TO_DEVICE);
}
do_handle_rx_pio(t);
/* If the request was aborted, the caller will push the read data later. */
if (req->status == -TEGRA_DMA_REQ_ERROR_ABORTED)
return;
spin_unlock(&u->lock);
tty_flip_buffer_push(u->state->port.tty);
spin_lock(&u->lock);
}
Example 8: denali_read_page_raw
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
dma_addr_t addr = denali->buf.dma_buf;
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
if (page != denali->page) {
dev_err(denali->dev, "IN %s: page %d is not"
" equal to denali->page %d, investigate!!",
__func__, page, denali->page);
BUG();
}
setup_ecc_for_xfer(denali, false, true);
denali_enable_dma(denali, true);
dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
clear_interrupts(denali);
denali_setup_dma(denali, DENALI_READ);
/* wait for operation to complete */
irq_status = wait_for_irq(denali, irq_mask);
dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
denali_enable_dma(denali, false);
memcpy(buf, denali->buf.buf, mtd->writesize);
memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
return 0;
}
Example 9: sgdma_async_write
static int sgdma_async_write(struct altera_tse_private *priv,
struct sgdma_descrip *desc)
{
struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
if (sgdma_txbusy(priv))
return 0;
/* clear control and status */
iowrite32(0, &csr->control);
iowrite32(0x1f, &csr->status);
dma_sync_single_for_device(priv->device, priv->txdescphys,
priv->sgdmadesclen, DMA_TO_DEVICE);
iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
&csr->next_descrip);
iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
&csr->control);
return 1;
}
Example 10: mvneta_send
static int mvneta_send(struct eth_device *edev, void *data, int len)
{
struct mvneta_port *priv = edev->priv;
struct txdesc *txdesc = priv->txdesc;
int ret, error, last_desc;
/* Flush transmit data */
dma_sync_single_for_device((unsigned long)data, len, DMA_TO_DEVICE);
memset(txdesc, 0, sizeof(*txdesc));
/* Fill the Tx descriptor */
txdesc->cmd_sts = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC;
txdesc->buf_ptr = (u32)data;
txdesc->byte_cnt = len;
/* Increase the number of prepared descriptors (one), by writing
* to the 'NoOfWrittenDescriptors' field in the PTXSU register.
*/
writel(1, priv->reg + MVNETA_TXQ_UPDATE_REG(0));
/* The controller updates the number of transmitted descriptors in
* the Tx port status register (PTXS).
*/
ret = wait_on_timeout(TRANSFER_TIMEOUT, !mvneta_pending_tx(priv));
dma_sync_single_for_cpu((unsigned long)data, len, DMA_TO_DEVICE);
if (ret) {
dev_err(&edev->dev, "transmit timeout\n");
return ret;
}
last_desc = readl(&txdesc->cmd_sts) & MVNETA_TXD_L_DESC;
error = readl(&txdesc->error);
if (last_desc && error & MVNETA_TXD_ERROR) {
dev_err(&edev->dev, "transmit error %d\n",
(error & TXD_ERROR_MASK) >> TXD_ERROR_SHIFT);
return -EIO;
}
return 0;
}
Example 11: octeon_mgmt_rx_fill_ring
static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
int port = p->port;
while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
unsigned int size;
union mgmt_port_ring_entry re;
struct sk_buff *skb;
/* CN56XX pass 1 needs 8 bytes of padding. */
size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
skb = netdev_alloc_skb(netdev, size);
if (!skb)
break;
skb_reserve(skb, NET_IP_ALIGN);
__skb_queue_tail(&p->rx_list, skb);
re.d64 = 0;
re.s.len = size;
re.s.addr = dma_map_single(p->dev, skb->data,
size,
DMA_FROM_DEVICE);
/* Put it in the ring. */
p->rx_ring[p->rx_next_fill] = re.d64;
dma_sync_single_for_device(p->dev, p->rx_ring_handle,
ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
DMA_BIDIRECTIONAL);
p->rx_next_fill =
(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
p->rx_current_fill++;
/* Ring the bell. */
cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
}
}
Example 12: caam_jr_enqueue
/**
* caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
* -EBUSY if the queue is full, -EIO if it cannot map the caller's
* descriptor.
* @dev: device of the job ring to be used. This device should have
* been assigned prior by caam_jr_register().
* @desc: points to a job descriptor that executes our request. All
* descriptors (and all referenced data) must be in a DMAable
* region, and all data references must be physical addresses
* accessible to CAAM (i.e. within a PAMU window granted
* to it).
* @cbk: pointer to a callback function to be invoked upon completion
* of this request. This has the form:
* callback(struct device *dev, u32 *desc, u32 status, void *areq)
* where:
* @dev: contains the job ring device that processed this
* response.
* @desc: descriptor that initiated the request, same as the
* "desc" passed to caam_jr_enqueue().
* @status: untranslated status received from CAAM. See the
* reference manual for a detailed description of
* error meaning, or see the JRSTA definitions in the
* register header file
* @areq: optional pointer to a user argument, passed back to the
* callback together with the original request.
**/
int caam_jr_enqueue(struct device *dev, u32 *desc,
void (*cbk)(struct device *dev, u32 *desc,
u32 status, void *areq),
void *areq)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
struct caam_jrentry_info *head_entry;
unsigned long flags;
int head, tail, desc_size;
dma_addr_t desc_dma, inpbusaddr;
desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, desc_dma)) {
dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
return -EIO;
}
dma_sync_single_for_device(dev, desc_dma, desc_size, DMA_TO_DEVICE);
inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
dma_sync_single_for_device(dev, inpbusaddr,
sizeof(dma_addr_t) * JOBR_DEPTH,
DMA_TO_DEVICE);
spin_lock_irqsave(&jrp->inplock, flags);
head = jrp->head;
tail = ACCESS_ONCE(jrp->tail);
if (!rd_reg32(&jrp->rregs->inpring_avail) ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
spin_unlock_irqrestore(&jrp->inplock, flags);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
return -EBUSY;
}
head_entry = &jrp->entinfo[head];
head_entry->desc_addr_virt = desc;
head_entry->desc_size = desc_size;
head_entry->callbk = (void *)cbk;
head_entry->cbkarg = areq;
head_entry->desc_addr_dma = desc_dma;
jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
dma_sync_single_for_device(dev, inpbusaddr,
sizeof(dma_addr_t) * JOBR_DEPTH,
DMA_TO_DEVICE);
smp_wmb();
jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
(JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
wmb();
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
spin_unlock_irqrestore(&jrp->inplock, flags);
return 0;
}
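A possible caller of the API documented above, waiting synchronously for the job to complete. The my_* names and the completion bookkeeping are illustrative; only caam_jr_enqueue() and the callback signature are taken from the example.
/* Illustrative synchronous wrapper around caam_jr_enqueue(). */
struct my_jr_result {
    struct completion done;
    u32 status;
};

static void my_jr_done(struct device *dev, u32 *desc, u32 status, void *areq)
{
    struct my_jr_result *res = areq;

    res->status = status;    /* untranslated CAAM status word */
    complete(&res->done);
}

static int my_jr_run(struct device *jrdev, u32 *desc)
{
    struct my_jr_result res;
    int ret;

    init_completion(&res.done);
    ret = caam_jr_enqueue(jrdev, desc, my_jr_done, &res);
    if (ret)
        return ret;    /* -EBUSY (ring full) or -EIO (could not map) */

    wait_for_completion(&res.done);
    return res.status ? -EIO : 0;
}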
Example 13: kbase_mmu_sync_pgd
static void kbase_mmu_sync_pgd(struct device *dev,
dma_addr_t handle, size_t size)
{
dma_sync_single_for_device(dev, handle, size, DMA_TO_DEVICE);
}
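For context, here is a hypothetical caller of this one-line wrapper: the CPU updates one page-table entry in a DMA-mapped page, then syncs just that entry so the GPU MMU sees the write. The pgd_page and pgd_dma names are invented; only the kbase_mmu_sync_pgd() call mirrors the example.
/* Hypothetical caller; pgd_page and pgd_dma are illustrative names. */
static void example_set_pte(struct device *dev, u64 *pgd_page,
                            dma_addr_t pgd_dma, unsigned int idx, u64 entry)
{
    pgd_page[idx] = entry;    /* CPU writes the new page-table entry */

    /* Flush just the updated entry before the GPU MMU walks the table. */
    kbase_mmu_sync_pgd(dev, pgd_dma + idx * sizeof(u64), sizeof(u64));
}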
Example 14: hsu_dma_rx
/* This is always called in spinlock-protected mode, so
 * modifying the timeout timer is safe here. */
void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
{
struct hsu_dma_buffer *dbuf = &up->rxbuf;
struct hsu_dma_chan *chan = up->rxc;
struct uart_port *port = &up->port;
struct tty_struct *tty = port->state->port.tty;
int count;
if (!tty)
return;
/*
 * First we need to know how much has already been transferred;
 * then, if this is a timeout DMA irq, take the trailing bytes
 * out, push them up and re-enable the channel.
 */
/* Timeout IRQ: need to wait some time, see Errata 2 */
if (int_sts & 0xf00)
udelay(2);
/* Stop the channel */
chan_writel(chan, HSU_CH_CR, 0x0);
count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
if (!count) {
/* Restart the channel before we leave */
chan_writel(chan, HSU_CH_CR, 0x3);
return;
}
del_timer(&chan->rx_timer);
dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
dbuf->dma_size, DMA_FROM_DEVICE);
/*
* Head will only wrap around when we recycle
* the DMA buffer, and when that happens, we
* explicitly set tail to 0. So head will
* always be greater than tail.
*/
tty_insert_flip_string(tty, dbuf->buf, count);
port->icount.rx += count;
dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
dbuf->dma_size, DMA_FROM_DEVICE);
/* Reprogram the channel */
chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
chan_writel(chan, HSU_CH_DCR, 0x1
| (0x1 << 8)
| (0x1 << 16)
| (0x1 << 24) /* timeout bit, see HSU Errata 1 */
);
tty_flip_buffer_push(tty);
chan_writel(chan, HSU_CH_CR, 0x3);
chan->rx_timer.expires = jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ;
add_timer(&chan->rx_timer);
}
Example 15: myri_rx
static void myri_rx(struct myri_eth *mp, struct net_device *dev)
{
struct recvq __iomem *rq = mp->rq;
struct recvq __iomem *rqa = mp->rqack;
int entry = sbus_readl(&rqa->head);
int limit = sbus_readl(&rqa->tail);
int drops;
DRX(("entry[%d] limit[%d] ", entry, limit));
if (entry == limit)
return;
drops = 0;
DRX(("\n"));
while (entry != limit) {
struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
u32 csum = sbus_readl(&rxdack->csum);
int len = sbus_readl(&rxdack->myri_scatters[0].len);
int index = sbus_readl(&rxdack->ctx);
struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
struct sk_buff *skb = mp->rx_skbs[index];
/* Ack it. */
sbus_writel(NEXT_RX(entry), &rqa->head);
/* Check for errors. */
DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
dma_sync_single_for_cpu(&mp->myri_op->dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE, DMA_FROM_DEVICE);
if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
DRX(("ERROR["));
dev->stats.rx_errors++;
if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
DRX(("BAD_LENGTH] "));
dev->stats.rx_length_errors++;
} else {
DRX(("NO_PADDING] "));
dev->stats.rx_frame_errors++;
}
/* Return it to the LANAI. */
drop_it:
drops++;
DRX(("DROP "));
dev->stats.rx_dropped++;
dma_sync_single_for_device(&mp->myri_op->dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE,
DMA_FROM_DEVICE);
sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
sbus_writel(index, &rxd->ctx);
sbus_writel(1, &rxd->num_sg);
sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
goto next;
}
DRX(("len[%d] ", len));
if (len > RX_COPY_THRESHOLD) {
struct sk_buff *new_skb;
u32 dma_addr;
DRX(("BIGBUFF "));
new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
if (new_skb == NULL) {
DRX(("skb_alloc(FAILED) "));
goto drop_it;
}
dma_unmap_single(&mp->myri_op->dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE,
DMA_FROM_DEVICE);
mp->rx_skbs[index] = new_skb;
new_skb->dev = dev;
skb_put(new_skb, RX_ALLOC_SIZE);
dma_addr = dma_map_single(&mp->myri_op->dev,
new_skb->data,
RX_ALLOC_SIZE,
DMA_FROM_DEVICE);
sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
sbus_writel(index, &rxd->ctx);
sbus_writel(1, &rxd->num_sg);
sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
/* Trim the original skb for the netif. */
DRX(("trim(%d) ", len));
skb_trim(skb, len);
} else {
struct sk_buff *copy_skb = dev_alloc_skb(len);
DRX(("SMALLBUFF "));
if (copy_skb == NULL) {
DRX(("dev_alloc_skb(FAILED) "));
goto drop_it;
}
/* DMA sync already done above. */
copy_skb->dev = dev;
DRX(("resv_and_put "));
skb_put(copy_skb, len);
skb_copy_from_linear_data(skb, copy_skb->data, len);
//......... (remaining code omitted) .........