This article collects and summarizes typical usage examples of the dma_cache_inv function in C++. If you are struggling with questions such as: What exactly does the C++ dma_cache_inv function do? How is dma_cache_inv used? Where can I find real-world examples of dma_cache_inv? Then congratulations, the hand-picked code examples below may be of help.
The following presents 15 code examples of the dma_cache_inv function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
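Before the individual examples, here is a minimal sketch of the recurring pattern they all share: invalidating a buffer's cache lines so the CPU does not read stale data after a device has DMA'd into it. It assumes a legacy MIPS-style kernel where dma_cache_inv takes a kernel virtual address and a byte count (see asm/io.h); hw_start_rx_dma() and RX_BUF_SIZE are hypothetical placeholders, not real kernel APIs.
#include <linux/kernel.h>
#include <linux/io.h>        /* dma_cache_inv() on legacy MIPS kernels */

#define RX_BUF_SIZE 2048     /* hypothetical receive buffer size */

/* hw_start_rx_dma() is a hypothetical helper that programs the device with
 * the buffer's physical address and starts the transfer. */
extern void hw_start_rx_dma(unsigned long paddr, unsigned long len);

static void rx_one_buffer(void *buf)
{
	/* Discard any cached lines covering the buffer, so that once the
	 * device has written into it, CPU reads see the DMA'd data rather
	 * than stale cache contents. */
	dma_cache_inv((unsigned long)buf, RX_BUF_SIZE);

	hw_start_rx_dma(virt_to_phys(buf), RX_BUF_SIZE);

	/* ... wait for the completion interrupt, then parse buf ... */
}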
Example 1: _au1xxx_dbdma_put_dest
/* Put a destination buffer into the DMA ring.
* This updates the destination pointer and byte count. Normally used
* to place an empty buffer into the ring for fifo to memory transfers.
*/
u32
_au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
{
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
/* I guess we could check this to be within the
* range of the table......
*/
ctp = *((chan_tab_t **)chanid);
/* We should have multiple callers for a particular channel,
* an interrupt doesn't affect this pointer nor the descriptor,
* so no locking should be needed.
*/
dp = ctp->put_ptr;
/* If the descriptor is valid, we are way ahead of the DMA
* engine, so just return an error condition.
*/
if (dp->dscr_cmd0 & DSCR_CMD0_V)
return 0;
/* Load up buffer address and byte count */
/* Check flags */
if (flags & DDMA_FLAGS_IE)
dp->dscr_cmd0 |= DSCR_CMD0_IE;
if (flags & DDMA_FLAGS_NOIE)
dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
dp->dscr_dest0 = virt_to_phys(buf);
dp->dscr_cmd1 = nbytes;
#if 0
printk("cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1 );
#endif
/*
* There is an erratum on the Au1200/Au1550 parts that could result in
* "stale" data being DMA'd. It has to do with the snoop logic on the
* dcache eviction buffer. NONCOHERENT_IO is on by default for these
* parts. If it is fixed in the future, these dma_cache_inv calls will
* just be nothing more than empty macros. See io.h.
*/
dma_cache_inv((unsigned long)buf, nbytes);
dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */
au_sync();
dma_cache_wback_inv((unsigned long)dp, sizeof(dp));
ctp->chan_ptr->ddma_dbell = 0;
/* Get next descriptor pointer.
*/
ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/* return something not zero.
*/
return nbytes;
}
Example 2: powertecscsi_invalidate
static void
powertecscsi_invalidate(char *addr, long len, fasdmadir_t direction)
{
if (direction == DMA_OUT)
dma_cache_wback((unsigned long)addr, (unsigned long)len);
else
dma_cache_inv((unsigned long)addr, (unsigned long)len);
}
Example 3: ocelot_copy_from_cache
void ocelot_copy_from_cache(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
if (cacheflush) {
dma_cache_inv(map->map_priv_2, map->size);
cacheflush = 0;
}
memcpy_fromio(to, map->map_priv_1 + from, len);
}
Example 4: ath_hwcs_get_csum_from_desc
inline unsigned short
ath_hwcs_get_csum_from_desc(ath_hwcs_desc_t *d)
{
#if 0
dma_cache_inv((unsigned long)ath_hwcs_tx_desc, sizeof(ath_hwcs_desc_t));
return (unsigned short)((ath_hwcs_tx_desc->info.control.pktSize) & 0xffff);
#else
return (unsigned short)((uncached_cksum_desc->info.control.pktSize) & 0xffff);
#endif
}
Example 5: rtl_cipher_crypt
int rtl_cipher_crypt(struct crypto_cipher *cipher, u8 bEncrypt,
struct rtl_cipher_ctx *ctx, u8 *src, unsigned int nbytes, u8 *iv, u8 *dst)
{
unsigned int bsize = crypto_cipher_blocksize(cipher);
u8 *key = bEncrypt ? ctx->key : ctx->mode & 0x20 ? ctx->aes_dekey : ctx->key;
rtl_ipsecScatter_t scatter[1];
u32 flag_encrypt = bEncrypt ? 4 : 0;
int err;
#ifdef CONFIG_RTK_VOIP_DBG
printk("%s: src=%p, len=%d, blk=%d, key=%p, iv=%p, dst=%p\n", __FUNCTION__,
src, nbytes, bsize, key, iv, dst);
rtl_crypto_hexdump((void *) src, nbytes);
rtl_crypto_hexdump((void *) key, ctx->key_length);
rtl_crypto_hexdump((void *) iv, bsize);
#endif
dma_cache_wback((u32) src, nbytes);
dma_cache_wback((u32) key, ctx->key_length);
dma_cache_wback((u32) iv, bsize);
scatter[0].len = (nbytes / bsize) * bsize;
scatter[0].ptr = (void *) CKSEG1ADDR(src);
/*
int32 rtl_ipsecEngine(uint32 modeCrypto, uint32 modeAuth,
uint32 cntScatter, rtl_ipsecScatter_t *scatter, void *pCryptResult,
uint32 lenCryptoKey, void* pCryptoKey,
uint32 lenAuthKey, void* pAuthKey,
void* pIv, void* pPad, void* pDigest,
uint32 a2eo, uint32 enl)
*/
err = rtl_ipsecEngine(ctx->mode | flag_encrypt,
-1, 1, scatter,
(void *) CKSEG1ADDR(dst),
ctx->key_length, (void *) CKSEG1ADDR(key),
0, NULL,
(void *) CKSEG1ADDR(iv), NULL, NULL,
0, scatter[0].len);
if (unlikely(err))
printk("%s: rtl_ipsecEngine failed\n", __FUNCTION__);
dma_cache_inv((u32) dst, nbytes);
#ifdef CONFIG_RTK_VOIP_DBG
printk("result:\n");
rtl_crypto_hexdump(dst, nbytes);
#endif
// return handled bytes, even err! (for blkcipher_walk)
return nbytes - scatter[0].len;
}
Example 6: dma_setup
static int dma_setup(Scsi_Cmnd *cmd, int datainp)
{
struct WD33C93_hostdata *hdata = (struct WD33C93_hostdata *)cmd->host->hostdata;
struct hpc3_scsiregs *hregs = (struct hpc3_scsiregs *) cmd->host->base;
struct hpc_chunk *hcp = (struct hpc_chunk *) hdata->dma_bounce_buffer;
#ifdef DEBUG_DMA
printk("dma_setup: datainp<%d> hcp<%p> ",
datainp, hcp);
#endif
hdata->dma_dir = datainp;
/*
* wd33c93 shouldn't pass us bogus dma_setups, but
* it does:-( The other wd33c93 drivers deal with
* it the same way (which isn't that obvious).
* IMHO a better fix would be not to do these
* dma setups in the first place
*/
if (cmd->SCp.ptr == NULL)
return 1;
fill_hpc_entries(&hcp, cmd->SCp.ptr, cmd->SCp.this_residual);
/* To make sure, if we trip an HPC bug, that we transfer
* every single byte, we tag on an extra zero length dma
* descriptor at the end of the chain.
*/
hcp->desc.pbuf = 0;
hcp->desc.cntinfo = (HPCDMA_EOX);
#ifdef DEBUG_DMA
printk(" HPCGO\n");
#endif
/* Start up the HPC. */
hregs->ndptr = PHYSADDR(hdata->dma_bounce_buffer);
if(datainp) {
dma_cache_inv((unsigned long) cmd->SCp.ptr, cmd->SCp.this_residual);
hregs->ctrl = (HPC3_SCTRL_ACTIVE);
} else {
dma_cache_wback_inv((unsigned long) cmd->SCp.ptr, cmd->SCp.this_residual);
hregs->ctrl = (HPC3_SCTRL_ACTIVE | HPC3_SCTRL_DIR);
}
return 0;
}
Example 7: arch_sync_dma_for_cpu
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
break;
/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
case DMA_FROM_DEVICE:
case DMA_BIDIRECTIONAL:
dma_cache_inv(paddr, size);
break;
default:
break;
}
}
Example 8: _dma_cache_sync
/*
* streaming DMA Mapping API...
* CPU accesses page via normal paddr, thus needs to be explicitly made
* consistent before each use
*/
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_FROM_DEVICE:
dma_cache_inv(paddr, size);
break;
case DMA_TO_DEVICE:
dma_cache_wback(paddr, size);
break;
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv(paddr, size);
break;
default:
pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
}
}
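As a usage note for Example 8: a helper like _dma_cache_sync is typically invoked from the streaming-mapping path right before a buffer's bus address is handed to the device. The sketch below is illustrative only; example_map_single is a hypothetical wrapper (not the exact in-tree API) and assumes virt_to_phys() and a 1:1 physical-to-bus address mapping.
/* Illustrative only: a streaming-map helper in the style the examples use.
 * example_map_single is a hypothetical wrapper, not the in-tree API. */
static dma_addr_t example_map_single(void *cpu_addr, size_t size,
				     enum dma_data_direction dir)
{
	phys_addr_t paddr = virt_to_phys(cpu_addr);

	/* writeback for TO_DEVICE, invalidate for FROM_DEVICE, both for
	 * BIDIRECTIONAL -- exactly what _dma_cache_sync() above selects */
	_dma_cache_sync(paddr, size, dir);

	return (dma_addr_t)paddr;
}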
Example 9: ar2313_allocate_descriptors
static int ar2313_allocate_descriptors(struct net_device *dev)
{
struct ar2313_private *sp = dev->priv;
int size;
int j;
ar2313_descr_t *space;
if (sp->rx_ring != NULL) {
printk("%s: already done.\n", __FUNCTION__);
return 0;
}
size = (sizeof(ar2313_descr_t) * (AR2313_DESCR_ENTRIES * AR2313_QUEUES));
space = kmalloc(size, GFP_KERNEL);
if (space == NULL)
return 1;
/* invalidate caches */
dma_cache_inv((unsigned int) space, size);
/* now convert pointer to KSEG1 */
space = (ar2313_descr_t *) KSEG1ADDR(space);
memset((void *) space, 0, size);
sp->rx_ring = space;
space += AR2313_DESCR_ENTRIES;
sp->tx_ring = space;
space += AR2313_DESCR_ENTRIES;
/* Initialize the transmit Descriptors */
for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
ar2313_descr_t *td = &sp->tx_ring[j];
td->status = 0;
td->devcs = DMA_TX1_CHAINED;
td->addr = 0;
td->descr = virt_to_phys(&sp->tx_ring[(j + 1) & (AR2313_DESCR_ENTRIES - 1)]);
}
return 0;
}
Example 10: __get_free_pages
/*
DMA memory allocation, derived from pci_alloc_consistent.
*/
static void *dmaalloc(size_t size, dma_addr_t * dma_handle)
{
void *ret;
ret = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, get_order(size));
if (ret != NULL) {
dma_cache_inv((unsigned long) ret, size);
if (dma_handle != NULL)
*dma_handle = virt_to_phys(ret);
/* bump virtual address up to non-cached area */
ret = (void *) KSEG1ADDR(ret);
}
return ret;
}
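A short usage sketch for the allocator above: the caller keeps the uncached KSEG1 pointer returned by dmaalloc for CPU accesses and passes the dma_handle (a physical address) to the hardware. my_desc_t, the ring length, and the commented-out register write are hypothetical placeholders; only dmaalloc() comes from the example.
/* Hypothetical descriptor ring set up with dmaalloc(). */
typedef struct {
	u32 ctrl;
	u32 addr;
} my_desc_t;

static my_desc_t *ring;      /* uncached (KSEG1) view for the CPU */
static dma_addr_t ring_bus;  /* physical address handed to the device */

static int example_ring_init(void)
{
	ring = dmaalloc(16 * sizeof(my_desc_t), &ring_bus);
	if (ring == NULL)
		return -ENOMEM;

	/* KSEG1 accesses bypass the cache, so these CPU writes are visible
	 * to the device without further cache maintenance. */
	ring[0].ctrl = 0;
	ring[0].addr = 0;

	/* tell the (hypothetical) device where the ring starts, e.g.
	 * writel(ring_bus, base + RING_BASE_REG); */
	return 0;
}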
Example 11: arch_sync_dma_for_device
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
dma_cache_wback(paddr, size);
break;
case DMA_FROM_DEVICE:
dma_cache_inv(paddr, size);
break;
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv(paddr, size);
break;
default:
break;
}
}
Example 12: __dma_sync
static inline void __dma_sync(unsigned long addr, size_t size,
enum dma_data_direction direction)
{
switch (direction) {
case DMA_TO_DEVICE:
dma_cache_wback(addr, size);
break;
case DMA_FROM_DEVICE:
dma_cache_inv(addr, size);
break;
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv(addr, size);
break;
default:
BUG();
}
}
Example 13: alloc_skb_rx
static INLINE struct sk_buff* alloc_skb_rx(void)
{
struct sk_buff *skb;
/* allocate memory including trailer and padding */
skb = dev_alloc_skb(rx_max_packet_size + RX_HEAD_MAC_ADDR_ALIGNMENT + DATA_BUFFER_ALIGNMENT);
if ( skb != NULL ) {
/* data must be burst-length aligned; reserve two more bytes for MAC address alignment */
if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
/* put the skb pointer in the reserved area "skb->data - 4" */
*((struct sk_buff **)skb->data - 1) = skb;
wmb();
/* write back and invalidate cache */
dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
/* invalidate cache */
dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
}
return skb;
}
Example 14: rc32434_rx_tasklet
static void rc32434_rx_tasklet(unsigned long rx_data_dev)
#endif
{
struct net_device *dev = (struct net_device *)rx_data_dev;
struct rc32434_local* lp = netdev_priv(dev);
volatile DMAD_t rd = &lp->rd_ring[lp->rx_next_done];
struct sk_buff *skb, *skb_new;
u8* pkt_buf;
u32 devcs, count, pkt_len, pktuncrc_len;
volatile u32 dmas;
#ifdef CONFIG_IDT_USE_NAPI
u32 received = 0;
int rx_work_limit = min(*budget,dev->quota);
#else
unsigned long flags;
spin_lock_irqsave(&lp->lock, flags);
#endif
dma_cache_inv((u32)rd, sizeof(*rd));
while ( (count = RC32434_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
#ifdef CONFIG_IDT_USE_NAPI
if(--rx_work_limit <0)
{
break;
}
#endif
/* init the var. used for the later operations within the while loop */
skb_new = NULL;
devcs = rd->devcs;
pkt_len = RCVPKT_LENGTH(devcs);
skb = lp->rx_skb[lp->rx_next_done];
if (count < 64) {
lp->stats.rx_errors++;
lp->stats.rx_dropped++;
}
else if ((devcs & ( ETHRX_ld_m)) != ETHRX_ld_m) {
/* check that this is a whole packet */
/* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
lp->stats.rx_errors++;
lp->stats.rx_dropped++;
}
else if ( (devcs & ETHRX_rok_m) ) {
{
/* must be the (first and) last descriptor then */
pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;
pktuncrc_len = pkt_len - 4;
/* invalidate the cache */
dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);
/* Malloc up new buffer. */
skb_new = dev_alloc_skb(RC32434_RBSIZE + 2);
if (skb_new != NULL){
/* Make room */
skb_put(skb, pktuncrc_len);
skb->protocol = eth_type_trans(skb, dev);
/* pass the packet to upper layers */
#ifdef CONFIG_IDT_USE_NAPI
netif_receive_skb(skb);
#else
netif_rx(skb);
#endif
dev->last_rx = jiffies;
lp->stats.rx_packets++;
lp->stats.rx_bytes += pktuncrc_len;
if (IS_RCV_MP(devcs))
lp->stats.multicast++;
/* 16 bit align */
skb_reserve(skb_new, 2);
skb_new->dev = dev;
lp->rx_skb[lp->rx_next_done] = skb_new;
}
else {
ERR("no memory, dropping rx packet.\n");
lp->stats.rx_errors++;
lp->stats.rx_dropped++;
}
}
}
else {
/* This should only happen if we enable accepting broken packets */
lp->stats.rx_errors++;
lp->stats.rx_dropped++;
/* add statistics counters */
if (IS_RCV_CRC_ERR(devcs)) {
DBG(2, "RX CRC error\n");
lp->stats.rx_crc_errors++;
}
else if (IS_RCV_LOR_ERR(devcs)) {
//......... part of the code omitted here .........
Example 15: rc32434_send_packet
/* transmit packet */
static int rc32434_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
unsigned long flags;
u32 length;
DMAD_t td;
spin_lock_irqsave(&lp->lock, flags);
td = &lp->td_ring[lp->tx_chain_tail];
/* stop queue when full, drop pkts if queue already full */
if(lp->tx_count >= (RC32434_NUM_TDS - 2)) {
lp->tx_full = 1;
if(lp->tx_count == (RC32434_NUM_TDS - 2)) {
netif_stop_queue(dev);
}
else {
lp->stats.tx_dropped++;
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&lp->lock, flags);
return 1;
}
}
lp->tx_count ++;
lp->tx_skb[lp->tx_chain_tail] = skb;
length = skb->len;
dma_cache_wback((u32)skb->data, skb->len);
/* Setup the transmit descriptor. */
dma_cache_inv((u32) td, sizeof(*td));
td->ca = CPHYSADDR(skb->data);
if(__raw_readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
if( lp->tx_chain_status == empty ) {
td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m; /* Update tail */
lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK; /* Move tail */
__raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr)); /* Write to NDPTR */
lp->tx_chain_head = lp->tx_chain_tail; /* Move head to tail */
}
else {
td->control = DMA_COUNT(length) |DMAD_cof_m|DMAD_iof_m; /* Update tail */
lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].control &= ~(DMAD_cof_m); /* Link to prev */
lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].link = CPHYSADDR(td); /* Link to prev */
lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK; /* Move tail */
__raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr)); /* Write to NDPTR */
lp->tx_chain_head = lp->tx_chain_tail; /* Move head to tail */
lp->tx_chain_status = empty;
}
}
else {
if( lp->tx_chain_status == empty ) {
td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m; /* Update tail */
lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK; /* Move tail */
lp->tx_chain_status = filled;
}
else {
td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m; /* Update tail */
lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].control &= ~(DMAD_cof_m); /* Link to prev */
lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].link = CPHYSADDR(td); /* Link to prev */
lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK; /* Move tail */
}
}
dma_cache_wback((u32) td, sizeof(*td));
dev->trans_start = jiffies;
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
}