This article collects typical usage examples of the dmac_flush_range function (all drawn from C code in the ARM Linux kernel). If you are wondering what dmac_flush_range does, how to call it, or where it is used in practice, the hand-picked examples below may help.
Fifteen code examples of dmac_flush_range are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better code examples.
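
As background for the examples: on ARM, dmac_flush_range(start, end) cleans and invalidates the inner (L1) data cache over a kernel-virtual address range, where end is exclusive. On systems with an outer (L2) cache it is typically paired with outer_flush_range() on the corresponding physical range. Below is a minimal sketch of that pattern; the function name flush_buffer_for_dma and its parameters are illustrative only, not taken from any of the examples.

#include <linux/types.h>	/* size_t */
#include <asm/cacheflush.h>	/* dmac_flush_range() */
#include <asm/memory.h>		/* __pa() */
#include <asm/outercache.h>	/* outer_flush_range() */

/* Minimal sketch, assuming buf points at a lowmem (direct-mapped) kernel
 * buffer that is about to be accessed by a device via DMA. */
static void flush_buffer_for_dma(void *buf, size_t len)
{
	/* Clean + invalidate the inner (L1) cache; the end is exclusive. */
	dmac_flush_range(buf, buf + len);

	/* The outer (L2) cache is physically indexed, so translate first. */
	outer_flush_range(__pa(buf), __pa(buf) + len);
}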

Example 1: _lzo1x_decompress_single

static int _lzo1x_decompress_single(uint32_t src_addr, uint32_t src_num,
				    uint32_t dst_addr, uint32_t *dst_len)
{
	uint32_t phy_src_addr, phy_dst_addr;

	*dst_len = 0;
	phy_src_addr = lzo1x_virt_to_phys(src_addr);
	phy_dst_addr = lzo1x_virt_to_phys(dst_addr);
	/*
	 * printk("phy_src_addr:0x%x phy_dst_addr:0x%x src_num:0x%x "
	 *        "src_addr:0x%x dst_addr:%x\n", phy_src_addr, phy_dst_addr,
	 *        src_num, src_addr, dst_addr);
	 */
	if ((phy_src_addr == (-1UL)) || (phy_dst_addr == (-1UL))) {
		printk(KERN_ERR "lzo1x: bad physical address\n");
		return -1;
	}

	spin_lock(&zipdec_lock);
	ZipDec_SetSrcCfg(0, phy_src_addr, src_num);
	ZipDec_SetDestCfg(0, phy_dst_addr, ZIP_WORK_LENGTH);
	/* Clean + invalidate both buffers so the decompression engine sees
	 * the source data and the CPU holds no stale destination lines. */
	dmac_flush_range((void *)src_addr, (void *)(src_addr + src_num));
	dmac_flush_range((void *)dst_addr, (void *)(dst_addr + ZIP_WORK_LENGTH));
	ZipDec_Run();
	spin_unlock(&zipdec_lock);

	if (Zip_Dec_Wait(ZIPDEC_DONE_INT))
		return -1;

	*dst_len = ZIP_WORK_LENGTH;
	return 0;
}

Example 2: __dma_clear_buffer

static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (!PageHighMem(page)) {
		void *ptr = page_address(page);
		if (ptr) {
			memset(ptr, 0, size);
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	} else {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;

		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	}
}

Example 3: Cache_wbInv

/* Function to write back invalidate the Cache module */
Void Cache_wbInv(Ptr blockPtr, UInt32 byteCnt, Bits16 type, Bool wait)
{
	GT_4trace(curTrace, GT_ENTER, "Cache_wbInv", blockPtr, byteCnt, type, wait);

#if 0
	/*
	 * It appears that this #if 0'ed code doesn't actually perform the
	 * invalidate part of the wbInv, and it appears that Cache_wb() and
	 * Cache_inv() work properly, so for now we implement wbInv as a
	 * combination of the two individual functions.
	 */
#ifdef USE_CACHE_VOID_ARG
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
	dmac_map_area(blockPtr, (size_t)byteCnt, DMA_BIDIRECTIONAL);
	outer_flush_range(__pa((UInt32)blockPtr),
			  __pa((UInt32)(blockPtr + byteCnt)));
#else
	dmac_flush_range(blockPtr, (blockPtr + byteCnt));
#endif
#else
	dmac_flush_range((UInt32)blockPtr, (UInt32)(blockPtr + byteCnt));
#endif
#else
	Cache_wb(blockPtr, byteCnt, type, wait);
	Cache_inv(blockPtr, byteCnt, type, wait);
#endif

	GT_0trace(curTrace, GT_LEAVE, "Cache_wbInv");
}

Example 4: qti_pfk_ice_set_key

int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt)
{
	struct scm_desc desc = {0};
	int ret;
	char *tzbuf_key = (char *)ice_key;
	char *tzbuf_salt = (char *)ice_salt;
	uint32_t smc_id = 0;
	u32 tzbuflen_key = sizeof(ice_key);
	u32 tzbuflen_salt = sizeof(ice_salt);

	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX)
		return -EINVAL;

	if (!key || !salt)
		return -EINVAL;

	if (!tzbuf_key || !tzbuf_salt)
		return -ENOMEM;

	memset(tzbuf_key, 0, tzbuflen_key);
	memset(tzbuf_salt, 0, tzbuflen_salt);
	memcpy(ice_key, key, tzbuflen_key);
	memcpy(ice_salt, salt, tzbuflen_salt);

	/* Push the key material out of the CPU caches before handing the
	 * physical addresses to the secure world. */
	dmac_flush_range(tzbuf_key, tzbuf_key + tzbuflen_key);
	dmac_flush_range(tzbuf_salt, tzbuf_salt + tzbuflen_salt);

	smc_id = TZ_ES_SET_ICE_KEY_ID;
	pr_debug(" %s , smc_id = 0x%x\n", __func__, smc_id);

	desc.arginfo = TZ_ES_SET_ICE_KEY_PARAM_ID;
	desc.args[0] = index;
	desc.args[1] = virt_to_phys(tzbuf_key);
	desc.args[2] = tzbuflen_key;
	desc.args[3] = virt_to_phys(tzbuf_salt);
	desc.args[4] = tzbuflen_salt;

	ret = scm_call2_atomic(smc_id, &desc);
	pr_debug(" %s , ret = %d\n", __func__, ret);
	if (ret) {
		pr_err("%s: Error: 0x%x\n", __func__, ret);
		smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
		desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
		desc.args[0] = index;
		scm_call2_atomic(smc_id, &desc);
	}
	return ret;
}

Example 5: isp_binging4awb_buf_alloc

static int32_t isp_binging4awb_buf_alloc(struct isp_k_private *isp_private, uint32_t len)
{
	int32_t ret = 0x00;
#ifndef CONFIG_64BIT
	uint32_t buf = 0x00;
	void *ptr = NULL;
#endif

	if (0x00 < len) {
		isp_private->bing4awb_buf_len = len;
		isp_private->bing4awb_buf_order = get_order(len);
		isp_private->bing4awb_buf_addr =
			(unsigned long)__get_free_pages(GFP_KERNEL | __GFP_COMP,
							isp_private->bing4awb_buf_order);
		if (NULL == (void *)isp_private->bing4awb_buf_addr) {
			printk(KERN_ERR "isp_binging4awb_buf_alloc: memory error, addr:0x%lx, len:0x%x, order:0x%x.\n",
			       isp_private->bing4awb_buf_addr,
			       isp_private->bing4awb_buf_len,
			       isp_private->bing4awb_buf_order);
			return -1;
		}

#ifndef CONFIG_64BIT
		ptr = (void *)isp_private->bing4awb_buf_addr;
		buf = virt_to_phys((volatile void *)isp_private->bing4awb_buf_addr);
		dmac_flush_range(ptr, ptr + len);
		outer_flush_range(__pa(ptr), __pa(ptr) + len);
#endif
	}
	return ret;
}

Example 6: ion_cma_cache_ops

int ion_cma_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cma_heap_has_outer_cache) {
		struct ion_cma_buffer_info *info = buffer->priv_virt;

		outer_cache_op(info->handle, info->handle + length);
	}
	return 0;
}

Example 7: platform_do_lowpower

static inline void platform_do_lowpower(unsigned int cpu)
{
	/* Just enter wfi for now. TODO: Properly shut off the cpu. */
	for (;;) {
		msm_pm_cpu_enter_lowpower(cpu);
		if (pen_release == cpu) {
			/*
			 * OK, proper wakeup, we're done
			 */
			pen_release = -1;
			/* Flush pen_release back to memory; the end pointer
			 * is exclusive, so &pen_release + 1 covers exactly
			 * this variable. */
			dmac_flush_range((void *)&pen_release,
					 (void *)(&pen_release + 1));
			break;
		}

		/*
		 * getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * The trouble is, letting people know about this is not really
		 * possible, since we are currently running incoherently, and
		 * therefore cannot safely call printk() or anything else
		 */
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		pr_debug("CPU%u: spurious wakeup call\n", cpu);
	}
}

Example 8: SDIO_SyncRead

SDIO_Status SDIO_SyncRead(SDIO_Handle Handle, SDIO_Request_t *Req)
{
	struct sdio_func *func = (struct sdio_func *)Handle;
	int rc;
	void *tgt = Req->buffer;

	if (Req->buffer_len >= DMA_THRESHOLD_SIZE) {
		if (is_vmalloc_addr(tgt)) {
			if (!spans_page(tgt, Req->buffer_len)) {
				tgt = vmalloc_to_unity(tgt);
				dmac_flush_range(Req->buffer,
						 Req->buffer + Req->buffer_len);
			} else {
				tgt = sdio_dma_ptr;
			}
		}
	}

	rc = sdio_memcpy_fromio(func, tgt, Req->peripheral_addr,
				Req->buffer_len);
	if (rc) {
		printk(KERN_ERR "%s: failed (%d)\n", __func__, rc);
		return SDIO_FAILURE;
	}

	if (tgt == sdio_dma_ptr)
		memcpy(Req->buffer, sdio_dma_ptr, Req->buffer_len);

	return SDIO_SUCCESS;
}

Example 9: tf_rpc_init

/* Check protocol version returned by the PA */
static u32 tf_rpc_init(struct tf_comm *comm)
{
	u32 protocol_version;
	u32 rpc_error = RPC_SUCCESS;

	dpr_info("%s(%p)\n", __func__, comm);

	spin_lock(&(comm->lock));

#if 0
	dmac_flush_range((void *)comm->l1_buffer,
			 (void *)(((u32)(comm->l1_buffer)) + PAGE_SIZE));
	outer_inv_range(__pa(comm->l1_buffer),
			__pa(comm->l1_buffer) + PAGE_SIZE);
#endif

	protocol_version = comm->l1_buffer->protocol_version;

	if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
			!= TF_S_PROTOCOL_MAJOR_VERSION) {
		dpr_err("SMC: Unsupported SMC Protocol PA Major "
			"Version (0x%02x, expected 0x%02x)!\n",
			GET_PROTOCOL_MAJOR_VERSION(protocol_version),
			TF_S_PROTOCOL_MAJOR_VERSION);
		rpc_error = RPC_ERROR_CONNECTION_PROTOCOL;
	} else {
		rpc_error = RPC_SUCCESS;
	}

	spin_unlock(&(comm->lock));

	return rpc_error;
}

Example 10: ion_cp_cache_ops

int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
		     void *vaddr, unsigned int offset, unsigned int length,
		     unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cp_heap->has_outer_cache) {
		unsigned long pstart = buffer->priv_phys + offset;

		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}

Example 11: ion_cp_cache_ops

int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
		     void *vaddr, unsigned int offset, unsigned int length,
		     unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	unsigned int size_to_vmap, total_size;
	int i, j;
	void *ptr = NULL;
	ion_phys_addr_t buff_phys = buffer->priv_phys;

	if (!vaddr) {
		/*
		 * Split the vmalloc space into smaller regions in
		 * order to clean and/or invalidate the cache.
		 */
		size_to_vmap = (VMALLOC_END - VMALLOC_START) / 8;
		total_size = buffer->size;
		for (i = 0; i < total_size; i += size_to_vmap) {
			size_to_vmap = min(size_to_vmap, total_size - i);
			for (j = 0; j < 10 && size_to_vmap; ++j) {
				ptr = ioremap(buff_phys, size_to_vmap);
				if (ptr) {
					switch (cmd) {
					case ION_IOC_CLEAN_CACHES:
						dmac_clean_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_clean_range;
						break;
					case ION_IOC_INV_CACHES:
						dmac_inv_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_inv_range;
						break;
					case ION_IOC_CLEAN_INV_CACHES:
						dmac_flush_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_flush_range;
						break;
					default:
						return -EINVAL;
					}
					buff_phys += size_to_vmap;
					break;
				} else {
					size_to_vmap >>= 1;
				}
			}
			if (!ptr) {
				pr_err("Couldn't io-remap the memory\n");
				return -EINVAL;
			}
			iounmap(ptr);
		}
	} else {
		/*
		 * The source snippet was truncated at this point; the rest is
		 * reconstructed on the assumption that it mirrors the
		 * direct-vaddr path shown in Example 10.
		 */
		switch (cmd) {
		case ION_IOC_CLEAN_CACHES:
			dmac_clean_range(vaddr, vaddr + length);
			outer_cache_op = outer_clean_range;
			break;
		case ION_IOC_INV_CACHES:
			dmac_inv_range(vaddr, vaddr + length);
			outer_cache_op = outer_inv_range;
			break;
		case ION_IOC_CLEAN_INV_CACHES:
			dmac_flush_range(vaddr, vaddr + length);
			outer_cache_op = outer_flush_range;
			break;
		default:
			return -EINVAL;
		}
	}

	if (cp_heap->has_outer_cache) {
		unsigned long pstart = buffer->priv_phys + offset;

		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}

Example 12: Cache_wbInv

/* Function to write back invalidate the Cache module */
Void Cache_wbInv(Ptr blockPtr, UInt32 byteCnt, Bits16 type, Bool wait)
{
	GT_4trace(curTrace, GT_ENTER, "Cache_wbInv", blockPtr, byteCnt, type, wait);

#ifdef USE_CACHE_VOID_ARG
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
	dmac_map_area(blockPtr, (size_t)byteCnt, DMA_BIDIRECTIONAL);
	outer_flush_range(__pa((UInt32)blockPtr),
			  __pa((UInt32)(blockPtr + byteCnt)));
#else
	dmac_flush_range(blockPtr, (blockPtr + byteCnt));
#endif
#else
	dmac_flush_range((UInt32)blockPtr, (UInt32)(blockPtr + byteCnt));
#endif

	GT_0trace(curTrace, GT_LEAVE, "Cache_wbInv");
}

Example 13: memory_engine_cache

int memory_engine_cache(memory_engine_t *engine, uint cmd,
			shm_driver_operation_t op)
{
	int res = 0;
	memory_node_t *node;
	char tag_clean[] = "clean";
	char tag_invalidate[] = "invalidate";
	char tag_cleanAndinvalidate[] = "clean and invalidate";
	char *ptr_tag;

	if (engine == NULL)
		return -EINVAL;

	down(&(engine->m_mutex));
	node = memory_engine_lookup_shm_node_for_cache(&(engine->m_shm_root),
						       op.m_param3, op.m_param2);
	if ((node == NULL) || (node->m_next_free != NULL)) {
		res = 0;
		if (cmd == SHM_DEVICE_CMD_INVALIDATE) {
			ptr_tag = tag_invalidate;
		} else if (cmd == SHM_DEVICE_CMD_CLEAN) {
			ptr_tag = tag_clean;
		} else {
			ptr_tag = tag_cleanAndinvalidate;
		}
		up(&(engine->m_mutex));
		return res;
	}
	up(&(engine->m_mutex));

	switch (cmd) {
	case SHM_DEVICE_CMD_INVALIDATE:
		dmac_map_area((const void *)op.m_param1,
			      op.m_param2, DMA_FROM_DEVICE);
		outer_inv_range(op.m_param3,
				op.m_param3 + op.m_param2);
		break;
	case SHM_DEVICE_CMD_CLEAN:
		dmac_map_area((const void *)op.m_param1,
			      op.m_param2, DMA_TO_DEVICE);
		outer_clean_range(op.m_param3,
				  op.m_param3 + op.m_param2);
		break;
	case SHM_DEVICE_CMD_CLEANANDINVALIDATE:
		dmac_flush_range((const void *)op.m_param1,
				 (const void *)(op.m_param1 + op.m_param2));
		outer_flush_range(op.m_param3,
				  op.m_param3 + op.m_param2);
		break;
	default:
		res = -ENOTTY;
	}
	return res;
}

Example 14: mv_dma_init

int mv_dma_init(void)
{
#if defined(CONFIG_MV78200) || defined(CONFIG_MV632X)
	if (MV_FALSE == mvSocUnitIsMappedToThisCpu(IDMA)) {
		printk(KERN_INFO "IDMA is not mapped to this CPU\n");
		return -ENODEV;
	}
#endif

	printk(KERN_INFO "Use IDMA channels %d and %d for enhancing the following function:\n",
	       CPY_CHAN1, CPY_CHAN2);
#ifdef CONFIG_MV_IDMA_COPYUSER
	printk(KERN_INFO "  o Copy From/To user space operations.\n");
#endif
#ifdef CONFIG_MV_IDMA_MEMCOPY
	printk(KERN_INFO "  o memcpy() and memmove() operations.\n");
#endif
#ifdef CONFIG_MV_IDMA_MEMZERO
	printk(KERN_INFO "  o memzero() operations.\n");
#endif

#ifdef CONFIG_MV_IDMA_MEMZERO
	DPRINTK(KERN_ERR "ZERO buffer address 0x%08x\n", (u32)dmaMemInitBuff);
	asm_memzero(dmaMemInitBuff, sizeof(dmaMemInitBuff));
	/* Write the zero pattern back to DRAM so the IDMA engine reads it. */
	dmac_flush_range(dmaMemInitBuff, dmaMemInitBuff + sizeof(dmaMemInitBuff));
#endif

	MV_REG_WRITE(IDMA_BYTE_COUNT_REG(CPY_CHAN1), 0);
	MV_REG_WRITE(IDMA_CURR_DESC_PTR_REG(CPY_CHAN1), 0);
	MV_REG_WRITE(IDMA_CTRL_HIGH_REG(CPY_CHAN1), ICCHR_ENDIAN_LITTLE
#ifdef MV_CPU_LE
		     | ICCHR_DESC_BYTE_SWAP_EN
#endif
		     );
	MV_REG_WRITE(IDMA_CTRL_LOW_REG(CPY_CHAN1), CPY_IDMA_CTRL_LOW_VALUE);

	MV_REG_WRITE(IDMA_BYTE_COUNT_REG(CPY_CHAN2), 0);
	MV_REG_WRITE(IDMA_CURR_DESC_PTR_REG(CPY_CHAN2), 0);
	MV_REG_WRITE(IDMA_CTRL_HIGH_REG(CPY_CHAN2), ICCHR_ENDIAN_LITTLE
#ifdef MV_CPU_LE
		     | ICCHR_DESC_BYTE_SWAP_EN
#endif
		     );
	MV_REG_WRITE(IDMA_CTRL_LOW_REG(CPY_CHAN2), CPY_IDMA_CTRL_LOW_VALUE);

	current_dma_channel = CPY_CHAN1;

	dma_proc_entry = create_proc_entry("dma_copy", S_IFREG | S_IRUGO, 0);
	if (dma_proc_entry) {
		dma_proc_entry->read_proc = dma_read_proc;
		/* dma_proc_entry->write_proc = dma_write_proc; */
		dma_proc_entry->nlink = 1;
	}

	idma_init = 1;
	return 0;
}

Example 15: dmmupwl_flush_cache

void dmmupwl_flush_cache(void *virAddr, IM_UINT32 phyAddr, IM_INT32 size)
{
	IM_INFOMSG((IM_STR("%s(virAddr=0x%x, phyAddr=0x%x, size=%d)"),
		    IM_STR(_IM_FUNC_), (IM_INT32)virAddr, phyAddr, size));

	/* Flush L1 using the virtual address; the end address is exclusive,
	 * so pass start + size to cover the final cache line. */
	dmac_flush_range(virAddr, (void *)(virAddr + size));

	/* Flush L2 using the physical address. */
	outer_flush_range(phyAddr, phyAddr + size);
}