This article collects and summarizes typical usage examples of the outer_flush_range function in C/C++. If you are struggling with questions such as: what exactly does outer_flush_range do? how is outer_flush_range used? where can I find real-world examples of outer_flush_range? — then the hand-picked code examples here may help.
A total of 15 code examples of the outer_flush_range function are shown below, sorted by popularity by default.
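All fifteen examples below come from 32-bit ARM Linux kernel code, where outer_flush_range() cleans (writes back) and invalidates a range of the outer cache, typically an L2 controller such as the PL310, addressed by physical address. The inner (L1) caches are maintained separately by virtual address, so the call is almost always paired with an inner-cache operation such as dmac_flush_range(). A minimal sketch of that pairing, assuming a 32-bit ARM kernel with CONFIG_OUTER_CACHE and a lowmem (direct-mapped) buffer; the function name is illustrative:

#include <linux/types.h>        /* phys_addr_t */
#include <asm/cacheflush.h>     /* dmac_flush_range() */
#include <asm/memory.h>         /* virt_to_phys() */
#include <asm/outercache.h>     /* outer_flush_range() */

/* Clean and invalidate both cache levels for a direct-mapped buffer. */
static void flush_buffer_for_dma(void *buf, size_t len)
{
    phys_addr_t phys = virt_to_phys(buf);

    dmac_flush_range(buf, buf + len);     /* inner (L1): virtual addresses */
    outer_flush_range(phys, phys + len);  /* outer (L2): physical addresses */
}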
Example 1: __dma_clear_buffer
static void __dma_clear_buffer(struct page *page, size_t size)
{
/*
* Ensure that the allocated pages are zeroed, and that any data
* lurking in the kernel direct-mapped region is invalidated.
*/
if (!PageHighMem(page)) {
void *ptr = page_address(page);
if (ptr) {
memset(ptr, 0, size);
dmac_flush_range(ptr, ptr + size);
outer_flush_range(__pa(ptr), __pa(ptr) + size);
}
} else {
phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
phys_addr_t end = base + size;
while (size > 0) {
void *ptr = kmap_atomic(page);
memset(ptr, 0, PAGE_SIZE);
dmac_flush_range(ptr, ptr + PAGE_SIZE);
kunmap_atomic(ptr);
page++;
size -= PAGE_SIZE;
}
outer_flush_range(base, end);
}
}
Example 2: Cache_wbInv
/* Function to write back invalidate the Cache module */
Void Cache_wbInv(Ptr blockPtr, UInt32 byteCnt, Bits16 type, Bool wait) {
GT_4trace (curTrace, GT_ENTER, "Cache_wbInv", blockPtr, byteCnt, type, wait);
#if 0
/*
 * This #if 0'ed code doesn't appear to perform the invalidate part of
 * wbInv, while Cache_wb() and Cache_inv() appear to work properly, so
 * for now wbInv is implemented as a combination of the two individual
 * functions.
 */
#ifdef USE_CACHE_VOID_ARG
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
dmac_map_area(blockPtr, (size_t)byteCnt, DMA_BIDIRECTIONAL);
outer_flush_range(__pa((UInt32)blockPtr),
__pa((UInt32)(blockPtr+byteCnt)) );
#else
dmac_flush_range(blockPtr, (blockPtr+byteCnt) );
#endif
#else
dmac_flush_range( (UInt32)blockPtr, (UInt32)(blockPtr + byteCnt) );
#endif
#else
Cache_wb(blockPtr, byteCnt, type, wait);
Cache_inv(blockPtr, byteCnt, type, wait);
#endif
GT_0trace (curTrace, GT_LEAVE, "Cache_wbInv");
}
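The live path above simply calls Cache_wb() followed by Cache_inv(). On kernels from 2.6.34 onward, those two halves typically reduce to the ARM DMA-mapping cache hooks. The sketch below is a hypothetical reconstruction for illustration only; the helper names are invented and this is not the actual SysLink implementation:

#include <linux/dma-mapping.h>  /* DMA_TO_DEVICE, DMA_FROM_DEVICE */
#include <asm/cacheflush.h>     /* dmac_map_area(), dmac_unmap_area() */
#include <asm/memory.h>         /* __pa() */
#include <asm/outercache.h>     /* outer_clean_range(), outer_inv_range() */

/* Write back (clean): make CPU writes visible to the device. */
static void cache_wb_sketch(void *addr, size_t len)
{
    dmac_map_area(addr, len, DMA_TO_DEVICE);          /* clean inner caches */
    outer_clean_range(__pa(addr), __pa(addr) + len);  /* clean outer cache */
}

/* Invalidate: make device writes visible to the CPU (outer first,
 * then inner, as in the post-DMA path). */
static void cache_inv_sketch(void *addr, size_t len)
{
    outer_inv_range(__pa(addr), __pa(addr) + len);    /* invalidate outer cache */
    dmac_unmap_area(addr, len, DMA_FROM_DEVICE);      /* invalidate inner caches */
}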
Example 3: mb_rproc_start
static int mb_rproc_start(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct platform_device *pdev = to_platform_device(dev);
struct mb_rproc_pdata *local = platform_get_drvdata(pdev);
const struct firmware *fw;
int ret;
dev_info(dev, "%s\n", __func__);
INIT_WORK(&workqueue, handle_event);
flush_cache_all();
outer_flush_range(local->mem_start, local->mem_end);
remoteprocdev = pdev;
ret = request_firmware(&fw, local->bootloader, &pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "request_firmware failed\n");
return ret;
}
/* Copy bootloader to memory */
memcpy(local->vbase, fw->data, fw->size);
release_firmware(fw);
/* Just to be sure, synchronize memory */
dsb();
/* Release Microblaze from reset */
gpio_set_value(local->reset_gpio, 0);
return 0;
}
Example 4: isp_binging4awb_buf_alloc
static int32_t isp_binging4awb_buf_alloc(struct isp_k_private *isp_private, uint32_t len)
{
int32_t ret = 0x00;
#ifndef CONFIG_64BIT
uint32_t buf = 0x00;
void *ptr = NULL;
#endif
if (0x00 < len) {
isp_private->bing4awb_buf_len = len;
isp_private->bing4awb_buf_order = get_order(len);
isp_private->bing4awb_buf_addr = (unsigned long)__get_free_pages(GFP_KERNEL | __GFP_COMP,
isp_private->bing4awb_buf_order);
if (NULL == (void *)isp_private->bing4awb_buf_addr) {
printk("isp_binging4awb_buf_alloc: memory error, addr:0x%lx, len:0x%x, order:0x%x.\n",
isp_private->bing4awb_buf_addr,
isp_private->bing4awb_buf_len,
isp_private->bing4awb_buf_order);
return -1;
}
#ifndef CONFIG_64BIT
ptr = (void *)isp_private->bing4awb_buf_addr;
buf = virt_to_phys((volatile void *)isp_private->bing4awb_buf_addr);
dmac_flush_range(ptr, ptr + len);
outer_flush_range(__pa(ptr), __pa(ptr) + len);
#endif
}
return ret;
}
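Two details are worth noting here. First, the cache maintenance is compiled out under CONFIG_64BIT: arm64 has no software-managed outer cache, so outer_flush_range() only exists on 32-bit ARM. Second, a buffer obtained from __get_free_pages() must later be returned with free_pages() at the same order. The matching free path is not shown in this excerpt; a minimal sketch, assuming the same isp_k_private fields (the helper name is hypothetical):

#include <linux/gfp.h>  /* free_pages() */

static void isp_binging4awb_buf_free(struct isp_k_private *isp_private)
{
    if (isp_private->bing4awb_buf_addr) {
        /* Must use the order recorded at allocation time. */
        free_pages(isp_private->bing4awb_buf_addr,
                   isp_private->bing4awb_buf_order);
        isp_private->bing4awb_buf_addr = 0;
        isp_private->bing4awb_buf_len = 0;
    }
}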
Example 5: tegra114_flush_dcache
static void tegra114_flush_dcache(struct page *page, unsigned long offset,
size_t size)
{
phys_addr_t phys = page_to_phys(page) + offset;
void *virt = page_address(page) + offset;
__cpuc_flush_dcache_area(virt, size);
outer_flush_range(phys, phys + size);
}
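Note that page_address() in this helper requires the page to be in lowmem (or otherwise permanently mapped). A hypothetical caller, flushing every segment of a scatter-gather table through it:

#include <linux/scatterlist.h>

/* Hypothetical caller: flush all segments of an sg_table, e.g. after
 * the CPU has written descriptors that a device will read. */
static void flush_sgt(struct sg_table *sgt)
{
    struct scatterlist *sg;
    unsigned int i;

    for_each_sg(sgt->sgl, sg, sgt->nents, i)
        tegra114_flush_dcache(sg_page(sg), sg->offset, sg->length);
}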
Example 6: memory_engine_cache
int memory_engine_cache(memory_engine_t *engine, uint cmd,
shm_driver_operation_t op)
{
int res = 0;
memory_node_t *node;
char tag_clean[] = "clean";
char tag_invalidate[] = "invalidate";
char tag_cleanAndinvalidate[] = "clean and invalidate";
char *ptr_tag;
if (engine == NULL) {
return -EINVAL;
}
down(&(engine->m_mutex));
node = memory_engine_lookup_shm_node_for_cache(&(engine->m_shm_root),
op.m_param3, op.m_param2);
if ((node == NULL) || (node->m_next_free != NULL)) {
res = 0;
if (cmd == SHM_DEVICE_CMD_INVALIDATE) {
ptr_tag = tag_invalidate;
} else if (cmd == SHM_DEVICE_CMD_CLEAN) {
ptr_tag = tag_clean;
} else {
ptr_tag = tag_cleanAndinvalidate;
}
up(&(engine->m_mutex));
return res;
}
up(&(engine->m_mutex));
switch (cmd) {
case SHM_DEVICE_CMD_INVALIDATE:
dmac_map_area((const void *)op.m_param1,
op.m_param2, DMA_FROM_DEVICE);
outer_inv_range(op.m_param3,
op.m_param3 + op.m_param2);
break;
case SHM_DEVICE_CMD_CLEAN:
dmac_map_area((const void *)op.m_param1,
op.m_param2, DMA_TO_DEVICE);
outer_clean_range(op.m_param3,
op.m_param3 + op.m_param2);
break;
case SHM_DEVICE_CMD_CLEANANDINVALIDATE:
dmac_flush_range((const void *)op.m_param1,
(const void *)(op.m_param1 +
op.m_param2));
outer_flush_range(op.m_param3,
op.m_param3 + op.m_param2);
break;
default:
res = -ENOTTY;
}
return res;
}
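Each command thus pairs an inner-cache DMA hint with the matching outer-cache call: invalidate uses DMA_FROM_DEVICE plus outer_inv_range(), clean uses DMA_TO_DEVICE plus outer_clean_range(), and clean+invalidate uses dmac_flush_range() plus outer_flush_range(). A hypothetical caller, with field roles inferred from the switch above (m_param1 = kernel virtual address, m_param2 = length, m_param3 = physical address; the field types are assumed):

/* Write back and discard a shared-memory region before the device reuses it. */
static int shm_flush_region(memory_engine_t *engine, void *vaddr,
                            phys_addr_t phys, size_t len)
{
    shm_driver_operation_t op;

    op.m_param1 = (unsigned long)vaddr;
    op.m_param2 = len;
    op.m_param3 = phys;

    return memory_engine_cache(engine, SHM_DEVICE_CMD_CLEANANDINVALIDATE, op);
}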
Example 7: zynq_cpun_start
int __cpuinit zynq_cpun_start(u32 address, int cpu)
{
u32 trampoline_code_size = &zynq_secondary_trampoline_end -
&zynq_secondary_trampoline;
if (cpu > ncores) {
pr_warn("CPU No. is not available in the system\n");
return -1;
}
/* MS: Expectation that the SLCR registers are directly mapped and accessible */
/* It is not possible to jump to a non-aligned address */
if (!(address & 3) && (!address || (address >= trampoline_code_size))) {
/* Store pointer to ioremap area which points to address 0x0 */
static u8 __iomem *zero;
u32 trampoline_size = &zynq_secondary_trampoline_jump -
&zynq_secondary_trampoline;
zynq_slcr_cpu_stop(cpu);
if (__pa(PAGE_OFFSET)) {
zero = ioremap(0, trampoline_code_size);
if (!zero) {
pr_warn("BOOTUP jump vectors not accessible\n");
return -1;
}
} else {
zero = (__force u8 __iomem *)PAGE_OFFSET;
}
/*
 * This is an elegant way to jump to any address:
 * 0x0: Load the address stored at 0x8 into r0
 * 0x4: Jump by a mov instruction
 * 0x8: The target address itself
 */
memcpy((__force void *)zero, &zynq_secondary_trampoline,
trampoline_size);
writel(address, zero + trampoline_size);
flush_cache_all();
outer_flush_range(0, trampoline_code_size);
smp_wmb();
if (__pa(PAGE_OFFSET))
iounmap(zero);
zynq_slcr_cpu_start(cpu);
return 0;
}
pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address);
return -1;
}
Example 8: handle_event
static void handle_event(struct work_struct *work)
{
struct mb_rproc_pdata *local = platform_get_drvdata(remoteprocdev);
flush_cache_all();
outer_flush_range(local->mem_start, local->mem_end);
if (rproc_vq_interrupt(local->rproc, 0) == IRQ_NONE)
dev_info(&remoteprocdev->dev, "no message found in vqid 0\n");
}
Example 9: dmmupwl_flush_cache
void dmmupwl_flush_cache(void *virAddr, IM_UINT32 phyAddr, IM_INT32 size)
{
IM_INFOMSG((IM_STR("%s(virAddr=0x%x, phyAddr=0x%x, size=%d)"), IM_STR(_IM_FUNC_), (IM_INT32)virAddr, phyAddr, size));
// Flush L1 using virtual address.
dmac_flush_range(virAddr, (void *)(virAddr + size - 1));
// Flush L2 using physical address.
outer_flush_range(phyAddr, phyAddr + size - 1);
}
Example 10: __dma_clear_buffer
static void __dma_clear_buffer(struct page *page, size_t size)
{
void *ptr;
/*
* Ensure that the allocated pages are zeroed, and that any data
* lurking in the kernel direct-mapped region is invalidated.
*/
ptr = page_address(page);
memset(ptr, 0, size);
dmac_flush_range(ptr, ptr + size);
outer_flush_range(__pa(ptr), __pa(ptr) + size);
}
Example 11: cacheperf
static void cacheperf(void *vbuf, enum cachemaintenance id)
{
struct timespec beforets;
struct timespec afterts;
phys_addr_t pbuf = virt_to_phys(vbuf);
u32 pbufend, xfer_size, i;
long timeval;
xfer_size = START_SIZE;
while (xfer_size <= END_SIZE) {
pbufend = pbuf + xfer_size;
timeval = 0;
for (i = 0; i < try_cnt; i++) {
memset(vbuf, i, xfer_size);
getnstimeofday(&beforets);
switch (id) {
case CM_CLEAN:
if (l1)
dmac_map_area(vbuf, xfer_size,
DMA_TO_DEVICE);
if (l2)
outer_clean_range(pbuf, pbufend);
break;
case CM_INV:
if (l2)
outer_inv_range(pbuf, pbufend);
if (l1)
dmac_unmap_area(vbuf, xfer_size,
DMA_FROM_DEVICE);
break;
case CM_FLUSH:
if (l1)
dmac_flush_range(vbuf,
(void *)((u32) vbuf + xfer_size));
if (l2)
outer_flush_range(pbuf, pbufend);
break;
case CM_FLUSHALL:
if (l1)
flush_cache_all();
if (l2)
outer_flush_all();
break;
}
getnstimeofday(&afterts);
timeval += update_timeval(beforets, afterts);
}
printk(KERN_INFO "%lu\n", timeval/try_cnt);
xfer_size *= 2;
}
}
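The helper update_timeval() is not part of this excerpt; judging by its use (accumulated into a long and divided by try_cnt for the printk), it returns the elapsed time between two timespecs, presumably in nanoseconds. A plausible sketch, offered only as an assumption about the original:

#include <linux/time.h>  /* struct timespec, NSEC_PER_SEC */

/* Assumed contract: elapsed nanoseconds from 'before' to 'after'. */
static long update_timeval(struct timespec before, struct timespec after)
{
    return (after.tv_sec - before.tv_sec) * NSEC_PER_SEC +
           (after.tv_nsec - before.tv_nsec);
}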
Example 12: omap_tiler_cache_operation
int omap_tiler_cache_operation(struct ion_buffer *buffer, size_t len,
unsigned long vaddr, enum cache_operation cacheop)
{
struct omap_tiler_info *info;
int n_pages;
phys_addr_t paddr = tiler_virt2phys(vaddr);
if (!buffer) {
pr_err("%s(): buffer is NULL\n", __func__);
return -EINVAL;
}
if (!buffer->cached) {
pr_err("%s(): buffer not mapped as cacheable\n", __func__);
return -EINVAL;
}
info = buffer->priv_virt;
if (!info) {
pr_err("%s(): tiler info of buffer is NULL\n", __func__);
return -EINVAL;
}
n_pages = info->n_tiler_pages;
if (len > (n_pages * PAGE_SIZE)) {
pr_err("%s(): size to flush is greater than allocated size\n",
__func__);
return -EINVAL;
}
if (TILER_PIXEL_FMT_PAGE != info->fmt) {
pr_err("%s(): only TILER 1D buffers can be cached\n",
__func__);
return -EINVAL;
}
#if 0
if (len > FULL_CACHE_FLUSH_THRESHOLD) {
on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);
outer_flush_all();
return 0;
}
#endif
if (cacheop == CACHE_FLUSH) {
flush_cache_user_range(vaddr, vaddr + len);
outer_flush_range(paddr, paddr + len);
} else {
outer_inv_range(paddr, paddr + len);
dmac_map_area((const void*) vaddr, len, DMA_FROM_DEVICE);
}
return 0;
}
Example 13: handle_event
static void handle_event(struct work_struct *work)
{
struct virtqueue *vq;
flush_cache_all();
outer_flush_range(zynq_rpmsg_p->mem_start, zynq_rpmsg_p->mem_end);
vq = zynq_rpmsg_p->vrings[0].vq;
if (vring_interrupt(0, vq) == IRQ_NONE)
dev_dbg(&zynq_rpmsg_platform->dev, "no message found in vqid 0\n");
}
Example 14: kgsl_cache_range_op
static long kgsl_cache_range_op(unsigned long addr, int size,
unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
unsigned long end;
#endif
BUG_ON(addr & (KGSL_PAGESIZE - 1));
BUG_ON(size & (KGSL_PAGESIZE - 1));
if (flags & KGSL_CACHE_FLUSH)
dmac_flush_range((const void *)addr,
(const void *)(addr + size));
else
if (flags & KGSL_CACHE_CLEAN)
dmac_clean_range((const void *)addr,
(const void *)(addr + size));
else
dmac_inv_range((const void *)addr,
(const void *)(addr + size));
#ifdef CONFIG_OUTER_CACHE
for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
pte_t *pte_ptr, pte;
unsigned long physaddr;
if (flags & KGSL_CACHE_VMALLOC_ADDR)
physaddr = vmalloc_to_pfn((void *)end);
else
if (flags & KGSL_CACHE_USER_ADDR) {
pte_ptr = kgsl_get_pte_from_vaddr(end);
if (!pte_ptr)
return -EINVAL;
pte = *pte_ptr;
physaddr = pte_pfn(pte);
pte_unmap(pte_ptr);
} else
return -EINVAL;
physaddr <<= PAGE_SHIFT;
if (flags & KGSL_CACHE_FLUSH)
outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
else
if (flags & KGSL_CACHE_CLEAN)
outer_clean_range(physaddr,
physaddr + KGSL_PAGESIZE);
else
outer_inv_range(physaddr,
physaddr + KGSL_PAGESIZE);
}
#endif
return 0;
}
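The per-page loop over the outer cache exists because vmalloc and user mappings are only virtually contiguous: each page may sit in a different physical frame, so a single outer_flush_range() over the whole span would touch the wrong physical addresses. The same idea in isolation, as a minimal sketch for a vmalloc range (assuming CONFIG_OUTER_CACHE; the helper name is illustrative):

#include <linux/mm.h>        /* vmalloc_to_pfn(), PAGE_SIZE, PAGE_SHIFT */
#include <asm/outercache.h>  /* outer_flush_range() */

/* Flush the outer cache for a page-aligned vmalloc range, one page at a
 * time, because consecutive virtual pages need not be physically adjacent. */
static void outer_flush_vmalloc_range(void *addr, size_t size)
{
    unsigned long va = (unsigned long)addr;
    unsigned long end = va + size;

    for (; va < end; va += PAGE_SIZE) {
        phys_addr_t pa = (phys_addr_t)vmalloc_to_pfn((void *)va) << PAGE_SHIFT;

        outer_flush_range(pa, pa + PAGE_SIZE);
    }
}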
Example 15: __dma_clear_buffer
static void __dma_clear_buffer(struct page *page, size_t size)
{
if (!PageHighMem(page)) {
void *ptr = page_address(page);
if (ptr) {
memset(ptr, 0, size);
dmac_flush_range(ptr, ptr + size);
outer_flush_range(__pa(ptr), __pa(ptr) + size);
}
} else {
phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
phys_addr_t end = base + size;
while (size > 0) {
void *ptr = kmap_atomic(page);
memset(ptr, 0, PAGE_SIZE);
dmac_flush_range(ptr, ptr + PAGE_SIZE);
kunmap_atomic(ptr);
page++;
size -= PAGE_SIZE;
}
outer_flush_range(base, end);
}
}