本文整理汇总了C++中outer_inv_range函数的典型用法代码示例。如果您正苦于以下问题:C++ outer_inv_range函数的具体用法?C++ outer_inv_range怎么用?C++ outer_inv_range使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了outer_inv_range函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: meson_trustzone_efuse
int meson_trustzone_efuse(struct efuse_hal_api_arg* arg)
{
	int ret;

	if (!arg)
		return -1;

	/* The secure-world SMC must be issued from CPU0. */
	set_cpus_allowed_ptr(current, cpumask_of(0));

	/* Clean inner (L1) then outer (L2) cache for everything the secure
	 * world will read: the data buffer, the return-count word, and the
	 * argument structure itself. */
	__cpuc_flush_dcache_area(__va(arg->buffer_phy), arg->size);
	outer_clean_range(arg->buffer_phy, arg->buffer_phy + arg->size);
	__cpuc_flush_dcache_area(__va(arg->retcnt_phy), sizeof(unsigned int));
	outer_clean_range(arg->retcnt_phy,
			  arg->retcnt_phy + sizeof(unsigned int));
	__cpuc_flush_dcache_area((void *)arg, sizeof(struct efuse_hal_api_arg));
	outer_clean_range(__pa(arg), __pa(arg + 1));

	ret = meson_smc_hal_api(TRUSTZONE_HAL_API_EFUSE, __pa(arg));

	/* On a read the secure world filled the data buffer: invalidate
	 * outer then inner cache before the CPU looks at it. */
	if (arg->cmd == EFUSE_HAL_API_READ) {
		outer_inv_range(arg->buffer_phy, arg->buffer_phy + arg->size);
		dmac_unmap_area(__va(arg->buffer_phy), arg->size,
				DMA_FROM_DEVICE);
	}

	/* The return count is written back for every command.
	 * BUG FIX: the inner-cache unmap here originally targeted the data
	 * buffer (buffer_phy/arg->size) instead of the retcnt word whose
	 * outer range is invalidated on the previous line. */
	outer_inv_range(arg->retcnt_phy,
			arg->retcnt_phy + sizeof(unsigned int));
	dmac_unmap_area(__va(arg->retcnt_phy), sizeof(unsigned int),
			DMA_FROM_DEVICE);

	return ret;
}
示例2: tf_rpc_init
/* Check protocol version returned by the PA */
static u32 tf_rpc_init(struct tf_comm *comm)
{
	u32 pa_version;
	u32 status;

	dpr_info("%s(%p)\n", __func__, comm);

	spin_lock(&comm->lock);

#if 0
	dmac_flush_range((void *)comm->l1_buffer,
		(void *)(((u32)(comm->l1_buffer)) + PAGE_SIZE));
	outer_inv_range(__pa(comm->l1_buffer),
		__pa(comm->l1_buffer) + PAGE_SIZE);
#endif

	/* Snapshot the version the PA wrote into the shared L1 buffer. */
	pa_version = comm->l1_buffer->protocol_version;

	if (GET_PROTOCOL_MAJOR_VERSION(pa_version) ==
	    TF_S_PROTOCOL_MAJOR_VERSION) {
		status = RPC_SUCCESS;
	} else {
		dpr_err("SMC: Unsupported SMC Protocol PA Major "
			"Version (0x%02x, expected 0x%02x)!\n",
			GET_PROTOCOL_MAJOR_VERSION(pa_version),
			TF_S_PROTOCOL_MAJOR_VERSION);
		status = RPC_ERROR_CONNECTION_PROTOCOL;
	}

	spin_unlock(&comm->lock);

	return status;
}
示例3: tzasc_test_init
/*
 * Module init: align the test buffer, push it out of both cache
 * levels, and register the misc device plus its physaddr sysfs file.
 */
static int __init tzasc_test_init(void)
{
	int err;

	tzasc_buf_phys = ALIGN(virt_to_phys(tzasc_buf),
			       PAGE_SIZE << CONFIG_TZASC_TEST_PAGE_ORDER);
	__flush_dcache_area(phys_to_virt(tzasc_buf_phys),
			    PAGE_SIZE << CONFIG_TZASC_TEST_PAGE_ORDER);
	outer_inv_range(tzasc_buf_phys,
			tzasc_buf_phys +
			(PAGE_SIZE << CONFIG_TZASC_TEST_PAGE_ORDER));

	err = misc_register(&tzasc_dev);
	if (err) {
		pr_err("misc device registration failed\n");
		return err;
	}

	err = device_create_file(tzasc_dev.this_device, &dev_attr_physaddr);
	if (err) {
		pr_err("physaddr sysfs file creation failed\n");
		misc_deregister(&tzasc_dev);
		return err;
	}

	return 0;
}
示例4: osl_cache_inv
/* Invalidate the inner cache (via the RX map helper) and the outer
 * cache for [va, va + size). */
inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{
	unsigned long pa;

	dmac_map_area(va, size, DMA_RX);
	pa = __pa(va);
	outer_inv_range(pa, pa + size);
	/* WAR : Call it once more, to make sure INVALIDATE really happens.
	 * On 4708 ARM platforms, intermittently, we are seeing corrupt/dirty data after
	 * INVALIDATE. Calling outer_inv_range twice seems to solve the problem.
	 */
	dmac_map_area(va, size, DMA_RX);
	pa = __pa(va);
	outer_inv_range(pa, pa + size);
}
示例5: vcm_mem_allocator
/*
 * Bind VCM memory for a UMP descriptor and copy its physical-block
 * layout into descriptor->block_array.
 * Returns 1 on success, 0 on allocation failure.
 */
static int vcm_mem_allocator(vcm_allocator *info, ump_dd_mem *descriptor)
{
	unsigned long num_blocks;
	unsigned long i;	/* was int: matched against unsigned num_blocks */
	struct vcm_phys *phys;
	struct vcm_phys_part *part;
	unsigned long size_total = 0;
	struct ump_vcm *ump_vcm;

	ump_vcm = (struct ump_vcm *)descriptor->backend_info;

	/* NOTE(review): the binding result is used unchecked below; confirm
	 * vcm_make_binding() cannot fail here, or add error handling. */
	ump_vcm->vcm_res =
	    vcm_make_binding(ump_vcm->vcm, descriptor->size_bytes,
			     ump_vcm->dev_id, 0);

	phys = ump_vcm->vcm_res->phys;
	part = phys->parts;
	num_blocks = phys->count;

	DBG_MSG(5,
		("Allocating page array. Size: %lu, VCM Reservation : 0x%x\n",
		 phys->count * sizeof(ump_dd_physical_block),
		 ump_vcm->vcm_res->start));

	/* Now, make a copy of the block information supplied by the user */
	descriptor->block_array =
	    (ump_dd_physical_block *) vmalloc(sizeof(ump_dd_physical_block) *
					      num_blocks);
	if (NULL == descriptor->block_array) {
		/* BUG FIX: the original vfree()d the pointer that had just
		 * failed to allocate (always NULL here) — dead code removed. */
		DBG_MSG(1, ("Could not allocate a mem handle for function.\n"));
		return 0;	/* failure */
	}

	for (i = 0; i < num_blocks; i++) {
		descriptor->block_array[i].addr = part->start;
		descriptor->block_array[i].size = part->size;

		/* Invalidate inner then outer cache for each block so the
		 * CPU re-reads device-written data. */
		dmac_unmap_area(phys_to_virt(part->start), part->size,
				DMA_FROM_DEVICE);
		outer_inv_range(part->start, part->start + part->size);
		++part;

		size_total += descriptor->block_array[i].size;
		DBG_MSG(6,
			("UMP memory created with VCM. addr 0x%x, size: 0x%x\n",
			 descriptor->block_array[i].addr,
			 descriptor->block_array[i].size));
	}

	descriptor->size_bytes = size_total;
	descriptor->nr_blocks = num_blocks;
	descriptor->ctx = NULL;
	info->num_vcm_blocks += num_blocks;

	return 1;		/* success */
}
示例6: memory_engine_cache
/*
 * Perform a cache-maintenance command on a shared-memory region.
 *
 * op.m_param1 is used as the virtual address of the region,
 * op.m_param2 its length, and op.m_param3 its physical address.
 * Returns 0 on success, -EINVAL for a NULL engine, -ENOTTY for an
 * unknown command.
 */
int memory_engine_cache(memory_engine_t *engine, uint cmd,
			shm_driver_operation_t op)
{
	int res = 0;
	memory_node_t *node;

	if (engine == NULL)
		return -EINVAL;

	down(&(engine->m_mutex));
	node = memory_engine_lookup_shm_node_for_cache(&(engine->m_shm_root),
						       op.m_param3,
						       op.m_param2);
	if ((node == NULL) || (node->m_next_free != NULL)) {
		/* Unknown or already-freed node: treat as success, as the
		 * original did.  (The tag strings the original built here
		 * were never read and have been removed as dead code.) */
		up(&(engine->m_mutex));
		return 0;
	}
	up(&(engine->m_mutex));

	switch (cmd) {
	case SHM_DEVICE_CMD_INVALIDATE:
		/* Inner map-for-device then outer invalidate. */
		dmac_map_area((const void *)op.m_param1,
			      op.m_param2, DMA_FROM_DEVICE);
		outer_inv_range(op.m_param3, op.m_param3 + op.m_param2);
		break;

	case SHM_DEVICE_CMD_CLEAN:
		dmac_map_area((const void *)op.m_param1,
			      op.m_param2, DMA_TO_DEVICE);
		outer_clean_range(op.m_param3, op.m_param3 + op.m_param2);
		break;

	case SHM_DEVICE_CMD_CLEANANDINVALIDATE:
		dmac_flush_range((const void *)op.m_param1,
				 (const void *)(op.m_param1 + op.m_param2));
		outer_flush_range(op.m_param3, op.m_param3 + op.m_param2);
		break;

	default:
		res = -ENOTTY;
		break;	/* was missing; harmless but fragile */
	}

	return res;
}
示例7: meson_trustzone_memconfig
/*
 * Push the memsecure configuration table to the secure world and read
 * back the result.  Returns the SMC HAL return code.
 */
int meson_trustzone_memconfig(void)
{
	int ret;
	struct memconfig_hal_api_arg arg;

	arg.memconfigbuf_phy_addr = __pa(memsecure);
	arg.memconfigbuf_count = MEMCONFIG_NUM;

	/* Clean inner then outer cache for the table and the arg block
	 * before the secure world reads them. */
	__cpuc_flush_dcache_area(memsecure, sizeof(memsecure));
	outer_clean_range(__pa(memsecure), __pa(memsecure + MEMCONFIG_NUM));
	__cpuc_flush_dcache_area(&arg, sizeof(arg));
	/* BUG FIX: the original computed __pa(&arg) + 1 (the "+ 1" was
	 * applied to the physical address, not the struct pointer), so only
	 * one byte of the struct was covered.  Cover the whole struct. */
	outer_clean_range(__pa(&arg), __pa(&arg) + sizeof(arg));

	ret = meson_smc_hal_api(TRUSTZONE_HAL_API_MEMCONFIG, __pa(&arg));

	/* Invalidate outer then inner cache so the CPU sees the secure
	 * world's writes.  Same one-byte-range bug fixed here. */
	outer_inv_range(__pa(&arg), __pa(&arg) + sizeof(arg));
	dmac_unmap_area(&arg, sizeof(arg), DMA_FROM_DEVICE);
	outer_inv_range(__pa(memsecure), __pa(memsecure + MEMCONFIG_NUM));
	dmac_unmap_area(memsecure, sizeof(memsecure), DMA_FROM_DEVICE);

	return ret;
}
示例8: ___dma_page_dev_to_cpu
/*
 * Make a page-based DMA buffer visible to the CPU after a transfer:
 * invalidate the outer (L2) cache first, then run the inner-cache
 * unmap over each page via dma_cache_maint_page().
 *
 * @page: first page of the buffer
 * @off:  byte offset into @page
 * @size: region length in bytes
 * @dir:  direction of the completed DMA transfer
 */
void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
}
示例9: cacheperf
/*
 * Benchmark one cache-maintenance operation.
 *
 * Runs the operation selected by @id on the buffer @vbuf for transfer
 * sizes doubling from START_SIZE up to END_SIZE, try_cnt iterations
 * each, and prints the average elapsed time per size.  The module-level
 * l1 and l2 flags select which cache level(s) are exercised.
 */
static void cacheperf(void *vbuf, enum cachemaintenance id)
{
	struct timespec beforets;
	struct timespec afterts;
	phys_addr_t pbuf = virt_to_phys(vbuf);
	u32 pbufend, xfer_size, i;
	long timeval;

	xfer_size = START_SIZE;
	while (xfer_size <= END_SIZE) {
		pbufend = pbuf + xfer_size;
		timeval = 0;

		for (i = 0; i < try_cnt; i++) {
			/* Dirty the buffer so clean/flush has real work. */
			memset(vbuf, i, xfer_size);
			getnstimeofday(&beforets);
			switch (id) {
			case CM_CLEAN:
				/* Clean inner first, then outer. */
				if (l1)
					dmac_map_area(vbuf, xfer_size,
							DMA_TO_DEVICE);
				if (l2)
					outer_clean_range(pbuf, pbufend);
				break;
			case CM_INV:
				/* Invalidate outer first, then inner. */
				if (l2)
					outer_inv_range(pbuf, pbufend);
				if (l1)
					dmac_unmap_area(vbuf, xfer_size,
							DMA_FROM_DEVICE);
				break;
			case CM_FLUSH:
				if (l1)
					dmac_flush_range(vbuf,
						(void *)((u32) vbuf + xfer_size));
				if (l2)
					outer_flush_range(pbuf, pbufend);
				break;
			case CM_FLUSHALL:
				if (l1)
					flush_cache_all();
				if (l2)
					outer_flush_all();
				break;
			}
			getnstimeofday(&afterts);
			timeval += update_timeval(beforets, afterts);
		}
		/* Average nanoseconds per iteration for this size. */
		printk(KERN_INFO "%lu\n", timeval/try_cnt);
		xfer_size *= 2;
	}
}
示例10: omap_tiler_cache_operation
/*
 * Clean/invalidate CPU caches for a cached TILER buffer.
 *
 * @buffer:  ion buffer; must be non-NULL, mapped cacheable, and carry
 *           valid omap_tiler_info in priv_virt
 * @len:     byte count to maintain; must not exceed the allocation
 * @vaddr:   virtual address of the range (also translated to paddr)
 * @cacheop: CACHE_FLUSH cleans+invalidates; any other value invalidates
 *
 * Only TILER 1D (PAGE pixel-format) buffers are supported.
 * Returns 0 on success, -EINVAL on any validation failure.
 */
int omap_tiler_cache_operation(struct ion_buffer *buffer, size_t len,
		unsigned long vaddr, enum cache_operation cacheop)
{
	struct omap_tiler_info *info;
	int n_pages;
	/* NOTE(review): translated before the validation below; assumes
	 * tiler_virt2phys() is safe for any vaddr — confirm. */
	phys_addr_t paddr = tiler_virt2phys(vaddr);

	if (!buffer) {
		pr_err("%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	if (!buffer->cached) {
		pr_err("%s(): buffer not mapped as cacheable\n", __func__);
		return -EINVAL;
	}
	info = buffer->priv_virt;
	if (!info) {
		pr_err("%s(): tiler info of buffer is NULL\n", __func__);
		return -EINVAL;
	}
	n_pages = info->n_tiler_pages;
	if (len > (n_pages * PAGE_SIZE)) {
		pr_err("%s(): size to flush is greater than allocated size\n",
				__func__);
		return -EINVAL;
	}
	if (TILER_PIXEL_FMT_PAGE != info->fmt) {
		pr_err("%s(): only TILER 1D buffers can be cached\n",
				__func__);
		return -EINVAL;
	}

#if 0
	/* Disabled: full-cache-flush shortcut for large ranges. */
	if (len > FULL_CACHE_FLUSH_THRESHOLD) {
		on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);
		outer_flush_all();
		return 0;
	}
#endif

	if (cacheop == CACHE_FLUSH) {
		/* Clean+invalidate: inner first, then outer. */
		flush_cache_user_range(vaddr, vaddr + len);
		outer_flush_range(paddr, paddr + len);
	} else {
		/* Invalidate: outer first, then inner map-for-device. */
		outer_inv_range(paddr, paddr + len);
		dmac_map_area((const void*) vaddr, len, DMA_FROM_DEVICE);
	}
	return 0;
}
示例11: mfc_mem_cache_inv
/* Invalidate outer (L2) then inner cache for [start_addr, +size). */
void mfc_mem_cache_inv(const void *start_addr, unsigned long size)
{
	unsigned long pa = __pa((unsigned long)start_addr);

	outer_inv_range(pa, pa + size);
	dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);
	/* OPT#1: kernel provide below function */
	/*
	dma_unmap_single(NULL, (void *)start_addr, size, DMA_FROM_DEVICE);
	*/
}
示例12: ___dma_single_dev_to_cpu
/*
 * Make a single kernel-VA DMA buffer visible to the CPU after a
 * transfer: outer invalidate (unless DMA was to the device), then the
 * inner-cache unmap.
 */
void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long pa;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* DMA to the device leaves no stale device data to discard. */
	if (dir != DMA_TO_DEVICE) {
		pa = __pa(kaddr);
		outer_inv_range(pa, pa + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
示例13: s5p_mfc_cache_inv
/* Invalidate both cache levels for the buffer behind @alloc_ctx. */
void s5p_mfc_cache_inv(void *alloc_ctx)
{
	struct vb2_cma_phys_buf *buf = (struct vb2_cma_phys_buf *)alloc_ctx;
	unsigned long pa = (dma_addr_t)buf->paddr;
	unsigned long len = buf->size;
	void *va = phys_to_virt(buf->paddr);

	/* Outer (L2) invalidate first, then the inner-cache unmap. */
	outer_inv_range(pa, pa + len);
	dmac_unmap_area(va, len, DMA_FROM_DEVICE);
}
示例14: kgsl_cache_range_op
/*
 * Perform a cache-maintenance operation over [addr, addr + size).
 *
 * @addr:  page-aligned virtual start address (vmalloc or user space,
 *         selected by @flags)
 * @size:  page-aligned length in bytes
 * @flags: KGSL_CACHE_FLUSH / KGSL_CACHE_CLEAN (otherwise invalidate),
 *         ORed with KGSL_CACHE_VMALLOC_ADDR or KGSL_CACHE_USER_ADDR
 *
 * The inner cache is maintained by virtual address in one call; when an
 * outer cache is configured, each page is translated to its physical
 * address and maintained individually.
 * Returns 0 on success or -EINVAL for an unresolvable address.
 */
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	/* Inner (L1) maintenance over the whole virtual range. */
	if (flags & KGSL_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				(const void *)(addr + size));
	else
		if (flags & KGSL_CACHE_CLEAN)
			dmac_clean_range((const void *)addr,
					(const void *)(addr + size));
		else
			dmac_inv_range((const void *)addr,
					(const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	/* Outer (L2) maintenance page by page: the virtual range need not
	 * be physically contiguous, so translate each page separately. */
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		pte_t *pte_ptr, pte;
		unsigned long physaddr;
		if (flags & KGSL_CACHE_VMALLOC_ADDR)
			physaddr = vmalloc_to_pfn((void *)end);
		else
			if (flags & KGSL_CACHE_USER_ADDR) {
				pte_ptr = kgsl_get_pte_from_vaddr(end);
				if (!pte_ptr)
					return -EINVAL;
				pte = *pte_ptr;
				physaddr = pte_pfn(pte);
				pte_unmap(pte_ptr);
			} else
				return -EINVAL;
		/* pfn -> physical byte address */
		physaddr <<= PAGE_SHIFT;
		if (flags & KGSL_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else
			if (flags & KGSL_CACHE_CLEAN)
				outer_clean_range(physaddr,
					physaddr + KGSL_PAGESIZE);
			else
				outer_inv_range(physaddr,
					physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
示例15: ___dma_page_cpu_to_dev
/*
 * Hand a page-based buffer to the device for DMA: inner-cache
 * maintenance page by page, then the outer-cache operation chosen by
 * the transfer direction.
 */
void BCMFASTPATH_HOST ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long pa;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	pa = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(pa, pa + size);
	else
		outer_clean_range(pa, pa + size);
}