This article collects typical usage examples of the C function ION_IS_CACHED. If you have been wondering what ION_IS_CACHED does, how to use it, or what real usage looks like, the hand-picked code examples below should help.
A total of 15 code examples of ION_IS_CACHED are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C code examples.
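Before the examples, a note on what ION_IS_CACHED itself is: in the MSM forks of the ION memory allocator it is a simple macro that tests the "cached" bit of an allocation's flags word. The sketch below is illustrative only; the exact bit value is an assumption, and the authoritative definition lives in the kernel's ion/msm_ion headers.

/* Illustrative sketch only -- not the authoritative kernel definition. */
#define ION_FLAG_CACHED        (1 << 0)  /* assumed bit position; check your kernel's ion.h */
#define ION_IS_CACHED(__flags) ((__flags) & ION_FLAG_CACHED)

Every example that follows keys off this test, either to choose between cached and uncached (write-combined) mappings or to decide whether CPU cache maintenance is required.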
Example 1: container_of
void *ion_cp_heap_map_kernel(struct ion_heap *heap,
                             struct ion_buffer *buffer,
                             unsigned long flags)
{
    struct ion_cp_heap *cp_heap =
        container_of(heap, struct ion_cp_heap, heap);
    void *ret_value = NULL;

    mutex_lock(&cp_heap->lock);
    /* A protected (secured) heap may only be mapped uncached. */
    if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
        ((cp_heap->heap_protected == HEAP_PROTECTED) &&
         !ION_IS_CACHED(flags))) {
        if (ion_cp_request_region(cp_heap)) {
            mutex_unlock(&cp_heap->lock);
            return NULL;
        }

        if (ION_IS_CACHED(flags))
            ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
        else
            ret_value = ioremap(buffer->priv_phys, buffer->size);

        if (!ret_value) {
            ion_cp_release_region(cp_heap);
        } else {
            if (ION_IS_CACHED(buffer->flags))
                ++cp_heap->kmap_cached_count;
            else
                ++cp_heap->kmap_uncached_count;
        }
    }
    mutex_unlock(&cp_heap->lock);
    return ret_value;
}
Example 2: ion_cma_allocate
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
                            unsigned long len, unsigned long align,
                            unsigned long flags)
{
    struct device *dev = heap->priv;
    struct ion_cma_buffer_info *info;

    dev_dbg(dev, "Request buffer allocation len %ld\n", len);

    info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
    if (!info) {
        dev_err(dev, "Can't allocate buffer info\n");
        return -ENOMEM;
    }

    /* Uncached allocations are backed by write-combined memory. */
    if (!ION_IS_CACHED(flags))
        info->cpu_addr = dma_alloc_writecombine(dev, len,
                                                &(info->handle), 0);
    else
        info->cpu_addr = dma_alloc_nonconsistent(dev, len,
                                                 &(info->handle), 0);
    if (!info->cpu_addr) {
        dev_err(dev, "Fail to allocate buffer\n");
        goto err;
    }

    info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
    if (!info->table) {
        dev_err(dev, "Fail to allocate sg table\n");
        goto free_mem;
    }

    info->is_cached = ION_IS_CACHED(flags);

    ion_cma_get_sgtable(dev,
                        info->table, info->cpu_addr, info->handle, len);

    /* keep this for memory release */
    buffer->priv_virt = info;
    dev_dbg(dev, "Allocate buffer %p\n", buffer);
    return 0;

free_mem:
    if (!ION_IS_CACHED(flags))
        dma_free_writecombine(dev, len, info->cpu_addr, info->handle);
    else
        dma_free_nonconsistent(dev, len, info->cpu_addr, info->handle);
err:
    kfree(info);
    return -ENOMEM;
}
Example 3: ion_cp_heap_map_user
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                         struct vm_area_struct *vma)
{
    int ret_value = -EAGAIN;
    struct ion_cp_heap *cp_heap =
        container_of(heap, struct ion_cp_heap, heap);

    mutex_lock(&cp_heap->lock);
    if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
        if (ion_cp_request_region(cp_heap)) {
            mutex_unlock(&cp_heap->lock);
            return -EINVAL;
        }

        if (!ION_IS_CACHED(buffer->flags))
            vma->vm_page_prot = pgprot_writecombine(
                    vma->vm_page_prot);

        ret_value = remap_pfn_range(vma, vma->vm_start,
                __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
                vma->vm_end - vma->vm_start,
                vma->vm_page_prot);

        if (ret_value)
            ion_cp_release_region(cp_heap);
        else
            ++cp_heap->umap_count;
    }
    mutex_unlock(&cp_heap->lock);
    return ret_value;
}
Example 4: ERR_PTR
void *ion_system_heap_map_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer,
                                 unsigned long flags)
{
    if (ION_IS_CACHED(flags))
        return buffer->priv_virt;
    else {
        pr_err("%s: cannot map system heap uncached\n", __func__);
        return ERR_PTR(-EINVAL);
    }
}
Example 5: ion_system_heap_map_user
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                             struct vm_area_struct *vma, unsigned long flags)
{
    if (ION_IS_CACHED(flags))
        return remap_vmalloc_range(vma, buffer->priv_virt,
                                   vma->vm_pgoff);
    else {
        pr_err("%s: cannot map system heap uncached\n", __func__);
        return -EINVAL;
    }
}
Example 6: ion_secure_cma_allocate
static int ion_secure_cma_allocate(struct ion_heap *heap,
                                   struct ion_buffer *buffer,
                                   unsigned long len, unsigned long align,
                                   unsigned long flags)
{
    unsigned long secure_allocation = flags & ION_FLAG_SECURE;
    struct ion_secure_cma_buffer_info *buf = NULL;

    if (!secure_allocation) {
        pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
               __func__, heap->name, flags);
        return -ENOMEM;
    }

    if (ION_IS_CACHED(flags)) {
        pr_err("%s: cannot allocate cached memory from secure heap %s\n",
               __func__, heap->name);
        return -ENOMEM;
    }

    buf = __ion_secure_cma_allocate(heap, buffer, len, align, flags);

    if (buf) {
        int ret;

        buf->secure.want_delayed_unsecure = 0;
        atomic_set(&buf->secure.secure_cnt, 0);
        mutex_init(&buf->secure.lock);
        buf->secure.is_secure = 1;
        buf->secure.ignore_check = true;

        /*
         * make sure the size is set before trying to secure
         */
        buffer->size = len;
        ret = ion_cp_secure_buffer(buffer, ION_CP_V2, 0, 0);
        if (ret) {
            /*
             * Don't treat the secure buffer failing here as an
             * error for backwards compatibility reasons. If
             * the secure fails, the map will also fail so there
             * is no security risk.
             */
            pr_debug("%s: failed to secure buffer\n", __func__);
        }
        return 0;
    } else {
        return -ENOMEM;
    }
}
Example 7: ion_cma_mmap
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
                        struct vm_area_struct *vma)
{
    struct device *dev = buffer->heap->priv;
    struct ion_cma_buffer_info *info = buffer->priv_virt;

    if (ION_IS_CACHED(buffer->flags))
        return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
                                      info->handle, buffer->size);
    else
        return dma_mmap_writecombine(dev, vma, info->cpu_addr,
                                     info->handle, buffer->size);
}
Example 8: ion_system_contig_heap_map_user
int ion_system_contig_heap_map_user(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    struct vm_area_struct *vma,
                                    unsigned long flags)
{
    unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

    if (ION_IS_CACHED(flags))
        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
    else {
        pr_err("%s: cannot map system heap uncached\n", __func__);
        return -EINVAL;
    }
}
Example 9: ion_cache_operations
static int ion_cache_operations(struct smem_client *client,
                                struct msm_smem *mem, enum smem_cache_ops cache_op)
{
    unsigned long ionflag = 0;
    int rc = 0;
    int msm_cache_ops = 0;

    if (!mem || !client) {
        dprintk(VIDC_ERR, "Invalid params: %p, %p\n",
                mem, client);
        return -EINVAL;
    }

    rc = ion_handle_get_flags(client->clnt, mem->smem_priv,
                              &ionflag);
    if (rc) {
        dprintk(VIDC_ERR,
                "ion_handle_get_flags failed: %d\n", rc);
        goto cache_op_failed;
    }

    /* Cache maintenance is only needed for cached allocations. */
    if (ION_IS_CACHED(ionflag)) {
        switch (cache_op) {
        case SMEM_CACHE_CLEAN:
            msm_cache_ops = ION_IOC_CLEAN_CACHES;
            break;
        case SMEM_CACHE_INVALIDATE:
            msm_cache_ops = ION_IOC_INV_CACHES;
            break;
        case SMEM_CACHE_CLEAN_INVALIDATE:
            msm_cache_ops = ION_IOC_CLEAN_INV_CACHES;
            break;
        default:
            dprintk(VIDC_ERR, "cache operation not supported\n");
            rc = -EINVAL;
            goto cache_op_failed;
        }
        rc = msm_ion_do_cache_op(client->clnt,
                                 (struct ion_handle *)mem->smem_priv,
                                 0, (unsigned long)mem->size,
                                 msm_cache_ops);
        if (rc) {
            dprintk(VIDC_ERR,
                    "cache operation failed %d\n", rc);
            goto cache_op_failed;
        }
    }

cache_op_failed:
    return rc;
}
Example 10: ion_cp_secure_buffer
int ion_cp_secure_buffer(struct ion_buffer *buffer, int version, void *data,
                         int flags)
{
    int ret_value;
    struct ion_cp_buffer *buf = buffer->priv_virt;

    mutex_lock(&buf->lock);
    if (!buf->is_secure) {
        pr_err("%s: buffer %p was not allocated as secure\n",
               __func__, buffer);
        ret_value = -EINVAL;
        goto out_unlock;
    }

    if (ION_IS_CACHED(buffer->flags)) {
        pr_err("%s: buffer %p was allocated as cached\n",
               __func__, buffer);
        ret_value = -EINVAL;
        goto out_unlock;
    }

    if (atomic_read(&buf->map_cnt)) {
        pr_err("%s: cannot secure buffer %p with outstanding mappings. Total count: %d",
               __func__, buffer, atomic_read(&buf->map_cnt));
        ret_value = -EINVAL;
        goto out_unlock;
    }

    if (atomic_read(&buf->secure_cnt)) {
        if (buf->version != version || buf->data != data) {
            pr_err("%s: Trying to re-secure buffer with different values",
                   __func__);
            pr_err("Last secured version: %d Current %d\n",
                   buf->version, version);
            pr_err("Last secured data: %p current %p\n",
                   buf->data, data);
            ret_value = -EINVAL;
            goto out_unlock;
        }
    }
    ret_value = __ion_cp_protect_buffer(buffer, version, data, flags);

out_unlock:
    mutex_unlock(&buf->lock);
    return ret_value;
}
Example 11: ion_cp_heap_unmap_kernel
void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
                              struct ion_buffer *buffer)
{
    struct ion_cp_heap *cp_heap =
        container_of(heap, struct ion_cp_heap, heap);

    __arch_iounmap(buffer->vaddr);
    buffer->vaddr = NULL;

    mutex_lock(&cp_heap->lock);
    if (ION_IS_CACHED(buffer->flags))
        --cp_heap->kmap_cached_count;
    else
        --cp_heap->kmap_uncached_count;
    ion_cp_release_region(cp_heap);
    mutex_unlock(&cp_heap->lock);

    return;
}
Example 12: ION_IS_CACHED
void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
                          void *virt_base, unsigned long flags)
{
    int ret;
    unsigned int offset = buffer->priv_phys - phys_base;
    unsigned long start = ((unsigned long)virt_base) + offset;
    const struct mem_type *type = ION_IS_CACHED(flags) ?
                get_mem_type(MT_DEVICE_CACHED) :
                get_mem_type(MT_DEVICE);

    if (phys_base > buffer->priv_phys)
        return NULL;

    ret = ioremap_pages(start, buffer->priv_phys, buffer->size, type);

    if (!ret)
        return (void *)start;
    else
        return NULL;
}
Example 13: ion_do_cache_op
static int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
                           void *uaddr, unsigned long offset, unsigned long len,
                           unsigned int cmd)
{
    struct ion_buffer *buffer;
    int ret = -EINVAL;

    mutex_lock(&client->lock);
    if (!ion_handle_validate(client, handle)) {
        pr_err("%s: invalid handle passed to do_cache_op.\n",
               __func__);
        mutex_unlock(&client->lock);
        return -EINVAL;
    }
    buffer = handle->buffer;
    mutex_lock(&buffer->lock);

    if (!ION_IS_CACHED(buffer->flags)) {
        ret = 0;
        goto out;
    }

    if (!handle->buffer->heap->ops->cache_op) {
        pr_err("%s: cache_op is not implemented by this heap.\n",
               __func__);
        ret = -ENODEV;
        goto out;
    }

    ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
                                      offset, len, cmd);

out:
    mutex_unlock(&buffer->lock);
    mutex_unlock(&client->lock);
    return ret;
}
Example 14: ion_cp_heap_unmap_kernel
void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
                              struct ion_buffer *buffer)
{
    struct ion_cp_heap *cp_heap =
        container_of(heap, struct ion_cp_heap, heap);

    if (cp_heap->reusable)
        unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
    else
        __arm_iounmap(buffer->vaddr);

    buffer->vaddr = NULL;

    mutex_lock(&cp_heap->lock);
    if (ION_IS_CACHED(buffer->flags))
        --cp_heap->kmap_cached_count;
    else
        --cp_heap->kmap_uncached_count;
    ion_cp_release_region(cp_heap);
    mutex_unlock(&cp_heap->lock);

    return;
}
Example 15: ion_map_iommu
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
                  int domain_num, int partition_num, unsigned long align,
                  unsigned long iova_length, unsigned long *iova,
                  unsigned long *buffer_size,
                  unsigned long flags, unsigned long iommu_flags)
{
    struct ion_buffer *buffer;
    struct ion_iommu_map *iommu_map;
    int ret = 0;

    /* IOMMU mappings of cached buffers are not supported. */
    if (ION_IS_CACHED(flags)) {
        pr_err("%s: Cannot map iommu as cached.\n", __func__);
        return -EINVAL;
    }

    mutex_lock(&client->lock);
    if (!ion_handle_validate(client, handle)) {
        pr_err("%s: invalid handle passed to map_kernel.\n",
               __func__);
        mutex_unlock(&client->lock);
        return -EINVAL;
    }

    buffer = handle->buffer;
    mutex_lock(&buffer->lock);

    if (!handle->buffer->heap->ops->map_iommu) {
        pr_err("%s: map_iommu is not implemented by this heap.\n",
               __func__);
        ret = -ENODEV;
        goto out;
    }

    /*
     * If clients don't want a custom iova length, just use whatever
     * the buffer size is
     */
    if (!iova_length)
        iova_length = buffer->size;

    if (buffer->size > iova_length) {
        pr_debug("%s: iova length %lx is not at least buffer size"
                 " %x\n", __func__, iova_length, buffer->size);
        ret = -EINVAL;
        goto out;
    }

    if (buffer->size & ~PAGE_MASK) {
        pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
                 buffer->size, PAGE_SIZE);
        ret = -EINVAL;
        goto out;
    }

    if (iova_length & ~PAGE_MASK) {
        pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
                 iova_length, PAGE_SIZE);
        ret = -EINVAL;
        goto out;
    }

    iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
    _ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
    if (!iommu_map) {
        iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
                                    align, iova_length, flags, iova);
        if (IS_ERR_OR_NULL(iommu_map)) {
            _ion_unmap(&buffer->iommu_map_cnt,
                       &handle->iommu_map_cnt);
        } else {
            iommu_map->flags = iommu_flags;

            if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
                kref_get(&iommu_map->ref);
        }
    } else {
        if (iommu_map->flags != iommu_flags) {
            pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
                   __func__, handle,
                   iommu_map->flags, iommu_flags);
            _ion_unmap(&buffer->iommu_map_cnt,
                       &handle->iommu_map_cnt);
            ret = -EINVAL;
        } else if (iommu_map->mapped_size != iova_length) {
            pr_err("%s: handle %p is already mapped with length"
                   " %x, trying to map with length %lx\n",
                   __func__, handle, iommu_map->mapped_size,
                   iova_length);
            _ion_unmap(&buffer->iommu_map_cnt,
                       &handle->iommu_map_cnt);
            ret = -EINVAL;
        } else {
            kref_get(&iommu_map->ref);
            *iova = iommu_map->iova_addr;
        }
    }
    *buffer_size = buffer->size;
out:
    mutex_unlock(&buffer->lock);
    mutex_unlock(&client->lock);
//......... the rest of this function is omitted here .........