This article collects typical usage examples of the pgprot_writecombine function in C (Linux kernel code). If you are wondering what pgprot_writecombine does in practice and how to call it, the curated code examples below should help.
Fifteen code examples of pgprot_writecombine are shown below, sorted by popularity by default.
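Before the numbered examples, here is a minimal sketch of the pattern most of them share: an mmap() file operation that switches the VMA's page protection to write-combining and then remaps device memory into user space. The struct my_wc_dev, its fields, and my_wc_mmap() are hypothetical names used only for illustration; real drivers add their own locking and bounds handling.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical per-device state; phys_base/region_size stand in for real resources. */
struct my_wc_dev {
	phys_addr_t phys_base;
	size_t region_size;
};

static int my_wc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct my_wc_dev *dev = filp->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

	if (off >= dev->region_size || size > dev->region_size - off)
		return -EINVAL;

	/* pgprot_writecombine() returns a new pgprot_t; it must be assigned back. */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			       (dev->phys_base + off) >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

Every example below is a variation on this theme: compute the physical range, make the mapping write-combined (or uncached), and hand the range to remap_pfn_range(), vm_insert_mixed(), or vmap().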
Example 1: rockchip_gem_alloc_iommu
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
bool alloc_kmap)
{
int ret;
ret = rockchip_gem_get_pages(rk_obj);
if (ret < 0)
return ret;
ret = rockchip_gem_iommu_map(rk_obj);
if (ret < 0)
goto err_free;
if (alloc_kmap) {
rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
if (!rk_obj->kvaddr) {
DRM_ERROR("failed to vmap() buffer\n");
ret = -ENOMEM;
goto err_unmap;
}
}
return 0;
err_unmap:
rockchip_gem_iommu_unmap(rk_obj);
err_free:
rockchip_gem_put_pages(rk_obj);
return ret;
}
Example 2: mips_dma_mmap
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long addr = (unsigned long)cpu_addr;
unsigned long off = vma->vm_pgoff;
unsigned long pfn;
int ret = -ENXIO;
if (!plat_device_is_coherent(dev))
addr = CAC_ADDR(addr);
pfn = page_to_pfn(virt_to_page((void *)addr));
if (attrs & DMA_ATTR_WRITE_COMBINE)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < count && user_count <= (count - off)) {
ret = remap_pfn_range(vma, vma->vm_start,
pfn + off,
user_count << PAGE_SHIFT,
vma->vm_page_prot);
}
return ret;
}
Example 3: dma_alloc_writecombine
/*
* Allocate a writecombining region, in much the same way as
* dma_alloc_coherent above.
*/
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
return __dma_alloc(dev, size, handle, gfp,
pgprot_writecombine(pgprot_kernel),
__builtin_return_address(0));
}
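For context, a caller of this ARM-era helper might look roughly like the sketch below; the function names and the 64 KiB size are assumptions for illustration, and dma_free_writecombine() is the matching release call. Later kernels expose the same semantics as dma_alloc_wc()/dma_free_wc().

/* Sketch: allocate a 64 KiB write-combined DMA buffer; "dev" comes from the caller's probe path. */
static void *my_alloc_wc_buffer(struct device *dev, dma_addr_t *dma_handle)
{
	void *cpu_addr;

	cpu_addr = dma_alloc_writecombine(dev, SZ_64K, dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return NULL;

	/* ... program the hardware with *dma_handle, fill cpu_addr from the CPU ... */
	return cpu_addr;
}

static void my_free_wc_buffer(struct device *dev, void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_writecombine(dev, SZ_64K, cpu_addr, dma_handle);
}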
Example 4: arch_dma_mmap_pgprot
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs)
{
if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
return pgprot_writecombine(prot);
return prot;
}
Example 5: imx_iram_audio_playback_mmap
/*
enable user space access to iram buffer
*/
static int imx_iram_audio_playback_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
struct snd_dma_buffer *buf = &substream->dma_buffer;
unsigned long off;
unsigned long phys;
unsigned long size;
int ret = 0;
area->vm_ops = &snd_mxc_audio_playback_vm_ops;
area->vm_private_data = substream;
off = area->vm_pgoff << PAGE_SHIFT;
phys = buf->addr + off;
size = area->vm_end - area->vm_start;
if (off + size > SND_RAM_SIZE)
return -EINVAL;
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
area->vm_flags |= VM_IO;
ret =
remap_pfn_range(area, area->vm_start, phys >> PAGE_SHIFT,
size, area->vm_page_prot);
if (ret == 0)
area->vm_ops->open(area);
return ret;
}
Example 6: drm_gem_mmap_obj
/**
* drm_gem_mmap_obj - memory map a GEM object
* @obj: the GEM object to map
* @obj_size: the object size to be mapped, in bytes
* @vma: VMA for the area to be mapped
*
* Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
* provided by the driver. Depending on their requirements, drivers can either
* provide a fault handler in their gem_vm_ops (in which case any accesses to
* the object will be trapped, to perform migration, GTT binding, surface
* register allocation, or performance monitoring), or mmap the buffer memory
* synchronously after calling drm_gem_mmap_obj.
*
* This function is mainly intended to implement the DMABUF mmap operation, when
* the GEM object is not looked up based on its fake offset. To implement the
* DRM mmap operation, drivers should use the drm_gem_mmap() function.
*
* drm_gem_mmap_obj() assumes the user is granted access to the buffer while
* drm_gem_mmap() prevents unprivileged users from mapping random objects. So
* callers must verify access restrictions before calling this helper.
*
* NOTE: This function has to be protected with dev->struct_mutex
*
* Return 0 on success, or -EINVAL if the object size is smaller than the VMA
* size or if no gem_vm_ops are provided.
*/
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma)
{
struct drm_device *dev = obj->dev;
lockdep_assert_held(&dev->struct_mutex);
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
if (!dev->driver->gem_vm_ops)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = dev->driver->gem_vm_ops;
vma->vm_private_data = obj;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
* This reference is cleaned up by the corresponding vm_close
* (which should happen whether the vma was created by this call, or
* by a vm_open due to mremap or partial unmap or whatever).
*/
drm_gem_object_reference(obj);
drm_vm_open_locked(dev, vma);
return 0;
}
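To illustrate the dma-buf note above, a driver's PRIME mmap callback of this kernel era could simply wrap drm_gem_mmap_obj() while holding struct_mutex. This is a sketch (my_gem_prime_mmap is a made-up name), not any particular driver's implementation.

static int my_gem_prime_mmap(struct drm_gem_object *obj,
			     struct vm_area_struct *vma)
{
	int ret;

	/* drm_gem_mmap_obj() must be called with dev->struct_mutex held (see NOTE above). */
	mutex_lock(&obj->dev->struct_mutex);
	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}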
Example 7: __get_dma_pgprot
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
bool coherent)
{
if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
return pgprot_writecombine(prot);
return prot;
}
Example 8: exynos_mem_mmap
int exynos_mem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct exynos_mem *mem = (struct exynos_mem *)filp->private_data;
bool cacheable = mem->cacheable;
dma_addr_t start = vma->vm_pgoff << PAGE_SHIFT;
u32 pfn = vma->vm_pgoff;
u32 size = vma->vm_end - vma->vm_start;
if (!cma_is_registered_region(start, size)) {
pr_err("[%s] handling non-cma region (%#[email protected]%#x)is prohibited\n",
__func__, size, start);
return -EINVAL;
}
if (!cacheable)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_flags |= VM_RESERVED;
vma->vm_ops = &exynos_mem_ops;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
pr_err("writable mapping must be shared\n");
return -EINVAL;
}
if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
pr_err("mmap fail\n");
return -EINVAL;
}
vma->vm_ops->open(vma);
return 0;
}
Example 9: ion_cp_heap_map_user
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
struct vm_area_struct *vma, unsigned long flags)
{
int ret_value = -EAGAIN;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
mutex_lock(&cp_heap->lock);
if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
if (ion_cp_request_region(cp_heap)) {
mutex_unlock(&cp_heap->lock);
return -EINVAL;
}
if (!ION_IS_CACHED(flags))
vma->vm_page_prot = pgprot_writecombine(
vma->vm_page_prot);
ret_value = remap_pfn_range(vma, vma->vm_start,
__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
if (ret_value)
ion_cp_release_region(cp_heap);
else
++cp_heap->umap_count;
}
mutex_unlock(&cp_heap->lock);
return ret_value;
}
Example 10: pci_mmap_page_range
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
/*
* I/O space cannot be accessed via normal processor loads and
* stores on this platform.
*/
if (mmap_state == pci_mmap_io)
/*
* XXX we could relax this for I/O spaces for which ACPI
* indicates that the space is 1-to-1 mapped. But at the
* moment, we don't support multiple PCI address spaces and
* the legacy I/O space is not 1-to-1 mapped, so this is moot.
*/
return -EINVAL;
/*
* Leave vm_pgoff as-is, the PCI space address is the physical
* address on this platform.
*/
if (write_combine && efi_range_is_wc(vma->vm_start,
vma->vm_end - vma->vm_start))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
Example 11: mali_mmap
/** @note munmap handler is done by vma close handler */
int mali_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct mali_session_data *session;
mali_mem_allocation *descriptor;
u32 size = vma->vm_end - vma->vm_start;
u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;
session = (struct mali_session_data *)filp->private_data;
if (NULL == session) {
MALI_PRINT_ERROR(("mmap called without any session data available\n"));
return -EFAULT;
}
MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
(unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
(unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));
/* Set some bits which indicate that the memory is IO memory, meaning
* that no paging is to be performed and the memory should not be
* included in crash dumps, and that the memory is reserved, meaning
* that it is present and can never be paged out (see also the
* previous entry).
*/
vma->vm_flags |= VM_IO;
vma->vm_flags |= VM_DONTCOPY;
vma->vm_flags |= VM_PFNMAP;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
vma->vm_flags |= VM_RESERVED;
#else
vma->vm_flags |= VM_DONTDUMP;
vma->vm_flags |= VM_DONTEXPAND;
#endif
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
descriptor = mali_mem_block_alloc(mali_addr, size, vma, session);
if (NULL == descriptor) {
descriptor = mali_mem_os_alloc(mali_addr, size, vma, session);
if (NULL == descriptor) {
MALI_DEBUG_PRINT(3, ("MMAP failed\n"));
return -ENOMEM;
}
}
MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
vma->vm_private_data = (void *)descriptor;
/* Put on descriptor map */
if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &descriptor->id)) {
_mali_osk_mutex_wait(session->memory_lock);
mali_mem_os_release(descriptor);
_mali_osk_mutex_signal(session->memory_lock);
return -EFAULT;
}
return 0;
}
Example 12: hv_cdev_mmap
static int hv_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
hv_cdev_private *priv = filp->private_data;
int res;
/* vm_pgoff = the offset of the area in the file, in pages */
/* shift by PAGE_SHIFT to get physical addr offset */
unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
/* off is decided by user's mmap() offset parm. If 0, off=0 */
phys_addr_t physical = priv->phys_start + off;
unsigned long vsize = vma->vm_end - vma->vm_start;
unsigned long psize = priv->dev_size - off;
PINFO("%s: enter\n", __func__);
PINFO("off=%lu, physical=%p, vsize=%lu, psize=%lu\n",
off, (void *)physical, vsize, psize);
if (vsize > psize) {
PERR("%s: requested vma size exceeds disk size\n", __func__);
return -EINVAL;
}
vma->vm_ops = &hv_cdev_vm_ops;
switch (hv_mmap_type) {
case 0:
default:
break;
case 1:
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
break;
case 2:
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
break;
}
vma->vm_flags |= VM_LOCKED; /* locked from swap */
PDEBUG("phys_start=%p, page_frame_num=%d\n",
(void *)priv->phys_start, (int)priv->phys_start >> PAGE_SHIFT);
/* Remap the phys addr of device into user space virtual mem */
res = remap_pfn_range(vma,
vma->vm_start,
physical >> PAGE_SHIFT, /* = pfn */
vsize,
vma->vm_page_prot);
if (res) {
PERR("%s: error from remap_pfn_range()/n", __func__);
return -EAGAIN;
} else
PDEBUG("%s: Physical mem remapped to user VA\n", __func__);
return 0;
}
Example 13: drm_gem_mmap
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_gem_mm *mm = dev->mm_private;
struct drm_local_map *map = NULL;
struct drm_gem_object *obj;
struct drm_hash_item *hash;
int ret = 0;
if (drm_device_is_unplugged(dev))
return -ENODEV;
mutex_lock(&dev->struct_mutex);
if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
mutex_unlock(&dev->struct_mutex);
return drm_mmap(filp, vma);
}
map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
if (!map ||
((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
ret = -EPERM;
goto out_unlock;
}
/* Check for valid size. */
if (map->size < vma->vm_end - vma->vm_start) {
ret = -EINVAL;
goto out_unlock;
}
obj = map->handle;
if (!obj->dev->driver->gem_vm_ops) {
ret = -EINVAL;
goto out_unlock;
}
vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
vma->vm_ops = obj->dev->driver->gem_vm_ops;
vma->vm_private_data = map->handle;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
* This reference is cleaned up by the corresponding vm_close
* (which should happen whether the vma was created by this call, or
* by a vm_open due to mremap or partial unmap or whatever).
*/
drm_gem_object_reference(obj);
drm_vm_open_locked(vma);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
Example 14: drm_gem_mmap
/**
* drm_gem_mmap - memory map routine for GEM objects
* @filp: DRM file pointer
* @vma: VMA for the area to be mapped
*
* If a driver supports GEM object mapping, mmap calls on the DRM file
* descriptor will end up here.
*
* If we find the object based on the offset passed in (vma->vm_pgoff will
* contain the fake offset we created when the GTT map ioctl was called on
* the object), we set up the driver fault handler so that any accesses
* to the object can be trapped, to perform migration, GTT binding, surface
* register allocation, or performance monitoring.
*/
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_gem_mm *mm = dev->mm_private;
struct drm_local_map *map = NULL;
struct drm_gem_object *obj;
struct drm_hash_item *hash;
int ret = 0;
if (drm_device_is_unplugged(dev))
return -ENODEV;
mutex_lock(&dev->struct_mutex);
if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
mutex_unlock(&dev->struct_mutex);
return drm_mmap(filp, vma);
}
map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
if (!map ||
((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
ret = -EPERM;
goto out_unlock;
}
/* Check for valid size. */
if (map->size < vma->vm_end - vma->vm_start) {
ret = -EINVAL;
goto out_unlock;
}
obj = map->handle;
if (!obj->dev->driver->gem_vm_ops) {
ret = -EINVAL;
goto out_unlock;
}
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = obj->dev->driver->gem_vm_ops;
vma->vm_private_data = map->handle;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
* This reference is cleaned up by the corresponding vm_close
* (which should happen whether the vma was created by this call, or
* by a vm_open due to mremap or partial unmap or whatever).
*/
drm_gem_object_reference(obj);
drm_vm_open_locked(dev, vma);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
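For completeness, drivers normally reach drm_gem_mmap() by wiring it straight into their file_operations table; the sketch below shows a representative, hypothetical fops definition of this era. Newer kernels wrap this boilerplate in the DEFINE_DRM_GEM_FOPS() macro.

static const struct file_operations my_drm_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.mmap		= drm_gem_mmap,	/* GEM objects are mapped via the handler above */
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
};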
Example 15: kbase_cpu_mmap
static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, u32 nr_pages)
{
struct kbase_cpu_mapping *map;
u64 start_off = vma->vm_pgoff - reg->start_pfn;
phys_addr_t *page_array;
int err = 0;
int i;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
WARN_ON(1);
err = -ENOMEM;
goto out;
}
/*
* VM_DONTCOPY - don't make this mapping available in fork'ed processes
* VM_DONTEXPAND - disable mremap on this region
* VM_IO - disables paging
* VM_DONTDUMP - Don't include in core dumps (3.7 only)
* VM_MIXEDMAP - Support mixing struct page*s and raw pfns.
* This is needed to support using the dedicated and
* the OS based memory backends together.
*/
/*
* This will need updating to propagate coherency flags
* See MIDBASE-1057
*/
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO | VM_MIXEDMAP;
#else
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO | VM_MIXEDMAP;
#endif
vma->vm_ops = &kbase_vm_ops;
vma->vm_private_data = reg;
page_array = kbase_get_phy_pages(reg);
if (!(reg->flags & KBASE_REG_CPU_CACHED)) {
/* We can't map vmalloc'd memory uncached.
* Other memory will have been returned from
* kbase_phy_pages_alloc which should have done the cache
* maintenance necessary to support an uncached mapping
*/
BUG_ON(kaddr);
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
if (!kaddr) {
for (i = 0; i < nr_pages; i++) {
err = vm_insert_mixed(vma, vma->vm_start + (i << PAGE_SHIFT), page_array[i + start_off] >> PAGE_SHIFT);
WARN_ON(err);
if (err)
break;
}
} else {