本文整理汇总了C++中pgprot_noncached函数的典型用法代码示例。如果您正苦于以下问题:C++ pgprot_noncached函数的具体用法?C++ pgprot_noncached怎么用?C++ pgprot_noncached使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pgprot_noncached函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: allocate_dsa
/*
 * Allocate the global tracepoint DSA buffer (g_dsa) as non-cached
 * vmalloc'd memory, zeroed, after releasing any prior allocation.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int allocate_dsa(void)
{
	/* Drop any previously allocated buffer before reallocating. */
	free_dsa();

	g_dsa = __vmalloc(sizeof(struct px_tp_dsa), GFP_KERNEL,
			  pgprot_noncached(PAGE_KERNEL));
	if (!g_dsa)
		return -ENOMEM;

	memset(g_dsa, 0, sizeof(struct px_tp_dsa));
	return 0;
}
示例2: idma_mmap
/*
 * mmap handler for the iDMA PCM substream: maps the DMA buffer into
 * user space with non-cached page protection.
 * From snd_pcm_lib_mmap_iomem.
 */
static int idma_mmap(struct snd_pcm_substream *substream,
	struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	unsigned long len = vma->vm_end - vma->vm_start;
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

	/* IO memory must not be cached by the CPU. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start,
				  (rt->dma_addr + off) >> PAGE_SHIFT,
				  len, vma->vm_page_prot);
}
示例3: bridge_mmap
/*
 * This function maps kernel space memory (the platform's physical memory
 * pool) to user space memory via vm_iomap_memory(), non-cached.
 */
static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/*
	 * Fix: "%ulx" parses as "%u" plus a literal "lx" and the pgprot_t
	 * was passed raw through varargs (it is a struct on some arches).
	 * Use %lx with pgprot_val() instead.
	 */
	dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %lx "
		"flags %lx\n", __func__, filp,
		vma->vm_start, vma->vm_end, pgprot_val(vma->vm_page_prot),
		vma->vm_flags);

	return vm_iomap_memory(vma,
			       pdata->phys_mempool_base,
			       pdata->phys_mempool_size);
}
示例4: acdb_mmap
static int acdb_mmap(struct file *file, struct vm_area_struct *vma)
{
int result = 0;
uint32_t size = vma->vm_end - vma->vm_start;
pr_debug("%s\n", __func__);
if (atomic64_read(&acdb_data.mem_len)) {
if (size <= atomic64_read(&acdb_data.mem_len)) {
vma->vm_page_prot = pgprot_noncached(
vma->vm_page_prot);
result = remap_pfn_range(vma,
vma->vm_start,
atomic64_read(&acdb_data.paddr) >> PAGE_SHIFT,
size,
vma->vm_page_prot);
} else {
示例5: acdb_mmap
static int acdb_mmap(struct file *file, struct vm_area_struct *vma)
{
int result = 0;
size_t size = vma->vm_end - vma->vm_start;
pr_debug("%s\n", __func__);
mutex_lock(&acdb_data.acdb_mutex);
if (acdb_data.mem_len) {
if (size <= acdb_data.mem_len) {
vma->vm_page_prot = pgprot_noncached(
vma->vm_page_prot);
result = remap_pfn_range(vma,
vma->vm_start,
acdb_data.paddr >> PAGE_SHIFT,
size,
vma->vm_page_prot);
} else {
示例6: knacs_pulse_ctl_mmap
/*
 * Map the pulse-controller register window into user space.
 * Rejects requests larger than the register resource; the mapping is
 * made non-cached and flagged VM_IO before remapping.
 */
int
knacs_pulse_ctl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if (len > resource_size(pulse_ctl_regs)) {
		pr_alert("MMap size too large for pulse controller\n");
		return -EINVAL;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_IO;

	pr_debug("mmap pulse controller\n");

	return remap_pfn_range(vma, vma->vm_start,
			       pulse_ctl_regs->start >> PAGE_SHIFT,
			       len, vma->vm_page_prot);
}
示例7: epiphany_map_host_memory
/**
 * Map memory that can be shared between the Epiphany
 * device and user-space.
 *
 * The caller-supplied vma's vm_pgoff is used directly as the physical
 * page frame number to map; the entire vma range is remapped with
 * non-cached protection.  Returns 0 on success or the error code from
 * remap_pfn_range() on failure.
 */
static int epiphany_map_host_memory(struct vm_area_struct *vma)
{
	int err;

	/* Device-shared memory must not be cached by the CPU. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* vm_pgoff is the target PFN here, not an offset into a file. */
	err = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (err) {
		printk(KERN_ERR "Failed mapping host memory to vma 0x%08lx, "
		       "size 0x%08lx, page offset 0x%08lx\n",
		       vma->vm_start, vma->vm_end - vma->vm_start,
		       vma->vm_pgoff);
	}
	return err;
}
示例8: kfd_doorbell_mmap
/*
 * Map a device's doorbell page(s) into the calling process's address
 * space.  The vma's vm_pgoff encodes the GPU id to map; the mapping
 * must cover exactly one full per-process doorbell allocation.
 * Returns 0 on success, -EINVAL on any validation failure.
 */
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
{
	phys_addr_t address;
	struct kfd_dev *dev;

	/*
	 * For simplicitly we only allow mapping of the entire doorbell
	 * allocation of a single device & process.
	 */
	if (vma->vm_end - vma->vm_start != doorbell_process_allocation())
		return -EINVAL;

	/* Find kfd device according to gpu id */
	dev = kfd_device_by_id(vma->vm_pgoff);
	if (dev == NULL)
		return -EINVAL;

	/* Find if pdd exists for combination of process and gpu id */
	if (!kfd_get_process_device_data(dev, process, 0))
		return -EINVAL;

	/* Calculate physical address of doorbell */
	address = kfd_get_process_doorbells(dev, process);

	/* Doorbells are MMIO: pin the mapping and keep it out of dumps. */
	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
			 VM_DONTDUMP | VM_PFNMAP;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	pr_debug("kfd: mapping doorbell page in kfd_doorbell_mmap\n"
		 "     target user address == 0x%08llX\n"
		 "     physical address    == 0x%08llX\n"
		 "     vm_flags            == 0x%04lX\n"
		 "     size                == 0x%04lX\n",
		 (unsigned long long) vma->vm_start, address, vma->vm_flags,
		 doorbell_process_allocation());

	return io_remap_pfn_range(vma,
				  vma->vm_start,
				  address >> PAGE_SHIFT,
				  doorbell_process_allocation(),
				  vma->vm_page_prot);
}
示例9: iwch_mmap
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
int len = vma->vm_end - vma->vm_start;
u32 key = vma->vm_pgoff << PAGE_SHIFT;
struct cxio_rdev *rdev_p;
int ret = 0;
struct iwch_mm_entry *mm;
struct iwch_ucontext *ucontext;
u64 addr;
PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
key, len);
if (vma->vm_start & (PAGE_SIZE-1)) {
return -EINVAL;
}
rdev_p = &(to_iwch_dev(context->device)->rdev);
ucontext = to_iwch_ucontext(context);
mm = remove_mmap(ucontext, key, len);
if (!mm)
return -EINVAL;
addr = mm->addr;
kfree(mm);
if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
(addr < (rdev_p->rnic_info.udbell_physbase +
rdev_p->rnic_info.udbell_len))) {
/*
* Map T3 DB register.
*/
if (vma->vm_flags & VM_READ) {
return -EPERM;
}
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
vma->vm_flags &= ~VM_MAYREAD;
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
} else {
示例10: s5p_pcm_mmap
static int s5p_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned long size, offset;
int ret;
s3cdbg("Entered %s\n", __FUNCTION__);
if(s3c_pcm_pdat.lp_mode){
/* From snd_pcm_lib_mmap_iomem */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_IO;
size = vma->vm_end - vma->vm_start;
offset = vma->vm_pgoff << PAGE_SHIFT;
ret = io_remap_pfn_range(vma, vma->vm_start,
(runtime->dma_addr + offset) >> PAGE_SHIFT,
size, vma->vm_page_prot);
}else{
示例11: mspec_mmap
/*
 * mspec_mmap
 *
 * Called when mmapping the device. Initializes the vma with a fault handler
 * and private data structure necessary to allocate, track, and free the
 * underlying pages.  The mapping must be shared, writable, and start at
 * page offset 0.  Returns 0 on success or a negative errno.
 */
static int
mspec_mmap(struct file *file, struct vm_area_struct *vma,
	   enum mspec_page_type type)
{
	struct vma_data *vdata;
	int pages, vdata_size, flags = 0;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	if ((vma->vm_flags & VM_WRITE) == 0)
		return -EPERM;

	/* One tracking slot per mapped page, appended to vma_data. */
	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
	if (vdata_size <= PAGE_SIZE)
		vdata = kmalloc(vdata_size, GFP_KERNEL);
	else {
		vdata = vmalloc(vdata_size);
		flags = VMD_VMALLOCED;
	}
	if (!vdata)
		return -ENOMEM;
	memset(vdata, 0, vdata_size);

	vdata->vm_start = vma->vm_start;
	vdata->vm_end = vma->vm_end;
	vdata->flags = flags;
	vdata->type = type;
	spin_lock_init(&vdata->lock);
	/*
	 * Fix: ATOMIC_INIT() is a brace initializer valid only in
	 * definitions; runtime initialization of an atomic_t must use
	 * atomic_set().
	 */
	atomic_set(&vdata->refcnt, 1);
	vma->vm_private_data = vdata;

	vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
	if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &mspec_vm_ops;

	return 0;
}
示例12: pci_mmap_legacy_page_range
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	char *addr = pci_get_legacy_mem(bus);

	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* Rebase the user's offset onto the legacy region's base. */
	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
示例13: exynos_mem_mmap
int exynos_mem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct exynos_mem *mem = (struct exynos_mem *)filp->private_data;
bool cacheable = mem->cacheable;
dma_addr_t start = 0;
u32 pfn = 0;
u32 size = vma->vm_end - vma->vm_start;
if (vma->vm_pgoff) {
start = vma->vm_pgoff << PAGE_SHIFT;
pfn = vma->vm_pgoff;
} else {
start = mem->phybase << PAGE_SHIFT;
pfn = mem->phybase;
}
if (!cma_is_registered_region(start, size)) {
pr_err("[%s] handling non-cma region (%#[email protected]%#x)is prohibited\n",
__func__, size, start);
return -EINVAL;
}
if (!cacheable)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_RESERVED;
vma->vm_ops = &exynos_mem_ops;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
pr_err("writable mapping must be shared\n");
return -EINVAL;
}
if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
pr_err("mmap fail\n");
return -EINVAL;
}
vma->vm_ops->open(vma);
return 0;
}
示例14: bridge_mmap
/*
 * Map kernel memory described by vma->vm_pgoff to user space, non-cached
 * and flagged as reserved I/O.  Returns 0 on success or -EAGAIN if the
 * remap fails.
 */
static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* Fix: was u32, which cannot cleanly hold the negative errno. */
	int status;

	vma->vm_flags |= VM_RESERVED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/*
	 * Fix: "%ulx" parses as "%u" plus literal "lx" and the pgprot_t
	 * was passed raw through varargs; use %lx with pgprot_val().
	 */
	dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %lx "
		"flags %lx\n", __func__, filp,
		vma->vm_start, vma->vm_end, pgprot_val(vma->vm_page_prot),
		vma->vm_flags);

	status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
	if (status != 0)
		status = -EAGAIN;

	return status;
}
示例15: mmap
/*
 * Generic mmap handler: maps the physical pages at vm_pgoff into user
 * space as non-cached, reserved I/O memory.  Returns 0 on success or
 * -EAGAIN if remap_pfn_range() fails.
 */
static int mmap(struct file *filp, struct vm_area_struct *vma)
{
	__D("mmap: vma->vm_start = %#lx\n", vma->vm_start);
	__D("mmap: vma->vm_pgoff = %#lx\n", vma->vm_pgoff);
	__D("mmap: vma->vm_end = %#lx\n", vma->vm_end);
	__D("mmap: size = %#lx\n", vma->vm_end - vma->vm_start);

	/*
	 * Fix: removed the dead local `physp` (vm_pgoff << PAGE_SHIFT),
	 * which was computed but never read; remap_pfn_range() takes the
	 * PFN (vm_pgoff) directly.
	 */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_RESERVED | VM_IO;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		__E("set_noncached: failed remap_pfn_range\n");
		return -EAGAIN;
	}

	return 0;
}