本文整理汇总了C++中remap_pfn_range函数的典型用法代码示例。如果您正苦于以下问题:C++ remap_pfn_range函数的具体用法?C++ remap_pfn_range怎么用?C++ remap_pfn_range使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了remap_pfn_range函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: mfc_mmap
/*
 * mfc_mmap() - map the MFC codec's reserved port0/port1 memory into a
 * user VMA.
 *
 * port0 (stream buffer + chroma/MV frame data, minus the firmware region
 * at its start) is mapped at vma->vm_start; port1 (luma frame data) is
 * mapped immediately after it. The split point (port0_mmap_size) is the
 * whole port0 data area on ARIES builds, or half the request on P1.
 *
 * Returns 0 on success, -EINVAL when the request exceeds the reserved
 * physical memory, -EAGAIN when remap_pfn_range() fails.
 */
static int mfc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long vir_size = vma->vm_end - vma->vm_start;
	unsigned long phy_size, firmware_size;
	unsigned long page_frame_no = 0;
	struct mfc_inst_ctx *mfc_ctx;

	mfc_debug("vma->vm_start = 0x%08x, vma->vm_end = 0x%08x\n",
		(unsigned int)vma->vm_start,
		(unsigned int)vma->vm_end);
	mfc_debug("vma->vm_end - vma->vm_start = %ld\n", vir_size);

	mfc_ctx = (struct mfc_inst_ctx *)filp->private_data;

	/* Usable data memory: both ports minus the firmware region that
	 * occupies the start of port 0. */
	firmware_size = mfc_get_port0_buff_paddr() - mfc_get_fw_buff_paddr();
	phy_size = (unsigned long)(mfc_port0_memsize - firmware_size + mfc_port1_memsize);

	/* if memory size required from appl. mmap() is bigger than max data memory
	 * size allocated in the driver */
	if (vir_size > phy_size) {
		mfc_err("virtual requested mem(%ld) is bigger than physical mem(%ld)\n",
			vir_size, phy_size);
		return -EINVAL;
	}

#ifdef CONFIG_MACH_ARIES
	mfc_ctx->port0_mmap_size = mfc_port0_memsize - firmware_size;
#else /* CONFIG_MACH_P1 */
	mfc_ctx->port0_mmap_size = (vir_size / 2);
#endif

	/*
	 * Device-reserved memory: keep the VMA out of swap/core dumps and,
	 * unless the caller asked for a cached buffer, map it non-cached.
	 * Setting this once covers both remap calls below; the original
	 * code repeated these two statements verbatim before the second
	 * remap, which was dead code (|= of the same bits, and
	 * pgprot_noncached() is idempotent).
	 */
	vma->vm_flags |= VM_RESERVED | VM_IO;
	if (mfc_ctx->buf_type != MFC_BUFFER_CACHE)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/*
	 * port0 mapping for stream buf & frame buf (chroma + MV)
	 */
	page_frame_no = __phys_to_pfn(mfc_get_port0_buff_paddr());
	if (remap_pfn_range(vma, vma->vm_start, page_frame_no,
		mfc_ctx->port0_mmap_size, vma->vm_page_prot)) {
		mfc_err("mfc remap port0 error\n");
		return -EAGAIN;
	}

	/*
	 * port1 mapping for frame buf (luma)
	 */
	page_frame_no = __phys_to_pfn(mfc_get_port1_buff_paddr());
	if (remap_pfn_range(vma, vma->vm_start + mfc_ctx->port0_mmap_size,
		page_frame_no, vir_size - mfc_ctx->port0_mmap_size, vma->vm_page_prot)) {
		mfc_err("mfc remap port1 error\n");
		return -EAGAIN;
	}

	mfc_debug("virtual requested mem = %ld, physical reserved data mem = %ld\n", vir_size, phy_size);

	return 0;
}
示例2: ion_cp_heap_map_user
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
struct vm_area_struct *vma, unsigned long flags)
{
int ret_value = -EAGAIN;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
mutex_lock(&cp_heap->lock);
if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
if (ion_cp_request_region(cp_heap)) {
mutex_unlock(&cp_heap->lock);
return -EINVAL;
}
if (ION_IS_CACHED(flags))
ret_value = remap_pfn_range(vma, vma->vm_start,
__phys_to_pfn(buffer->priv_phys) +
vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
else
ret_value = remap_pfn_range(vma, vma->vm_start,
__phys_to_pfn(buffer->priv_phys) +
vma->vm_pgoff,
vma->vm_end - vma->vm_start,
pgprot_noncached(vma->vm_page_prot));
if (ret_value)
ion_cp_release_region(cp_heap);
else
++cp_heap->umap_count;
}
mutex_unlock(&cp_heap->lock);
return ret_value;
}
示例3: pci_mmap_legacy_page_range
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;
	char *legacy_mem;
	pgprot_t prot;

	/*
	 * Avoid attribute aliasing. See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, len))
		return -EINVAL;

	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, len,
				    vma->vm_page_prot);

	legacy_mem = pci_get_legacy_mem(bus);
	if (IS_ERR(legacy_mem))
		return PTR_ERR(legacy_mem);

	/* Rebase the file offset onto the legacy-space base address. */
	vma->vm_pgoff += (unsigned long)legacy_mem >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       len, vma->vm_page_prot) ? -EAGAIN : 0;
}
示例4: mxc_v4l2out_mmap
/*!
 * V4L2 interface - mmap function
 *
 * @param file structure file *
 *
 * @param vma structure vm_area_struct *
 *
 * @return status 0 Success, EINTR busy lock error,
 * ENOBUFS remap_page error
 */
static int mxc_v4l2out_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *dev = file->private_data;
	vout_data *vout = video_get_drvdata(dev);
	unsigned long len;
	int ret = 0;

	DPRINTK("pgoff=0x%x, start=0x%x, end=0x%x\n",
		vma->vm_pgoff, vma->vm_start, vma->vm_end);

	/* make this _really_ smp-safe */
	if (down_interruptible(&vout->busy_lock))
		return -EINTR;

	len = vma->vm_end - vma->vm_start;
	/* Map the buffers non-cached; vm_pgoff already holds the PFN. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start,
			    vma->vm_pgoff, len, vma->vm_page_prot)) {
		printk("mxc_mmap(V4L)i - remap_pfn_range failed\n");
		ret = -ENOBUFS;
	} else {
		vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
	}

	up(&vout->busy_lock);
	return ret;
}
示例5: pci_mmap_page_range
/*
 * Map a PCI memory resource into userspace. On this platform the PCI
 * space address equals the physical address, so vm_pgoff is used as the
 * target page frame unchanged; I/O-space mappings are rejected.
 */
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long span = vma->vm_end - vma->vm_start;

	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped. But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;

	/*
	 * Leave vm_pgoff as-is, the PCI space address is the physical
	 * address on this platform.
	 */
	if (write_combine && efi_range_is_wc(vma->vm_start, span))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       span, vma->vm_page_prot) ? -EAGAIN : 0;
}
示例6: dlfb_mmap
/*
 * dlfb_mmap() - map the vmalloc'ed framebuffer into userspace, one page
 * at a time (the buffer is not physically contiguous).
 *
 * Returns 0 on success, -EINVAL when the requested window does not fit
 * inside the framebuffer, -EAGAIN when remap_pfn_range() fails.
 */
static int dlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long page, pos;

	printk("MMAP: %lu %u\n", offset + size, info->fix.smem_len);

	/*
	 * Overflow-safe bounds check: the original "offset + size >
	 * smem_len" could wrap around for a huge vm_pgoff and let an
	 * out-of-range window through.
	 */
	if (size > info->fix.smem_len || offset > info->fix.smem_len - size)
		return -EINVAL;

	pos = (unsigned long)info->fix.smem_start + offset;

	while (size > 0) {
		page = vmalloc_to_pfn((void *)pos);
		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;

		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		if (size > PAGE_SIZE)
			size -= PAGE_SIZE;
		else
			size = 0;
	}

	vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
	return 0;
}
示例7: defined
/*
 * dk_mmap() - /dev/mem-style passthrough mmap: maps the physical pages
 * named by vma->vm_pgoff directly into the caller's address space.
 *
 * Page protection is chosen per platform:
 *  - with __HAVE_PHYS_MEM_ACCESS_PROT, defer to the architecture's
 *    phys_mem_access_prot() helper;
 *  - otherwise, if pgprot_noncached exists, disable caching whenever
 *    uncached_access() (defined elsewhere in this driver) says this
 *    offset needs it;
 *  - with neither, the VMA's default protection is used unchanged.
 *
 * Returns 0 on success, -EAGAIN when remap_pfn_range() fails.
 */
static int dk_mmap
(
	struct file *file,
	struct vm_area_struct *vma
)
{
#if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	vma->vm_page_prot = phys_mem_access_prot(file, offset,
						 vma->vm_end - vma->vm_start,
						 vma->vm_page_prot);
#elif defined(pgprot_noncached)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int uncached;

	uncached = uncached_access(file, offset);
	if (uncached)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    vma->vm_end-vma->vm_start,
			    vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
示例8: mmap_kmem
/*
 * mmap_kmem() - mmap the driver's physically contiguous DMA buffer.
 *
 * Offset 0 is mapped through dma_mmap_coherent(); any non-zero offset
 * falls back to a non-cached remap_pfn_range() of the same buffer
 * (translated back from the bus address via bus_to_virt/virt_to_phys).
 *
 * Returns 0 on success, -EIO for requests larger than the NPAGES
 * allocation, or the negative error from the mapping call.
 */
int mmap_kmem(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	long length = vma->vm_end - vma->vm_start;

	/* check length - do not allow larger mappings than the number of
	   pages allocated */
	if (length > NPAGES * PAGE_SIZE)
		return -EIO;

	/* #ifdef ARCH_HAS_DMA_MMAP_COHERENT */
	if (vma->vm_pgoff == 0) {
		printk(KERN_INFO "Using dma_mmap_coherent\n");
		ret = dma_mmap_coherent(NULL, vma, alloc_ptr,
					dma_handle, length);
	} else
	/* #else */
	{
		printk(KERN_INFO "Using remap_pfn_range\n");
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_IO;
		/* vm_pgoff is unsigned long; the old "%d" specifier was a
		 * type mismatch (undefined formatting behavior). */
		printk(KERN_INFO "off=%lu\n", vma->vm_pgoff);
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(virt_to_phys(bus_to_virt(dma_handle))) +
				      vma->vm_pgoff, length, vma->vm_page_prot);
	}
	/* #endif */
	/* map the whole physically contiguous area in one piece */
	if (ret < 0) {
		printk(KERN_ERR "mmap_alloc: remap failed (%d)\n", ret);
		return ret;
	}
	return 0;
}
示例9: mips_dma_mmap
/*
 * mips_dma_mmap() - map a DMA allocation into userspace.
 *
 * @cpu_addr: kernel virtual address returned by the DMA allocator.
 * @dma_addr: matching bus address (not used here).
 * @size:     size of the original allocation.
 * @attrs:    DMA attributes; DMA_ATTR_WRITE_COMBINE selects a
 *            write-combining mapping instead of a non-cached one.
 *
 * Returns 0 on success, or -ENXIO when the requested window
 * (vm_pgoff + VMA size) does not fit inside the allocation.
 */
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	void *cpu_addr, dma_addr_t dma_addr, size_t size,
	unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)cpu_addr;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	/* For non-coherent devices the allocator handed out an uncached
	 * alias; convert back to the cached address before resolving the
	 * backing page. */
	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	pfn = page_to_pfn(virt_to_page((void *)addr));

	if (attrs & DMA_ATTR_WRITE_COMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Buffers from a per-device coherent pool are handled (and ret
	 * filled in) by the core helper. */
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Only map when the requested page window lies inside the
	 * allocation; otherwise fall through with -ENXIO. */
	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}
示例10: vpp_mmap
//only for test
/*
 * vpp_mmap() - map the physical pages selected by vma->vm_pgoff into
 * the caller's address space with caching disabled.
 *
 * Returns 0 on success, -EBADF for a NULL vma, -ENOBUFS when
 * remap_pfn_range() fails.
 */
static int vpp_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long start = 0;
	unsigned long size = 0;

	logi();

	if (NULL == vma)
	{
		loge("can not get vm_area_struct!");
		return -EBADF;
	}

	start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;

	/* Map non-cached. (The previous comment here claimed "write-thru
	 * cacheable", but pgprot_noncached() makes the range
	 * non-cacheable.) */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); //update to 2.6.32
	if (remap_pfn_range(vma, start, vma->vm_pgoff, size, vma->vm_page_prot))
	{
		loge("remap_pfn_range error!");
		return -ENOBUFS;
	}

	return 0;
}
示例11: assemble_vma
/*
 * assemble_vma() - map every allocated page frame of a phys_mem session
 * into the user VMA at its recorded offset.
 *
 * Walks session->frame_stati; each entry that owns a page is remapped
 * one PAGE_SIZE at a time with non-cached protection. On the first
 * remap failure the entry is tagged SOURCE_ERROR_NOT_MAPPABLE and the
 * loop is abandoned.
 *
 * Returns 0 on success, or the non-zero remap_pfn_range() result of the
 * failing page.
 */
int assemble_vma (struct phys_mem_session* session, struct vm_area_struct * vma){
	unsigned long request_iterator;
	int insert_status = 0;

	for (request_iterator = 0; request_iterator < session->num_frame_stati; request_iterator++){
		struct phys_mem_frame_status* frame_status = &session->frame_stati[request_iterator];

		if ( frame_status->page) {
			//split_page(frame_status->page, 0);
			//insert_status = vm_insert_page(vma,vma->vm_start + frame_status->vma_offset_of_first_byte, frame_status->page);

			/* NOTE(review): vm_page_prot is re-derived on every
			 * iteration; pgprot_noncached() is idempotent, so this
			 * is redundant but harmless. */
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
			insert_status = remap_pfn_range(vma,
					vma->vm_start + frame_status->vma_offset_of_first_byte,
					page_to_pfn(frame_status->page),
					PAGE_SIZE,
					vma->vm_page_prot);

			if (unlikely(insert_status)){
				/* Upps! We could not insert our page. This should not really happen, so we just print that
				 * and mark it in the configuration.*/
				printk(KERN_WARNING "Could not insert page %p into VMA! Reason: %d", frame_status->page, insert_status);
				frame_status->actual_source |= SOURCE_ERROR_NOT_MAPPABLE;
				goto out;
			}
		}
	}
out:
	return insert_status;
}
示例12: udl_fb_mmap
/*
 * udl_fb_mmap() - map the vmalloc'ed framebuffer into userspace, one
 * page at a time (the buffer is not physically contiguous).
 *
 * Returns 0 on success, -EINVAL when the requested window does not fit
 * inside the framebuffer, -EAGAIN when remap_pfn_range() fails.
 */
static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long page, pos;

	/*
	 * Overflow-safe bounds check: the original "offset + size >
	 * smem_len" could wrap around for a huge vm_pgoff and let an
	 * out-of-range window through.
	 */
	if (size > info->fix.smem_len || offset > info->fix.smem_len - size)
		return -EINVAL;

	pos = (unsigned long)info->fix.smem_start + offset;

	pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
		  pos, size);

	while (size > 0) {
		page = vmalloc_to_pfn((void *)pos);
		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;

		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		if (size > PAGE_SIZE)
			size -= PAGE_SIZE;
		else
			size = 0;
	}

	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
	return 0;
}
示例13: shm_driver_mmap_cache
/*
 * shm_driver_mmap_cache() - mmap handler for the *cached* shared-memory
 * device node: maps the device's physical window (pDevice->m_base) into
 * the caller's VMA with the default (cacheable) protection.
 *
 * Returns 0 on success, -ENOTTY on missing private data/device,
 * -EINVAL when the request exceeds shm_size_cache, -EAGAIN when
 * remap_pfn_range() fails.
 */
static int shm_driver_mmap_cache(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long pfn, vsize;
	shm_device_t *pDevice;
	struct shm_device_priv_data *priv_data
		= (struct shm_device_priv_data*)filp->private_data;

	if (NULL == priv_data) {
		shm_error("shm_driver_mmap_cache NULL private data\n");
		return -ENOTTY;
	}

	pDevice = (shm_device_t*)priv_data->m_device;
	if (NULL == pDevice) {
		shm_error("shm_driver_mmap_cache NULL shm device\n");
		return -ENOTTY;
	}

	pfn = pDevice->m_base >> PAGE_SHIFT;
	vsize = vma->vm_end - vma->vm_start;

	/* Copy/paste fix: the old message wrongly identified this function
	 * as "shm_driver_mmap_nocache". */
	shm_debug("shm_driver_mmap_cache size = 0x%08lX(0x%x, 0x%x), base:0x%x\n",
		  vsize, shm_size_cache, pDevice->m_size, pDevice->m_base);

	if (vsize > shm_size_cache)
		return -EINVAL;

	vma->vm_pgoff = 0; /* skip offset: always map from the window base */
	if (remap_pfn_range(vma, vma->vm_start, pfn, vsize, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
示例14: ion_kernel_mapper_map_user
/*
 * ion_kernel_mapper_map_user() - map an ion buffer into userspace.
 *
 * Only kmalloc- and vmalloc-backed heaps are supported; any other heap
 * type is rejected with -EINVAL after logging an error.
 */
static int ion_kernel_mapper_map_user(struct ion_mapper *mapper,
				      struct ion_buffer *buffer,
				      struct vm_area_struct *vma,
				      struct ion_mapping *mapping)
{
	if (buffer->heap->type == ION_HEAP_KMALLOC) {
		/* kmalloc memory is physically contiguous: remap it
		 * directly, honouring the caller's page offset. */
		unsigned long base_pfn =
			__phys_to_pfn(virt_to_phys(buffer->priv));

		return remap_pfn_range(vma, vma->vm_start,
				       base_pfn + vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}

	if (buffer->heap->type == ION_HEAP_VMALLOC)
		return remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff);

	pr_err("%s: attempting to map unsupported heap to userspace\n",
	       __func__);
	return -EINVAL;
}
示例15: SYSRAM_mmap
//------------------------------------------------------------------------------
/*
 * SYSRAM_mmap() - map the ISP/IMGSYS register window into userspace.
 *
 * The caller's vm_pgoff selects the target page; the requested physical
 * address must lie inside [IMGSYS_BASE_ADDR, IMGSYS_BASE_ADDR +
 * ISP_VALID_REG_RANGE] and the mapping may not exceed
 * ISP_VALID_REG_RANGE bytes. The mapping is made non-cached.
 *
 * Returns 0 on success, -EAGAIN on a range or remap error.
 */
static int SYSRAM_mmap(
	struct file* pFile,
	struct vm_area_struct* pVma)
{
	//LOG_MSG("");
	pVma->vm_page_prot = pgprot_noncached(pVma->vm_page_prot);
	long length = pVma->vm_end - pVma->vm_start;
	/* Despite its name, "pfn" holds vm_pgoff << PAGE_SHIFT, i.e. the
	 * requested *physical address*; it is only used for the range
	 * check below, while remap_pfn_range() correctly receives
	 * vm_pgoff (an actual page frame number). */
	MUINT32 pfn=pVma->vm_pgoff<<PAGE_SHIFT;

	/* NOTE(review): the 0x%x specifiers below are applied to long /
	 * unsigned long values — verify on 64-bit builds. */
	LOG_WRN("pVma->vm_pgoff(0x%x),phy(0x%x),pVmapVma->vm_start(0x%x),pVma->vm_end(0x%x),length(0x%x)",\
		pVma->vm_pgoff,pVma->vm_pgoff<<PAGE_SHIFT,pVma->vm_start,pVma->vm_end,length);

	if((length>ISP_VALID_REG_RANGE) || (pfn<IMGSYS_BASE_ADDR) || (pfn>(IMGSYS_BASE_ADDR+ISP_VALID_REG_RANGE)))
	{
		LOG_ERR("mmap range error : vm_start(0x%x),vm_end(0x%x),length(0x%x),pfn(0x%x)!",pVma->vm_start,pVma->vm_end,length,pfn);
		return -EAGAIN;
	}

	if(remap_pfn_range(
		pVma,
		pVma->vm_start,
		pVma->vm_pgoff,
		pVma->vm_end - pVma->vm_start,
		pVma->vm_page_prot))
	{
		LOG_ERR("fail");
		return -EAGAIN;
	}

	return 0;
}
}