This page collects typical usage examples of the C++ pte_unmap function, gathered from real-world projects. If you are wondering what pte_unmap does, how to call it, or simply want to see it used in context, the curated snippets below should help. Fifteen code examples are shown, sorted by popularity by default.
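Before diving in, a minimal sketch of the pattern all fifteen examples share may help. pte_offset_map() temporarily maps the page that holds the pte (on CONFIG_HIGHPTE kernels this is an atomic kmap), and every code path that follows must release that mapping with pte_unmap(). The sketch below assumes a 2.6/3.x-era kernel with a four-level pgd/pud/pmd/pte walk; the function name sketch_read_pte is invented for illustration and is not a kernel API.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Snapshot the pte mapping addr in mm, or return -EFAULT on a hole. */
static int sketch_read_pte(struct mm_struct *mm, unsigned long addr, pte_t *out)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *ptep;

    pgd = pgd_offset(mm, addr);
    if (pgd_none(*pgd) || pgd_bad(*pgd))
        return -EFAULT;
    pud = pud_offset(pgd, addr);
    if (pud_none(*pud) || pud_bad(*pud))
        return -EFAULT;
    pmd = pmd_offset(pud, addr);
    if (pmd_none(*pmd) || pmd_bad(*pmd))
        return -EFAULT;
    ptep = pte_offset_map(pmd, addr);  /* maps the pte page (kmap on HIGHPTE) */
    *out = *ptep;                      /* a snapshot only; no pte lock is held */
    pte_unmap(ptep);                   /* must pair with pte_offset_map() on every path */
    return 0;
}

Most of the examples below are variations on exactly this walk, differing in what they do between the map and the unmap.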
Example 1: unuse_pte_range
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
pte_t swp_pte = swp_entry_to_pte(entry);
pte_t *pte;
int ret = 0;
/*
* We don't actually need pte lock while scanning for swp_pte: since
* we hold page lock and mmap_sem, swp_pte cannot be inserted into the
* page table while we're scanning; though it could get zapped, and on
* some architectures (e.g. x86_32 with PAE) we might catch a glimpse
* of unmatched parts which look like swp_pte, so unuse_pte must
* recheck under pte lock. Scanning without pte lock lets it be
* preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
*/
pte = pte_offset_map(pmd, addr);
do {
/*
* swapoff spends a _lot_ of time in this loop!
* Test inline before going to call unuse_pte.
*/
if (unlikely(pte_same(*pte, swp_pte))) {
pte_unmap(pte);
ret = unuse_pte(vma, pmd, addr, entry, page);
if (ret)
goto out;
pte = pte_offset_map(pmd, addr);
}
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap(pte - 1);
out:
return ret;
}
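Note the unmap discipline: before calling unuse_pte(), which takes the pte lock itself, the scan drops its temporary mapping with pte_unmap() and re-establishes it afterwards. The closing pte_unmap(pte - 1) compensates for the do/while increment having advanced pte one entry past the last one examined.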
Example 2: memset
/*
* need to get a 16k page for level 1
*/
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *new_pgd, *init_pgd;
pud_t *new_pud, *init_pud;
pmd_t *new_pmd, *init_pmd;
pte_t *new_pte, *init_pte;
new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
if (!new_pgd)
goto no_pgd;
memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
/*
* Copy over the kernel and IO PGD entries
*/
init_pgd = pgd_offset_k(0);
memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
if (!vectors_high()) {
/*
* On ARM, first page must always be allocated since it
* contains the machine vectors.
*/
new_pud = pud_alloc(mm, new_pgd, 0);
if (!new_pud)
goto no_pud;
new_pmd = pmd_alloc(mm, new_pud, 0);
if (!new_pmd)
goto no_pmd;
new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
if (!new_pte)
goto no_pte;
init_pud = pud_offset(init_pgd, 0);
init_pmd = pmd_offset(init_pud, 0);
init_pte = pte_offset_map(init_pmd, 0);
set_pte_ext(new_pte, *init_pte, 0);
pte_unmap(init_pte);
pte_unmap(new_pte);
}
return new_pgd;
no_pte:
pmd_free(mm, new_pmd);
no_pmd:
pud_free(mm, new_pud);
no_pud:
free_pages((unsigned long)new_pgd, 2);
no_pgd:
return NULL;
}
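The error labels unwind in reverse order of allocation: no_pte frees the pmd, no_pmd frees the pud, and no_pud releases the four pgd pages, so a failure at any level releases exactly what had been set up before it.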
Example 3: rtR0MemObjLinuxFixPte
/**
* Replace the PFN of a PTE with the address of the actual page.
*
* The caller maps a reserved dummy page at the address with the desired access
* and flags.
*
* This hack is required for older Linux kernels which don't provide
* remap_pfn_range().
*
* @returns 0 on success, -ENOMEM on failure.
* @param mm The memory context.
* @param ulAddr The mapping address.
* @param Phys The physical address of the page to map.
*/
static int rtR0MemObjLinuxFixPte(struct mm_struct *mm, unsigned long ulAddr, RTHCPHYS Phys)
{
int rc = -ENOMEM;
pgd_t *pgd;
spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, ulAddr);
if (!pgd_none(*pgd) && !pgd_bad(*pgd))
{
pmd_t *pmd = pmd_offset(pgd, ulAddr);
if (!pmd_none(*pmd))
{
pte_t *ptep = pte_offset_map(pmd, ulAddr);
if (ptep)
{
pte_t pte = *ptep;
pte.pte_high &= 0xfff00000;
pte.pte_high |= ((Phys >> 32) & 0x000fffff);
pte.pte_low &= 0x00000fff;
pte.pte_low |= (Phys & 0xfffff000);
set_pte(ptep, pte);
pte_unmap(ptep);
rc = 0;
}
}
}
spin_unlock(&mm->page_table_lock);
return rc;
}
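The pte_high/pte_low manipulation assumes the x86 PAE page-table layout, where a pte is split into two 32-bit words: the masks preserve the flag bits in pte_low and splice the new physical frame number into both halves before set_pte() publishes the rewritten entry.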
Example 4: verify_pages
/* Ensure all existing pages follow the policy. */
static int
verify_pages(unsigned long addr, unsigned long end, unsigned long *nodes)
{
while (addr < end) {
struct page *p;
pte_t *pte;
pmd_t *pmd;
pgd_t *pgd = pgd_offset_k(addr);
if (pgd_none(*pgd)) {
addr = (addr + PGDIR_SIZE) & PGDIR_MASK;
continue;
}
pmd = pmd_offset(pgd, addr);
if (pmd_none(*pmd)) {
addr = (addr + PMD_SIZE) & PMD_MASK;
continue;
}
p = NULL;
pte = pte_offset_map(pmd, addr);
if (pte_present(*pte))
p = pte_page(*pte);
pte_unmap(pte);
if (p) {
unsigned nid = page_to_nid(p);
if (!test_bit(nid, nodes))
return -EIO;
}
addr += PAGE_SIZE;
}
return 0;
}
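This is an early three-level walker (no pud step), and it shows the usual trick for skipping holes: when a pgd or pmd entry is empty, addr is rounded up to the next PGDIR_SIZE or PMD_SIZE boundary rather than testing every page inside the hole.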
Example 5: kthread_wss
int kthread_wss(void *data)
{
unsigned long va;
int ret;
int wss;
pgd_t *pgd;
pmd_t *pmd;
pud_t *pud;
pte_t *ptep;
struct task_struct *task;
while(!kthread_should_stop())
{
printk(KERN_INFO "Checking process' WSS.\n");
for_each_process(task)
{
wss = 0;
if(task->mm != NULL)
{
struct vm_area_struct *temp = task->mm->mmap;
while(temp)
{
if (!(temp->vm_flags & VM_IO))
{
for(va = temp->vm_start; va < temp->vm_end; va+=PAGE_SIZE)
{
pgd = pgd_offset(task->mm,va);
if(pgd_none(*pgd))
break;
pud = pud_offset(pgd,va);
if(pud_none(*pud))
break;
pmd = pmd_offset(pud,va);
if(pmd_none(*pmd))
break;
ptep = pte_offset_map(pmd,va);
ret = 0;
if(pte_young(*ptep))
{
ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, (unsigned long *) &ptep->pte);
wss++;
}
if(ret)
{
pte_update(task->mm, va, ptep);
}
pte_unmap(ptep);
}
}
temp = temp->vm_next;
}
printk(KERN_INFO "%i: %i\n", task->pid, wss);
}
}
msleep(1000);
}
return 0;
}
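A few caveats before adapting this pattern: for_each_process() runs here without rcu_read_lock() or tasklist_lock, and the break on an empty pgd/pud/pmd abandons the remainder of the VMA instead of skipping ahead to the next mapped range, so the reported working-set size can under-count. Clearing the accessed bit via test_and_clear_bit() on _PAGE_BIT_ACCESSED is x86-specific; the pte_update() call then keeps paravirtualized page tables in sync.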
Example 6: filemap_sync_pte_range
static int filemap_sync_pte_range(pmd_t * pmd,
unsigned long address, unsigned long end,
struct vm_area_struct *vma, unsigned int flags)
{
pte_t *pte;
int error;
if (pmd_none(*pmd))
return 0;
if (pmd_bad(*pmd)) {
pmd_ERROR(*pmd);
pmd_clear(pmd);
return 0;
}
pte = pte_offset_map(pmd, address);
if ((address & PMD_MASK) != (end & PMD_MASK))
end = (address & PMD_MASK) + PMD_SIZE;
error = 0;
do {
error |= filemap_sync_pte(pte, vma, address, flags);
address += PAGE_SIZE;
pte++;
} while (address && (address < end));
pte_unmap(pte - 1);
return error;
}
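The clamp of end to (address & PMD_MASK) + PMD_SIZE guarantees the loop never walks past the single pte page mapped for this pmd, which is what makes the one pte_unmap(pte - 1) at the end sufficient.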
Example 7: show_pte
/*
* Dump out the page tables associated with 'addr' in mm 'mm'.
*/
void show_pte(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
if (!mm)
mm = &init_mm;
pr_alert("pgd = %p\n", mm->pgd);
pgd = pgd_offset(mm, addr);
pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));
do {
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (pgd_none(*pgd) || pgd_bad(*pgd))
break;
pud = pud_offset(pgd, addr);
if (pud_none(*pud) || pud_bad(*pud))
break;
pmd = pmd_offset(pud, addr);
printk(", *pmd=%016llx", pmd_val(*pmd));
if (pmd_none(*pmd) || pmd_bad(*pmd))
break;
pte = pte_offset_map(pmd, addr);
printk(", *pte=%016llx", pte_val(*pte));
pte_unmap(pte);
} while(0);
printk("\n");
}
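If no mm is supplied, the dump falls back to init_mm. The do { ... } while (0) wrapper exists purely so that break can abandon the walk at whichever level is empty or bad; pte_unmap() is only reached, and only needed, on the path where pte_offset_map() actually ran.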
Example 8: check_pte_range
/* Ensure all existing pages follow the policy. */
static int check_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end, unsigned long *nodes)
{
pte_t *orig_pte;
pte_t *pte;
spin_lock(&mm->page_table_lock);
orig_pte = pte = pte_offset_map(pmd, addr);
do {
unsigned long pfn;
unsigned int nid;
if (!pte_present(*pte))
continue;
pfn = pte_pfn(*pte);
if (!pfn_valid(pfn))
continue;
nid = pfn_to_nid(pfn);
if (!test_bit(nid, nodes))
break;
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap(orig_pte);
spin_unlock(&mm->page_table_lock);
return addr != end;
}
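Unlike Example 4, this variant holds mm->page_table_lock for the whole scan, maps the pte page exactly once into orig_pte, and returns addr != end so the caller can tell whether the loop stopped early on a page violating the policy.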
Example 9: memset
/*
* need to get a 16k page for level 1
*/
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
pgd_t *new_pgd, *init_pgd;
pmd_t *new_pmd, *init_pmd;
pte_t *new_pte, *init_pte;
unsigned long flags;
new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
if (!new_pgd)
goto no_pgd;
memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
/*
* Copy over the kernel and IO PGD entries
*/
init_pgd = pgd_offset_k(0);
pgd_list_lock(flags);
memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
pgd_list_add(new_pgd);
pgd_list_unlock(flags);
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
if (!vectors_high()) {
#ifdef CONFIG_ARM_FCSE
/* FCSE does not work without high vectors. */
BUG();
#endif /* CONFIG_ARM_FCSE */
/*
* On ARM, first page must always be allocated since it
* contains the machine vectors.
*/
new_pmd = pmd_alloc(mm, new_pgd, 0);
if (!new_pmd)
goto no_pmd;
new_pte = pte_alloc_map(mm, new_pmd, 0);
if (!new_pte)
goto no_pte;
init_pmd = pmd_offset(init_pgd, 0);
init_pte = pte_offset_map_nested(init_pmd, 0);
set_pte_ext(new_pte, *init_pte, 0);
pte_unmap_nested(init_pte);
pte_unmap(new_pte);
}
return new_pgd;
no_pte:
pmd_free(mm, new_pmd);
no_pmd:
free_pages((unsigned long)new_pgd, 2);
no_pgd:
return NULL;
}
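Two pte pages are mapped simultaneously here (the init_mm one and the freshly allocated one), so the second mapping uses pte_offset_map_nested()/pte_unmap_nested(), which on HIGHPTE kernels uses a separate atomic-kmap slot (KM_PTE1 rather than KM_PTE0) so the two temporary mappings cannot collide.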
Example 10: gup_pte_range
/*
* The performance critical leaf functions are made noinline otherwise gcc
* inlines everything into a single function which results in too much
* register pressure.
*/
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
u64 mask, result;
pte_t *ptep;
#ifdef CONFIG_X2TLB
result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
if (write)
result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
#elif defined(CONFIG_SUPERH64)
result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
if (write)
result |= _PAGE_WRITE;
#else
result = _PAGE_PRESENT | _PAGE_USER;
if (write)
result |= _PAGE_RW;
#endif
mask = result | _PAGE_SPECIAL;
ptep = pte_offset_map(&pmd, addr);
do {
pte_t pte = gup_get_pte(ptep);
struct page *page;
if ((pte_val(pte) & mask) != result) {
pte_unmap(ptep);
return 0;
}
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
get_page(page);
__flush_anon_page(page, addr);
flush_dcache_page(page);
pages[*nr] = page;
(*nr)++;
} while (ptep++, addr += PAGE_SIZE, addr != end);
pte_unmap(ptep - 1);
return 1;
}
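This is the lockless get_user_pages() fast-path leaf for SH: mask and result fold the present/user (and, for writes, writable) checks into a single compare per pte. The pattern relies on the arch's fast-gup entry point running with interrupts disabled so the page tables cannot be freed mid-walk, and pte_unmap() is issued on both the early-return and loop-completion paths.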
Example 11: ref_bits_set
// Return true (!= 0) if any referenced bits are set.
static int ref_bits_set (int exclude_irqhandler) {
void *cur_addr;
pte_t *pte;
int i;
int ret_val = 0;
for (i = 0; i < cr_num_drivers; i++) {
if (exclude_irqhandler) uprintk ("i %d: ", i);
for (cur_addr = cr_base_address[i];
cur_addr < cr_base_address[i] + cr_module_size[i];
cur_addr += PAGE_SIZE) {
pte = virt_to_pte (cur_addr);
if (pte != NULL) {
// See if we're excluding the interrupt handler
// from this check.
if (exclude_irqhandler &&
addr_contains_irq_handler (cur_addr)) {
pte_unmap(pte);
if (exclude_irqhandler) uprintk ("X");
continue;
}
// See if the page was referenced lately.
if (pte_young(*pte) != 0) {
// kunmap_atomic (page, KM_IRQ1);
pte_unmap(pte);
if (exclude_irqhandler) uprintk ("1");
ret_val = 1;
continue;
}
if (exclude_irqhandler) uprintk ("0");
// kunmap_atomic (page, KM_IRQ1);
pte_unmap(pte);
}
}
if (exclude_irqhandler) uprintk ("\n");
}
return ret_val;
}
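virt_to_pte() is not a core kernel export but appears to be this module's own lookup helper returning a mapped pte; that is why every branch out of the per-page checks, including both continue paths, issues its own pte_unmap() before moving on.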
Example 12: sys_my_syscall
asmlinkage long sys_my_syscall( int pid, unsigned long address)
{
struct task_struct* task;
struct mm_struct* mm;
pgd_t* pgd;
pud_t* pud;
pmd_t* pmd;
pte_t* pte;
unsigned long pte_val;
printk(KERN_INFO "PID: %d, VIRTUAL_ADDR: 0x%lx\n", pid, address);
for_each_process(task)
{
if(task->pid == pid)
{
printk(KERN_INFO "Task %d found\n", task->pid);
mm = task->mm;
pgd = pgd_offset(mm, address);
printk(KERN_INFO "PGD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pgd_present(*pgd), pgd_bad(*pgd), pgd_none(*pgd));
if(!(pgd_none(*pgd) || pgd_bad(*pgd)) && pgd_present(*pgd))
{
printk(KERN_INFO "PGD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pgd_present(*pgd), pgd_bad(*pgd), pgd_none(*pgd));
pud = pud_offset(pgd, address);
printk(KERN_INFO "PUD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pud_present(*pud), pud_bad(*pud), pud_none(*pud));
if(!(pud_none(*pud) || pud_bad(*pud)) && pud_present(*pud))
{
printk(KERN_INFO "PUD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pud_present(*pud), pud_bad(*pud), pud_none(*pud));
pmd = pmd_offset(pud, address);
printk(KERN_INFO "PMD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pmd_present(*pmd), pmd_bad(*pmd), pmd_none(*pmd));
if(!(pmd_none(*pmd) || pmd_bad(*pmd)) && pmd_present(*pmd))
{
printk(KERN_INFO "PMD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pmd_present(*pmd), pmd_bad(*pmd), pmd_none(*pmd));
pte = pte_offset_map(pmd, address);
printk(KERN_INFO "PTE INFO: PRESENT: %d PTE: 0x%lx \n ", pte_present(*pte), pte->pte);
pte_val = pte->pte;
if(pte_val == 0)
pte_val = __pte_to_swp_entry(*pte).val;
pte_unmap(pte);
printk(KERN_INFO "pte_val: %lx\n" , pte_val);
return pte_val;
}
}
}
}
}
printk(KERN_INFO "Data not found!\n");
return 0;
}
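Two things to watch in this teaching example: the local variable named pte_val shadows the kernel's pte_val() macro (the code sidesteps it by reading the raw pte->pte field, which is x86-specific), and the walk takes neither mmap_sem nor the pte lock, so the entry can change or disappear between the checks and the read.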
Example 13: memset
/*
* get_pgd_slow: allocate a new pgd.
* Note: a pgd occupies four page frames; each pgd entry is 8 bytes.
*/
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
pgd_t *new_pgd, *init_pgd;
pmd_t *new_pmd, *init_pmd;
pte_t *new_pte, *init_pte;
/* the pgd occupies four page frames */
new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
if (!new_pgd)
goto no_pgd;
memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
/*
* Copy over the kernel and I/O PGD entries
*/
init_pgd = pgd_offset_k(0);
memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
if (!vectors_high()) {
/*
* On ARM, first page must always be allocated since it
* contains the machine vectors.
*/
new_pmd = pmd_alloc(mm, new_pgd, 0);
if (!new_pmd)
goto no_pmd;
/* get pte 0 of the new pmd: entry 0 is the one that maps the vector page */
new_pte = pte_alloc_map(mm, new_pmd, 0);
if (!new_pte)
goto no_pte;
/* fetch the pte of the vector page, which sits at the bottom of the address space (address 0) when vectors are low */
init_pmd = pmd_offset(init_pgd, 0);
init_pte = pte_offset_map_nested(init_pmd, 0);
/* copy the vector page mapping into the new pte */
set_pte_ext(new_pte, *init_pte, 0);
pte_unmap_nested(init_pte);
/* drop the (possibly highmem) temporary mapping of new_pte */
pte_unmap(new_pte);
}
return new_pgd;
no_pte:
pmd_free(mm, new_pmd);
no_pmd:
free_pages((unsigned long)new_pgd, 2);
no_pgd:
return NULL;
}
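This is the same get_pgd_slow() as Example 9, from a slightly older tree without the pgd_list bookkeeping or FCSE check; the annotations spell out why pgd/pmd/pte entry 0 must be populated when the vectors live at address 0.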
Example 14: pmem_user_v2p_video
unsigned int pmem_user_v2p_video(unsigned int va)
{
unsigned int pageOffset = (va & (PAGE_SIZE - 1));
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned int pa;
if(NULL==current)
{
MFV_LOGE("[ERROR] pmem_user_v2p_video, current is NULL! \n");
return 0;
}
if(NULL==current->mm)
{
MFV_LOGE("[ERROR] pmem_user_v2p_video, current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
return 0;
}
pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
if(pgd_none(*pgd)||pgd_bad(*pgd))
{
MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pgd invalid! \n", va);
return 0;
}
pud = pud_offset(pgd, va);
if(pud_none(*pud)||pud_bad(*pud))
{
MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pud invalid! \n", va);
return 0;
}
pmd = pmd_offset(pud, va);
if(pmd_none(*pmd)||pmd_bad(*pmd))
{
MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pmd invalid! \n", va);
return 0;
}
pte = pte_offset_map(pmd, va);
if(pte_present(*pte))
{
pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset;
pte_unmap(pte);
return pa;
}
MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pte invalid! \n", va);
return 0;
}
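The translation only succeeds for a present pte; note that pte_offset_map() has already taken the temporary mapping by the time the pte_present() test runs, so pte_unmap() has to be called on the not-present path as well, not just on success.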
Example 15: kgsl_cache_range_op
static long kgsl_cache_range_op(unsigned long addr, int size,
unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
unsigned long end;
#endif
BUG_ON(addr & (KGSL_PAGESIZE - 1));
BUG_ON(size & (KGSL_PAGESIZE - 1));
if (flags & KGSL_CACHE_FLUSH)
dmac_flush_range((const void *)addr,
(const void *)(addr + size));
else if (flags & KGSL_CACHE_CLEAN)
dmac_clean_range((const void *)addr,
(const void *)(addr + size));
else
dmac_inv_range((const void *)addr,
(const void *)(addr + size));
#ifdef CONFIG_OUTER_CACHE
for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
pte_t *pte_ptr, pte;
unsigned long physaddr;
if (flags & KGSL_CACHE_VMALLOC_ADDR)
physaddr = vmalloc_to_pfn((void *)end);
else if (flags & KGSL_CACHE_USER_ADDR) {
pte_ptr = kgsl_get_pte_from_vaddr(end);
if (!pte_ptr)
return -EINVAL;
pte = *pte_ptr;
physaddr = pte_pfn(pte);
pte_unmap(pte_ptr);
} else
return -EINVAL;
physaddr <<= PAGE_SHIFT;
if (flags & KGSL_CACHE_FLUSH)
outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
else if (flags & KGSL_CACHE_CLEAN)
outer_clean_range(physaddr,
physaddr + KGSL_PAGESIZE);
else
outer_inv_range(physaddr,
physaddr + KGSL_PAGESIZE);
}
#endif
return 0;
}
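Here the pte comes from kgsl_get_pte_from_vaddr(), a driver-internal helper that presumably wraps the usual walk plus pte_offset_map(), leaving the caller to issue the matching pte_unmap(). The pfn recovered from the pte is then used to push the same range through the outer (L2) cache one KGSL_PAGESIZE page at a time.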