This article collects and summarizes typical usage examples of the pte_pfn function in C++. If you have been wondering how exactly to use pte_pfn in C++, or what pte_pfn does, the hand-picked code examples below may help.
Below, 15 code examples of the pte_pfn function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
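Before the examples, here is a minimal hedged sketch of the pattern that almost all of them share: read a PTE, extract its page frame number with pte_pfn(), validate it with pfn_valid(), and then either obtain the struct page via pfn_to_page() or form a physical address from the frame number plus the in-page offset. The helper name pte_to_phys_sketch is invented for illustration; only pte_pfn()/pte_present()/pfn_valid()/pfn_to_page()/PFN_PHYS() are the real kernel APIs being demonstrated.
#include <linux/mm.h>
#include <linux/pfn.h>
#include <asm/pgtable.h>

/* Illustrative helper (hypothetical name): map a PTE plus a virtual address
 * to a physical address, or return 0 if the PTE does not reference RAM. */
static phys_addr_t pte_to_phys_sketch(pte_t pte, unsigned long vaddr)
{
	unsigned long pfn = pte_pfn(pte);	/* frame number stored in the PTE */
	struct page *page;

	if (!pte_present(pte) || !pfn_valid(pfn))
		return 0;			/* not mapped to ordinary RAM */

	page = pfn_to_page(pfn);		/* struct page for that frame */
	(void)page;				/* e.g. for PageReserved()/flush checks */

	/* physical address = frame base + offset within the page */
	return PFN_PHYS(pfn) | (vaddr & ~PAGE_MASK);
}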
Example 1: set_aliased_prot
/*
* Set the page permissions for a particular virtual address. If the
* address is a vmalloc mapping (or other non-linear mapping), then
* find the linear mapping of the page and also set its protections to
* match.
*/
static void set_aliased_prot(void *v, pgprot_t prot)
{
int level;
pte_t *ptep;
pte_t pte;
unsigned long pfn;
struct page *page;
ptep = lookup_address((unsigned long)v, &level);
BUG_ON(ptep == NULL);
pfn = pte_pfn(*ptep);
page = pfn_to_page(pfn);
pte = pfn_pte(pfn, prot);
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
BUG();
if (!PageHighMem(page)) {
void *av = __va(PFN_PHYS(pfn));
if (av != v)
if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
BUG();
} else
kmap_flush_unused();
}
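For context, in the Xen paravirt code a helper like this is typically driven over every page backing a descriptor table. The sketch below is loosely modelled on that caller; treat the function name xen_alloc_ldt_sketch and the entries-per-page arithmetic as illustrative assumptions rather than the exact upstream function.
/* Hedged sketch of a caller: mark all pages backing an LDT read-only
 * so the hypervisor will accept them as descriptor-table pages. */
static void xen_alloc_ldt_sketch(struct desc_struct *ldt, unsigned int entries)
{
	const unsigned int entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	unsigned int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}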
Example 2: _copy_pte
static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
{
pte_t pte = *src_pte;
if (pte_valid(pte)) {
/*
* Resume will overwrite areas that may be marked
* read only (code, rodata). Clear the RDONLY bit from
* the temporary mappings we use during restore.
*/
set_pte(dst_pte, pte_clear_rdonly(pte));
} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
/*
* debug_pagealloc will have removed the PTE_VALID bit if
* the page isn't in use by the resume kernel. It may have
* been in use by the original kernel, in which case we need
* to put it back in our copy to do the restore.
*
* Before marking this entry valid, check that the pfn is
* actually mapped.
*/
BUG_ON(!pfn_valid(pte_pfn(pte)));
set_pte(dst_pte, pte_mkpresent(pte_clear_rdonly(pte)));
}
}
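_copy_pte() is meant to be driven by a walker that mirrors one source PTE table into a freshly allocated one. A hedged sketch of such a walker is shown below, loosely modelled on the arm64 hibernation code; get_safe_page() and the exact population helpers are assumptions about that environment.
/* Hedged sketch: copy every PTE of one table into a destination table
 * allocated from hibernation-safe pages. */
static int copy_pte_table(pmd_t *dst_pmd, pmd_t *src_pmd,
			  unsigned long start, unsigned long end)
{
	unsigned long addr = start;
	pte_t *src_pte, *dst_pte;

	dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
	if (!dst_pte)
		return -ENOMEM;
	pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);

	dst_pte = pte_offset_kernel(dst_pmd, start);
	src_pte = pte_offset_kernel(src_pmd, start);
	do {
		_copy_pte(dst_pte, src_pte, addr);
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	return 0;
}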
Example 3: tlb_batch_add
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
struct tlb_batch *tb = &get_cpu_var(tlb_batch);
unsigned long nr;
vaddr &= PAGE_MASK;
if (pte_exec(orig))
vaddr |= 0x1UL;
if (tlb_type != hypervisor &&
pte_dirty(orig)) {
unsigned long paddr, pfn = pte_pfn(orig);
struct address_space *mapping;
struct page *page;
if (!pfn_valid(pfn))
goto no_cache_flush;
page = pfn_to_page(pfn);
if (PageReserved(page))
goto no_cache_flush;
/* A real file page? */
mapping = page_mapping(page);
if (!mapping)
goto no_cache_flush;
paddr = (unsigned long) page_address(page);
if ((paddr ^ vaddr) & (1 << 13))
flush_dcache_page_all(mm, page);
}
no_cache_flush:
/*
if (tb->fullmm) {
put_cpu_var(tlb_batch);
return;
}
*/
nr = tb->tlb_nr;
if (unlikely(nr != 0 && mm != tb->mm)) {
flush_tlb_pending();
nr = 0;
}
if (nr == 0)
tb->mm = mm;
tb->vaddrs[nr] = vaddr;
tb->tlb_nr = ++nr;
if (nr >= TLB_BATCH_NR)
flush_tlb_pending();
put_cpu_var(tlb_batch);
}
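The (paddr ^ vaddr) & (1 << 13) test above checks whether the kernel alias and the user alias of the page fall into different colours of the virtually indexed D-cache (with 8 KB pages, bit 13 is the first bit above the page offset). A small hedged helper expressing the same test, with the choice of bit 13 treated as an assumption tied to this particular CPU:
/* Hedged sketch: do two aliases of the same page use different D-cache
 * colours? Bit 13 as the colour bit mirrors the check in tlb_batch_add(). */
static inline bool dcache_aliases_differ(unsigned long paddr,
					 unsigned long vaddr)
{
	return ((paddr ^ vaddr) & (1UL << 13)) != 0;
}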
Example 4: __replace_page
/**
* __replace_page - replace a page in a vma with a new page.
* Based on replace_page() in mm/ksm.c.
*
* @vma: vma that holds the pte pointing to page
* @addr: address the old @old_page is mapped at
* @old_page: the COWed page we are replacing
* @new_page: the modified page we replace it with
*
* Returns 0 on success, -EFAULT on failure.
*/
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
struct page *old_page, struct page *new_page)
{
struct mm_struct *mm = vma->vm_mm;
spinlock_t *ptl;
pte_t *ptep;
int err;
/* For mmu_notifiers */
const unsigned long mmun_start = addr;
const unsigned long mmun_end = addr + PAGE_SIZE;
struct mem_cgroup *memcg;
err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
false);
if (err)
return err;
/* For try_to_free_swap() and munlock_vma_page() below */
lock_page(old_page);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
err = -EAGAIN;
ptep = page_check_address(old_page, mm, addr, &ptl, 0);
if (!ptep) {
mem_cgroup_cancel_charge(new_page, memcg, false);
goto unlock;
}
get_page(new_page);
page_add_new_anon_rmap(new_page, vma, addr, false);
mem_cgroup_commit_charge(new_page, memcg, false, false);
lru_cache_add_active_or_unevictable(new_page, vma);
if (!PageAnon(old_page)) {
dec_mm_counter(mm, mm_counter_file(old_page));
inc_mm_counter(mm, MM_ANONPAGES);
}
flush_cache_page(vma, addr, pte_pfn(*ptep));
ptep_clear_flush_notify(vma, addr, ptep);
set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));
page_remove_rmap(old_page, false);
if (!page_mapped(old_page))
try_to_free_swap(old_page);
pte_unmap_unlock(ptep, ptl);
if (vma->vm_flags & VM_LOCKED)
munlock_vma_page(old_page);
put_page(old_page);
err = 0;
unlock:
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
unlock_page(old_page);
return err;
}
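A hedged sketch of how a caller might use __replace_page(), loosely modelled on the uprobes text-poke path: allocate a replacement page, copy the old contents, patch in the new bytes, then swap the pages. The function name poke_text_page_sketch and its parameters are illustrative assumptions, not the upstream caller.
static int poke_text_page_sketch(struct vm_area_struct *vma,
				 unsigned long vaddr,
				 struct page *old_page,
				 const void *insn, int nbytes)
{
	struct page *new_page;
	void *kaddr;
	int ret;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		return -ENOMEM;

	/* Start from a copy of the old page, then patch the new bytes in. */
	copy_highpage(new_page, old_page);
	kaddr = kmap_atomic(new_page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), insn, nbytes);
	kunmap_atomic(kaddr);

	/* Atomically replace the mapping of old_page with new_page. */
	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
	put_page(new_page);
	return ret;
}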
Example 5: check_gpte
static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
if ((pte_flags(gpte) & _PAGE_PSE) ||
pte_pfn(gpte) >= cpu->lg->pfn_limit) {
kill_guest(cpu, "bad page table entry");
return false;
}
return true;
}
Example 6: set_aliased_prot
/*
* Set the page permissions for a particular virtual address. If the
* address is a vmalloc mapping (or other non-linear mapping), then
* find the linear mapping of the page and also set its protections to
* match.
*/
static void set_aliased_prot(void *v, pgprot_t prot)
{
int level;
pte_t *ptep;
pte_t pte;
unsigned long pfn;
struct page *page;
unsigned char dummy;
ptep = lookup_address((unsigned long)v, &level);
BUG_ON(ptep == NULL);
pfn = pte_pfn(*ptep);
page = pfn_to_page(pfn);
pte = pfn_pte(pfn, prot);
/*
* Careful: update_va_mapping() will fail if the virtual address
* we're poking isn't populated in the page tables. We don't
* need to worry about the direct map (that's always in the page
* tables), but we need to be careful about vmap space. In
* particular, the top level page table can lazily propagate
* entries between processes, so if we've switched mms since we
* vmapped the target in the first place, we might not have the
* top-level page table entry populated.
*
* We disable preemption because we want the same mm active when
* we probe the target and when we issue the hypercall. We'll
* have the same nominal mm, but if we're a kernel thread, lazy
* mm dropping could change our pgd.
*
* Out of an abundance of caution, this uses __get_user() to fault
* in the target address just in case there's some obscure case
* in which the target address isn't readable.
*/
preempt_disable();
pagefault_disable(); /* Avoid warnings due to being atomic. */
__get_user(dummy, (unsigned char __user __force *)v);
pagefault_enable();
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
BUG();
if (!PageHighMem(page)) {
void *av = __va(PFN_PHYS(pfn));
if (av != v)
if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
BUG();
} else
kmap_flush_unused();
preempt_enable();
}
Example 7: eeh_token_to_phys
unsigned long eeh_token_to_phys(unsigned long token)
{
if (REGION_ID(token) == EEH_REGION_ID) {
unsigned long vaddr = IO_TOKEN_TO_ADDR(token);
pte_t *ptep = find_linux_pte(ioremap_mm.pgd, vaddr);
unsigned long pa = pte_pfn(*ptep) << PAGE_SHIFT;
return pa | (vaddr & (PAGE_SIZE-1));
} else
return token;
}
Example 8: vmemmap_verify
void __meminit vmemmap_verify(pte_t *pte, int node,
unsigned long start, unsigned long end)
{
unsigned long pfn = pte_pfn(*pte);
int actual_node = early_pfn_to_nid(pfn);
if (node_distance(actual_node, node) > LOCAL_DISTANCE)
printk(KERN_WARNING "[%lx-%lx] potential offnode "
"page_structs\n", start, end - 1);
}
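vmemmap_verify() is called by the generic base-page vmemmap populator right after each PTE is installed. A hedged sketch of that loop follows; the vmemmap_*_populate helpers are the generic sparse-vmemmap ones, but treat the exact signatures as assumptions for this kernel version.
int __meminit vmemmap_populate_basepages_sketch(unsigned long start,
						unsigned long end, int node)
{
	unsigned long addr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		/* Warn if the backing page ended up on a distant node. */
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}
	return 0;
}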
Example 9: update_mmu_cache
/*
* This is called at the end of handling a user page fault, when the
* fault has been handled by updating a PTE in the linux page tables.
* We use it to preload an HPTE into the hash table corresponding to
* the updated linux PTE.
*
* This must always be called with the pte lock held.
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
unsigned long access = 0, trap;
#endif
unsigned long pfn = pte_pfn(pte);
/* handle i-cache coherency */
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
/* On 8xx, cache control instructions (particularly
* "dcbst" from flush_dcache_icache) fault as write
* operation if there is an unpopulated TLB entry
* for the address in question. To workaround that,
* we invalidate the TLB here, thus avoiding dcbst
* misbehaviour.
*/
_tlbie(address);
#endif
if (!PageReserved(page)
&& !test_bit(PG_arch_1, &page->flags)) {
if (vma->vm_mm == current->active_mm) {
__flush_dcache_icache((void *) address);
} else
flush_dcache_icache_page(page);
set_bit(PG_arch_1, &page->flags);
}
}
#ifdef CONFIG_PPC_STD_MMU
/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
if (!pte_young(pte) || address >= TASK_SIZE)
return;
/* We try to figure out if we are coming from an instruction
* access fault and pass that down to __hash_page so we avoid
* double-faulting on execution of fresh text. We have to test
* for regs NULL since init will get here first thing at boot
*
* We also avoid filling the hash if not coming from a fault
*/
if (current->thread.regs == NULL)
return;
trap = TRAP(current->thread.regs);
if (trap == 0x400)
access |= _PAGE_EXEC;
else if (trap != 0x300)
return;
hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}
Example 10: eeh_token_to_phys
/**
* eeh_token_to_phys - convert EEH address token to phys address
* @token i/o token, should be address in the form 0xA....
*/
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
pte_t *ptep;
unsigned long pa;
ptep = find_linux_pte(init_mm.pgd, token);
if (!ptep)
return token;
pa = pte_pfn(*ptep) << PAGE_SHIFT;
return pa | (token & (PAGE_SIZE-1));
}
Example 11: maybe_pte_to_page
struct page * maybe_pte_to_page(pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
struct page *page;
if (unlikely(!pfn_valid(pfn)))
return NULL;
page = pfn_to_page(pfn);
if (PageReserved(page))
return NULL;
return page;
}
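On powerpc this helper is typically used when deciding whether a page needs its caches flushed before being mapped. A hedged, simplified sketch of such a caller is shown below; the filter name and the PG_arch_1 "already cleaned" convention follow the powerpc code, but treat the details as assumptions.
/* Hedged sketch: flush a page's caches the first time it is mapped,
 * using PG_arch_1 as the "already cleaned" marker. */
static pte_t set_pte_filter_sketch(pte_t pte)
{
	struct page *pg = maybe_pte_to_page(pte);

	if (!pg)		/* invalid or reserved pfn: nothing to do */
		return pte;

	if (!test_bit(PG_arch_1, &pg->flags)) {
		flush_dcache_icache_page(pg);
		set_bit(PG_arch_1, &pg->flags);
	}
	return pte;
}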
Example 12: mic_flush_icache_nx
/*
* This is a lazy way to flush the icache, provided the CPU has the NX feature enabled.
* This is called from set_pte.
*/
void mic_flush_icache_nx(pte_t *ptep, pte_t pte)
{
/*
* Do not continue if icache snooping is enabled
* or if the NX feature doesn't exist
*/
if(icache_snoop || !is_nx_support)
return;
/*
* Similar to the ia64 set_pte code
* We only flush and set PG_arch_1 bit if the page is
* present && page is user page && has backing page struct
* && page is executable &&
* (page swapin or new page or page migration ||
* copy_on_write with page copying)
*/
if (pte_present(pte) && pte_user(pte) && pfn_valid(pte_pfn(pte)) &&
!pte_no_exec(pte) && (!pte_present(*ptep) ||
pte_pfn(*ptep) != pte_pfn(pte)))
mic_flush_icache_lazy(pte_page(pte));
}
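The comment says this hook is called from set_pte(). A hedged sketch of how that wiring might look on this x86-based platform; native_set_pte() is the stock x86 setter, while the wrapper name and the call ordering are assumptions.
/* Hedged sketch: inspect the old and new PTE before installing the
 * new one, so the lazy icache flush sees both values. */
static inline void mic_set_pte_sketch(pte_t *ptep, pte_t pte)
{
	mic_flush_icache_nx(ptep, pte);	/* may flush and mark PG_arch_1 */
	native_set_pte(ptep, pte);	/* then write the new entry */
}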
Example 13: kgsl_cache_range_op
static long kgsl_cache_range_op(unsigned long addr, int size,
unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
unsigned long end;
#endif
BUG_ON(addr & (KGSL_PAGESIZE - 1));
BUG_ON(size & (KGSL_PAGESIZE - 1));
if (flags & KGSL_CACHE_FLUSH)
dmac_flush_range((const void *)addr,
(const void *)(addr + size));
else
if (flags & KGSL_CACHE_CLEAN)
dmac_clean_range((const void *)addr,
(const void *)(addr + size));
else
dmac_inv_range((const void *)addr,
(const void *)(addr + size));
#ifdef CONFIG_OUTER_CACHE
for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
pte_t *pte_ptr, pte;
unsigned long physaddr;
if (flags & KGSL_CACHE_VMALLOC_ADDR)
physaddr = vmalloc_to_pfn((void *)end);
else
if (flags & KGSL_CACHE_USER_ADDR) {
pte_ptr = kgsl_get_pte_from_vaddr(end);
if (!pte_ptr)
return -EINVAL;
pte = *pte_ptr;
physaddr = pte_pfn(pte);
pte_unmap(pte_ptr);
} else
return -EINVAL;
physaddr <<= PAGE_SHIFT;
if (flags & KGSL_CACHE_FLUSH)
outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
else
if (flags & KGSL_CACHE_CLEAN)
outer_clean_range(physaddr,
physaddr + KGSL_PAGESIZE);
else
outer_inv_range(physaddr,
physaddr + KGSL_PAGESIZE);
}
#endif
return 0;
}
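A hedged usage sketch: flushing a vmalloc-backed buffer through both cache levels. The wrapper name is made up; the flag values come from the function above, and the address and size must be KGSL_PAGESIZE aligned or the BUG_ON()s at the top will fire.
/* Hedged sketch: write back and invalidate a vmalloc'd GPU buffer. */
static long kgsl_flush_vmalloc_buf_sketch(void *buf, int size)
{
	return kgsl_cache_range_op((unsigned long)buf, size,
				   KGSL_CACHE_FLUSH |
				   KGSL_CACHE_VMALLOC_ADDR);
}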
Example 14: wip_init
int __init wip_init(void)
{
unsigned long va = 0xb77e5000;
int pid = 1072;
//struct page p;
unsigned long long pageFN;
unsigned long long pa;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
struct mm_struct *mm = NULL;
int found = 0;
struct task_struct *task;
for_each_process(task)
{
if(task->pid == pid)
mm = task->mm;
}
if (!mm) {
	printk(KERN_ALERT "pid %d not found or has no mm\n", pid);
	return -ESRCH;
}
pgd = pgd_offset(mm,va);
if(!pgd_none(*pgd) && !pgd_bad(*pgd))
{
pud = pud_offset(pgd,va);
if(!pud_none(*pud) && !pud_bad(*pud))
{
pmd = pmd_offset(pud,va);
if(!pmd_none(*pmd) && !pmd_bad(*pmd))
{
pte = pte_offset_kernel(pmd,va);
if(pte_present(*pte))
{
pageFN = pte_pfn(*pte);
pa = (pageFN << PAGE_SHIFT) | (va & ~PAGE_MASK);
found = 1;
printk(KERN_ALERT "Physical Address: 0x%08llx\npfn: 0x%04llx\n", pa, pageFN);
}
}
}
}
/*
 * If the walk reached a PTE but the page is not present, the PTE may
 * still carry swap information in its upper bits.
 */
if(!found && pte && !pte_none(*pte))
{
	unsigned long long swapID = (pte_val(*pte) >> 32);
	found = 1;
	printk(KERN_ALERT "swap ID: 0x%08llx\n", swapID);
}
if(!found)
	printk(KERN_ALERT "no mapping found for va 0x%08lx\n", va);
return 0;
}
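To build the snippet above as a loadable module, it still needs the usual registration boilerplate, along with the <linux/module.h>, <linux/sched.h> and <asm/pgtable.h> includes that the excerpt omits. A hedged sketch; the exit handler is trivial here and the license string is an assumption.
static void __exit wip_exit(void)
{
	printk(KERN_ALERT "wip module unloaded\n");
}

module_init(wip_init);
module_exit(wip_exit);
MODULE_LICENSE("GPL");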
Example 15: find_init_mm_pte
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
unsigned long addr = (unsigned long) x;
pte_t *p;
/*
* Assume we don't have huge pages in vmalloc space, so we don't need
* to worry about THP collapse/split. Called only in real mode with
* MSR_EE = 0, hence there is no need for irq_save/restore.
*/
p = find_init_mm_pte(addr, NULL);
if (!p || !pte_present(*p))
return NULL;
addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
return __va(addr);
}
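A hedged usage sketch: code running in real mode (MMU off) translates a vmalloc'd table pointer to its linear-map alias before dereferencing it. The table type, name and accessor below are purely illustrative.
/* Hedged sketch: fetch one slot of a vmalloc'd table from real mode. */
static inline unsigned long realmode_table_read(unsigned long *vmalloc_table,
						unsigned long idx)
{
	unsigned long *t = real_vmalloc_addr(vmalloc_table);

	return t ? t[idx] : 0;
}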