This article collects typical usage examples of the pte_page function from the Linux kernel's C/C++ code. If you are wondering exactly what pte_page does and how it is called in practice, the curated examples below may help.
Fifteen code examples of pte_page are shown, sorted by popularity by default.
Example 1: put_long
/*
* This routine puts a long into any process space by following the page
* tables. NOTE! You should check that the long isn't on a page boundary,
* and that it is in the task area before calling this: this routine does
* no checking.
*
* Now keeps R/W state of page so that a text page stays readonly
* even if a debugger scribbles breakpoints into it. -M.U-
*/
static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr,
unsigned long data)
{
pgd_t *pgdir;
pmd_t *pgmiddle;
pte_t *pgtable;
unsigned long page;
repeat:
pgdir = pgd_offset(vma->vm_mm, addr);
if (!pgd_present(*pgdir)) {
do_no_page(tsk, vma, addr, 1);
goto repeat;
}
if (pgd_bad(*pgdir)) {
printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
pgd_clear(pgdir);
return;
}
pgmiddle = pmd_offset(pgdir, addr);
if (pmd_none(*pgmiddle)) {
do_no_page(tsk, vma, addr, 1);
goto repeat;
}
if (pmd_bad(*pgmiddle)) {
printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
pmd_clear(pgmiddle);
return;
}
pgtable = pte_offset(pgmiddle, addr);
if (!pte_present(*pgtable)) {
do_no_page(tsk, vma, addr, 1);
goto repeat;
}
page = pte_page(*pgtable);
if (!pte_write(*pgtable)) {
do_wp_page(tsk, vma, addr, 1);
goto repeat;
}
/* this is a hack for non-kernel-mapped video buffers and similar */
if (page < high_memory)
*(unsigned long *) (page + (addr & ~PAGE_MASK)) = data;
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
/* this should also re-instate whatever read-only mode there was before */
set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
flush_tlb();
}
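For context, here is a minimal sketch of how a ptrace PTRACE_POKEDATA path might drive put_long. The helper name write_long_checked, the find_vma() lookup, and the refusal to split cross-page writes are illustrative assumptions, not verbatim kernel source.

/* Hypothetical caller: write one long into another task's address
 * space. put_long() itself does no checking, so validate the VMA
 * and reject writes that would straddle a page boundary. */
static int write_long_checked(struct task_struct *tsk, unsigned long addr,
			      unsigned long data)
{
	struct vm_area_struct *vma = find_vma(tsk->mm, addr);

	if (!vma || addr < vma->vm_start)
		return -EIO;
	if ((addr & ~PAGE_MASK) > PAGE_SIZE - sizeof(long))
		return -EIO;	/* crosses a page; a real caller would split it */
	put_long(tsk, vma, addr, data);
	return 0;
}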
Example 2: flush_icache_page_range
/**
* flush_icache_page_range - Flush dcache and invalidate icache for part of a
* single page
* @start: The starting virtual address of the page part.
* @end: The ending virtual address of the page part.
*
* Invalidate the icache for part of a single page, as determined by the
* virtual addresses given. The page must be in the paged area. The dcache is
* not flushed as the cache must be in write-through mode to get here.
*/
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
unsigned long addr, size, off;
struct page *page;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ppte, pte;
/* work out how much of the page to flush */
off = start & ~PAGE_MASK;
size = end - start;
/* get the physical address the page is mapped to from the page
* tables */
pgd = pgd_offset(current->mm, start);
if (!pgd || !pgd_val(*pgd))
return;
pud = pud_offset(pgd, start);
if (!pud || !pud_val(*pud))
return;
pmd = pmd_offset(pud, start);
if (!pmd || !pmd_val(*pmd))
return;
ppte = pte_offset_map(pmd, start);
if (!ppte)
return;
pte = *ppte;
pte_unmap(ppte);
if (pte_none(pte))
return;
page = pte_page(pte);
if (!page)
return;
addr = page_to_phys(page);
/* invalidate the icache coverage on that region */
mn10300_local_icache_inv_range2(addr + off, size);
smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
}
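As a usage sketch, a caller flushing an arbitrary virtual range could hand the helper above one page-sized piece at a time, since flush_icache_page_range() resolves exactly one page mapping. The wrapper below is an assumption for illustration, not mn10300 source.

/* Hypothetical wrapper: invalidate the icache over [start, end) by
 * walking it page by page through flush_icache_page_range(). */
static void flush_icache_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long next;

	while (start < end) {
		next = (start | ~PAGE_MASK) + 1;	/* next page boundary */
		if (next > end)
			next = end;
		flush_icache_page_range(start, next);
		start = next;
	}
}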
Example 3: gup_pte_range
/*
* The performance critical leaf functions are made noinline otherwise gcc
* inlines everything into a single function which results in too much
* register pressure.
*/
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
unsigned long mask, result;
pte_t *ptep;
if (tlb_type == hypervisor) {
result = _PAGE_PRESENT_4V|_PAGE_P_4V;
if (write)
result |= _PAGE_WRITE_4V;
} else {
result = _PAGE_PRESENT_4U|_PAGE_P_4U;
if (write)
result |= _PAGE_WRITE_4U;
}
mask = result | _PAGE_SPECIAL;
ptep = pte_offset_kernel(&pmd, addr);
do {
struct page *page, *head;
pte_t pte = *ptep;
if ((pte_val(pte) & mask) != result)
return 0;
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
/* The hugepage case is simplified on sparc64 because
* we encode the sub-page pfn offsets into the
* hugepage PTEs. We could optimize this in the future
* to use page_cache_add_speculative() for the hugepage case.
*/
page = pte_page(pte);
head = compound_head(page);
if (!page_cache_get_speculative(head))
return 0;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
put_page(head);
return 0;
}
pages[*nr] = page;
(*nr)++;
} while (ptep++, addr += PAGE_SIZE, addr != end);
return 1;
}
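For orientation, a condensed sketch of the pmd-level walker that would call gup_pte_range() for every mapped pmd in the range; the real sparc64 walker also has a huge-pmd branch, which is omitted here.

/* Simplified pmd-level loop feeding gup_pte_range() (sketch). */
static int gup_pmd_range_sketch(pud_t pud, unsigned long addr,
				unsigned long end, int write,
				struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		/* huge-pmd case omitted in this sketch */
		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);
	return 1;
}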
Example 4: forget_pte
static inline void forget_pte(pte_t page)
{
if (pte_none(page))
return;
if (pte_present(page)) {
struct page *ptpage = pte_page(page);
if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
return;
/*
* free_page() used to be able to clear swap cache
* entries. We may now have to do it manually.
*/
free_page_and_swap_cache(ptpage);
return;
}
swap_free(pte_to_swp_entry(page));
}
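A usage sketch for the 2.4-era idiom: an unmap loop clears each PTE and passes the old value to forget_pte(), which drops either the page reference or the swap reference. The loop below is illustrative, not a copy of zap_pte_range().

/* Hypothetical teardown loop: detach 'count' contiguous PTEs and
 * let forget_pte() release whatever each one referenced. */
static void teardown_ptes_sketch(pte_t *ptep, int count)
{
	int i;

	for (i = 0; i < count; i++, ptep++) {
		pte_t pte = *ptep;

		if (pte_none(pte))
			continue;
		pte_clear(ptep);	/* detach before freeing */
		forget_pte(pte);	/* page ref or swap entry */
	}
}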
Example 5: get_gate_page
static int get_gate_page(struct mm_struct *mm, unsigned long address,
unsigned int gup_flags, struct vm_area_struct **vma,
struct page **page)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int ret = -EFAULT;
/* user gate pages are read-only */
if (gup_flags & FOLL_WRITE)
return -EFAULT;
if (address > TASK_SIZE)
pgd = pgd_offset_k(address);
else
pgd = pgd_offset_gate(mm, address);
BUG_ON(pgd_none(*pgd));
p4d = p4d_offset(pgd, address);
BUG_ON(p4d_none(*p4d));
pud = pud_offset(p4d, address);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
return -EFAULT;
VM_BUG_ON(pmd_trans_huge(*pmd));
pte = pte_offset_map(pmd, address);
if (pte_none(*pte))
goto unmap;
*vma = get_gate_vma(mm);
if (!page)
goto out;
*page = vm_normal_page(*vma, address, *pte);
if (!*page) {
if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
goto unmap;
*page = pte_page(*pte);
}
get_page(*page);
out:
ret = 0;
unmap:
pte_unmap(pte);
return ret;
}
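A hedged sketch of the fallback a __get_user_pages()-style loop takes for addresses with no VMA: if the address lies in the gate area (for example the x86 vsyscall page), it is resolved through get_gate_page(). The single-address wrapper below is illustrative.

/* Hypothetical wrapper: pin one page at 'addr', using the gate-area
 * fallback when no VMA covers the address. */
static int pin_one_page_sketch(struct mm_struct *mm, unsigned long addr,
			       unsigned int gup_flags, struct page **page)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || addr < vma->vm_start) {
		if (in_gate_area(mm, addr))
			return get_gate_page(mm, addr, gup_flags, &vma, page);
		return -EFAULT;
	}
	/* the normal follow_page path is omitted in this sketch */
	return -EFAULT;
}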
Example 6: free_pte
/*
* Return indicates whether a page was freed so caller can adjust rss
*/
static inline int free_pte(pte_t pte)
{
if (pte_present(pte)) {
struct page *page = pte_page(pte);
if ((!VALID_PAGE(page)) || PageReserved(page))
return 0;
/*
* free_page() used to be able to clear swap cache
* entries. We may now have to do it manually.
*/
if (pte_dirty(pte) && page->mapping)
set_page_dirty(page);
free_page_and_swap_cache(page);
return 1;
}
swap_free(pte_to_swp_entry(pte));
return 0;
}
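The return value lets the caller keep the mm's resident-set counter accurate. A minimal sketch, assuming a 2.4-style mm_struct with an rss field:

/* Hypothetical caller: clear one PTE and account the freed page. */
static void unmap_one_pte_sketch(struct mm_struct *mm, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_none(pte))
		return;
	pte_clear(ptep);
	mm->rss -= free_pte(pte);	/* 1 iff a present page was freed */
}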
Example 7: gup_pte_range
/*
* The performance critical leaf functions are made noinline otherwise gcc
* inlines everything into a single function which results in too much
* register pressure.
*/
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
u64 mask, result;
pte_t *ptep;
#ifdef CONFIG_X2TLB
result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
if (write)
result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
#elif defined(CONFIG_SUPERH64)
result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
if (write)
result |= _PAGE_WRITE;
#else
result = _PAGE_PRESENT | _PAGE_USER;
if (write)
result |= _PAGE_RW;
#endif
mask = result | _PAGE_SPECIAL;
ptep = pte_offset_map(&pmd, addr);
do {
pte_t pte = gup_get_pte(ptep);
struct page *page;
if ((pte_val(pte) & mask) != result) {
pte_unmap(ptep);
return 0;
}
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
get_page(page);
__flush_anon_page(page, addr);
flush_dcache_page(page);
pages[*nr] = page;
(*nr)++;
} while (ptep++, addr += PAGE_SIZE, addr != end);
pte_unmap(ptep - 1);
return 1;
}
Example 8: VMALLOC_VMADDR
struct page *kmem_vm_nopage(struct vm_area_struct *vma, unsigned long address, int write)
{
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long kaddr;
pgd_t *pgd;
pmd_t *pmd;
pte_t *ptep, pte;
struct page *page = NULL;
/* address is user VA; convert to kernel VA of desired page */
kaddr = (address - vma->vm_start) + offset;
kaddr = VMALLOC_VMADDR(kaddr);
spin_lock(&init_mm.page_table_lock);
/* Lookup page structure for kernel VA */
pgd = pgd_offset(&init_mm, kaddr);
if (pgd_none(*pgd) || pgd_bad(*pgd))
goto out;
pmd = pmd_offset(pgd, kaddr);
if (pmd_none(*pmd) || pmd_bad(*pmd))
goto out;
ptep = pte_offset(pmd, kaddr);
if (!ptep)
goto out;
pte = *ptep;
if (!pte_present(pte))
goto out;
if (write && !pte_write(pte))
goto out;
page = pte_page(pte);
if (!VALID_PAGE(page)) {
page = NULL;
goto out;
}
/* Increment reference count on page */
get_page(page);
out:
spin_unlock(&init_mm.page_table_lock);
return page;
}
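This handler is hooked up through a 2.4-style vm_operations_struct, so faults on the mapping are resolved lazily rather than by prefilling page tables. A sketch of the wiring, modeled on the /dev/kmem mmap path (the function name mmap_kmem_sketch is illustrative):

/* 2.4-era wiring: page faults on the region call kmem_vm_nopage(). */
static struct vm_operations_struct kmem_vm_ops = {
	nopage:	kmem_vm_nopage,	/* old GNU designated-initializer syntax */
};

static int mmap_kmem_sketch(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kmem_vm_ops;
	return 0;
}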
Example 9: spin_lock
/**
* get_struct_page - Gets a struct page for a particular address
* @addr: the address of the page we need
*
* Two versions of this function have to be provided for working
* between the 2.4 and 2.5 kernels. Rather than littering the
* function with #defines, there are just two separate copies.
* Look at the one that is relevant to the kernel you're using
*/
struct page *get_struct_page(unsigned long addr)
{
struct mm_struct *mm;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep, pte;
unsigned long pfn;
struct page *page=NULL;
mm = current->mm;
/* Is this possible? */
if (!mm) return NULL;
spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, addr);
if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
pud = pud_offset(pgd, addr);
if (!pud_none(*pud) && !pud_bad(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
/*
* disable preemption because of potential kmap().
* page_table_lock should already have disabled
* preemption. But, be paranoid.
*/
preempt_disable();
ptep = pte_offset_map(pmd, addr);
pte = *ptep;
pte_unmap(ptep);
preempt_enable();
if (pte_present(pte)) {
pfn = pte_pfn(pte);
if (pfn_valid(pfn))
page = pte_page(pte);
}
}
}
}
spin_unlock(&mm->page_table_lock);
return page;
}
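A brief usage sketch: resolving a current-task user virtual address to a physical address via the struct page. The helper name and the page_to_phys() arithmetic are illustrative assumptions.

/* Hypothetical use: user VA -> physical address, or 0 if the page
 * is not resident. */
static unsigned long user_va_to_phys_sketch(unsigned long addr)
{
	struct page *page = get_struct_page(addr);

	if (!page)
		return 0;
	return page_to_phys(page) | (addr & ~PAGE_MASK);
}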
Example 10: hash_page_do_lazy_icache
/*
* Called by asm hashtable.S for doing lazy icache flush
*/
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
struct page *page;
if (!pfn_valid(pte_pfn(pte)))
return pp;
page = pte_page(pte);
/* page is dirty */
if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
if (trap == 0x400) {
__flush_dcache_icache(page_address(page));
set_bit(PG_arch_1, &page->flags);
} else
pp |= HPTE_R_N;
}
return pp;
}
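PG_arch_1 acts as an "icache clean" mark for the page; the write side must clear it whenever the page contents may have changed, so the next execute fault triggers a fresh flush. A counterpart sketch, modeled on powerpc's flush_dcache_page():

/* Counterpart sketch: invalidate the 'icache clean' mark when the
 * page is dirtied through the dcache. */
void flush_dcache_page(struct page *page)
{
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}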
Example 11: update_mmu_cache
void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte)
{
unsigned long flags;
unsigned long pteval;
unsigned long vpn;
/* Ptrace may call this routine. */
if (vma && current->active_mm != vma->vm_mm)
return;
#if defined(CONFIG_SH7705_CACHE_32KB)
{
struct page *page = pte_page(pte);
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
__flush_wback_region((void *)P1SEGADDR(phys),
PAGE_SIZE);
__set_bit(PG_mapped, &page->flags);
}
}
#endif
local_irq_save(flags);
/* Set PTEH register */
vpn = (address & MMU_VPN_MASK) | get_asid();
ctrl_outl(vpn, MMU_PTEH);
pteval = pte_val(pte);
/* Set PTEL register */
pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
/* conveniently, we want all the software flags to be 0 anyway */
ctrl_outl(pteval, MMU_PTEL);
/* Load the TLB */
asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
local_irq_restore(flags);
}
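update_mmu_cache() is called by generic mm code right after a PTE has been installed, letting the architecture preload the TLB for the faulting address. A condensed sketch of that call-site pattern, with the fault-handling details elided and the helper name invented for illustration:

/* Hypothetical fault-path fragment: install the PTE, then let the
 * arch hook preload the TLB entry for the faulting address. */
static void install_pte_sketch(struct vm_area_struct *vma,
			       unsigned long address,
			       pte_t *ptep, pte_t entry)
{
	set_pte(ptep, entry);
	flush_tlb_page(vma, address);
	update_mmu_cache(vma, address, entry);
}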
Example 12: gup_pte_range
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
unsigned long mask, result;
pte_t *ptep;
if (tlb_type == hypervisor) {
result = _PAGE_PRESENT_4V|_PAGE_P_4V;
if (write)
result |= _PAGE_WRITE_4V;
} else {
result = _PAGE_PRESENT_4U|_PAGE_P_4U;
if (write)
result |= _PAGE_WRITE_4U;
}
mask = result | _PAGE_SPECIAL;
ptep = pte_offset_kernel(&pmd, addr);
do {
struct page *page, *head;
pte_t pte = *ptep;
if ((pte_val(pte) & mask) != result)
return 0;
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
head = compound_head(page);
if (!page_cache_get_speculative(head))
return 0;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
put_page(head);
return 0;
}
if (head != page)
get_huge_page_tail(page);
pages[*nr] = page;
(*nr)++;
} while (ptep++, addr += PAGE_SIZE, addr != end);
return 1;
}
Example 13: verify_pages
/* Ensure all existing pages follow the policy. */
static int
verify_pages(struct mm_struct *mm,
unsigned long addr, unsigned long end, unsigned long *nodes)
{
while (addr < end) {
struct page *p;
pte_t *pte;
pmd_t *pmd;
pud_t *pud;
pgd_t *pgd;
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd)) {
unsigned long next = (addr + PGDIR_SIZE) & PGDIR_MASK;
if (next > addr)
break;
addr = next;
continue;
}
pud = pud_offset(pgd, addr);
if (pud_none(*pud)) {
addr = (addr + PUD_SIZE) & PUD_MASK;
continue;
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
addr = (addr + PMD_SIZE) & PMD_MASK;
continue;
}
p = NULL;
pte = pte_offset_map(pmd, addr);
if (pte_present(*pte))
p = pte_page(*pte);
pte_unmap(pte);
if (p) {
unsigned nid = page_to_nid(p);
if (!test_bit(nid, nodes))
return -EIO;
}
addr += PAGE_SIZE;
}
return 0;
}
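A hedged sketch of how an mbind()-style path might run verify_pages() over every VMA that overlaps the target range; locking and policy setup are omitted, and the helper name is invented.

/* Hypothetical caller: check each VMA overlapping [start, end)
 * against the allowed node mask before applying a policy. */
static int check_range_sketch(struct mm_struct *mm, unsigned long start,
			      unsigned long end, unsigned long *nodes)
{
	struct vm_area_struct *vma;
	int err;

	for (vma = find_vma(mm, start); vma && vma->vm_start < end;
	     vma = vma->vm_next) {
		unsigned long a = max(start, vma->vm_start);
		unsigned long b = min(end, vma->vm_end);

		err = verify_pages(mm, a, b, nodes);
		if (err)
			return err;
	}
	return 0;
}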
Example 14: get_long
/*
* This routine gets a long from any process space by following the page
* tables. NOTE! You should check that the long isn't on a page boundary,
* and that it is in the task area before calling this: this routine does
* no checking.
*
*/
static unsigned long get_long(struct task_struct * tsk,
struct vm_area_struct * vma, unsigned long addr)
{
pgd_t * pgdir;
pmd_t * pgmiddle;
pte_t * pgtable;
unsigned long page;
repeat:
pgdir = pgd_offset(vma->vm_mm, addr);
if (pgd_none(*pgdir)) {
do_no_page(tsk, vma, addr, 0);
goto repeat;
}
if (pgd_bad(*pgdir)) {
printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
pgd_clear(pgdir);
return 0;
}
pgmiddle = pmd_offset(pgdir,addr);
if (pmd_none(*pgmiddle)) {
do_no_page(tsk, vma, addr, 0);
goto repeat;
}
if (pmd_bad(*pgmiddle)) {
printk("ptrace: bad page directory %08lx\n",
pmd_val(*pgmiddle));
pmd_clear(pgmiddle);
return 0;
}
pgtable = pte_offset(pgmiddle, addr);
if (!pte_present(*pgtable)) {
do_no_page(tsk, vma, addr, 0);
goto repeat;
}
page = pte_page(*pgtable);
/* this is a hack for non-kernel-mapped video buffers and similar */
if (page >= high_memory)
return 0;
page += addr & ~PAGE_MASK;
return *(unsigned long *) page;
}
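get_long() is the read half of the put_long() shown in Example 1. A hypothetical debugger fragment combining the two to plant an x86 int3 breakpoint, assuming an aligned, in-range address:

/* Hypothetical: plant an int3 (0xcc) in the low byte at 'addr' and
 * return the original long so it can be restored later. */
static unsigned long plant_breakpoint_sketch(struct task_struct *tsk,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	unsigned long orig = get_long(tsk, vma, addr);

	put_long(tsk, vma, addr, (orig & ~0xffUL) | 0xcc);
	return orig;
}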
Example 15: uvirt_to_kva
static inline unsigned long uvirt_to_kva(pgd_t *pgd, unsigned long adr)
{
unsigned long ret = 0UL;
pmd_t *pmd;
pte_t *ptep, pte;
if (!pgd_none(*pgd)) {
pmd = pmd_offset(pgd, adr);
if (!pmd_none(*pmd)) {
ptep = pte_offset_kernel(pmd, adr);
pte = *ptep;
if(pte_present(pte)) {
ret = (unsigned long) page_address(pte_page(pte));
ret |= (adr & (PAGE_SIZE - 1));
}
}
}
// printk(KERN_INFO "uv2kva(%lx-->%lx) \n", adr, ret);
return ret;
}
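In the old video-capture drivers this helper typically fed a bus-address translation so user buffers could be handed to a DMA engine. A sketch of that idiom, modeled on the uvirt_to_bus() companion that usually sits next to it:

/* Companion sketch: user VA -> kernel VA -> bus address (pre-
 * get_user_pages era; returns 0 if the page is not resident). */
static inline unsigned long uvirt_to_bus_sketch(unsigned long adr)
{
	unsigned long kva;

	kva = uvirt_to_kva(pgd_offset(current->mm, adr), adr);
	return kva ? virt_to_bus((void *)kva) : 0UL;
}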