This article collects typical usage examples of the pte_val function as found in C/C++ (Linux kernel) code. If you are wondering what pte_val does, how to call it, or what real-world uses look like, the hand-picked examples below should help.
The 15 code examples of pte_val shown below are ordered by popularity by default.
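Before diving into the examples: pte_t is an opaque wrapper type for a hardware page-table entry, and pte_val() unwraps it to the raw bits (with __pte() as the usual inverse). The exact definition is architecture specific; the following is only a minimal sketch of the common pattern, not any particular architecture's header.
/* Minimal sketch of the usual pattern (not any real architecture's header):
 * wrapping the raw PTE word in a struct keeps it from being mixed up with
 * plain integers, and pte_val()/__pte() convert between the two views. */
typedef struct { unsigned long pte; } pte_t;

#define pte_val(x) ((x).pte)           /* pte_t -> raw bits */
#define __pte(x)   ((pte_t) { (x) })   /* raw bits -> pte_t */

/* The read-modify-write idiom used throughout the examples below;
 * "flag" stands in for an architecture-specific _PAGE_* bit. */
static inline pte_t pte_set_flag(pte_t pte, unsigned long flag)
{
	return __pte(pte_val(pte) | flag);
}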
Example 1: map_page
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
pmd_t *pd;
pte_t *pg;
int err = -ENOMEM;
/* Use upper 10 bits of VA to index the first level map */
pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
/* Use middle 10 bits of VA to index the second-level map */
pg = pte_alloc_kernel(pd, va);
if (pg != 0) {
err = 0;
/* The PTE should never be already set nor present in the
* hash table
*/
BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
flags);
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
}
return err;
}
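A hedged usage sketch, not taken from the example's source: a typical caller passes page-protection bits (here via the standard pgprot_val(PAGE_KERNEL)) as the flags argument; the wrapper name below is invented for illustration.
/* Hypothetical caller (sketch only): map a single kernel page at va to the
 * physical address pa with normal kernel page protections. */
static int map_one_kernel_page(unsigned long va, phys_addr_t pa)
{
	return map_page(va, pa, pgprot_val(PAGE_KERNEL));
}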
Example 2: __iounmap
/*
 * __iounmap unmaps nearly everything, so be careful.
 * Currently it doesn't free pointer/page tables anymore, but that
 * wasn't used anyway and might be added later.
 */
void __iounmap(void *addr, unsigned long size)
{
unsigned long virtaddr = (unsigned long)addr;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
while ((long)size > 0) {
pgd_dir = pgd_offset_k(virtaddr);
if (pgd_bad(*pgd_dir)) {
printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
pgd_clear(pgd_dir);
return;
}
pmd_dir = pmd_offset(pgd_dir, virtaddr);
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
pmd_dir->pmd[pmd_off] = 0;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
continue;
}
}
if (pmd_bad(*pmd_dir)) {
printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
pmd_clear(pmd_dir);
return;
}
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
pte_val(*pte_dir) = 0;
virtaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
flush_tlb_all();
}
Example 3: __flush_hash_table_range
/**
* __flush_hash_table_range - Flush all HPTEs for a given address range
* from the hash table (and the TLB). But keeps
* the linux PTEs intact.
*
* @mm : mm_struct of the target address space (generally init_mm)
* @start : starting address
* @end : ending address (not included in the flush)
*
* This function is mostly to be used by some IO hotplug code in order
* to remove all hash entries from a given address range used to map IO
* space on a removed PCI-PCI bridge without tearing down the full mapping
* since 64K pages may overlap with other bridges when using 64K pages
* with 4K HW pages on IO space.
*
* Because of that usage pattern, it is implemented for small size rather
* than speed.
*/
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
bool is_thp;
int hugepage_shift;
unsigned long flags;
start = _ALIGN_DOWN(start, PAGE_SIZE);
end = _ALIGN_UP(end, PAGE_SIZE);
BUG_ON(!mm->pgd);
/* Note: Normally, we should only ever use a batch within a
* PTE locked section. This violates the rule, but will work
* since we don't actually modify the PTEs, we just flush the
* hash while leaving the PTEs intact (including their reference
* to being hashed). This is not the most performance oriented
* way to do things but is fine for our needs here.
*/
local_irq_save(flags);
arch_enter_lazy_mmu_mode();
for (; start < end; start += PAGE_SIZE) {
pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, &is_thp,
&hugepage_shift);
unsigned long pte;
if (ptep == NULL)
continue;
pte = pte_val(*ptep);
if (is_thp)
trace_hugepage_invalidate(start, pte);
if (!(pte & H_PAGE_HASHPTE))
continue;
if (unlikely(is_thp))
hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
else
hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
}
arch_leave_lazy_mmu_mode();
local_irq_restore(flags);
}
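A hedged usage sketch matching the IO-hotplug scenario described in the comment above (the wrapper name and range variables are assumptions):
/* Hypothetical caller (sketch only): after unmapping a hot-removed MMIO
 * window, drop only the hash-table entries covering it; the Linux PTEs in
 * init_mm are left untouched, exactly as the function's comment describes. */
static void flush_removed_io_window(unsigned long io_start, unsigned long io_size)
{
	__flush_hash_table_range(&init_mm, io_start, io_start + io_size);
}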
Example 4: get_pte_for_vaddr
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
struct task_struct *task = get_current();
struct mm_struct *mm = task->mm;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
if (!mm)
mm = task->active_mm;
pgd = pgd_offset(mm, vaddr);
if (pgd_none_or_clear_bad(pgd))
return 0;
pmd = pmd_offset(pgd, vaddr);
if (pmd_none_or_clear_bad(pmd))
return 0;
pte = pte_offset_map(pmd, vaddr);
if (!pte)
return 0;
return pte_val(*pte);
}
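Since the function returns 0 when no mapping exists and the raw pte_val() bits otherwise, here is a trivial hedged sketch of how a caller might consume the result (the helper name is hypothetical):
/* Hypothetical helper (sketch only): non-zero if vaddr currently has a PTE. */
static int vaddr_has_pte(unsigned vaddr)
{
	return get_pte_for_vaddr(vaddr) != 0;
}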
Example 5: page_set_nocache
static int
page_set_nocache(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
unsigned long cl;
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
pte_val(*pte) |= _PAGE_CI;
/*
* Flush the page out of the TLB so that the new page flags get
* picked up next time there's an access
*/
flush_tlb_page(NULL, addr);
/* Flush page out of dcache */
for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
mtspr(SPR_DCBFR, cl);
return 0;
}
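page_set_nocache() is written as an mm_walk pte_entry callback; a hedged sketch of how such a callback is typically driven over a kernel virtual range with the classic walk_page_range()/struct mm_walk interface (the wrapper name is an assumption):
/* Hypothetical driver for the callback above (sketch only, older mm_walk API):
 * apply _PAGE_CI to every PTE in [va, va + size) of the kernel address space. */
static int mark_range_uncached(unsigned long va, unsigned long size)
{
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm,
	};

	return walk_page_range(va, va + size, &walk);
}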
Example 6: dvma_page
inline unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
{
unsigned long pte;
unsigned long j;
pte_t ptep;
j = *(volatile unsigned long *)kaddr;
*(volatile unsigned long *)kaddr = j;
ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL);
pte = pte_val(ptep);
// printk("dvma_remap: addr %lx -> %lx pte %08lx len %x\n",
// kaddr, vaddr, pte, len);
if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
sun3_put_pte(vaddr, pte);
ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte;
}
return (vaddr + (kaddr & ~PAGE_MASK));
}
Example 7: sun4u_huge_tte_to_shift
static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
unsigned int shift;
switch (tte_szbits) {
case _PAGE_SZ256MB_4U:
shift = HPAGE_256MB_SHIFT;
break;
case _PAGE_SZ4MB_4U:
shift = REAL_HPAGE_SHIFT;
break;
case _PAGE_SZ64K_4U:
shift = HPAGE_64K_SHIFT;
break;
default:
shift = PAGE_SHIFT;
break;
}
return shift;
}
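A small hedged follow-on (the helper name is hypothetical): the shift translates directly into a mapping size in bytes.
/* Hypothetical helper (sketch only): size in bytes of the huge page that the
 * TTE describes, derived from the shift computed above. */
static unsigned long sun4u_huge_tte_to_size(pte_t entry)
{
	return 1UL << sun4u_huge_tte_to_shift(entry);
}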
Example 8: read_user_stack_slow
/*
* On 64-bit we don't want to invoke hash_page on user addresses from
* interrupt context, so if the access faults, we read the page tables
* to find which page (if any) is mapped and access it directly.
*/
static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
int ret = -EFAULT;
pgd_t *pgdir;
pte_t *ptep, pte;
unsigned shift;
unsigned long addr = (unsigned long) ptr;
unsigned long offset;
unsigned long pfn, flags;
void *kaddr;
pgdir = current->mm->pgd;
if (!pgdir)
return -EFAULT;
local_irq_save(flags);
ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
if (!ptep)
goto err_out;
if (!shift)
shift = PAGE_SHIFT;
/* align address to page boundary */
offset = addr & ((1UL << shift) - 1);
pte = READ_ONCE(*ptep);
if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
goto err_out;
pfn = pte_pfn(pte);
if (!page_is_ram(pfn))
goto err_out;
/* no highmem to worry about here */
kaddr = pfn_to_kaddr(pfn);
memcpy(buf, kaddr + offset, nb);
ret = 0;
err_out:
local_irq_restore(flags);
return ret;
}
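A hedged sketch of the fast-path/slow-path pattern this function supports in the perf callchain code (details such as bounds checks are omitted; treat the structure as an assumption):
/* Hypothetical caller (sketch only): try an in-atomic user read first and
 * fall back to the page-table walk above if it faults. */
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	return read_user_stack_slow(ptr, ret, sizeof(*ret));
}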
Example 9: __pmd_to_pte
static inline pte_t __pmd_to_pte(pmd_t pmd)
{
pte_t pte;
/*
* Convert encoding        pmd bits        pte bits
*                         ..R...I...y.    .IR...wrdytp
* empty                   ..0...1...0. -> .10...000000
* prot-none, old          ..0...1...1. -> .10...001001
* prot-none, young        ..1...1...1. -> .10...001101
* read-only, old          ..1...1...0. -> .11...011001
* read-only, young        ..1...0...1. -> .01...011101
* read-write, old         ..0...1...0. -> .10...111001
* read-write, young       ..0...0...1. -> .00...111101
* Huge ptes are dirty by definition
*/
if (pmd_present(pmd)) {
pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
(pmd_val(pmd) & PAGE_MASK);
if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
pte_val(pte) |= _PAGE_INVALID;
if (pmd_prot_none(pmd)) {
if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
pte_val(pte) |= _PAGE_YOUNG;
} else {
pte_val(pte) |= _PAGE_READ;
if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
pte_val(pte) |= _PAGE_PROTECT;
else
pte_val(pte) |= _PAGE_WRITE;
if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
pte_val(pte) |= _PAGE_YOUNG;
}
} else
pte_val(pte) = _PAGE_INVALID;
return pte;
}
Example 10: arch_prepare_hugepage
int arch_prepare_hugepage(struct page *page)
{
unsigned long addr = page_to_phys(page);
pte_t pte;
pte_t *ptep;
int i;
if (MACHINE_HAS_HPAGE)
return 0;
ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
if (!ptep)
return -ENOMEM;
pte = mk_pte(page, PAGE_RW);
for (i = 0; i < PTRS_PER_PTE; i++) {
set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
pte_val(pte) += PAGE_SIZE;
}
page[1].index = (unsigned long) ptep;
return 0;
}
Example 11: read_user_stack_slow
/*
* On 64-bit we don't want to invoke hash_page on user addresses from
* interrupt context, so if the access faults, we read the page tables
* to find which page (if any) is mapped and access it directly.
*/
static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
{
pgd_t *pgdir;
pte_t *ptep, pte;
int pagesize;
unsigned long addr = (unsigned long) ptr;
unsigned long offset;
unsigned long pfn;
void *kaddr;
pgdir = current->mm->pgd;
if (!pgdir)
return -EFAULT;
pagesize = get_slice_psize(current->mm, addr);
/* align address to page boundary */
offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1);
addr -= offset;
if (is_huge_psize(pagesize))
ptep = huge_pte_offset(current->mm, addr);
else
ptep = find_linux_pte(pgdir, addr);
if (ptep == NULL)
return -EFAULT;
pte = *ptep;
if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
return -EFAULT;
pfn = pte_pfn(pte);
if (!page_is_ram(pfn))
return -EFAULT;
/* no highmem to worry about here */
kaddr = pfn_to_kaddr(pfn);
memcpy(ret, kaddr + offset, nb);
return 0;
}
Example 12: update_dtlb
static void update_dtlb(unsigned long address, pte_t pte)
{
u32 tlbehi;
u32 mmucr;
/*
* We're not changing the ASID here, so no need to flush the
* pipeline.
*/
tlbehi = sysreg_read(TLBEHI);
tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
tlbehi |= address & MMU_VPN_MASK;
tlbehi |= SYSREG_BIT(TLBEHI_V);
sysreg_write(TLBEHI, tlbehi);
/* Does this mapping already exist? */
__builtin_tlbs();
mmucr = sysreg_read(MMUCR);
if (mmucr & SYSREG_BIT(MMUCR_N)) {
/* Not found -- pick a not-recently-accessed entry */
unsigned int rp;
u32 tlbar = sysreg_read(TLBARLO);
rp = 32 - fls(tlbar);
if (rp == 32) {
rp = 0;
sysreg_write(TLBARLO, -1L);
}
mmucr = SYSREG_BFINS(DRP, rp, mmucr);
sysreg_write(MMUCR, mmucr);
}
sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);
/* Let's go */
__builtin_tlbw();
}
Example 13: local_flush_tlb_range
/*
* For each address in the range, find the pte for the address
* and check _PAGE_HASHPTE bit; if it is set, find and destroy
* the corresponding HPTE.
*/
void
local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
pmd_t *pmd;
pte_t *pte;
unsigned long pmd_end;
unsigned int ctx = mm->context;
if (Hash == 0) {
_tlbia();
return;
}
start &= PAGE_MASK;
if (start >= end)
return;
pmd = pmd_offset(pgd_offset(mm, start), start);
do {
pmd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
if (!pmd_none(*pmd)) {
if (!pmd_end || pmd_end > end)
pmd_end = end;
pte = pte_offset(pmd, start);
do {
if ((pte_val(*pte) & _PAGE_HASHPTE) != 0)
flush_hash_page(ctx, start, pte);
start += PAGE_SIZE;
++pte;
} while (start && start < pmd_end);
} else {
start = pmd_end;
}
++pmd;
} while (start && start < end);
#ifdef CONFIG_SMP
smp_send_tlb_invalidate(0);
#endif
}
Example 14: local_flush_tlb_page
void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
struct mm_struct *mm;
pmd_t *pmd;
pte_t *pte;
if (Hash == 0) {
_tlbie(vmaddr);
return;
}
mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
if (!pmd_none(*pmd)) {
pte = pte_offset(pmd, vmaddr);
if (pte_val(*pte) & _PAGE_HASHPTE)
flush_hash_page(mm->context, vmaddr, pte);
}
#ifdef CONFIG_SMP
smp_send_tlb_invalidate(0);
#endif
}
Example 15: set_pte_at
/*
* set_pte stores a linux PTE into the linux page table.
*/
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pte)
{
/*
* When handling numa faults, we already have the pte marked
* _PAGE_PRESENT, but we can be sure that it is not in hpte.
* Hence we can use set_pte_at for them.
*/
VM_WARN_ON(pte_present(*ptep) && !pte_protnone(*ptep));
/* Add the pte bit when trying to set a pte */
pte = __pte(pte_val(pte) | _PAGE_PTE);
/* Note: mm->context.id might not yet have been assigned as
* this context might not have been activated yet when this
* is called.
*/
pte = set_pte_filter(pte);
/* Perform the setting of the PTE */
__set_pte_at(mm, addr, ptep, pte, 0);
}