本文整理汇总了C++中pmd_offset函数的典型用法代码示例。如果您正苦于以下问题:C++ pmd_offset函数的具体用法?C++ pmd_offset怎么用?C++ pmd_offset使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pmd_offset函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: verify_pages
/*
 * Ensure all existing pages in [addr, end) follow the policy: every
 * present page must live on a node whose bit is set in @nodes.
 * Returns 0 when all pages conform, -EIO on the first violation.
 */
static int
verify_pages(struct mm_struct *mm,
	unsigned long addr, unsigned long end, unsigned long *nodes)
{
	while (addr < end) {
		struct page *p;
		pte_t *pte;
		pmd_t *pmd;
		pud_t *pud;
		pgd_t *pgd;

		pgd = pgd_offset(mm, addr);
		if (pgd_none(*pgd)) {
			/* Whole PGD range is unmapped: skip to the next
			 * PGDIR boundary.  Bail out only if the addition
			 * wrapped around (next <= addr); the original test
			 * was inverted and broke out on the normal case. */
			unsigned long next = (addr + PGDIR_SIZE) & PGDIR_MASK;
			if (next <= addr)
				break;
			addr = next;
			continue;
		}
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			/* Skip the unmapped PUD range. */
			addr = (addr + PUD_SIZE) & PUD_MASK;
			continue;
		}
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			/* Skip the unmapped PMD range. */
			addr = (addr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		p = NULL;
		pte = pte_offset_map(pmd, addr);
		if (pte_present(*pte))
			p = pte_page(*pte);
		pte_unmap(pte);
		if (p) {
			unsigned nid = page_to_nid(p);
			if (!test_bit(nid, nodes))
				return -EIO;
		}
		addr += PAGE_SIZE;
	}
	return 0;
}
示例2: unmap_area_sections
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	/* Round size down to whole 1MB sections (see comment above). */
	unsigned long addr = virt, end = virt + (size & ~SZ_1M);
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		/* Take a snapshot of the PMD before clearing it, so we can
		 * still inspect what it pointed at afterwards. */
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);
		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;
			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}
		/* Advance one PGD entry (one section) at a time. */
		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);
	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);
	flush_tlb_kernel_range(virt, end);
}
示例3: pgd_offset
/*
 * Walk the page table of @mm down to the PTE covering @addr.
 * On a missing or bad entry a distinct magic error pointer is
 * returned (0x3a / 0x3b / 0x10) so the caller can tell which
 * level of the walk failed.
 */
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	pud_t *pudp;
	pmd_t *pmdp;

	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
		return (pte_t *) 0x3a;

	pudp = pud_offset(pgdp, addr);
	if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
		return (pte_t *) 0x3b;

	pmdp = pmd_offset(pudp, addr);
	if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
		return (pte_t *) 0x10;

	return pte_offset_map(pmdp, addr);
}
示例4: mprotect_kernel_vm
void mprotect_kernel_vm(int w)
{
struct mm_struct *mm;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
unsigned long addr;
mm = &init_mm;
for(addr = start_vm; addr < end_vm;){
pgd = pgd_offset(mm, addr);
pmd = pmd_offset(pgd, addr);
if(pmd_present(*pmd)){
pte = pte_offset_kernel(pmd, addr);
if(pte_present(*pte)) protect_vm_page(addr, w, 0);
addr += PAGE_SIZE;
}
else addr += PMD_SIZE;
}
}
示例5: uvirt_to_kva
/*
 * Translate a user-virtual address @adr to a kernel virtual address
 * by walking the page table rooted at @pgd.  Returns 0 when the
 * address is not mapped or not present.
 */
static inline unsigned long uvirt_to_kva(pgd_t *pgd, unsigned long adr)
{
	pmd_t *pmd;
	pte_t entry;

	if (pgd_none(*pgd))
		return 0UL;

	pmd = pmd_offset(pgd, adr);
	if (pmd_none(*pmd))
		return 0UL;

	entry = *pte_offset_kernel(pmd, adr);
	if (!pte_present(entry))
		return 0UL;

	/* Kernel address of the page, plus the offset within the page. */
	return (unsigned long) page_address(pte_page(entry)) |
	       (adr & (PAGE_SIZE - 1));
}
示例6: pgd_offset
/*
 * Look up the PTE for the huge page covering @addr in @mm.
 * Returns NULL when any level of the page table is absent.
 */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/* Align down to the huge-page boundary first. */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pte_offset_map(pmd, addr);
}
示例7: free_one_pgd
/*
 * Free the PMD table referenced by PGD entry *dir, including every
 * PTE table hanging off it, then clear the entry itself.
 */
static inline void free_one_pgd(pgd_t * dir)
{
	int j;
	pmd_t * pmd;

	if (dir == NULL)
		;
	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		/* Corrupt entry: report it and clear so it cannot be
		 * walked again, but do not attempt to free through it. */
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	/* Grab the PMD table pointer BEFORE clearing the PGD entry —
	 * the ordering here is deliberate. */
	pmd = pmd_offset(dir, 0);
	pgd_clear(dir);
	for (j = 0; j < PTRS_PER_PMD ; j++) {
		/* Prefetch a few entries ahead for the coming free.
		 * NOTE(review): this reads past the end of the table on
		 * the last iterations — harmless if prefetchw is only a
		 * hint on this arch, but confirm. */
		prefetchw(pmd+j+(PREFETCH_STRIDE/16));
		free_one_pmd(pmd+j);
	}
	pmd_free(pmd);
}
示例8: pgd_offset_k
/*
 * Resolve kernel virtual address @addr down to its PTE.
 * Returns NULL when any level of the walk is absent, or when the
 * mapping is a large (PUD/PMD) page and therefore has no PTE level.
 */
static pte_t *walk_page_table(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_large(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_large(*pmd))
		return NULL;

	pte = pte_offset_kernel(pmd, addr);
	return pte_none(*pte) ? NULL : pte;
}
示例9: alloc_init_page
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT. Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
{
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
	if (pmd_none(*pmdp)) {
		/* No PTE table yet: allocate Linux + hardware tables in
		 * one chunk and point the PMD at the hardware half.
		 * (Renamed from 'ptep' — the original shadowed the outer
		 * declaration.) */
		pte_t *new_table = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
							   sizeof(pte_t));
		new_table += PTRS_PER_PTE;
		set_pmd(pmdp, __mk_pmd(new_table,
				       PMD_TYPE_TABLE | PMD_DOMAIN(domain)));
	}
	ptep = pte_offset(pmdp, virt);
	set_pte(ptep, mk_pte_phys(phys, __pgprot(prot)));
}
示例10: insert_gateway_page
/*
 * Insert the gateway page into a set of page tables, creating the
 * page tables if necessary.
 */
static void insert_gateway_page(pgd_t *pgd, unsigned long address)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* The upper levels must already exist. */
	BUG_ON(!pgd_present(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(!pud_present(*pud));

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd)) {
		/* No PTE table yet — allocate one from bootmem and hook
		 * it into the PMD. */
		pte_t *table = alloc_bootmem_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(table)));
	}

	/* Map the gateway page read-only at the requested address. */
	pte = pte_offset_kernel(pmd, address);
	set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY));
}
示例11: shmedia_unmapioaddr
/*
 * Tear down the kernel mapping previously installed for @vaddr:
 * locate its PTE and clear it.  Silently returns if the address
 * is not mapped.
 */
static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	pmdp = pmd_offset(pgdp, vaddr);
	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;
	ptep = pte_offset(pmdp, vaddr);
	if (pte_none(*ptep) || !pte_present(*ptep))
		return;
	/* NOTE(review): clear_page() zeroes an entire PAGE_SIZE region
	 * starting at ptep — i.e. the whole PTE table page, not just this
	 * entry.  Verify against the upstream source that this is the
	 * intended behaviour and cannot clobber neighbouring mappings. */
	clear_page((void *)ptep);
	pte_clear(ptep);
}
示例12: map_page
/*
 * Map physical address @pa at kernel virtual address @va with the
 * protection bits in @flags.  Returns 0 on success, or -ENOMEM if
 * the second-level table could not be allocated.
 */
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va);
	if (pg != 0) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
		flags);
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
		__pgprot(flags)));
	}
	/* NOTE(review): the extracted snippet is truncated here — the
	 * original presumably ends with `return err;` and a closing brace;
	 * confirm against the upstream source. */
示例13: cpd_cache_flush_page
/*
 * cpd_cache_flush_page: Ensures coherency of cache entries owned
 * by 'vma_p's mm for 'va_page'.
 * Assumptions:
 * - Kernel memory coherent with caches.
 * - User caches entries covered by 'va_page' are not coherent iff covered by
 * a CPD entry owned by the mm associated with 'vma_p'.
 * Action:
 * - if CPD covering 'va_page' is owned by 'vma_p's mm, invalidate caches
 * entries.
 * Notes:
 * - The page is specified by a VA while the flushing call and CPD access
 * uses a MVA.
 */
void
cpd_cache_flush_page(struct vm_area_struct* vma_p, unsigned long va_page)
{
	pmd_t cpd;
	int domain;
	/* Translate the VA to the modified VA used for CPD/cache ops. */
	unsigned long mva_page = va_to_mva(va_page, vma_p->vm_mm);

	/* Does 'vma_p's mm have any incoherencies? */
	if (!cpd_is_mm_cache_coherent(vma_p->vm_mm)) {
		/* Read the CPD (first-level) entry covering the page. */
		cpd = *pmd_offset(pgd_offset_k(mva_page), mva_page);
		domain = pmd_domain(cpd);
		/* Is CPD entry's domain incoherent and active in 'vma_p's mm? */
		if (!cpd_is_domain_cache_coherent(domain) &&
		    domain_active(vma_p->vm_mm->context.dacr, domain)) {
			/* Clean + invalidate exactly one page worth of
			 * cache; VM_EXEC decides whether I-cache is done. */
			cpu_cache_clean_invalidate_range(mva_page, mva_page + PAGE_SIZE,
							 vma_p->vm_flags & VM_EXEC);
		}
	}
}
示例14: pgd_offset
/*
 * Find the PTE covering @addr.  When @mm is NULL the kernel page
 * table (swapper_pg_dir) is used.  Returns NULL if the address is
 * invalid or not mapped; for a huge page the PMD itself is returned
 * cast to a PTE pointer, since there is no PTE level.
 */
pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	/* Fall back to the kernel page table when no mm is given. */
	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;

	return pmd_present(*pmd) ? pte_offset_kernel(pmd, addr) : NULL;
}
示例15: paging_init
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, };

	/* allocate some pages for kernel housekeeping tasks */
	empty_bad_page_table = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
	/* Pages beyond the directly-mapped region need a pkmap table
	 * for kmap() to use. */
	if (num_physpages - num_mappedpages) {
		pgd_t *pge;
		pud_t *pue;
		pmd_t *pme;

		pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE);
		/* Walk down to the PMD covering PKMAP_BASE and hook the
		 * freshly allocated PTE table into it. */
		pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE);
		pue = pud_offset(pge, PKMAP_BASE);
		pme = pmd_offset(pue, PKMAP_BASE);
		__set_pmd(pme, virt_to_phys(pkmap_page_table) | _PAGE_TABLE);
	}
#endif
	/* distribute the allocatable pages across the various zones and pass them to the allocator
	 */
	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
#endif
	free_area_init(zones_size);
#ifdef CONFIG_MMU
	/* initialise init's MMU context */
	init_new_context(&init_task, &init_mm);
#endif
} /* end paging_init() */