本文整理汇总了C++中pud_none函数的典型用法代码示例。如果您正苦于以下问题:C++ pud_none函数的具体用法?C++ pud_none怎么用?C++ pud_none使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pud_none函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: wip_init
/*
 * wip_init() - Module init: translate a hard-coded user virtual address
 * in a hard-coded pid's address space to a physical address by manually
 * walking the page tables; if the page is swapped out, report the swap
 * entry stored in the PTE instead.
 *
 * NOTE(review): walks another task's page tables without holding
 * mmap_sem or the tasklist lock — acceptable only for a throwaway
 * debug module.
 *
 * Fixes vs. original: `mm` was read uninitialized when the pid was not
 * found; the swap-ID branch dereferenced `pud`/`pmd`/`pte` pointers
 * that were never assigned when an upper level was empty (UB); the
 * function was missing its return statement and closing brace.
 */
int __init wip_init(void)
{
	unsigned long va = 0xb77e5000;
	int pid = 1072;
	unsigned long long pageFN;	/* page frame number */
	unsigned long long pa;		/* resulting physical address */
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = NULL;	/* stays NULL if pid not found */
	struct task_struct *task;

	for_each_process(task) {
		if (task->pid == pid)
			mm = task->mm;
	}
	if (!mm) {
		printk(KERN_ALERT "pid %d not found or has no mm\n", pid);
		return -ESRCH;
	}

	pgd = pgd_offset(mm, va);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;
	pud = pud_offset(pgd, va);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;
	pmd = pmd_offset(pud, va);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, va);
	if (!pte_none(*pte)) {
		pageFN = pte_pfn(*pte);
		/* PA = (PFN << PAGE_SHIFT) | page offset (4K pages assumed) */
		pa = ((pageFN << 12) | (va & 0x00000FFF));
		printk(KERN_ALERT "Physical Address: 0x%08llx\npfn: 0x%04llx\n",
		       pa, pageFN);
	} else {
		/* Not present: the PTE's upper bits hold the swap entry.
		 * NOTE(review): the >>32 layout is arch-specific — confirm. */
		unsigned long long swapID = (pte_val(*pte) >> 32);
		printk(KERN_ALERT "swap ID: 0x%08llx\n", swapID);
	}
	return 0;
}
示例2: shmedia_unmapioaddr
/*
 * Tear down the kernel mapping for a single ioremapped virtual address.
 * Walks init_mm's page tables and bails out silently if any level is
 * absent/bad or the PTE is not present.
 */
static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return;

	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud) || pud_bad(*pud))
		return;

	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return;

	pte = pte_offset_kernel(pmd, vaddr);
	if (pte_none(*pte) || !pte_present(*pte))
		return;

	clear_page((void *)pte);
	pte_clear(&init_mm, vaddr, pte);
}
示例3: syscall_hooking_init
/*
 * syscall_hooking_init() - Module init: locate sys_call_table, make its
 * page writable by flipping PTE bits, then replace sys_open with
 * sys_our_open (saving the original in original_call).
 *
 * Fix vs. original: the three page-table-walk failure paths did
 * `return NULL` from an int-returning function; they now return -1,
 * matching the existing locate_sys_call_table() error path.
 *
 * NOTE(review): `pte->pte_low |= _PAGE_KERNEL` ORs in the whole kernel
 * protection mask; if only write access is intended, _PAGE_RW is the
 * usual bit — confirm against the target arch headers.
 */
int syscall_hooking_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((sys_call_table = locate_sys_call_table()) == NULL) {
		printk("<0> Can't find sys_call_table\n");
		return -1;
	}

	pgd = pgd_offset_k((unsigned long)sys_call_table);
	if (pgd_none(*pgd))
		return -1;	/* fix: was `return NULL` */
	pud = pud_offset(pgd, (unsigned long)sys_call_table);
	if (pud_none(*pud))
		return -1;	/* fix: was `return NULL` */
	pmd = pmd_offset(pud, (unsigned long)sys_call_table);
	if (pmd_none(*pmd))
		return -1;	/* fix: was `return NULL` */

	if (pmd_large(*pmd)) {
		/* Large page: the PMD entry itself holds the leaf mapping. */
		pte = (pte_t *)pmd;
	} else {
		pte = pte_offset_kernel(pmd, (unsigned long)sys_call_table);
	}
	pte->pte_low |= _PAGE_KERNEL;
	__flush_tlb_single((unsigned long)sys_call_table);

	printk("<0> sys_call_table is loaded at %p\n", sys_call_table);
	original_call = (void *)sys_call_table[__NR_open];
	sys_call_table[__NR_open] = (void *)sys_our_open;
	printk("<0> Module Init\n");
	return 0;
}
示例4: forall_pte_pgd
/**
* forall_pte_pgd - Execute a function func for all pages within a range
* @pgd: The PGD been examined
* @start: The starting address
* @end: The end address
* @sched_count: A running count of how many times schedule() was called
* @data: Pointer to caller data
* @func: The function to call
*/
inline unsigned long forall_pte_pgd(struct mm_struct *mm, pgd_t *pgd,
		unsigned long start, unsigned long end,
		unsigned long *sched_count, void *data,
		unsigned long (*func)(pte_t *, unsigned long, void *)) {
	pud_t *pud;
	unsigned long pgd_end;
	unsigned long ret=0;

	/* Nothing mapped at this top-level entry: no pages to visit. */
	if (pgd_none(*pgd)) return 0;
	pud = pud_offset(pgd, start);
	if (!pud) return 0;

	/* Clamp `end` so the walk never leaves the PGD entry covering
	 * `start` (round start up to the next PGDIR boundary). */
	pgd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
	if (end > pgd_end) end = pgd_end;

	do {
		/* Recurse one level down for each populated PUD entry. */
		if (!pud_none(*pud) ) ret += forall_pte_pud(mm, pud, start, end, sched_count, data, func);
		/* Advance to the start of the next PUD-sized region. */
		start = (start + PUD_SIZE) & PUD_MASK;
		pud++;
	/* `start == 0` means we wrapped the address space — stop then too. */
	} while (start && (start < end));
	return ret;
}
示例5: set_pte_mfn
/*
* Associate a virtual page frame with a given physical page frame
* and protection flags for that frame.
*/
/*
 * Install a PTE mapping @vaddr to machine frame @mfn with @flags in the
 * kernel page tables. All intermediate levels must already exist.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgdp)) {
		BUG();
		return;
	}
	pudp = pud_offset(pgdp, vaddr);
	if (pud_none(*pudp)) {
		BUG();
		return;
	}
	pmdp = pmd_offset(pudp, vaddr);
	if (pmd_none(*pmdp)) {
		BUG();
		return;
	}
	ptep = pte_offset_kernel(pmdp, vaddr);

	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(ptep, mfn_pte(mfn, flags));

	/* Flushing just this one mapping suffices (PGE mappings get
	 * flushed as well). */
	__flush_tlb_one(vaddr);
}
示例6: filemap_sync_pmd_range
/*
 * filemap_sync_pmd_range() - Sync all PTE ranges under one PUD entry.
 * @pud:     PUD entry covering @address.
 * @address: start of the range to sync.
 * @end:     end of the overall range (clamped to this PUD below).
 * @vma:     the VMA being synced.
 * @flags:   sync flags, passed through to the PTE-level walker.
 *
 * Returns the OR of all filemap_sync_pte_range() error results.
 */
static inline int filemap_sync_pmd_range(pud_t * pud,
	unsigned long address, unsigned long end,
	struct vm_area_struct *vma, unsigned int flags)
{
	pmd_t * pmd;
	int error;

	/* Empty PUD: nothing mapped here to sync. */
	if (pud_none(*pud))
		return 0;
	/* Corrupt entry: report and clear it rather than walking garbage. */
	if (pud_bad(*pud)) {
		pud_ERROR(*pud);
		pud_clear(pud);
		return 0;
	}
	pmd = pmd_offset(pud, address);
	/* Clamp @end so the walk stays inside the PUD covering @address. */
	if ((address & PUD_MASK) != (end & PUD_MASK))
		end = (address & PUD_MASK) + PUD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte_range(pmd, address, end, vma, flags);
		/* Advance to the next PMD-sized region. */
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	/* address == 0 means we wrapped around the address space. */
	} while (address && (address < end));
	return error;
}
示例7: my_follow_page
/*
 * my_follow_page() - Resolve a user virtual address to its struct page.
 * @vma:  VMA containing @addr (only current->mm is actually consulted).
 * @addr: user virtual address to look up.
 *
 * Walks current->mm's page tables under the PTE lock. On success the
 * page is returned with an extra reference taken (caller must
 * put_page()); returns NULL on any miss.
 *
 * Fixes vs. original: removed the unused local `mm` and the leftover
 * debug printk()s ("aaaa"/"bbbb"/"cccc").
 */
static struct page* my_follow_page(struct vm_area_struct *vma, unsigned long addr) {
	pgd_t *pgd = NULL;
	pud_t *pud = NULL;
	pmd_t *pmd = NULL;
	pte_t *pte = NULL;
	spinlock_t *ptl = NULL;
	struct page* page = NULL;

	pgd = pgd_offset(current->mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto out;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto out;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto out;

	/* Map the PTE and take the page-table lock for a stable read. */
	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (!pte)
		goto out;
	if (!pte_present(*pte))
		goto unlock;
	page = pfn_to_page(pte_pfn(*pte));
	if (!page)
		goto unlock;
	get_page(page);	/* pin the page for the caller */
unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return page;
}
示例8: kernel_page_present
/*
* When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
* is used to determine if a linear map page has been marked as not-valid by
* CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
* This is based on kern_addr_valid(), which almost does what we need.
*
* Because this is only called on the kernel linear map, p?d_sect() implies
* p?d_present(). When debug_pagealloc is enabled, sections mappings are
* disabled.
*/
bool kernel_page_present(struct page *page)
{
pgd_t *pgdp;
pud_t *pudp, pud;
pmd_t *pmdp, pmd;
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);
pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;
pudp = pud_offset(pgdp, addr);
pud = READ_ONCE(*pudp);
if (pud_none(pud))
return false;
if (pud_sect(pud))
return true;
pmdp = pmd_offset(pudp, addr);
pmd = READ_ONCE(*pmdp);
if (pmd_none(pmd))
return false;
if (pmd_sect(pmd))
return true;
ptep = pte_offset_kernel(pmdp, addr);
return pte_valid(READ_ONCE(*ptep));
}
示例9: kthread_wss
int kthread_wss(void *data)
{
unsigned long va;
int ret;
int wss;
pgd_t *pgd;
pmd_t *pmd;
pud_t *pud;
pte_t *ptep;
struct task_struct *task;
while(!kthread_should_stop())
{
printk(KERN_INFO "Checking process' WSS.\n");
for_each_process(task)
{
wss = 0;
if(task->mm != NULL)
{
struct vm_area_struct *temp = task->mm->mmap;
while(temp)
{
if(temp->vm_flags & VM_IO){}
else
{
for(va = temp->vm_start; va < temp->vm_end; va+=PAGE_SIZE)
{
pgd = pgd_offset(task->mm,va);
if(pgd_none(*pgd))
break;
pud = pud_offset(pgd,va);
if(pud_none(*pud))
break;
pmd = pmd_offset(pud,va);
if(pmd_none(*pmd))
break;
ptep = pte_offset_map(pmd,va);
ret = 0;
if(pte_young(*ptep))
{
ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, (unsigned long *) &ptep->pte);
wss++;
}
if(ret)
{
pte_update(task->mm, va, ptep);
}
pte_unmap(ptep);
}
}
temp = temp->vm_next;
}
printk(KERN_INFO "%i: %i\n", task->pid, wss);
}
}
msleep(1000);
}
return 0;
}
示例10: kern_addr_valid
/*
 * Return non-zero if @addr is a canonical kernel address backed by a
 * valid page table entry (large PMD mappings included).
 */
int kern_addr_valid(unsigned long addr)
{
	unsigned long high_bits = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Canonical addresses sign-extend: high bits are all 0 or all 1. */
	if (high_bits != 0 && high_bits != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));	/* huge page: no PTE level */

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}
示例11: kasan_populate_pud
/*
 * kasan_populate_pud() - Populate KASAN shadow memory under one PUD entry.
 * @pud:  the PUD entry to populate (may be empty on entry).
 * @addr: start of the shadow address range.
 * @end:  end of the shadow address range.
 * @nid:  NUMA node to allocate backing memory from.
 */
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
	unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;
		/* Prefer a single 1G mapping when the CPU supports gigantic
		 * pages and the range exactly covers one aligned PUD. */
		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				/* Huge mapping failed: return the chunk. */
				memblock_free(__pa(p), PUD_SIZE);
		}
		/* Fall back: allocate a page-table page, descend to PMDs. */
		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* Skip ranges already covered by a large PMD mapping. */
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}
示例12: pgd_index
/**
* kvm_mips_walk_pgd() - Walk page table with optional allocation.
* @pgd: Page directory pointer.
* @addr: Address to index page table using.
* @cache: MMU page cache to allocate new page tables from, or NULL.
*
* Walk the page tables pointed to by @pgd to find the PTE corresponding to the
* address @addr. If page tables don't exist for @addr, they will be created
* from the MMU cache if @cache is not NULL.
*
* Returns: Pointer to pte_t corresponding to @addr.
* NULL if a page table doesn't exist for @addr and @cache is NULL.
* NULL if a page table allocation failed.
*/
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
	unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pgd += pgd_index(addr);
	if (pgd_none(*pgd)) {
		/* Not used on MIPS yet */
		BUG();
		return NULL;
	}
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new_pmd;
		/* Missing PMD table: allocate one from @cache if allowed. */
		if (!cache)
			return NULL;
		new_pmd = mmu_memory_cache_alloc(cache);
		/* Point each fresh PMD entry at the shared invalid PTE table. */
		pmd_init((unsigned long)new_pmd,
			(unsigned long)invalid_pte_table);
		pud_populate(NULL, pud, new_pmd);
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte_t *new_pte;
		/* Likewise allocate and zero a PTE table on demand. */
		if (!cache)
			return NULL;
		new_pte = mmu_memory_cache_alloc(cache);
		clear_page(new_pte);
		pmd_populate_kernel(NULL, pmd, new_pte);
	}
	return pte_offset(pmd, addr);
}
示例13: copy_pud
/*
 * copy_pud() - Copy one PGD entry's PUD entries for hibernation resume.
 * @dst_pgdp: destination PGD entry (a PUD table is allocated if empty).
 * @src_pgdp: source PGD entry to copy from.
 * @start:    first virtual address covered.
 * @end:      end of the virtual range.
 *
 * Table entries recurse into copy_pmd(); block (section) entries are
 * copied directly with the read-only bit cleared so the copy is
 * writable. Returns 0 on success or -ENOMEM.
 */
static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
	unsigned long end)
{
	pud_t *dst_pudp;
	pud_t *src_pudp;
	unsigned long next;
	unsigned long addr = start;

	if (pgd_none(READ_ONCE(*dst_pgdp))) {
		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pudp)
			return -ENOMEM;
		pgd_populate(&init_mm, dst_pgdp, dst_pudp);
	}
	dst_pudp = pud_offset(dst_pgdp, start);
	src_pudp = pud_offset(src_pgdp, start);
	do {
		/* Snapshot the source entry once per iteration. */
		pud_t pud = READ_ONCE(*src_pudp);
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			continue;	/* hole in the source: nothing to copy */
		if (pud_table(pud)) {
			if (copy_pmd(dst_pudp, src_pudp, addr, next))
				return -ENOMEM;
		} else {
			/* Section mapping: copy it, dropping read-only. */
			set_pud(dst_pudp,
				__pud(pud_val(pud) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pudp++, src_pudp++, addr = next, addr != end);
	return 0;
}
示例14: set_pte_pfn
/*
* Associate a virtual page frame with a given physical page frame
* and protection flags for that frame.
*/
/*
 * Install a PTE mapping @vaddr to page frame @pfn with @flags in the
 * kernel page tables. All intermediate levels must already exist.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgdp)) {
		BUG();
		return;
	}
	pudp = pud_offset(pgdp, vaddr);
	if (pud_none(*pudp)) {
		BUG();
		return;
	}
	pmdp = pmd_offset(pudp, vaddr);
	if (pmd_none(*pmdp)) {
		BUG();
		return;
	}
	ptep = pte_offset_kernel(pmdp, vaddr);

	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(ptep, pfn_pte(pfn, flags));

	/* Flushing this one page is enough — conservative, since we are
	 * only called from __set_fixmap. */
	local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
}
示例15: page_table_range_init
/*
 * page_table_range_init() - Ensure PTE tables exist for [start, end).
 * @start:    first virtual address (rounded down to a PMD boundary).
 * @end:      end address (rounded up to a PMD boundary).
 * @pgd_base: base of the page directory to populate.
 *
 * The PGD and PUD levels are expected to be populated already (BUG
 * otherwise); only missing PTE tables are allocated, from bootmem.
 */
void __init page_table_range_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx;
	unsigned long vaddr;

	/* Round the range out to whole PMD-sized regions. */
	vaddr = start & PMD_MASK;
	end = (end + PMD_SIZE - 1) & PMD_MASK;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	/* NOTE(review): advances one PMD per PGD slot — assumes folded
	 * levels where PGDIR_SIZE == PMD_SIZE on this arch; confirm. */
	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		BUG_ON(pgd_none(*pgd));
		pud = pud_offset(pgd, 0);
		BUG_ON(pud_none(*pud));
		pmd = pmd_offset(pud, 0);
		if (!pmd_present(*pmd)) {
			pte_t *pte_table;
			/* Allocate and zero a fresh PTE table. */
			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			memset(pte_table, 0, PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte_table);
		}
		vaddr += PMD_SIZE;
	}
}