This page collects typical usage examples of the C/C++ function pgd_offset_k. If you have been asking yourself how pgd_offset_k is used in practice, what a concrete call looks like, or where to find real pgd_offset_k code, the hand-picked examples below should help.
Fifteen code examples of pgd_offset_k are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better examples.
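Before the examples: pgd_offset_k(addr) returns a pointer to the top-level (pgd) entry for a kernel virtual address in init_mm, the kernel's reference page tables. Here is a minimal sketch of the walk pattern most of the examples below share; it assumes a pgd/pud/pmd/pte layout with no p4d level, and the helper name kernel_va_to_pte is ours, used only for illustration.

/*
 * Minimal sketch (not taken from any example below): descend from the
 * kernel pgd to the pte for a kernel virtual address. Assumes a
 * pgd -> pud -> pmd -> pte layout (no p4d level); kernel_va_to_pte is
 * a hypothetical name.
 */
static pte_t *kernel_va_to_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* index init_mm's pgd */
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* may still be pte_none() */
}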
Example 1: shmedia_mapioaddr

static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long flags = 1; /* 1 = CB0-1 device */

	DEBUG_IOREMAP(("shmedia_mapiopage pa %08x va %08x\n", pa, va));

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp)) {
		pmdp = alloc_bootmem_low_pages(PTRS_PER_PMD * sizeof(pmd_t));
		if (pmdp == NULL)
			panic("No memory for pmd\n");
		memset(pmdp, 0, PTRS_PER_PMD * sizeof(pmd_t));
		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pgdp, va);
	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
		if (ptep == NULL)
			panic("No memory for pte\n");
		clear_page((void *)ptep);
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	ptep = pte_offset(pmdp, va);
	set_pte(ptep, mk_pte_phys(pa, __pgprot(_PAGE_PRESENT |
					       _PAGE_READ | _PAGE_WRITE |
					       _PAGE_DIRTY | _PAGE_ACCESSED |
					       _PAGE_SHARED | flags)));
}
Example 2: remap_area_pages

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();

	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
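Example 2 delegates the per-pgd work to remap_area_pmd(), which is not shown on this page. For context, here is a sketch of that helper as it looked in kernels of the same era; treat it as a reconstruction rather than authoritative source, since the exact body (and the remap_area_pte() it calls) varied by architecture and version.

/*
 * Illustrative reconstruction of the remap_area_pmd() helper that
 * Example 2 calls: fill every pte table slot covered by this pgd
 * entry. Not authoritative; remap_area_pte() is assumed to exist.
 */
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
				 unsigned long size, unsigned long phys_addr,
				 unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (pmd_none(*pmd))
		BUG();
	do {
		/* allocate (or find) the pte table for this pmd slot */
		pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);

		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}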
Example 3: unmap_area_sections

/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 4MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}
Example 4: ioremap_page_range

int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr + addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);
	return err;
}
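ioremap_page_range() only fills page tables; the caller is expected to have reserved the kernel virtual range first. A hedged sketch of the usual calling pattern follows, modeled on the common ioremap_caller() shape; the function name my_ioremap and the choice of pgprot_noncached(PAGE_KERNEL) are illustrative, not from this page.

/*
 * Hedged sketch of a typical ioremap_page_range() caller: reserve a
 * vmalloc-space area, then populate it. Names are illustrative.
 */
void __iomem *my_ioremap(phys_addr_t phys_addr, size_t size)
{
	unsigned long offset = phys_addr & ~PAGE_MASK;
	struct vm_struct *area;
	unsigned long addr;

	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area(size, VM_IOREMAP);	/* reserve the VA range */
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	if (ioremap_page_range(addr, addr + size, phys_addr,
			       pgprot_noncached(PAGE_KERNEL))) {
		vunmap((void *)addr);		/* tear the area back down */
		return NULL;
	}
	return (void __iomem *)(addr + offset);
}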
Example 5: handle_kernel_vaddr_fault

/*
 * kernel virtual address is required to implement vmalloc/pkmap/fixmap
 * Refer to asm/processor.h for System Memory Map
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}
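This helper is only meaningful when the faulting address lies in the kernel's vmalloc region, so the architecture's fault handler dispatches to it before touching any user-mm state. A hedged sketch of that dispatch follows; the do_page_fault signature and the fall-through comments are illustrative, not the in-tree code.

/*
 * Hedged sketch (not from the examples above): how a do_page_fault()
 * style handler typically routes kernel-VA faults to the helper
 * above. Signature and structure are illustrative.
 */
void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	if (address >= VMALLOC_START && address <= VMALLOC_END) {
		/* stale task pgd? copy the pmd from swapper and retry */
		if (handle_kernel_vaddr_fault(address) == 0)
			return;
		/* genuinely bad kernel access: fall through to the
		 * kernel-fault (oops) path */
	}
	/* ... normal user-fault handling continues here ... */
}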
Example 6: shmedia_unmapioaddr

static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
		return;

	pudp = pud_offset(pgdp, vaddr);
	if (pud_none(*pudp) || pud_bad(*pudp))
		return;

	pmdp = pmd_offset(pudp, vaddr);
	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset_kernel(pmdp, vaddr);
	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	clear_page((void *)ptep);
	pte_clear(&init_mm, vaddr, ptep);
}
Example 7: init_pmd

static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	pmd_t *pmd = pmd_offset(pgd, vaddr);
	pte_t *pte;
	unsigned long i;

	n_pages = ALIGN(n_pages, PTRS_PER_PTE);

	pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
		 __func__, vaddr, n_pages);

	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);

	for (i = 0; i < n_pages; ++i)
		pte_clear(NULL, 0, pte + i);

	for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
		pte_t *cur_pte = pte + i;

		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
		BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
		pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
			 __func__, pmd, cur_pte);
	}
	return pte;
}
Example 8: remap_area_sections

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		/* each Linux pmd covers two 1MB hardware sections */
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
Example 9: verify_pages

/* Ensure all existing pages follow the policy. */
static int
verify_pages(unsigned long addr, unsigned long end, unsigned long *nodes)
{
	while (addr < end) {
		struct page *p;
		pte_t *pte;
		pmd_t *pmd;
		pgd_t *pgd = pgd_offset_k(addr);

		if (pgd_none(*pgd)) {
			addr = (addr + PGDIR_SIZE) & PGDIR_MASK;
			continue;
		}
		pmd = pmd_offset(pgd, addr);
		if (pmd_none(*pmd)) {
			addr = (addr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		p = NULL;
		pte = pte_offset_map(pmd, addr);
		if (pte_present(*pte))
			p = pte_page(*pte);
		pte_unmap(pte);
		if (p) {
			unsigned nid = page_to_nid(p);

			if (!test_bit(nid, nodes))
				return -EIO;
		}
		addr += PAGE_SIZE;
	}
	return 0;
}
Example 10: kernel_page_present

/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
Example 11: copy_user_page

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED |
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *dir = pgd_offset_k(p3_addr);
		pmd_t *pmd = pmd_offset(dir, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
		down(&p3map_sem[(address & CACHE_ALIAS) >> 12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		up(&p3map_sem[(address & CACHE_ALIAS) >> 12]);
	}
}
Example 12: pgd_offset_k

/* From: http://www.scs.ch/~frey/linux/memorymap.html */
volatile void *virt_to_kseg(volatile void *address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;
	unsigned long va, ret = 0UL;

	va = VMALLOC_VMADDR((unsigned long)address);

	/* get the page directory. Use the kernel memory map. */
	pgd = pgd_offset_k(va);
	/* check whether we found an entry */
	if (!pgd_none(*pgd)) {
		/*
		 * I'm not sure if we need this, or if the 2.4-style line
		 * above will work reliably too. If you know, please
		 * email me :-)
		 */
		pud_t *pud = pud_offset(pgd, va);

		pmd = pmd_offset(pud, va);
		/* check whether we found an entry */
		if (!pmd_none(*pmd)) {
			/* get a pointer to the page table entry */
			ptep = pte_offset_map(pmd, va);
			pte = *ptep;
			pte_unmap(ptep);
			/* check for a valid page */
			if (pte_present(pte)) {
				/* get the address the page is referring to */
				ret = (unsigned long)page_address(pte_page(pte));
				/* add the offset within the page to the page address */
				ret |= (va & (PAGE_SIZE - 1));
			}
		}
	}
	return (volatile void *)ret;
}
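A hedged usage sketch: resolving a vmalloc'ed buffer to the kernel-segment (direct-map) alias this helper computes. The buffer name and the printk are illustrative only.

/*
 * Usage sketch for virt_to_kseg(): map a vmalloc'ed page back to its
 * direct-mapped kernel address. "buf" and the printk are illustrative.
 */
void *buf = vmalloc(PAGE_SIZE);

if (buf) {
	volatile void *kseg = virt_to_kseg(buf);

	printk(KERN_DEBUG "vmalloc %p -> kseg %p\n", buf, kseg);
	vfree(buf);
}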
Example 13: hash__map_kernel_page

/*
 * map_kernel_page currently only called by __ioremap
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping. Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
	}

	smp_wmb();
	return 0;
}
Example 14: unmap_area_sections

static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and bump
			 * the kvm sequence so other CPUs notice the change.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/* free the page table, if there was one */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/* make sure this CPU's active_mm has caught up before the flush */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
Example 15: remap_area_supersections

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		/* a 16MB supersection is replicated across 16 entries */
		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}