This article collects typical usage examples of the set_pgd function in C/C++; all of the samples below come from Linux kernel code. If you are asking yourself how set_pgd is used in practice, what its arguments look like, or where to find real calls to it, the hand-picked examples here should help.
The following sections show 15 code examples of the set_pgd function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better code examples.
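All of the examples follow the same basic pattern: allocate the next-level page table, then use set_pgd() to install its physical address, together with the page-table protection bits, into the PGD slot selected by pgd_index(). As a rough orientation, here is a minimal sketch of that pattern; it is not taken from any of the examples below, and the function name map_one_region and its arguments are hypothetical:

/*
 * Minimal usage sketch (hypothetical helper, assumes an x86-64 Linux
 * kernel context): allocate a lower-level (PUD) table and point the
 * PGD entry for vaddr at it.
 */
static int map_one_region(pgd_t *pgd_base, unsigned long vaddr)
{
    /* Allocate one zeroed page to hold the next-level (PUD) table. */
    pud_t *pud = (pud_t *)get_zeroed_page(GFP_KERNEL);

    if (!pud)
        return -ENOMEM;

    /*
     * Install it: pgd_index() selects the slot for vaddr, __pa() yields
     * the physical address of the new table, and _KERNPG_TABLE supplies
     * the present/read-write bits of a kernel page-table entry.
     */
    set_pgd(pgd_base + pgd_index(vaddr),
        __pgd(__pa(pud) | _KERNPG_TABLE));
    return 0;
}

The examples below vary mostly in where the lower-level table comes from (get_safe_page(), get_zeroed_page(), bootmem, a kmem cache or quicklist) and in which protection bits are ORed into the entry.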
Example 1: set_up_temporary_mappings
static int set_up_temporary_mappings(void)
{
    unsigned long start, end, next;
    int error;

    temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
    if (!temp_level4_pgt)
        return -ENOMEM;

    /* It is safe to reuse the original kernel mapping */
    set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
        init_level4_pgt[pgd_index(__START_KERNEL_map)]);

    /* Set up the direct mapping from scratch */
    start = (unsigned long)pfn_to_kaddr(0);
    end = (unsigned long)pfn_to_kaddr(max_pfn);

    for (; start < end; start = next) {
        pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);

        if (!pud)
            return -ENOMEM;
        next = start + PGDIR_SIZE;
        if (next > end)
            next = end;
        if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
            return error;
        set_pgd(temp_level4_pgt + pgd_index(start),
            mk_kernel_pgd(__pa(pud)));
    }

    return 0;
}
Example 2: set_up_temporary_text_mapping
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
    pmd_t *pmd;
    pud_t *pud;
    p4d_t *p4d = NULL;
    pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
    pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

    /* Filter out unsupported __PAGE_KERNEL* bits: */
    pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
    pgprot_val(pgtable_prot) &= __default_kernel_pte_mask;

    /*
     * The new mapping only has to cover the page containing the image
     * kernel's entry point (jump_address_phys), because the switch over to
     * it is carried out by relocated code running from a page allocated
     * specifically for this purpose and covered by the identity mapping, so
     * the temporary kernel text mapping is only needed for the final jump.
     * Moreover, in that mapping the virtual address of the image kernel's
     * entry point must be the same as its virtual address in the image
     * kernel (restore_jump_address), so the image kernel's
     * restore_registers() code doesn't find itself in a different area of
     * the virtual address space after switching over to the original page
     * tables used by the image kernel.
     */
    if (pgtable_l5_enabled()) {
        p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
        if (!p4d)
            return -ENOMEM;
    }

    pud = (pud_t *)get_safe_page(GFP_ATOMIC);
    if (!pud)
        return -ENOMEM;

    pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
    if (!pmd)
        return -ENOMEM;

    set_pmd(pmd + pmd_index(restore_jump_address),
        __pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
    set_pud(pud + pud_index(restore_jump_address),
        __pud(__pa(pmd) | pgprot_val(pgtable_prot)));
    if (p4d) {
        p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
        pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

        set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
        set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
    } else {
        /* No p4d for 4-level paging: point the pgd to the pud page table */
        pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));

        set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
    }

    return 0;
}
Example 3: new_pgdir
/*H:435
 * And this is us, creating the new page directory. If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir.
 */
static unsigned int new_pgdir(struct lg_cpu *cpu,
                              unsigned long gpgdir,
                              int *blank_pgdir)
{
    unsigned int next;
#ifdef CONFIG_X86_PAE
    pmd_t *pmd_table;
#endif

    /*
     * We pick one entry at random to throw out. Choosing the Least
     * Recently Used might be better, but this is easy.
     */
    next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
    /* If it's never been allocated at all before, try now. */
    if (!cpu->lg->pgdirs[next].pgdir) {
        cpu->lg->pgdirs[next].pgdir =
            (pgd_t *)get_zeroed_page(GFP_KERNEL);
        /* If the allocation fails, just keep using the one we have */
        if (!cpu->lg->pgdirs[next].pgdir)
            next = cpu->cpu_pgd;
        else {
#ifdef CONFIG_X86_PAE
            /*
             * In PAE mode, allocate a pmd page and populate the
             * last pgd entry.
             */
            pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
            if (!pmd_table) {
                free_page((long)cpu->lg->pgdirs[next].pgdir);
                set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0));
                next = cpu->cpu_pgd;
            } else {
                set_pgd(cpu->lg->pgdirs[next].pgdir +
                        SWITCHER_PGD_INDEX,
                        __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                /*
                 * This is a blank page, so there are no kernel
                 * mappings: caller must map the stack!
                 */
                *blank_pgdir = 1;
            }
#else
            *blank_pgdir = 1;
#endif
        }
    }
    /* Record which Guest toplevel this shadows. */
    cpu->lg->pgdirs[next].gpgdir = gpgdir;
    /* Release all the non-kernel mappings. */
    flush_user_mappings(cpu->lg, next);

    return next;
}
Example 4: kmem_cache_alloc
pgd_t *pgd_alloc(struct mm_struct *mm)
{
    int i;
    pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

    pgd_test_and_unpin(pgd);

    if (PTRS_PER_PMD == 1 || !pgd)
        return pgd;

    for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
        pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
        if (!pmd)
            goto out_oom;
        set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
    }

    if (!HAVE_SHARED_KERNEL_PMD) {
        unsigned long flags;

        for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
            pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
            if (!pmd)
                goto out_oom;
            set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }

        spin_lock_irqsave(&pgd_lock, flags);
        for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
            unsigned long v = (unsigned long)i << PGDIR_SHIFT;
            pgd_t *kpgd = pgd_offset_k(v);
            pud_t *kpud = pud_offset(kpgd, v);
            pmd_t *kpmd = pmd_offset(kpud, v);
            pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);

            memcpy(pmd, kpmd, PAGE_SIZE);
            make_lowmem_page_readonly(
                pmd, XENFEAT_writable_page_tables);
        }
        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
    }

    return pgd;

out_oom:
    for (i--; i >= 0; i--)
        kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
    kmem_cache_free(pgd_cache, pgd);
    return NULL;
}
Example 5: init_trampoline
/*
 * Create PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. Consume only 1 low memory page.
 */
void __meminit init_trampoline(void)
{
    unsigned long paddr, paddr_next;
    pgd_t *pgd;
    pud_t *pud_page, *pud_page_tramp;
    int i;

    if (!kaslr_memory_enabled()) {
        init_trampoline_default();
        return;
    }

    pud_page_tramp = alloc_low_page();

    paddr = 0;
    pgd = pgd_offset_k((unsigned long)__va(paddr));
    pud_page = (pud_t *) pgd_page_vaddr(*pgd);

    for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
        pud_t *pud, *pud_tramp;
        unsigned long vaddr = (unsigned long)__va(paddr);

        pud_tramp = pud_page_tramp + pud_index(paddr);
        pud = pud_page + pud_index(vaddr);
        paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

        *pud_tramp = *pud;
    }

    set_pgd(&trampoline_pgd_entry,
        __pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}
Example 6: paging_init
/*
 * set up paging
 */
void __init paging_init(void)
{
    unsigned long zones_size[MAX_NR_ZONES] = {0,};
    pte_t *ppte;
    int loop;

    /* main kernel space -> RAM mapping is handled as 1:1 transparent by
     * the MMU */
    memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
    memset(kernel_vmalloc_ptes, 0, sizeof(kernel_vmalloc_ptes));

    /* load the VMALLOC area PTE table addresses into the kernel PGD */
    ppte = kernel_vmalloc_ptes;
    for (loop = VMALLOC_START / (PAGE_SIZE * PTRS_PER_PTE);
         loop < VMALLOC_END / (PAGE_SIZE * PTRS_PER_PTE);
         loop++
         ) {
        set_pgd(swapper_pg_dir + loop, __pgd(__pa(ppte) | _PAGE_TABLE));
        ppte += PAGE_SIZE / sizeof(pte_t);
    }

    /* declare the sizes of the RAM zones (only use the normal zone) */
    zones_size[ZONE_NORMAL] =
        contig_page_data.bdata->node_low_pfn -
        contig_page_data.bdata->node_min_pfn;

    /* pass the memory from the bootmem allocator to the main allocator */
    free_area_init(zones_size);

    __flush_tlb_all();
}
Example 7: shmedia_mapioaddr
static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
    pgd_t *pgdp;
    pmd_t *pmdp;
    pte_t *ptep;
    unsigned long flags = 1; /* 1 = CB0-1 device */

    DEBUG_IOREMAP(("shmedia_mapiopage pa %08x va %08x\n", pa, va));

    pgdp = pgd_offset_k(va);
    if (pgd_none(*pgdp)) {
        pmdp = alloc_bootmem_low_pages(PTRS_PER_PMD * sizeof(pmd_t));
        if (pmdp == NULL)
            panic("No memory for pmd\n");
        /* zero exactly the PTRS_PER_PMD entries that were allocated */
        memset(pmdp, 0, PTRS_PER_PMD * sizeof(pmd_t));
        set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
    }

    pmdp = pmd_offset(pgdp, va);
    if (pmd_none(*pmdp)) {
        ptep = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
        if (ptep == NULL)
            panic("No memory for pte\n");
        clear_page((void *)ptep);
        set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
    }

    ptep = pte_offset(pmdp, va);
    set_pte(ptep, mk_pte_phys(pa, __pgprot(_PAGE_PRESENT |
        _PAGE_READ | _PAGE_WRITE |
        _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags)));
}
Example 8: init_low_mapping
static void init_low_mapping(void)
{
    pgd_t *slot0 = pgd_offset(current->mm, 0UL);

    low_ptr = *slot0;
    set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
    flush_tlb_all();
}
Example 9: acpi_restore_state_mem
/*
 * acpi_restore_state
 */
void acpi_restore_state_mem(void)
{
#ifndef CONFIG_ACPI_PV_SLEEP
    set_pgd(pgd_offset(current->mm, 0UL), low_ptr);
    local_flush_tlb();
#endif
}
Example 10: kmem_cache_alloc
pgd_t *pgd_alloc(struct mm_struct *mm)
{
    int i;
    pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

    if (PTRS_PER_PMD == 1 || !pgd)
        return pgd;

    for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
        pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
        if (!pmd)
            goto out_oom;
        paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
        set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
    }
    return pgd;

out_oom:
    for (i--; i >= 0; i--) {
        pgd_t pgdent = pgd[i];
        void *pmd = (void *)__va(pgd_val(pgdent)-1);

        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
        kmem_cache_free(pmd_cache, pmd);
    }
    kmem_cache_free(pgd_cache, pgd);
    return NULL;
}
Example 11: quicklist_alloc
pgd_t *pgd_alloc(struct mm_struct *mm)
{
    int i;
    pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

    if (PTRS_PER_PMD == 1 || !pgd)
        return pgd;

    for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
        pmd_t *pmd = pmd_cache_alloc(i);
        if (!pmd)
            goto out_oom;
        paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
        set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
    }
    return pgd;

out_oom:
    for (i--; i >= 0; i--) {
        pgd_t pgdent = pgd[i];
        void *pmd = (void *)__va(pgd_val(pgdent)-1);

        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
        pmd_cache_free(pmd, i);
    }
    quicklist_free(0, pgd_dtor, pgd);
    return NULL;
}
Example 12: kernel_ident_mapping_init
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
                              unsigned long pstart, unsigned long pend)
{
    unsigned long addr = pstart + info->offset;
    unsigned long end = pend + info->offset;
    unsigned long next;
    int result;

    for (; addr < end; addr = next) {
        pgd_t *pgd = pgd_page + pgd_index(addr);
        pud_t *pud;

        next = (addr & PGDIR_MASK) + PGDIR_SIZE;
        if (next > end)
            next = end;

        if (pgd_present(*pgd)) {
            pud = pud_offset(pgd, 0);
            result = ident_pud_init(info, pud, addr, next);
            if (result)
                return result;
            continue;
        }

        pud = (pud_t *)info->alloc_pgt_page(info->context);
        if (!pud)
            return -ENOMEM;
        result = ident_pud_init(info, pud, addr, next);
        if (result)
            return result;
        set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
    }

    return 0;
}
Example 13: efi_call_phys_prolog
pgd_t * __init efi_call_phys_prolog(void)
{
    unsigned long vaddress;
    pgd_t *save_pgd;
    int pgd;
    int n_pgds;

    if (!efi_enabled(EFI_OLD_MEMMAP)) {
        save_pgd = (pgd_t *)read_cr3();
        write_cr3((unsigned long)efi_scratch.efi_pgt);
        goto out;
    }

    early_code_mapping_set_exec(1);

    n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
    save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);

    for (pgd = 0; pgd < n_pgds; pgd++) {
        save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
        vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
        set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
    }
out:
    __flush_tlb_all();

    return save_pgd;
}
Example 14: sync_current_stack_to_mm
static void sync_current_stack_to_mm(struct mm_struct *mm)
{
    unsigned long sp = current_stack_pointer;
    pgd_t *pgd = pgd_offset(mm, sp);

    if (pgtable_l5_enabled()) {
        if (unlikely(pgd_none(*pgd))) {
            pgd_t *pgd_ref = pgd_offset_k(sp);

            set_pgd(pgd, *pgd_ref);
        }
    } else {
        /*
         * "pgd" is faked. The top level entries are "p4d"s, so sync
         * the p4d. This compiles to approximately the same code as
         * the 5-level case.
         */
        p4d_t *p4d = p4d_offset(pgd, sp);

        if (unlikely(p4d_none(*p4d))) {
            pgd_t *pgd_ref = pgd_offset_k(sp);
            p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);

            set_p4d(p4d, *p4d_ref);
        }
    }
}
Example 15: set_up_temporary_text_mapping
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
    pmd_t *pmd;
    pud_t *pud;

    /*
     * The new mapping only has to cover the page containing the image
     * kernel's entry point (jump_address_phys), because the switch over to
     * it is carried out by relocated code running from a page allocated
     * specifically for this purpose and covered by the identity mapping, so
     * the temporary kernel text mapping is only needed for the final jump.
     * Moreover, in that mapping the virtual address of the image kernel's
     * entry point must be the same as its virtual address in the image
     * kernel (restore_jump_address), so the image kernel's
     * restore_registers() code doesn't find itself in a different area of
     * the virtual address space after switching over to the original page
     * tables used by the image kernel.
     */
    pud = (pud_t *)get_safe_page(GFP_ATOMIC);
    if (!pud)
        return -ENOMEM;

    pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
    if (!pmd)
        return -ENOMEM;

    set_pmd(pmd + pmd_index(restore_jump_address),
        __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
    set_pud(pud + pud_index(restore_jump_address),
        __pud(__pa(pmd) | _KERNPG_TABLE));
    set_pgd(pgd + pgd_index(restore_jump_address),
        __pgd(__pa(pud) | _KERNPG_TABLE));

    return 0;
}