This article collects and summarizes typical usage examples of the C++ set_pte function. If you are wondering exactly what set_pte does and how to use it, the hand-picked code examples here may help.
The following presents 15 code examples of set_pte, ordered by popularity by default.
Example 1: map_switcher_in_guest
/*H:480
* (vi) Mapping the Switcher when the Guest is about to run.
*
* The Switcher and the two pages for this CPU need to be visible in the Guest
* (and not the pages for other CPUs).
*
* The pages for the pagetables have all been allocated before: we just need
* to make sure the actual PTEs are up-to-date for the CPU we're about to run
* on.
*/
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
unsigned long base;
struct page *percpu_switcher_page, *regs_page;
pte_t *pte;
struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];
/* Switcher page should always be mapped by now! */
BUG_ON(!pgdir->switcher_mapped);
/*
* Remember that we have two pages for each Host CPU, so we can run a
* Guest on each CPU without them interfering. We need to make sure
* those pages are mapped correctly in the Guest, but since we usually
* run on the same CPU, we cache that, and only update the mappings
* when we move.
*/
if (pgdir->last_host_cpu == raw_smp_processor_id())
return;
/* -1 means unknown so we remove everything. */
if (pgdir->last_host_cpu == -1) {
unsigned int i;
for_each_possible_cpu(i)
remove_switcher_percpu_map(cpu, i);
} else {
/* We know exactly what CPU mapping to remove. */
remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
}
/*
* When we're running the Guest, we want the Guest's "regs" page to
* appear where the first Switcher page for this CPU is. This is an
* optimization: when the Switcher saves the Guest registers, it saves
* them into the first page of this CPU's "struct lguest_pages": if we
* make sure the Guest's register page is already mapped there, we
* don't have to copy them out again.
*/
/* Find the shadow PTE for this regs page. */
base = switcher_addr + PAGE_SIZE
+ raw_smp_processor_id() * sizeof(struct lguest_pages);
pte = find_spte(cpu, base, false, 0, 0);
regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
get_page(regs_page);
set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));
/*
* We map the second page of the struct lguest_pages read-only in
* the Guest: the IDT, GDT and other things it's not supposed to
* change.
*/
pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
percpu_switcher_page
= lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
get_page(percpu_switcher_page);
set_pte(pte, mk_pte(percpu_switcher_page,
__pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));
pgdir->last_host_cpu = raw_smp_processor_id();
}
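For context, a hedged sketch of the kind of call site the comments above describe: just before switching into the Guest, the run loop refreshes the per-CPU Switcher mappings. lguest's real run loop differs; the surrounding function here is an assumption (lguest_pages() is the per-CPU pages lookup):

/* Hypothetical caller: refresh the Switcher mappings, then switch. */
static void run_guest_once_sketch(struct lg_cpu *cpu)
{
	struct lguest_pages *pages = lguest_pages(raw_smp_processor_id());

	map_switcher_in_guest(cpu, pages);	/* cheap if we stayed on one CPU */
	/* ...the actual switch into the Guest would happen here... */
}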
Example 2: copy_one_pte
static inline void copy_one_pte(pte_t * old_pte, pte_t * new_pte, int cow)
{
pte_t pte = *old_pte;
unsigned long page_nr;
if (pte_none(pte))
return;
if (!pte_present(pte)) {
swap_duplicate(pte_val(pte));
set_pte(new_pte, pte);
return;
}
page_nr = MAP_NR(pte_page(pte));
if (page_nr >= MAP_NR(high_memory) || PageReserved(mem_map+page_nr)) {
set_pte(new_pte, pte);
return;
}
if (cow)
pte = pte_wrprotect(pte);
if (delete_from_swap_cache(page_nr))
pte = pte_mkdirty(pte);
set_pte(new_pte, pte_mkold(pte));
set_pte(old_pte, pte);
mem_map[page_nr].count++;
}
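For orientation, a minimal sketch of the loop that would drive copy_one_pte() at fork time; the real caller in this kernel era was copy_pte_range(), and the signature below is an assumption:

/* Hypothetical driver: copy `count` consecutive PTEs from parent to child. */
static inline void copy_pte_range_sketch(pte_t *old_pte, pte_t *new_pte,
					 unsigned long count, int cow)
{
	unsigned long i;

	for (i = 0; i < count; i++)
		copy_one_pte(old_pte + i, new_pte + i, cow);
}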
Example 3: _copy_pte
static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
pte_t pte = READ_ONCE(*src_ptep);
if (pte_valid(pte)) {
/*
* Resume will overwrite areas that may be marked
* read only (code, rodata). Clear the RDONLY bit from
* the temporary mappings we use during restore.
*/
set_pte(dst_ptep, pte_mkwrite(pte));
} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
/*
* debug_pagealloc will have removed the PTE_VALID bit if
* the page isn't in use by the resume kernel. It may have
* been in use by the original kernel, in which case we need
* to put it back in our copy to do the restore.
*
* Before marking this entry valid, check that the pfn should
* indeed be mapped.
*/
BUG_ON(!pfn_valid(pte_pfn(pte)));
set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
}
}
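One level up, the hibernate code walks a PTE table and calls _copy_pte() for every slot in the range; a sketch modeled on arm64's copy_pte() (treat the exact signature as an assumption):

static int copy_pte_sketch(pmd_t *dst_pmdp, pmd_t *src_pmdp,
			   unsigned long start, unsigned long end)
{
	pte_t *src_ptep = pte_offset_kernel(src_pmdp, start);
	pte_t *dst_ptep = pte_offset_kernel(dst_pmdp, start);
	unsigned long addr = start;

	do {
		_copy_pte(dst_ptep, src_ptep, addr);
	} while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);

	return 0;
}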
Example 4: unuse_pte
/*
* Trying to stop swapping from a file is fraught with races, so
* we repeat quite a bit here when we have to pause. swapoff()
* isn't exactly timing-critical, so who cares (but this is /really/
* inefficient, ugh).
*
* We return 1 after having slept, which makes the caller start over
* from the beginning for this process.
*/
static inline int unuse_pte(struct vm_area_struct * vma, unsigned long address,
pte_t *dir, unsigned int type, unsigned long page)
{
pte_t pte = *dir;
if (pte_none(pte))
return 0;
if (pte_present(pte)) {
unsigned long page_nr = MAP_NR(pte_page(pte));
if (page_nr >= MAP_NR(high_memory))
return 0;
if (!in_swap_cache(page_nr))
return 0;
if (SWP_TYPE(in_swap_cache(page_nr)) != type)
return 0;
delete_from_swap_cache(page_nr);
set_pte(dir, pte_mkdirty(pte));
return 0;
}
if (SWP_TYPE(pte_val(pte)) != type)
return 0;
read_swap_page(pte_val(pte), (char *) page);
#if 0 /* Is this really needed here, hasn't it been solved elsewhere? */
flush_page_to_ram(page);
#endif
if (pte_val(*dir) != pte_val(pte)) {
free_page(page);
return 1;
}
set_pte(dir, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
flush_tlb_page(vma, address);
++vma->vm_mm->rss;
swap_free(pte_val(pte));
return 1;
}
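The retry contract spelled out above ("return 1 after having slept") means the caller must restart its scan from the top. A hedged sketch of that shape; lookup_pte() is a hypothetical helper standing in for the pgd/pmd/pte walk, not a real kernel API:

static void unuse_vma_sketch(struct vm_area_struct *vma, unsigned int type,
			     unsigned long page)
{
	unsigned long addr;

again:
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		pte_t *dir = lookup_pte(vma->vm_mm, addr);	/* hypothetical */

		if (dir && unuse_pte(vma, addr, dir, type, page))
			goto again;	/* unuse_pte() slept: start over */
	}
}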
Example 5: populate_switcher_pte_page
/*H:520
* Setting up the Switcher PTE page for a given CPU is fairly easy, given
* the CPU number and the "struct page"s for the Switcher code itself.
*
* Currently the Switcher is less than a page long, so "pages" is always 1.
*/
static __init void populate_switcher_pte_page(unsigned int cpu,
struct page *switcher_page[],
unsigned int pages)
{
unsigned int i;
pte_t *pte = switcher_pte_page(cpu);
/* The first entries are easy: they map the Switcher code. */
for (i = 0; i < pages; i++) {
set_pte(&pte[i], mk_pte(switcher_page[i],
__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}
/* The only other thing we map is this CPU's pair of pages. */
i = pages + cpu*2;
/* First page (Guest registers) is writable from the Guest */
set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));
/*
* The second page contains the "struct lguest_ro_state", and is
* read-only.
*/
set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}
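A hedged sketch of how this setup routine is driven at init time, once per possible CPU (modeled on lguest's initialization; the wrapper name is an assumption):

/* Hypothetical init step: populate the Switcher PTE page on every CPU. */
static __init void setup_switcher_ptes_sketch(struct page *switcher_page[],
					      unsigned int pages)
{
	unsigned int i;

	for_each_possible_cpu(i)
		populate_switcher_pte_page(i, switcher_page, pages);
}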
Example 6: remove_switcher_percpu_map
/*H:481
* This clears the Switcher mappings for cpu #i.
*/
static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
{
unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
pte_t *pte;
/* Clear the mappings for both pages. */
pte = find_spte(cpu, base, false, 0, 0);
release_pte(*pte);
set_pte(pte, __pte(0));
pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
release_pte(*pte);
set_pte(pte, __pte(0));
}
Example 7: rtR0MemObjLinuxFixPte
/**
* Replace the PFN of a PTE with the address of the actual page.
*
* The caller maps a reserved dummy page at the address with the desired access
* and flags.
*
* This hack is required for older Linux kernels which don't provide
* remap_pfn_range().
*
* @returns 0 on success, -ENOMEM on failure.
* @param mm The memory context.
* @param ulAddr The mapping address.
* @param Phys The physical address of the page to map.
*/
static int rtR0MemObjLinuxFixPte(struct mm_struct *mm, unsigned long ulAddr, RTHCPHYS Phys)
{
int rc = -ENOMEM;
pgd_t *pgd;
spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, ulAddr);
if (!pgd_none(*pgd) && !pgd_bad(*pgd))
{
pmd_t *pmd = pmd_offset(pgd, ulAddr);
if (!pmd_none(*pmd))
{
pte_t *ptep = pte_offset_map(pmd, ulAddr);
if (ptep)
{
pte_t pte = *ptep;
pte.pte_high &= 0xfff00000;
pte.pte_high |= ((Phys >> 32) & 0x000fffff);
pte.pte_low &= 0x00000fff;
pte.pte_low |= (Phys & 0xfffff000);
set_pte(ptep, pte);
pte_unmap(ptep);
rc = 0;
}
}
}
spin_unlock(&mm->page_table_lock);
return rc;
}
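The usage pattern the function comment describes, as a hedged sketch: first map a reserved dummy page at ulAddr to create the PTE with the desired protection, then patch in the real physical address. remap_page_range() had this four-argument form in the old kernels this hack targets; the wrapper name and flag handling are assumptions:

/* Hypothetical caller: establish a PTE via a dummy page, then fix it up. */
static int map_one_page_sketch(struct mm_struct *mm, unsigned long ulAddr,
			       RTHCPHYS Phys, struct page *pDummyPage,
			       pgprot_t fPg)
{
	int rc = remap_page_range(ulAddr, page_to_phys(pDummyPage),
				  PAGE_SIZE, fPg);
	if (!rc)
		rc = rtR0MemObjLinuxFixPte(mm, ulAddr, Phys);
	return rc;
}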
Example 8: map_page_minicache
/*
* Note that this is intended to be called only from the copy_user_page
* asm code; anything else will require special locking to prevent the
* mini-cache space being re-used. (Note: probably preempt unsafe).
*
* We rely on the fact that the minicache is 2K, and we'll be pushing
* 4K of data through it, so we don't actually have to specifically
* flush the minicache when we change the mapping.
*
* Note also: assert(PAGE_OFFSET <= virt < high_memory).
* Unsafe: preempt, kmap.
*/
unsigned long map_page_minicache(unsigned long virt)
{
set_pte(minicache_pte, mk_pte_phys(__pa(virt), minicache_pgprot));
cpu_tlb_invalidate_page(minicache_address, 0);
return minicache_address;
}
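As the comment says, the only intended caller is the copy_user_page asm path. A hedged C sketch of that flow, with memcpy standing in for the hand-written assembly copy loop:

/* Hypothetical caller: copy a page whose source goes through the minicache. */
static void minicache_copy_sketch(void *to, void *from)
{
	unsigned long src = map_page_minicache((unsigned long)from);

	memcpy(to, (void *)src, PAGE_SIZE);	/* real code: asm copy loop */
}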
Example 9: remove_mapping_pte_range
static inline void
remove_mapping_pte_range (pmd_t *pmd, unsigned long address, unsigned long size)
{
pte_t *pte;
unsigned long end;
if (pmd_none (*pmd))
return;
if (pmd_bad (*pmd)){
printk ("remove_graphics_pte_range: bad pmd (%08lx)\n", pmd_val (*pmd));
pmd_clear (pmd);
return;
}
pte = pte_offset (pmd, address);
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
pte_t entry = *pte;
if (pte_present (entry))
set_pte (pte, pte_modify (entry, PAGE_NONE));
address += PAGE_SIZE;
pte++;
} while (address < end);
}
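One level up, a sketch of the pmd walk that would drive this helper, following the usual inner/outer page-table walk pattern of this era (the function name and signature are assumptions):

static inline void
remove_mapping_pmd_range (pgd_t *pgd, unsigned long address, unsigned long size)
{
	pmd_t *pmd;
	unsigned long end;

	if (pgd_none (*pgd))
		return;
	pmd = pmd_offset (pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		remove_mapping_pte_range (pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}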
Example 10: remap_area_pte
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
unsigned long pfn;
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
if (address >= end)
BUG();
pfn = phys_addr >> PAGE_SHIFT;
do {
if (!pte_none(*pte)) {
printk("remap_area_pte: page already exists\n");
BUG();
}
set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
address += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
}
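For completeness, the pmd-level caller in the classic ioremap walk, sketched after the i386 remap_area_pmd() of the same era; pte_alloc_kernel()'s signature varied across kernels, so treat this as an assumption:

static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
				 unsigned long size, unsigned long phys_addr,
				 unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}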
Example 11: relocate_restore_code
static int relocate_restore_code(void)
{
pgd_t *pgd;
pud_t *pud;
relocated_restore_code = get_safe_page(GFP_ATOMIC);
if (!relocated_restore_code)
return -ENOMEM;
memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
/* Make the page containing the relocated code executable */
pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
pud = pud_offset(pgd, relocated_restore_code);
if (pud_large(*pud)) {
set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
} else {
pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
if (pmd_large(*pmd)) {
set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
} else {
pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
}
}
__flush_tlb_all();
return 0;
}
Example 12: allocate_switcher_mapping
/*H:501
* We do need the Switcher code mapped at all times, so we allocate that
* part of the Guest page table here. We map the Switcher code immediately,
* but defer mapping of the guest register page and IDT/LDT etc page until
* just before we run the guest in map_switcher_in_guest().
*
* We *could* do this setup in map_switcher_in_guest(), but at that point
* we have interrupts disabled, and allocating pages like that is fraught: we
* can't sleep if we need to free up some memory.
*/
static bool allocate_switcher_mapping(struct lg_cpu *cpu)
{
int i;
for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
CHECK_GPGD_MASK, _PAGE_TABLE);
if (!pte)
return false;
/*
* Map the switcher page if not already there. It might
* already be there because we call allocate_switcher_mapping()
* in guest_set_pgd() just in case it did discard our Switcher
* mapping, but it probably didn't.
*/
if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
/* Get a reference to the Switcher page. */
get_page(lg_switcher_pages[0]);
/* Create a read-only, executable, kernel-style PTE */
set_pte(pte,
mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
}
}
cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
return true;
}
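A hedged sketch of a call site: whenever the Guest installs a new top-level page table, the Switcher mapping has to be re-established. kill_guest() is lguest's real error path; the surrounding function shape is an assumption:

/* Hypothetical: after switching to a new pgdir, remap the Switcher. */
static void new_pgdir_sketch(struct lg_cpu *cpu)
{
	if (!allocate_switcher_mapping(cpu))
		kill_guest(cpu, "Cannot populate switcher mapping");
}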
Example 13: kmap_atomic
/**
 * Establish a temporary kernel mapping.
 * type and the CPU together determine which fixed-mapped linear address
 * maps the requested page.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
enum fixed_addresses idx;
unsigned long vaddr;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
inc_preempt_count();
/**
 * If the page being mapped is not in high memory, no mapping is needed:
 * just return its linear address directly.
 */
if (!PageHighMem(page))
return page_address(page);
/**
 * Determine the linear address from type and the CPU.
 */
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
if (!pte_none(*(kmap_pte-idx)))
BUG();
#endif
/**
 * Bind the linear address to the page through its page-table entry.
 */
set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/**
 * Finally, flush the TLB entry; only then can the linear address be
 * returned.
 */
__flush_tlb_one(vaddr);
return (void*) vaddr;
}
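A short usage sketch of the pairing this function belongs to: copying data out of a possibly-highmem page under an atomic kmap. KM_USER0 is the standard slot for this kernel era; the helper itself is hypothetical:

/* Hypothetical helper: read one page's contents into buf. */
static void read_page_sketch(struct page *page, void *buf)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);	/* drops the preempt count again */
}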
Example 14: bus_tmapin
/*
* Create a temporary, one-page mapping for a device.
* This is used by some device probe routines that
* need to do peek/write/read tricks.
*/
void *
bus_tmapin(int bustype, int pa)
{
vaddr_t pgva;
int off, pte;
if ((bustype < 0) || (bustype >= BUS__NTYPES))
panic("bus_tmapin: bustype");
off = pa & PGOFSET;
pa -= off;
pa &= bus_info[bustype].mask;
pa |= bus_info[bustype].base;
pte = PA_PGNUM(pa);
pte |= (bus_info[bustype].type << PG_MOD_SHIFT);
pte |= (PG_VALID | PG_WRITE | PG_SYSTEM | PG_NC);
if (tmp_vpages_inuse)
panic("bus_tmapin: tmp_vpages_inuse");
tmp_vpages_inuse++;
pgva = tmp_vpages[1];
set_pte(pgva, pte);
return ((void *)(pgva + off));
}
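The probe-time trick the comment alludes to, as a hedged sketch: map the device page, attempt a bus-error-safe read, then unmap. bus_tmapout() is the matching teardown in this codebase; peek_byte() is assumed from the same family of probe helpers:

/* Hypothetical probe: returns nonzero if a device responds at pa. */
int probe_device_sketch(int bustype, int pa)
{
	void *va = bus_tmapin(bustype, pa);
	int found = (peek_byte(va) != -1);	/* -1 signals a bus error */

	bus_tmapout(va);
	return found;
}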
Example 15: copy_user_page
/*
* copy_user_page
* @to: P1 address
* @from: P1 address
* @address: U0 address to be mapped
* @page: page (virt_to_page(to))
*/
void copy_user_page(void *to, void *from, unsigned long address,
struct page *page)
{
__set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
copy_page(to, from);
else {
pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
_PAGE_RW | _PAGE_CACHABLE |
_PAGE_DIRTY | _PAGE_ACCESSED |
_PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
unsigned long phys_addr = PHYSADDR(to);
unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
pgd_t *dir = pgd_offset_k(p3_addr);
pmd_t *pmd = pmd_offset(dir, p3_addr);
pte_t *pte = pte_offset_kernel(pmd, p3_addr);
pte_t entry;
unsigned long flags;
entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
set_pte(pte, entry);
local_irq_save(flags);
__flush_tlb_page(get_asid(), p3_addr);
local_irq_restore(flags);
update_mmu_cache(NULL, p3_addr, entry);
__copy_user_page((void *)p3_addr, from, to);
pte_clear(&init_mm, p3_addr, pte);
up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
}
}