This article collects typical usage examples of the HYPERVISOR_update_va_mapping function in C++. If you have been wondering how HYPERVISOR_update_va_mapping is used in practice, what its calling convention looks like, or what real-world callers do with it, the curated code examples below should help.
Fifteen code examples of HYPERVISOR_update_va_mapping are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
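Before diving into the examples, it helps to keep the hypercall's shape in mind. The sketch below restates the wrapper's prototype and the UVMF_* flush flags as they appear in the Xen public headers and the Linux hypercall wrappers; treat exact spellings as an assumption of this note and check your own tree.

/*
 * Reference sketch (not part of any example below): the hypercall rewrites
 * the PTE backing one virtual address and optionally flushes the TLB.
 * Flag names follow xen/interface/xen.h.
 */
int HYPERVISOR_update_va_mapping(unsigned long va,      /* virtual address whose PTE is rewritten */
                                 pte_t new_val,         /* new PTE value (machine frame | flags) */
                                 unsigned long flags);  /* UVMF_NONE, UVMF_TLB_FLUSH or UVMF_INVLPG,
                                                           optionally combined with UVMF_LOCAL,
                                                           UVMF_ALL or UVMF_MULTI */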
Example 1: set_aliased_prot
/*
 * Set the page permissions for a particular virtual address. If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
    int level;
    pte_t *ptep;
    pte_t pte;
    unsigned long pfn;
    struct page *page;

    ptep = lookup_address((unsigned long)v, &level);
    BUG_ON(ptep == NULL);

    pfn = pte_pfn(*ptep);
    page = pfn_to_page(pfn);

    pte = pfn_pte(pfn, prot);

    if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
        BUG();

    if (!PageHighMem(page)) {
        void *av = __va(PFN_PHYS(pfn));

        if (av != v)
            if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
                BUG();
    } else
        kmap_flush_unused();
}
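For context, a typical caller of set_aliased_prot in the Xen pvops code is the LDT allocation path, which marks every LDT page read-only before handing it to the hypervisor. The sketch below is modeled on that caller; names such as xen_alloc_ldt and LDT_ENTRY_SIZE follow the upstream kernel, but verify them against your source tree.

/* Minimal sketch of a caller, modeled on the arch/x86/xen LDT handling. */
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
    const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
    int i;

    for (i = 0; i < entries; i += entries_per_page)
        set_aliased_prot(ldt + i, PAGE_KERNEL_RO);   /* Xen requires RO descriptor pages */
}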
Example 2: set_aliased_prot
/*
 * Set the page permissions for a particular virtual address. If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
    int level;
    pte_t *ptep;
    pte_t pte;
    unsigned long pfn;
    struct page *page;
    unsigned char dummy;

    ptep = lookup_address((unsigned long)v, &level);
    BUG_ON(ptep == NULL);

    pfn = pte_pfn(*ptep);
    page = pfn_to_page(pfn);

    pte = pfn_pte(pfn, prot);

    /*
     * Careful: update_va_mapping() will fail if the virtual address
     * we're poking isn't populated in the page tables. We don't
     * need to worry about the direct map (that's always in the page
     * tables), but we need to be careful about vmap space. In
     * particular, the top level page table can lazily propagate
     * entries between processes, so if we've switched mms since we
     * vmapped the target in the first place, we might not have the
     * top-level page table entry populated.
     *
     * We disable preemption because we want the same mm active when
     * we probe the target and when we issue the hypercall. We'll
     * have the same nominal mm, but if we're a kernel thread, lazy
     * mm dropping could change our pgd.
     *
     * Out of an abundance of caution, this uses __get_user() to fault
     * in the target address just in case there's some obscure case
     * in which the target address isn't readable.
     */
    preempt_disable();

    pagefault_disable();    /* Avoid warnings due to being atomic. */
    __get_user(dummy, (unsigned char __user __force *)v);
    pagefault_enable();

    if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
        BUG();

    if (!PageHighMem(page)) {
        void *av = __va(PFN_PHYS(pfn));

        if (av != v)
            if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
                BUG();
    } else
        kmap_flush_unused();

    preempt_enable();
}
Example 3: map_ma_at_va
/*
 * Add a mapping for the machine page at the given virtual address.
 */
static void
map_ma_at_va(maddr_t ma, native_ptr_t va, uint_t level)
{
    x86pte_t *ptep;
    x86pte_t pteval;

    pteval = ma | pte_bits;
    if (level > 0)
        pteval |= PT_PAGESIZE;
    if (va >= target_kernel_text && pge_support)
        pteval |= PT_GLOBAL;

    if (map_debug && ma != va)
        dboot_printf("mapping ma=0x%" PRIx64 " va=0x%" PRIx64
            " pte=0x%" PRIx64 " l=%d\n",
            (uint64_t)ma, (uint64_t)va, pteval, level);

#if defined(__xpv)
    /*
     * see if we can avoid find_pte() on the hypervisor
     */
    if (HYPERVISOR_update_va_mapping(va, pteval,
        UVMF_INVLPG | UVMF_LOCAL) == 0)
        return;
#endif

    /*
     * Find the pte that will map this address. This creates any
     * missing intermediate level page tables
     */
    ptep = find_pte(va, NULL, level, 0);

    /*
     * When paravirtualized, we must use hypervisor calls to modify the
     * PTE, since paging is active. On real hardware we just write to
     * the pagetables which aren't in use yet.
     */
#if defined(__xpv)
    ptep = ptep;    /* shut lint up */
    if (HYPERVISOR_update_va_mapping(va, pteval, UVMF_INVLPG | UVMF_LOCAL))
        dboot_panic("mmu_update failed-map_pa_at_va va=0x%" PRIx64
            " l=%d ma=0x%" PRIx64 ", pte=0x%" PRIx64 "",
            (uint64_t)va, level, (uint64_t)ma, pteval);
#else
    if (va < 1024 * 1024)
        pteval |= PT_NOCACHE;    /* for video RAM */
    if (pae_support)
        *ptep = pteval;
    else
        *((x86pte32_t *)ptep) = (x86pte32_t)pteval;
#endif
}
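In the dboot loader this helper is usually reached through a physical-address wrapper that first converts the frame to a machine address. The sketch below shows that pattern; the pa_to_ma() helper name is an assumption borrowed from the illumos dboot sources, so double-check it in your tree.

/* Hedged sketch of a caller: map a physical page, converting pa -> ma first. */
static void
map_pa_at_va(paddr_t pa, native_ptr_t va, uint_t level)
{
    map_ma_at_va(pa_to_ma(pa), va, level);   /* pa_to_ma() assumed from dboot */
}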
Example 4: xen_load_gdt_boot
/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
    unsigned long va = dtr->address;
    unsigned int size = dtr->size + 1;
    unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
    unsigned long frames[pages];
    int f;

    /*
     * A GDT can be up to 64k in size, which corresponds to 8192
     * 8-byte entries, or 16 4k pages.
     */
    BUG_ON(size > 65536);
    BUG_ON(va & ~PAGE_MASK);

    for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
        pte_t pte;
        unsigned long pfn, mfn;

        pfn = virt_to_pfn(va);
        mfn = pfn_to_mfn(pfn);

        pte = pfn_pte(pfn, PAGE_KERNEL_RO);

        if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
            BUG();

        frames[f] = mfn;
    }

    if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
        BUG();
}
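The example remaps each GDT page read-only before passing the list of machine frames to the hypervisor, because Xen refuses to install a GDT whose frames the guest could still write. For reference, the companion hypercall used on the last line is declared roughly as below (shape taken from the Linux hypercall wrappers; treat the exact signature as an assumption of this note).

/* Reference sketch: install a new GDT from a list of machine frame numbers. */
int HYPERVISOR_set_gdt(unsigned long *frame_list,   /* mfns holding the descriptors */
                       unsigned int entries);       /* number of 8-byte descriptors */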
Example 5: grants_init
void grants_init(void)
{
    unsigned long frames[NR_GRANT_PAGES];
    gnttab_setup_table_t op;

    op.dom = DOMID_SELF;
    op.nr_frames = NR_GRANT_PAGES;
    set_xen_guest_handle(op.frame_list, frames);

    int rs = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &op, 1);
    if (rs < 0)
        fatal_error("grants_init: setup_table failed: %d\n", rs);

    for (int i = NR_GRANT_ENTRIES-1; i >= NR_RESERVED_ENTRIES; i--)
    {
        free_list[i] = free_entry;
        free_entry = i;
    }

    grant_entries = mm_alloc_pages(NR_GRANT_PAGES);
    if (grant_entries == 0)
        fatal_error("grants_init: grant entries page allocation failed\n");

    for (int i = 0; i < NR_GRANT_PAGES; i++)
    {
        unsigned long ma_grant_table = frames[i] << PAGE_SHIFT;
        rs = HYPERVISOR_update_va_mapping((unsigned long)grant_entries + i*PAGE_SIZE,
                __pte(ma_grant_table | 7), UVMF_INVLPG);   /* 7 = present | writable | user */
        if (rs < 0)
            fatal_error("grants_init: update mapping failed: %d\n", rs);
    }
}
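Once the grant-table frames are mapped at grant_entries, the guest grants a frame to a peer domain simply by filling in an entry. The sketch below assumes the v1 entry layout (flags/domid/frame) from the Xen grant-table interface and a hypothetical grants_alloc_entry() helper that pops an index off the free list built above.

/* Hedged sketch: grant `domid` read/write access to machine frame `mfn`. */
static grant_ref_t grant_frame_to(domid_t domid, unsigned long mfn)
{
    int ref = grants_alloc_entry();             /* hypothetical free-list pop */

    grant_entries[ref].domid = domid;
    grant_entries[ref].frame = mfn;
    wmb();                                      /* publish frame/domid before flags */
    grant_entries[ref].flags = GTF_permit_access;

    return ref;
}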
Example 6: gnttab_post_map_adjust
int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *map, unsigned int count)
{
    unsigned int i;
    int rc = 0;

    for (i = 0; i < count && rc == 0; ++i, ++map) {
        pte_t pte;

        if (!(map->flags & GNTMAP_host_map)
            || !(map->flags & GNTMAP_application_map))
            continue;

#ifdef CONFIG_X86
        pte = __pte_ma((map->dev_bus_addr | _PAGE_PRESENT | _PAGE_USER
                        | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NX
                        | _PAGE_SPECIAL)
                       & __supported_pte_mask);
#else
#error Architecture not yet supported.
#endif
        if (!(map->flags & GNTMAP_readonly))
            pte = pte_mkwrite(pte);

        if (map->flags & GNTMAP_contains_pte) {
            mmu_update_t u;

            u.ptr = map->host_addr;
            u.val = __pte_val(pte);
            rc = HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
        } else
            rc = HYPERVISOR_update_va_mapping(map->host_addr, pte, 0);
    }

    return rc;
}
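This example switches between two update mechanisms: when GNTMAP_contains_pte is set, host_addr is the machine address of the PTE itself, so the generic mmu_update hypercall is used; otherwise host_addr is a virtual address and HYPERVISOR_update_va_mapping suffices. For reference, the mmu_update interface looks roughly like this (shapes follow the Xen public headers; treat the details as an assumption of this note).

/* Reference sketch of the alternative path used above. */
typedef struct mmu_update {
    uint64_t ptr;   /* machine address of the PTE to update (plus command bits) */
    uint64_t val;   /* new PTE contents */
} mmu_update_t;

int HYPERVISOR_mmu_update(mmu_update_t *req, unsigned int count,
                          unsigned int *success_count, domid_t domid);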
Example 7: start_kernel
/* Main kernel entry point, called by trampoline */
void start_kernel(start_info_t * start_info)
{
    /* Define hypervisor upcall entry points */
    HYPERVISOR_set_callbacks(
        FLAT_KERNEL_CS, (unsigned long)hypervisor_callback,
        FLAT_KERNEL_CS, (unsigned long)failsafe_callback);
    /* Map the shared info page (7 = present | RW | user) */
    HYPERVISOR_update_va_mapping((unsigned long) shared_info,
                                 __pte(start_info->shared_info | 7),
                                 UVMF_INVLPG);
    /* Initialise the console */
    console_init(start_info);

    /* Write a message to check that it worked */
    console_write("Hello world!\n\r");
    console_write("Xen magic string: ");
    console_write(start_info->magic);
    console_write("\n\r");

    /* Set up the XenStore driver */
    xenstore_init(start_info);

    /* Test the store */
    xenstore_test();

    /* Flush the console buffer */
    console_flush();

    /* Exit, since we don't know how to do anything else */
}
Example 8: early_make_page_readonly
static void __meminit early_make_page_readonly(void *va, unsigned int feature)
{
    unsigned long addr, _va = (unsigned long)va;
    pte_t pte, *ptep;
    unsigned long *page = (unsigned long *) init_level4_pgt;

    BUG_ON(after_bootmem);

    if (xen_feature(feature))
        return;

    addr = (unsigned long) page[pgd_index(_va)];
    addr_to_page(addr, page);

    addr = page[pud_index(_va)];
    addr_to_page(addr, page);

    addr = page[pmd_index(_va)];
    addr_to_page(addr, page);

    ptep = (pte_t *) &page[pte_index(_va)];

    pte.pte = ptep->pte & ~_PAGE_RW;
    if (HYPERVISOR_update_va_mapping(_va, pte, 0))
        BUG();
}
Example 9: suspend_gnttab
void
suspend_gnttab(void)
{
    int i;

    for (i = 0; i < NR_GRANT_FRAMES; i++) {
        /* A zero PTE unmaps the grant-table frame for the duration of the suspend. */
        HYPERVISOR_update_va_mapping((unsigned long)(((char *)gnttab_table) + PAGE_SIZE*i),
                (pte_t){0x0<<PAGE_SHIFT}, UVMF_INVLPG);
    }
}
Example 10: xen_set_identity_and_remap_chunk
/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 * 1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 * 2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
    unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
    unsigned long remap_pfn)
{
    unsigned long pfn;
    unsigned long i = 0;
    unsigned long n = end_pfn - start_pfn;

    if (remap_pfn == 0)
        remap_pfn = nr_pages;

    while (i < n) {
        unsigned long cur_pfn = start_pfn + i;
        unsigned long left = n - i;
        unsigned long size = left;
        unsigned long remap_range_size;

        /* Do not remap pages beyond the current allocation */
        if (cur_pfn >= nr_pages) {
            /* Identity map remaining pages */
            set_phys_range_identity(cur_pfn, cur_pfn + size);
            break;
        }
        if (cur_pfn + size > nr_pages)
            size = nr_pages - cur_pfn;

        remap_range_size = xen_find_pfn_range(&remap_pfn);
        if (!remap_range_size) {
            pr_warning("Unable to find available pfn range, not remapping identity pages\n");
            xen_set_identity_and_release_chunk(cur_pfn,
                cur_pfn + left, nr_pages);
            break;
        }
        /* Adjust size to fit in current e820 RAM region */
        if (size > remap_range_size)
            size = remap_range_size;

        xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

        /* Update variables to reflect new mappings. */
        i += size;
        remap_pfn += size;
    }

    /*
     * If the PFNs are currently mapped, the VA mapping also needs
     * to be updated to be 1:1.
     */
    for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
        (void)HYPERVISOR_update_va_mapping(
            (unsigned long)__va(pfn << PAGE_SHIFT),
            mfn_pte(pfn, PAGE_KERNEL_IO), 0);

    return remap_pfn;
}
Example 11: pgd_walk_set_prot
static inline void pgd_walk_set_prot(void *pt, pgprot_t flags)
{
    struct page *page = virt_to_page(pt);
    unsigned long pfn = page_to_pfn(page);

    if (PageHighMem(page))
        return;

    BUG_ON(HYPERVISOR_update_va_mapping(
        (unsigned long)__va(pfn << PAGE_SHIFT),
        pfn_pte(pfn, flags), 0));
}
Example 12: map_shared_info
static
shared_info_t *map_shared_info(unsigned long pa)
{
    if ( HYPERVISOR_update_va_mapping(
        (unsigned long)shared_info, __pte(pa | 7), UVMF_INVLPG) )   /* 7 = present | RW | user */
    {
        printk("Failed to map shared_info!!\n");
        do_exit();
    }

    return (shared_info_t *)shared_info;
}
Example 13: pte_free
void pte_free(struct page *pte)
{
    unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);

    if (!pte_write(*virt_to_ptep(va)))
        BUG_ON(HYPERVISOR_update_va_mapping(
            va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));

    ClearPageForeign(pte);
    init_page_count(pte);
    __free_page(pte);
}
Example 14: xen_arch_pre_suspend
void xen_arch_pre_suspend(void)
{
    xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
    xen_start_info->console.domU.mfn =
        mfn_to_pfn(xen_start_info->console.domU.mfn);

    BUG_ON(!irqs_disabled());

    HYPERVISOR_shared_info = &xen_dummy_shared_info;
    if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
                                     __pte_ma(0), 0))
        BUG();
}
Example 15: make_lowmem_page_readwrite
void make_lowmem_page_readwrite(void *vaddr)
{
    pte_t *pte, ptev;
    unsigned long address = (unsigned long)vaddr;

    pte = lookup_address(address);
    BUG_ON(pte == NULL);

    ptev = pte_mkwrite(*pte);

    if (HYPERVISOR_update_va_mapping(address, ptev, 0))
        BUG();
}