本文整理汇总了C++中xen_feature函数的典型用法代码示例。如果您正苦于以下问题:C++ xen_feature函数的具体用法?C++ xen_feature怎么用?C++ xen_feature使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了xen_feature函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: mm_pin_all
void mm_pin_all(void)
{
struct page *page;
if (xen_feature(XENFEAT_writable_page_tables))
return;
for (page = pgd_list; page; page = (struct page *)page->index) {
if (!test_bit(PG_pinned, &page->flags))
__pgd_pin((pgd_t *)page_address(page));
}
}
示例2: make_pages_writable
/*
 * Make 'nr' consecutive virtual pages starting at 'va' writable again.
 *
 * 'feature' names the XENFEAT_* capability that, when present, makes
 * the whole operation unnecessary (the hypervisor already keeps the
 * pages writable), in which case we return immediately.
 */
void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
{
	unsigned char *addr = va;

	if (xen_feature(feature))
		return;

	for (; nr != 0; nr--) {
		make_page_writable(addr, feature);
		addr += PAGE_SIZE;	/* advance one page at a time */
	}
}
示例3: make_pages_readonly
/*
 * Make 'nr' consecutive virtual pages starting at 'va' read-only.
 *
 * Skipped entirely when the hypervisor advertises 'feature'
 * (e.g. writable page tables), since the protection change is then
 * not required by the paravirt protocol.
 */
void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
{
	unsigned char *addr = va;

	if (xen_feature(feature))
		return;

	for (; nr != 0; nr--) {
		__make_page_readonly(addr);
		addr += PAGE_SIZE;	/* next page in the range */
	}
}
示例4: xen_hvm_set_callback
/*
 * Tell the hypervisor how to contact us for event channel callbacks.
 *
 * Preference order:
 *   1. Per-CPU vector callback (XENFEAT_hvm_callback_vector), with a
 *      fallback to a global IDT vector if per-CPU registration fails.
 *   2. Emulated device interrupt derived from 'dev' (GSI for legacy
 *      IRQs < 16, PCI INTx otherwise).
 *
 * 'dev' may be NULL during early boot or resume; in that case the
 * device-interrupt fallback is deferred until xenpci calls us again.
 */
void
xen_hvm_set_callback(device_t dev)
{
struct xen_hvm_param xhp;
int irq;
/* Already configured by an earlier call - nothing to do. */
if (xen_vector_callback_enabled)
return;
xhp.domid = DOMID_SELF;
xhp.index = HVM_PARAM_CALLBACK_IRQ;
if (xen_feature(XENFEAT_hvm_callback_vector) != 0) {
int error;
/* Try the per-CPU callback first. */
error = set_percpu_callback(0);
if (error == 0) {
/* Per-CPU callbacks require explicit EOI-style acks. */
xen_evtchn_needs_ack = true;
/* Trick toolstack to think we are enlightened */
xhp.value = 1;
} else
xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN);
error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);
if (error == 0) {
xen_vector_callback_enabled = 1;
return;
} else if (xen_evtchn_needs_ack)
/*
 * Per-CPU callback succeeded but the fake HVM param did
 * not; we cannot unwind the per-CPU state, so panic.
 */
panic("Unable to setup fake HVM param: %d", error);
printf("Xen HVM callback vector registration failed (%d). "
"Falling back to emulated device interrupt\n", error);
}
xen_vector_callback_enabled = 0;
if (dev == NULL) {
/*
 * Called from early boot or resume.
 * xenpci will invoke us again later.
 */
return;
}
irq = pci_get_irq(dev);
if (irq < 16) {
/* Legacy ISA-range IRQ: register as a GSI callback. */
xhp.value = HVM_CALLBACK_GSI(irq);
} else {
u_int slot;
u_int pin;
/* Otherwise describe the interrupt as PCI INTx (slot, pin). */
slot = pci_get_slot(dev);
pin = pci_get_intpin(dev) - 1;
xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin);
}
if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0)
panic("Can't set evtchn callback");
}
示例5: xen_setup_mfn_list_list
/*
 * Publish the location of the P2M top-level frame list to the
 * hypervisor via the shared info page, so tools (e.g. save/restore)
 * can walk our pfn-to-mfn mapping.  Unnecessary (and skipped) for
 * auto-translated guests, which have no P2M of their own.
 */
void xen_setup_mfn_list_list(void)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
/* Must run after the real shared info page has been mapped. */
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
virt_to_mfn(p2m_top_mfn);
HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
}
示例6: xen_machphys_update
/*
 * Record in the hypervisor's machine-to-physical table that machine
 * frame 'mfn' now corresponds to pseudo-physical frame 'pfn'.
 *
 * Auto-translated guests have no M2P table to maintain; in that mode
 * the two frame numbers must already be identical.
 */
void xen_machphys_update(unsigned long mfn, unsigned long pfn)
{
	mmu_update_t update;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		/* In auto-translated mode mfn and pfn coincide. */
		BUG_ON(pfn != mfn);
		return;
	}

	update.val = pfn;
	/* Encode the target machine address plus the M2P update opcode. */
	update.ptr = ((unsigned long long)mfn << PAGE_SHIFT) |
		     MMU_MACHPHYS_UPDATE;
	BUG_ON(HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0);
}
示例7: xen_hvm_need_lapic
/*
 * Decide whether this guest still needs the emulated local APIC.
 *
 * Returns false when running natively (xen_nopv), as a PV domain,
 * outside an HVM domain, or when Xen provides both HVM PIRQs and the
 * vector callback (so interrupts bypass the LAPIC); true otherwise.
 */
bool xen_hvm_need_lapic(void)
{
	/* Guard clauses, evaluated in the same short-circuit order. */
	if (xen_nopv || xen_pv_domain() || !xen_hvm_domain())
		return false;

	/* PIRQs + vector callback means the emulated LAPIC is unused. */
	return !(xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback);
}
示例8: xen_set_identity_and_release
/*
 * Walk the e820 memory map, set 1:1 mappings for non-RAM regions and
 * release any backing pages they hold.  Returns the number of pages
 * released.  For auto-translated (PVH) guests only the released /
 * identity statistics are adjusted, no mappings are changed.
 */
static unsigned long __init xen_set_identity_and_release(
const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
phys_addr_t start = 0;
unsigned long released = 0;
unsigned long identity = 0;
const struct e820entry *entry;
int i;
int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);
/*
 * Combine non-RAM regions and gaps until a RAM region (or the
 * end of the map) is reached, then set the 1:1 map and
 * release the pages (if available) in those non-RAM regions.
 *
 * The combined non-RAM regions are rounded to a whole number
 * of pages so any partial pages are accessible via the 1:1
 * mapping. This is needed for some BIOSes that put (for
 * example) the DMI tables in a reserved region that begins on
 * a non-page boundary.
 */
for (i = 0, entry = list; i < map_size; i++, entry++) {
phys_addr_t end = entry->addr + entry->size;
/* Flush the accumulated non-RAM span at a RAM entry or map end. */
if (entry->type == E820_RAM || i == map_size - 1) {
unsigned long start_pfn = PFN_DOWN(start);
unsigned long end_pfn = PFN_UP(end);
/*
 * For a RAM entry the span ends where the RAM begins;
 * PFN_UP keeps any partial leading page in the 1:1 map.
 */
if (entry->type == E820_RAM)
end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn) {
if (xlated_phys) {
/* PVH: only account, don't touch mappings. */
xen_pvh_adjust_stats(start_pfn,
end_pfn, &released, &identity,
nr_pages);
} else {
xen_set_identity_and_release_chunk(
start_pfn, end_pfn, nr_pages,
&released, &identity);
}
}
/* Next non-RAM span begins after this entry. */
start = end;
}
}
if (released)
printk(KERN_INFO "Released %lu pages of unused memory\n", released);
if (identity)
printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
return released;
}
示例9: xen_smp_prepare_boot_cpu
/*
 * Prepare the boot CPU for SMP bring-up under Xen.  Runs the native
 * preparation first, then re-enables write access to the initial GDT
 * (needed only when page tables are read-only under the classic PV
 * protocol) and sets up vcpu_info placement.
 */
static void __init xen_smp_prepare_boot_cpu(void)
{
/* Must run on the boot processor. */
BUG_ON(smp_processor_id() != 0);
native_smp_prepare_boot_cpu();
if (!xen_feature(XENFEAT_writable_page_tables)) {
/* We've switched to the "real" per-cpu gdt, so make sure the
* old memory can be recycled */
make_lowmem_page_readwrite(xen_initial_gdt);
}
xen_filter_cpu_maps();
xen_setup_vcpu_info_placement();
}
示例10: make_lowmem_page_writable
/*
 * Restore write permission on a single low-memory page at 'va' by
 * updating its PTE through a hypercall.  Skipped when the hypervisor
 * advertises 'feature' (the page is then already writable).
 */
void make_lowmem_page_writable(void *va, unsigned int feature)
{
	pte_t *ptep;
	int err;

	if (xen_feature(feature))
		return;

	ptep = virt_to_ptep(va);
	err = HYPERVISOR_update_va_mapping((unsigned long)va,
					   pte_mkwrite(*ptep), 0);
	/* The hypercall must not fail for a valid low-memory mapping. */
	BUG_ON(err);
}
示例11: gnttab_map_refs
/*
 * Map a batch of 'count' grant references described by 'map_ops'
 * (with optional kernel-space companions 'kmap_ops') into the pages
 * in 'pages', retrying transient GNTST_eagain failures, and record an
 * m2p override for each successfully mapped frame.  Returns 0 on
 * success or a negative error.  Auto-translated guests need no m2p
 * overrides, so the function returns right after the hypercall.
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
{
int i, ret;
bool lazy = false;
pte_t *pte;
unsigned long mfn;
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
if (ret)
return ret;
/* Retry eagain maps */
for (i = 0; i < count; i++)
if (map_ops[i].status == GNTST_eagain)
gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
&map_ops[i].status, __func__);
if (xen_feature(XENFEAT_auto_translated_physmap))
return ret;
/* Batch the PTE updates under lazy MMU mode when safe to do so. */
if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
lazy = true;
}
for (i = 0; i < count; i++) {
/* Do not add to override if the map failed. */
if (map_ops[i].status)
continue;
if (map_ops[i].flags & GNTMAP_contains_pte) {
/* The frame is named indirectly via a PTE at host_addr. */
pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
(map_ops[i].host_addr & ~PAGE_MASK));
mfn = pte_mfn(*pte);
} else {
mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
ret = m2p_add_override(mfn, pages[i], kmap_ops ?
&kmap_ops[i] : NULL);
if (ret)
goto out;
}
out:
if (lazy)
arch_leave_lazy_mmu_mode();
return ret;
}
示例12: gnttab_map_refs
/*
 * Older variant of gnttab_map_refs without kmap_ops support: map
 * 'count' grant references into 'pages' and record an m2p override
 * for each success.  Only GNTMAP_contains_pte mappings are supported
 * (see the comment in the else branch); others return -EOPNOTSUPP.
 * Auto-translated guests skip the override bookkeeping entirely.
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct page **pages, unsigned int count)
{
int i, ret;
pte_t *pte;
unsigned long mfn;
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
if (ret)
return ret;
if (xen_feature(XENFEAT_auto_translated_physmap))
return ret;
for (i = 0; i < count; i++) {
/* Do not add to override if the map failed. */
if (map_ops[i].status)
continue;
if (map_ops[i].flags & GNTMAP_contains_pte) {
/* Read the mapped frame number out of the PTE itself. */
pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
(map_ops[i].host_addr & ~PAGE_MASK));
mfn = pte_mfn(*pte);
} else {
/* If you really wanted to do this:
* mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
*
* The reason we do not implement it is b/c on the
* unmap path (gnttab_unmap_refs) we have no means of
* checking whether the page is !GNTMAP_contains_pte.
*
* That is without some extra data-structure to carry
* the struct page, bool clear_pte, and list_head next
* tuples and deal with allocation/delallocation, etc.
*
* The users of this API set the GNTMAP_contains_pte
* flag so lets just return not supported until it
* becomes neccessary to implement.
*/
return -EOPNOTSUPP;
}
ret = m2p_add_override(mfn, pages[i],
map_ops[i].flags & GNTMAP_contains_pte);
if (ret)
return ret;
}
return ret;
}
示例13: xen_build_dynamic_phys_to_machine
/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
unsigned long pfn;
/* Auto-translated guests have no P2M to build. */
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
/* Adopt the p2m list the domain builder handed us via start_info. */
xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);
/* Mark the padding entries past nr_pages as invalid. */
for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;
xen_max_p2m_pfn = xen_p2m_size;
}
示例14: pgd_dtor
/*
 * Destructor for pgd slab objects.  In PAE mode (PTRS_PER_PMD > 1)
 * tears down the contiguous-region allocation unless the hypervisor
 * allows PAE pgds above 4GB; otherwise removes the pgd from the
 * global list (under pgd_lock) and unpins it if needed.
 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
unsigned long flags; /* can be called from interrupt context */
if (PTRS_PER_PMD > 1) {
if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
xen_destroy_contiguous_region((unsigned long)pgd, 0);
} else {
/* irqsave because this may run in interrupt context. */
spin_lock_irqsave(&pgd_lock, flags);
pgd_list_del(pgd);
spin_unlock_irqrestore(&pgd_lock, flags);
pgd_test_and_unpin(pgd);
}
}
示例15: xen_hvm_init_time_ops
/*
 * Install Xen's paravirtualized time sources on an HVM guest.
 *
 * Requires the vector callback (for timer event delivery) and
 * XENFEAT_hvm_safe_pvclock (pvclock usable under HVM); without either,
 * the native time setup is left in place.  On success, redirects the
 * pv time ops, per-CPU clockevent setup, TSC calibration, and the
 * wallclock accessors to their Xen implementations.
 */
void __init xen_hvm_init_time_ops(void)
{
/* Timer interrupts arrive via the vector callback; bail without it. */
if (!xen_have_vector_callback)
return;
if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
/*
 * Fixed message: the original split string concatenated to
 * "...on HVM,disable pv timer" with no space after the comma.
 */
printk(KERN_INFO "Xen doesn't support pvclock on HVM, "
"disable pv timer\n");
return;
}
pv_time_ops = xen_time_ops;
x86_init.timers.setup_percpu_clockev = xen_time_init;
x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
x86_platform.calibrate_tsc = xen_tsc_khz;
x86_platform.get_wallclock = xen_get_wallclock;
x86_platform.set_wallclock = xen_set_wallclock;
}