本文整理汇总了C++中HYPERVISOR_vcpu_op函数的典型用法代码示例。如果您正苦于以下问题:C++ HYPERVISOR_vcpu_op函数的具体用法?C++ HYPERVISOR_vcpu_op怎么用?C++ HYPERVISOR_vcpu_op使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了HYPERVISOR_vcpu_op函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: xen_vcpu_initialize
/*
 * Initialise virtual CPU `id` with the guest context `vgc` via the
 * VCPUOP_initialise hypercall.  On failure the (overloaded) hypervisor
 * error code is decoded into a human-readable string and logged; a
 * double initialisation or an unrecognised code indicates a kernel
 * logic error and escalates to a panic.  Returns the raw hypercall
 * result (0 on success, negative Xen error otherwise).
 */
static int
xen_vcpu_initialize(processorid_t id, vcpu_guest_context_t *vgc)
{
	int err = HYPERVISOR_vcpu_op(VCPUOP_initialise, id, vgc);

	if (err != 0) {
		char *str;
		int level = CE_WARN;

		switch (err) {
		case -X_EINVAL:
			/*
			 * This interface squashes multiple error sources
			 * to one error code. In particular, an X_EINVAL
			 * code can mean:
			 *
			 * - the vcpu id is out of range
			 * - cs or ss are in ring 0
			 * - cr3 is wrong
			 * - an entry in the new gdt is above the
			 *   reserved entry
			 * - a frame underneath the new gdt is bad
			 */
			str = "something is wrong :(";
			break;
		case -X_ENOENT:
			str = "no such cpu";
			break;
		case -X_ENOMEM:
			str = "no mem to copy ctxt";
			break;
		case -X_EFAULT:
			str = "bad address";
			break;
		case -X_EEXIST:
			/*
			 * Returned when the vcpu has already been
			 * initialised once before in the lifetime of
			 * this domain -- a logic error in the kernel.
			 */
			level = CE_PANIC;
			str = "already initialized";
			break;
		default:
			level = CE_PANIC;
			str = "<unexpected>";
			break;
		}
		cmn_err(level, "vcpu%d: failed to init: error %d: %s",
		    id, -err, str);
	}
	return (err);
}
示例2: cpu_initialize_context
/*
 * Build and register the initial execution context for secondary vCPU
 * `cpu`.  Fills in a vcpu_guest_context (segment registers, entry
 * point, trap table, GDT frames, kernel stack, callbacks and cr3) and
 * hands it to Xen via VCPUOP_initialise.  Returns 0 on success (or if
 * the vCPU was already initialised), -ENOMEM if the context cannot be
 * allocated; a rejected hypercall is treated as fatal (BUG).
 */
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
struct vcpu_guest_context *ctxt;
struct gdt_page *gdt = &per_cpu(gdt_page, cpu);
/* Each vCPU may only be initialised once per domain lifetime. */
if (cpu_test_and_set(cpu, cpu_initialized_map))
return 0;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (ctxt == NULL)
return -ENOMEM;
ctxt->flags = VGCF_IN_KERNEL;
ctxt->user_regs.ds = __USER_DS;
ctxt->user_regs.es = __USER_DS;
ctxt->user_regs.fs = __KERNEL_PERCPU;
ctxt->user_regs.gs = 0;
ctxt->user_regs.ss = __KERNEL_DS;
/* The new vCPU begins execution at cpu_bringup_and_idle. */
ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
xen_copy_trap_info(ctxt->trap_ctxt);
ctxt->ldt_ents = 0;
/* The GDT handed to Xen must be page-aligned and read-only. */
BUG_ON((unsigned long)gdt->gdt & ~PAGE_MASK);
make_lowmem_page_readonly(gdt->gdt);
ctxt->gdt_frames[0] = virt_to_mfn(gdt->gdt);
ctxt->gdt_ents = ARRAY_SIZE(gdt->gdt);
ctxt->user_regs.cs = __KERNEL_CS;
/* Leave room for an initial pt_regs frame on the idle task's stack. */
ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
ctxt->kernel_ss = __KERNEL_DS;
ctxt->kernel_sp = idle->thread.sp0;
ctxt->event_callback_cs = __KERNEL_CS;
ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
ctxt->failsafe_callback_cs = __KERNEL_CS;
ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;
/* The new vCPU starts on the swapper (initial) page tables. */
per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
BUG();
/* Xen has taken its own copy; the local context is no longer needed. */
kfree(ctxt);
return 0;
}
示例3: stop_self
/*
 * IPI callback: take the current CPU offline via VCPUOP_down.  On
 * success the hypercall does not return, so reaching BUG() means the
 * hypervisor refused to bring the vCPU down.
 */
static void stop_self(void *v)
{
int cpu = smp_processor_id();
/* make sure we're not pinning something down */
load_cr3(swapper_pg_dir);
/* should set up a minimal gdt */
HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
BUG();
}
示例4: xen_timer_resume
/*
 * Resume hook for the Xen timer.  When the per-vcpu clockevent
 * mechanism (xen_vcpuop_clockevent) is in use, the hypervisor's
 * default periodic timer must be stopped again on every online vCPU;
 * a failing hypercall is fatal.
 */
void xen_timer_resume(void)
{
	int cpu;

	/* Nothing to do unless the vcpuop clockevent is active. */
	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
					    cpu, NULL);
		if (rc)
			BUG();
	}
}
示例5: xen_fill_possible_map
/*
 * Populate the possible-CPU map at boot by probing the hypervisor.
 * VCPUOP_is_up returns a non-negative value for every vcpu id the
 * hypervisor knows about, whether or not it is currently up.
 */
static void __init xen_fill_possible_map(void)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++) {
		if (HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL) >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}
示例6: xen_fill_possible_map
/*
 * Populate cpu_possible_map at boot by probing the hypervisor.
 * VCPUOP_is_up returns a non-negative value for every vcpu id the
 * hypervisor knows about, whether or not it is currently up.
 */
static void __init xen_fill_possible_map(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL) >= 0) {
			num_processors++;
			cpu_set(i, cpu_possible_map);
		}
	}
}
示例7: xen_vcpu_restore
/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
int cpu;
for_each_online_cpu(cpu) {
bool other_cpu = (cpu != smp_processor_id());
/* Take every CPU except ourselves down before reprogramming it. */
if (other_cpu &&
HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
BUG();
xen_setup_runstate_info(cpu);
/* Re-register per-cpu vcpu_info placement if it was in use. */
if (have_vcpu_info_placement)
xen_vcpu_setup(cpu);
/* Bring the vCPU back up once its state is re-established. */
if (other_cpu &&
HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
BUG();
}
}
示例8: init_smp
/*
 * Bring up all secondary vCPUs (ids 1..MAX_VIRT_CPUS-1).  CPU0 is
 * assumed to be fully initialised on entry; for each further vCPU the
 * hypervisor reports as known (VCPUOP_is_up >= 0), a context is built
 * and the vCPU is started with VCPUOP_up.
 */
void init_smp(void)
{
unsigned int cpu;
int res;
memset(percpu, 0, sizeof(struct cpu_private) * MAX_VIRT_CPUS);
init_cpu_pda(0);
/*
 * Init of CPU0 is completed, smp_init_completed must be set before we
 * initialise remaining CPUs, because the smp_processor_id macro will
 * not work properly
 */
smp_init_completed = 1;
/*
 * We have now completed the init of cpu0
 */
if (trace_smp())
tprintk("Initing SMP cpus.\n");
for (cpu = 1; cpu < MAX_VIRT_CPUS; cpu++) {
per_cpu(cpu, cpu_state) = CPU_DOWN;
/* Non-negative means the hypervisor knows this vcpu id. */
res = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
if (res >= 0) {
if (trace_smp())
tprintk("Bringing up CPU=%d\n", cpu);
cpu_initialize_context(cpu);
/* Once the context is registered, VCPUOP_up must succeed. */
BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL));
spin_lock(&cpu_lock);
smp_active++;
spin_unlock(&cpu_lock);
}
}
if (trace_smp()) {
tprintk("SMP: %d CPUs active\n", smp_active);
for (cpu = 0; cpu < MAX_VIRT_CPUS; cpu++) {
tprintk("SMP: cpu_state %d %d\n", cpu, per_cpu(cpu, cpu_state));
}
}
if (trace_sched()) ttprintk("SMP %d\n", smp_active);
}
示例9: xen_cpu_up
/*
 * Boot/hotplug entry point: bring vCPU `cpu` online.  Prepares the
 * per-cpu state (pda/gdt/idle task, timer, spinlock and IPI setup),
 * registers the initial context with Xen, kicks the vCPU with
 * VCPUOP_up, then yields until the new CPU marks itself CPU_ONLINE.
 * Returns 0 on success or a negative errno from a setup step.
 */
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
struct task_struct *idle = idle_task(cpu);
int rc;
#ifdef CONFIG_X86_64
/* Allocate node local memory for AP pdas */
WARN_ON(cpu == 0);
if (cpu > 0) {
rc = get_local_pda(cpu);
if (rc)
return rc;
}
#endif
#ifdef CONFIG_X86_32
init_gdt(cpu);
per_cpu(current_task, cpu) = idle;
irq_ctx_init(cpu);
#else
cpu_pda(cpu)->pcurrent = idle;
clear_tsk_thread_flag(idle, TIF_FORK);
#endif
xen_setup_timer(cpu);
xen_init_lock_cpu(cpu);
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
/* make sure interrupts start blocked */
per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
rc = cpu_initialize_context(cpu, idle);
if (rc)
return rc;
/* Going from 1 to 2 CPUs: switch in the SMP alternative code. */
if (num_online_cpus() == 1)
alternatives_smp_switch(1);
rc = xen_smp_intr_init(cpu);
if (rc)
return rc;
rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
BUG_ON(rc);
/* Yield to the hypervisor until the new CPU reports itself online. */
while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
HYPERVISOR_sched_op(SCHEDOP_yield, 0);
barrier();
}
return 0;
}
示例10: mptable_probe_cpus
/*
 * Probe the hypervisor for available vCPUs and register each one.
 * VCPUOP_is_up returns a non-negative value for every vcpu id the
 * hypervisor knows about; id 0 is registered as the boot CPU.
 * Always returns 0.
 */
static int
mptable_probe_cpus(void)
{
	int i;

	for (i = 0; i < MAXCPU; i++) {
		if (HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL) >= 0)
			cpu_add(i, (i == 0));
	}
	return (0);
}
示例11: xen_play_dead
/*
 * CPU-offline path (HOTPLUG_CPU only): ask the hypervisor to take this
 * vCPU down.  VCPUOP_down returns when the vCPU is later brought back
 * up, at which point cpu_bringup() re-initialises the CPU before we
 * return into the idle loop.
 */
static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
play_dead_common();
HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
cpu_bringup();
/*
 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
 * clears certain data that the cpu_idle loop (which called us
 * and that we return from) expects. The only way to get that
 * data back is to call:
 */
tick_nohz_idle_enter();
}
示例12: stop_self
/*
 * IPI callback: mark this CPU offline and take the vCPU down via the
 * hypervisor.  On success VCPUOP_down does not return, so BUG() is
 * only reached if the hypercall fails.
 */
static void stop_self(void *v)
{
int cpu = smp_processor_id();
/* Switch to the swapper page tables so nothing stays pinned by us. */
load_cr3(swapper_pg_dir);
set_cpu_online(cpu, false);
HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
BUG();
}
示例13: xen_play_dead
/*
 * CPU-offline path (HOTPLUG_CPU only): ask the hypervisor to take this
 * vCPU down.  VCPUOP_down returns when the vCPU is later brought back
 * up, at which point cpu_bringup() re-initialises the CPU.
 */
static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
play_dead_common();
HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
cpu_bringup();
/*
 * Balance out the preempt calls - as we are running in cpu_idle
 * loop which has been called at bootup from cpu_bringup_and_idle.
 * cpu_bringup_and_idle called cpu_bringup which made a
 * preempt_disable() So this preempt_enable will balance it out.
 */
preempt_enable();
}
示例14: xen_vcpu_setup
/*
 * Point the per-cpu xen_vcpu pointer at the right vcpu_info structure.
 * By default it aliases the slot in the shared info page (only
 * available for cpu < MAX_VIRT_CPUS); when vcpu_info placement is
 * enabled, Xen is asked to map the vcpu_info into a per-cpu variable
 * so it can be accessed directly.  If registration fails, placement is
 * disabled globally and the CPU count is clamped.
 */
static void xen_vcpu_setup(int cpu)
{
struct vcpu_register_vcpu_info info;
int err;
struct vcpu_info *vcpup;
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
/*
 * This path is called twice on PVHVM - first during bootup via
 * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
 * hotplugged: cpu_up -> xen_hvm_cpu_notify.
 * As we can only do the VCPUOP_register_vcpu_info once lets
 * not over-write its result.
 *
 * For PV it is called during restore (xen_vcpu_restore) and bootup
 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
 * use this function.
 */
if (xen_hvm_domain()) {
if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
return;
}
/* Fallback: alias the slot in the shared info page, if one exists. */
if (cpu < MAX_VIRT_CPUS)
per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
if (!have_vcpu_info_placement) {
/* Without placement, CPUs beyond the shared page are unusable. */
if (cpu >= MAX_VIRT_CPUS)
clamp_max_cpus();
return;
}
vcpup = &per_cpu(xen_vcpu_info, cpu);
info.mfn = arbitrary_virt_to_mfn(vcpup);
info.offset = offset_in_page(vcpup);
/* Check to see if the hypervisor will put the vcpu_info
structure where we want it, which allows direct access via
a percpu-variable. */
err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
if (err) {
printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
have_vcpu_info_placement = 0;
clamp_max_cpus();
} else {
/* This cpu is using the registered vcpu info, even if
later ones fail to. */
per_cpu(xen_vcpu, cpu) = vcpup;
}
}
示例15: play_dead
/*
 * Take the current CPU offline via VCPUOP_down.  The hypercall returns
 * only when the vCPU is brought up again, after which the CPU state is
 * re-initialised before returning to the caller.
 */
static inline void play_dead(void)
{
extern void idle_task_exit(void); /* XXXAP find proper place */
idle_task_exit();
local_irq_disable();
/* Mark this CPU as needing re-initialisation on its next bringup. */
cpu_clear(smp_processor_id(), cpu_initialized);
preempt_enable_no_resched();
HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
/* Same as arch/xen/kernel/smpboot.c:cpu_bringup(). */
cpu_init();
preempt_disable();
local_irq_enable();
}