本文整理汇总了C++中read_eflags函数的典型用法代码示例。如果您正苦于以下问题:C++ read_eflags函数的具体用法?C++ read_eflags怎么用?C++ read_eflags使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了read_eflags函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: bi_checkcpu
/*
 * Detect whether the running CPU is capable of long (64-bit) mode.
 * Returns nonzero iff the CPUID instruction exists, the vendor is
 * Intel or AMD, and extended leaf 0x80000001 reports the LM bit.
 */
static int
bi_checkcpu(void)
{
	int flags;
	int words[4];
	int name[3];
	const char *vendor_str;

	/* CPUID exists iff the PSL_ID bit in EFLAGS can be toggled. */
	flags = read_eflags();
	write_eflags(flags ^ PSL_ID);
	if (((flags ^ read_eflags()) & PSL_ID) == 0)
		return (0);

	/* Leaf 0: reassemble the 12-byte vendor string from the
	 * registers returned by do_cpuid(). */
	do_cpuid(0, words);
	name[0] = words[1];
	name[1] = words[3];
	name[2] = words[2];
	vendor_str = (const char *)name;

	/* Only these vendors implement the AMD extended leaves. */
	if (strncmp(vendor_str, "GenuineIntel", 12) != 0 &&
	    strncmp(vendor_str, "AuthenticAMD", 12) != 0)
		return (0);

	/* Extended leaf 0x80000001 must be available. */
	do_cpuid(0x80000000, words);
	if (words[0] < 0x80000001)
		return (0);

	/* EDX bit AMDID_LM advertises long mode. */
	do_cpuid(0x80000001, words);
	return (words[3] & AMDID_LM);
}
示例2: BX_ERROR
// Deliver an interrupt while the CPU is in real mode: push FLAGS, CS
// and IP, load the new CS:IP from the 4-byte IVT entry for 'vector'
// (at idtr.base + vector*4), then clear IF/TF/AC/RF.
// NOTE(review): is_INT, is_error_code and error_code are unused in
// this body; presumably kept for signature parity with the
// protected-mode interrupt path -- confirm against callers.
void BX_CPU_C::real_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error_code, Bit16u error_code)
{
// real mode interrupt
Bit16u cs_selector, ip;
// The whole 4-byte IVT entry must lie within the IDT limit.
if ((vector*4+3) > BX_CPU_THIS_PTR idtr.limit) {
BX_ERROR(("interrupt(real mode) vector > idtr.limit"));
exception(BX_GP_EXCEPTION, 0, 0);
}
// Push return context: FLAGS, then CS, then IP.
push_16((Bit16u) read_eflags());
cs_selector = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
push_16(cs_selector);
ip = EIP;
push_16(ip);
// Fetch new IP (entry offset 0) and CS (entry offset 2) from the IVT.
access_read_linear(BX_CPU_THIS_PTR idtr.base + 4 * vector, 2, 0, BX_READ, &ip);
EIP = (Bit32u) ip;
access_read_linear(BX_CPU_THIS_PTR idtr.base + 4 * vector + 2, 2, 0, BX_READ, &cs_selector);
load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_selector);
/* INT affects the following flags: I,T */
BX_CPU_THIS_PTR clear_IF();
BX_CPU_THIS_PTR clear_TF();
#if BX_CPU_LEVEL >= 4
BX_CPU_THIS_PTR clear_AC();
#endif
BX_CPU_THIS_PTR clear_RF();
}
示例3: trap
/*
 * trap() -- common kernel entry point for all traps and interrupts
 * on this CPU.  Saves user-mode trapframes into curenv, dispatches
 * the trap, and either resumes the current environment or yields.
 */
void
trap(struct Trapframe *tf)
{
// The environment may have set DF and some versions
// of GCC rely on DF being clear
asm volatile("cld" ::: "cc");
// Halt the CPU if some other CPU has called panic()
extern char *panicstr;
if (panicstr)
asm volatile("hlt");
// Re-acquire the big kernel lock if we were halted in
// sched_yield()
if (xchg(&thiscpu->cpu_status, CPU_STARTED) == CPU_HALTED)
lock_kernel();
// Check that interrupts are disabled. If this assertion
// fails, DO NOT be tempted to fix it by inserting a "cli" in
// the interrupt path.
assert(!(read_eflags() & FL_IF));
if ((tf->tf_cs & 3) == 3) {
// Trapped from user mode.
// Acquire the big kernel lock before doing any
// serious kernel work.
// LAB 4: Your code here.
assert(curenv);
lock_kernel();
// Garbage collect if current environment is a zombie
if (curenv->env_status == ENV_DYING) {
env_free(curenv);
curenv = NULL;
sched_yield();
}
// Copy trap frame (which is currently on the stack)
// into 'curenv->env_tf', so that running the environment
// will restart at the trap point.
curenv->env_tf = *tf;
// The trapframe on the stack should be ignored from here on.
tf = &curenv->env_tf;
}
// Record that tf is the last real trapframe so
// print_trapframe can print some additional information.
last_tf = tf;
// Dispatch based on what type of trap occurred
trap_dispatch(tf);
// If we made it to this point, then no other environment was
// scheduled, so we should return to the current environment
// if doing so makes sense.
if (curenv && curenv->env_status == ENV_RUNNING)
env_run(curenv);
else
sched_yield();
}
示例4: BX_ERROR
// Deliver an interrupt while the CPU is in real mode: push FLAGS, CS
// and IP, load the new CS:IP from the 4-byte IVT entry for 'vector',
// then clear IF/TF/AC/RF.
// NOTE(review): push_error and error_code are unused in this body;
// presumably kept for signature parity with the protected-mode
// interrupt path -- confirm against callers.
void BX_CPU_C::real_mode_int(Bit8u vector, bx_bool push_error, Bit16u error_code)
{
// The whole 4-byte IVT entry must lie within the IDT limit.
if ((vector*4+3) > BX_CPU_THIS_PTR idtr.limit) {
BX_ERROR(("interrupt(real mode) vector > idtr.limit"));
exception(BX_GP_EXCEPTION, 0);
}
// Push return context: FLAGS, then CS, then IP.
push_16((Bit16u) read_eflags());
push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
push_16(IP);
Bit16u new_ip = system_read_word(BX_CPU_THIS_PTR idtr.base + 4 * vector);
// CS.LIMIT can't change when in real/v8086 mode
if (new_ip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
BX_ERROR(("interrupt(real mode): instruction pointer not within code segment limits"));
exception(BX_GP_EXCEPTION, 0);
}
// New CS lives at entry offset 2.
Bit16u cs_selector = system_read_word(BX_CPU_THIS_PTR idtr.base + 4 * vector + 2);
load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_selector);
EIP = new_ip;
/* INT affects the following flags: I,T */
BX_CPU_THIS_PTR clear_IF();
BX_CPU_THIS_PTR clear_TF();
#if BX_CPU_LEVEL >= 4
BX_CPU_THIS_PTR clear_AC();
#endif
BX_CPU_THIS_PTR clear_RF();
}
示例5: trap
/*
 * trap() -- kernel entry point for every trap and interrupt.
 * Copies user-mode trapframes into curenv before dispatching, then
 * resumes curenv or yields the processor.
 */
void
trap(struct Trapframe *tf)
{
	/* GCC-generated code assumes the direction flag is clear. */
	asm volatile("cld" : : : "cc");

	/* Interrupts are expected to be off inside the kernel. */
	assert(!(read_eflags() & FL_IF));

	if ((tf->tf_cs & 3) == 3) {
		/*
		 * Trapped from user mode: preserve the hardware frame
		 * in curenv->env_tf so env_run() restarts at the trap
		 * point, and work from that saved copy from here on.
		 */
		assert(curenv);
		curenv->env_tf = *tf;
		tf = &curenv->env_tf;
	}

	trap_dispatch(tf);

	/* Nothing else was scheduled; go back to curenv if possible. */
	if (curenv && curenv->env_status == ENV_RUNNABLE)
		env_run(curenv);
	else
		sched_yield();
}
示例6: cpu_idle_mwait_cycle
/*
 * Idle this CPU with MONITOR/MWAIT, waking when another CPU writes
 * ci->ci_mwait (or on an interrupt).
 */
void
cpu_idle_mwait_cycle(void)
{
struct cpu_info *ci = curcpu();
/* MWAIT with interrupts blocked could never be woken for work. */
if ((read_eflags() & PSL_I) == 0)
panic("idle with interrupts blocked!");
/* something already queued? */
if (!cpu_is_idle(ci))
return;
/*
 * About to idle; setting MWAIT_IDLING tells cpu_unidle() that it
 * can't be a no-op, and MWAIT_ONLY tells cpu_kick() that it does
 * not need to use an IPI; those routines clear the bits to stop
 * the mwait.  Once the bits are set, do a final check of the run
 * queue, in case another cpu called setrunqueue() and added
 * something to the queue and called cpu_unidle() between the
 * check in sched_idle() and here.
 * NOTE(review): bit names in this comment were updated to match
 * the constants actually used below (the old text referenced
 * MWAIT_IN_IDLE/MWAIT_KEEP_IDLING); confirm against the
 * cpu_unidle()/cpu_kick() implementations in this tree.
 */
atomic_setbits_int(&ci->ci_mwait, MWAIT_IDLING | MWAIT_ONLY);
if (cpu_is_idle(ci)) {
monitor(&ci->ci_mwait, 0, 0);
/* Only sleep if nobody cleared our idling bits meanwhile. */
if ((ci->ci_mwait & MWAIT_IDLING) == MWAIT_IDLING)
mwait(0, 0);
}
/* done idling; let cpu_kick() know that an IPI is required */
atomic_clearbits_int(&ci->ci_mwait, MWAIT_IDLING);
}
示例7: trap
/*
 * trap() -- kernel entry point for all traps and interrupts.
 * Stashes user-mode trapframes in curenv, dispatches, then either
 * re-enters the current environment or yields.
 */
void
trap(struct Trapframe *tf)
{
	int from_user;

	/* The environment may have set DF, and some versions of GCC
	 * emit code that relies on DF being clear. */
	asm volatile("cld" ::: "cc");

	/* Interrupts must be disabled on every kernel entry.  If this
	 * assertion fires, do NOT "fix" it by inserting a cli in the
	 * interrupt path. */
	assert(!(read_eflags() & FL_IF));

	from_user = (tf->tf_cs & 3) == 3;
	if (from_user) {
		/* Save the hardware trapframe into the environment so
		 * a later env_run() resumes at the trap point; the
		 * stack copy is ignored from here on. */
		assert(curenv);
		curenv->env_tf = *tf;
		tf = &curenv->env_tf;
	}

	trap_dispatch(tf);

	/* No other environment was scheduled; return to the current
	 * one if doing so still makes sense. */
	if (curenv && curenv->env_status == ENV_RUNNABLE)
		env_run(curenv);
	else
		sched_yield();
}
示例8: acpi_timer_test
/*
 * Sanity-check the ACPI PM timer: with interrupts disabled, read it
 * 2000 times and examine the jitter between consecutive reads.
 * Returns 1 if the timer looks usable, 0 otherwise; reports the
 * verdict when bootverbose is set.
 */
static int
acpi_timer_test(void)
{
	uint32_t prev, now;
	int dmin, dmax, dmax2, i, delta;
	register_t saved;

	dmin = INT32_MAX;
	dmax = dmax2 = 0;

	/* Disable interrupts so handlers do not inflate the deltas. */
#if defined(__i386__)
	saved = read_eflags();
#elif defined(__x86_64__)
	saved = read_rflags();
#else
#error "no read_eflags"
#endif
	cpu_disable_intr();

	AcpiGetTimer(&prev);
	for (i = 0; i < 2000; ++i) {
		AcpiGetTimer(&now);
		delta = acpi_TimerDelta(now, prev);
		/* Track the two largest deltas and the smallest. */
		if (delta > dmax) {
			dmax2 = dmax;
			dmax = delta;
		} else if (delta > dmax2) {
			dmax2 = delta;
		}
		if (delta < dmin)
			dmin = delta;
		prev = now;
	}

#if defined(__i386__)
	write_eflags(saved);
#elif defined(__x86_64__)
	write_rflags(saved);
#else
#error "no read_eflags"
#endif

	/* Reject timers with too much spread (unless virtualized) or
	 * with degenerate readings; 'i' doubles as the verdict. */
	delta = dmax2 - dmin;
	if ((dmax - dmin > 8 || delta > 3) && vmm_guest == VMM_GUEST_NONE)
		i = 0;
	else if (dmin < 0 || dmax == 0 || dmax2 == 0)
		i = 0;
	else
		i = 1;

	if (bootverbose) {
		kprintf("ACPI timer looks %s min = %d, max = %d, width = %d\n",
		    i ? "GOOD" : "BAD ",
		    dmin, dmax, dmax - dmin);
	}
	return (i);
}
示例9: pushcli
/*
 * Disable interrupts, tracking nesting depth per CPU.  The outermost
 * pushcli() records whether interrupts were enabled so the matching
 * popcli() can restore them.
 */
void
pushcli(void)
{
	int saved_flags;

	/* Read EFLAGS before cli() so FL_IF reflects the caller's state. */
	saved_flags = read_eflags();
	cli();
	if (cpus[cpu()].ncli++ == 0)
		cpus[cpu()].intena = saved_flags & FL_IF;
}
示例10: popcli
void
popcli(void)
{
if(read_eflags()&FL_IF)
panic("popcli - interruptible");
if(--cpus[cpu()].ncli < 0)
panic("popcli");
if(cpus[cpu()].ncli == 0 && cpus[cpu()].intena)
sti();
}
示例11: BX_ERROR
int BX_CPU_C::v86_redirect_interrupt(Bit8u vector)
{
#if BX_CPU_LEVEL >= 5
if (BX_CPU_THIS_PTR cr4.get_VME())
{
bx_address tr_base = BX_CPU_THIS_PTR tr.cache.u.segment.base;
if (BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled < 103) {
BX_ERROR(("INT_Ib(): TR.limit < 103 in VME"));
exception(BX_GP_EXCEPTION, 0);
}
Bit32u io_base = system_read_word(tr_base + 102), offset = io_base - 32 + (vector >> 3);
if (offset > BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled) {
BX_ERROR(("INT_Ib(): failed to fetch VME redirection bitmap"));
exception(BX_GP_EXCEPTION, 0);
}
Bit8u vme_redirection_bitmap = system_read_byte(tr_base + offset);
if (!(vme_redirection_bitmap & (1 << (vector & 7))))
{
// redirect interrupt through virtual-mode idt
Bit16u temp_flags = (Bit16u) read_eflags();
Bit16u temp_CS = system_read_word(vector*4 + 2);
Bit16u temp_IP = system_read_word(vector*4);
if (BX_CPU_THIS_PTR get_IOPL() < 3) {
temp_flags |= EFlagsIOPLMask;
if (BX_CPU_THIS_PTR get_VIF())
temp_flags |= EFlagsIFMask;
else
temp_flags &= ~EFlagsIFMask;
}
Bit16u old_IP = IP;
Bit16u old_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
push_16(temp_flags);
// push return address onto new stack
push_16(old_CS);
push_16(old_IP);
load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], (Bit16u) temp_CS);
EIP = temp_IP;
BX_CPU_THIS_PTR clear_TF();
BX_CPU_THIS_PTR clear_RF();
if (BX_CPU_THIS_PTR get_IOPL() == 3)
BX_CPU_THIS_PTR clear_IF();
else
BX_CPU_THIS_PTR clear_VIF();
return 1;
}
}
示例12: enter_s4_with_bios
/*
 * Enter S4 via the S4BIOS mechanism: run _PTS/_GTS, clear the wake
 * status, then repeatedly write the S4BIOS request to the SMI command
 * port until the wake status bit indicates we have come back.
 * Interrupts are disabled around the sleep and restored on return.
 */
static ACPI_STATUS
enter_s4_with_bios(void)
{
	ACPI_OBJECT_LIST arglist;
	ACPI_OBJECT arg;
	u_long saved_ef;
	UINT32 wake;
	ACPI_STATUS status;

	/* run the _PTS and _GTS methods */
	ACPI_MEMSET(&arglist, 0, sizeof(arglist));
	arglist.Count = 1;
	arglist.Pointer = &arg;
	ACPI_MEMSET(&arg, 0, sizeof(arg));
	arg.Type = ACPI_TYPE_INTEGER;
	arg.Integer.Value = ACPI_STATE_S4;
	AcpiEvaluateObject(NULL, "\\_PTS", &arglist, NULL);
	AcpiEvaluateObject(NULL, "\\_GTS", &arglist, NULL);

	/* clear wake status */
	AcpiSetRegister(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_LOCK);

	saved_ef = read_eflags();
	disable_intr();
	AcpiHwDisableNonWakeupGpes();

	/* flush caches */
	ACPI_FLUSH_CPU_CACHE();

	/*
	 * Write the request to the SMI command port and poll the wake
	 * status until we return from the sleep state (or the register
	 * read fails).
	 */
	for (;;) {
		AcpiOsStall(1000000);
		AcpiOsWritePort(AcpiGbl_FADT->SmiCmd,
		    AcpiGbl_FADT->S4BiosReq, 8);
		status = AcpiGetRegister(ACPI_BITREG_WAKE_STATUS,
		    &wake, ACPI_MTX_LOCK);
		if (ACPI_FAILURE(status) || wake)
			break;
	}

	AcpiHwEnableNonWakeupGpes();
	write_eflags(saved_ef);
	return (AE_OK);
}
示例13: UNUSED
Bit64s BX_CPU_C::param_save(bx_param_c *param, Bit64s val)
{
#else
UNUSED(devptr);
#endif // !BX_USE_CPU_SMF
const char *pname, *segname;
bx_segment_reg_t *segment = NULL;
pname = param->get_name();
if (!strcmp(pname, "cpu_version")) {
val = get_cpu_version_information();
} else if (!strcmp(pname, "cpuid_std")) {
val = get_std_cpuid_features();
} else if (!strcmp(pname, "cpuid_ext")) {
val = get_extended_cpuid_features();
} else if (!strcmp(pname, "EFLAGS")) {
val = BX_CPU_THIS_PTR read_eflags();
#if BX_SUPPORT_X86_64
} else if (!strcmp(pname, "EFER")) {
val = BX_CPU_THIS_PTR get_EFER();
#endif
} else if (!strcmp(pname, "ar_byte") || !strcmp(pname, "selector")) {
segname = param->get_parent()->get_name();
if (!strcmp(segname, "CS")) {
segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS];
} else if (!strcmp(segname, "DS")) {
segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
} else if (!strcmp(segname, "SS")) {
segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS];
} else if (!strcmp(segname, "ES")) {
segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES];
} else if (!strcmp(segname, "FS")) {
segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS];
} else if (!strcmp(segname, "GS")) {
segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS];
} else if (!strcmp(segname, "LDTR")) {
segment = &BX_CPU_THIS_PTR ldtr;
} else if (!strcmp(segname, "TR")) {
segment = &BX_CPU_THIS_PTR tr;
}
if (segment != NULL) {
if (!strcmp(pname, "ar_byte")) {
val = ar_byte(&(segment->cache));
}
else if (!strcmp(pname, "selector")) {
val = segment->selector.value;
}
}
}
else {
BX_PANIC(("Unknown param %s in param_save handler !", pname));
}
return val;
}
示例14: init_i486_on_386
/*
 * Some i386 machines can take i486-based CPU upgrade products whose
 * BIOS does not enable the CPU cache.  Enable it here by clearing
 * the cache-disable bits in CR0.
 */
void
init_i486_on_386(void)
{
	u_long saved_eflags;

	saved_eflags = read_eflags();
	cpu_disable_intr();
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */
	write_eflags(saved_eflags);
}
示例15: pm_init
void pm_init(void)
{
descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
ptr_16_32_t idtr;
/*
* Update addresses in GDT and IDT to their virtual counterparts.
*/
idtr.limit = sizeof(idt);
idtr.base = (uintptr_t) idt;
gdtr_load(&gdtr);
idtr_load(&idtr);
/*
* Each CPU has its private GDT and TSS.
* All CPUs share one IDT.
*/
if (config.cpu_active == 1) {
idt_init();
/*
* NOTE: bootstrap CPU has statically allocated TSS, because
* the heap hasn't been initialized so far.
*/
tss_p = &tss0;
} else {
tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
if (!tss_p)
panic("Cannot allocate TSS.");
}
tss_initialize(tss_p);
gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
gdt_p[TSS_DES].special = 1;
gdt_p[TSS_DES].granularity = 0;
gdt_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);
/*
* As of this moment, the current CPU has its own GDT pointing
* to its own TSS. We just need to load the TR register.
*/
tr_load(GDT_SELECTOR(TSS_DES));
/* Disable I/O on nonprivileged levels and clear NT flag. */
write_eflags(read_eflags() & ~(EFLAGS_IOPL | EFLAGS_NT));
/* Disable alignment check */
write_cr0(read_cr0() & ~CR0_AM);
}