This article compiles typical usage examples of the rdmsr function in C/C++, gathered from real open-source projects. If you have been wondering what rdmsr does, how to call it, or what it looks like in practice, the hand-picked examples below should help.
The following presents 15 code examples of rdmsr, sorted by popularity by default.
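Before diving in, note that rdmsr is not a single standardized API: the projects sampled below each wrap the x86 RDMSR instruction differently. The following is a minimal sketch of the three calling conventions that appear in these examples; the inline-assembly bodies are illustrative stand-ins (an assumption, not the projects' actual implementations), and RDMSR itself only executes at ring 0.
#include <stdint.h>
typedef struct { uint32_t lo, hi; } msr_t;        /* coreboot-style halves */
/* coreboot convention: msr = rdmsr(index); fields msr.lo / msr.hi */
static inline msr_t rdmsr_struct(uint32_t index)
{
	msr_t m;
	__asm__ __volatile__("rdmsr" : "=a"(m.lo), "=d"(m.hi) : "c"(index));
	return m;
}
/* BSD convention: a single 64-bit return value */
static inline uint64_t rdmsr_u64(uint32_t index)
{
	uint32_t lo, hi;
	__asm__ __volatile__("rdmsr" : "=a"(lo), "=d"(hi) : "c"(index));
	return ((uint64_t)hi << 32) | lo;
}
/* Linux convention: rdmsr(msr, lo, hi) is a macro filling two lvalues */
#define rdmsr_pair(index, lo, hi) \
	__asm__ __volatile__("rdmsr" : "=a"(lo), "=d"(hi) : "c"(index))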
Example 1: generate_P_state_entries
static void generate_P_state_entries(int core, int cores_per_package)
{
int ratio_min, ratio_max, ratio_turbo, ratio_step, ratio_range_2;
int coord_type, power_max, power_unit, num_entries;
int ratio, power, clock, clock_max;
int vid, vid_turbo, vid_min, vid_max, vid_range_2;
u32 control_status;
const struct pattrs *pattrs = pattrs_get();
msr_t msr;
/* Inputs from CPU attributes */
ratio_max = pattrs->iacore_ratios[IACORE_MAX];
ratio_min = pattrs->iacore_ratios[IACORE_LFM];
vid_max = pattrs->iacore_vids[IACORE_MAX];
vid_min = pattrs->iacore_vids[IACORE_LFM];
/* Hardware coordination of P-states */
coord_type = HW_ALL;
/* Max Non-Turbo Frequency */
clock_max = (ratio_max * pattrs->bclk_khz) / 1000;
/* Calculate CPU TDP in mW */
msr = rdmsr(MSR_PKG_POWER_SKU_UNIT);
power_unit = 1 << (msr.lo & 0xf);
msr = rdmsr(MSR_PKG_POWER_LIMIT);
power_max = ((msr.lo & 0x7fff) / power_unit) * 1000;
/* Write _PCT indicating use of FFixedHW */
acpigen_write_empty_PCT();
/* Write _PPC with NVS specified limit on supported P-state */
acpigen_write_PPC_NVS();
/* Write PSD indicating configured coordination type */
acpigen_write_PSD_package(core, 1, coord_type);
/* Add P-state entries in _PSS table */
acpigen_write_name("_PSS");
/* Determine ratio points */
ratio_step = 1;
num_entries = (ratio_max - ratio_min) / ratio_step;
while (num_entries > 15) { /* ACPI max is 15 ratios */
ratio_step <<= 1;
num_entries >>= 1;
}
/* P[T] is Turbo state if enabled */
if (get_turbo_state() == TURBO_ENABLED) {
/* _PSS package count including Turbo */
acpigen_write_package(num_entries + 2);
ratio_turbo = pattrs->iacore_ratios[IACORE_TURBO];
vid_turbo = pattrs->iacore_vids[IACORE_TURBO];
control_status = (ratio_turbo << 8) | vid_turbo;
/* Add entry for Turbo ratio */
acpigen_write_PSS_package(
clock_max + 1, /*MHz*/
power_max, /*mW*/
10, /*lat1*/
10, /*lat2*/
control_status, /*control*/
control_status); /*status*/
} else {
/* _PSS package count without Turbo */
acpigen_write_package(num_entries + 1);
ratio_turbo = ratio_max;
vid_turbo = vid_max;
}
/* First regular entry is max non-turbo ratio */
control_status = (ratio_max << 8) | vid_max;
acpigen_write_PSS_package(
clock_max, /*MHz*/
power_max, /*mW*/
10, /*lat1*/
10, /*lat2*/
control_status, /*control */
control_status); /*status*/
/* Set up ratio and vid ranges for VID calculation */
ratio_range_2 = (ratio_turbo - ratio_min) * 2;
vid_range_2 = (vid_turbo - vid_min) * 2;
/* Generate the remaining entries */
for (ratio = ratio_min + ((num_entries - 1) * ratio_step);
ratio >= ratio_min; ratio -= ratio_step) {
/* Calculate VID for this ratio */
vid = ((ratio - ratio_min) * vid_range_2) /
ratio_range_2 + vid_min;
/* Round up if remainder */
if (((ratio - ratio_min) * vid_range_2) % ratio_range_2)
vid++;
/* Calculate power at this ratio */
power = calculate_power(power_max, ratio_max, ratio);
clock = (ratio * pattrs->bclk_khz) / 1000;
//......... part of the code omitted here .........
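A note on the arithmetic above: the loop linearly interpolates a VID for each ratio, with both ranges pre-doubled so the remainder test can round the result up. Here is a standalone sketch of that round-up interpolation with made-up endpoint values (hypothetical, not taken from any real SKU):
#include <stdio.h>
/* Round-up linear interpolation, as in the _PSS loop above. */
static int vid_for_ratio(int ratio, int ratio_min, int vid_min,
                         int ratio_range_2, int vid_range_2)
{
	int vid = ((ratio - ratio_min) * vid_range_2) / ratio_range_2 + vid_min;
	if (((ratio - ratio_min) * vid_range_2) % ratio_range_2)
		vid++;	/* round up on any remainder */
	return vid;
}
int main(void)
{
	/* Hypothetical endpoints: ratios 6..20 map to VIDs 0x30..0x48. */
	int ratio_min = 6, ratio_turbo = 20, vid_min = 0x30, vid_turbo = 0x48;
	int ratio_range_2 = (ratio_turbo - ratio_min) * 2;
	int vid_range_2 = (vid_turbo - vid_min) * 2;
	for (int ratio = ratio_turbo; ratio >= ratio_min; ratio -= 2)
		printf("ratio %2d -> vid 0x%02x\n", ratio,
		       vid_for_ratio(ratio, ratio_min, vid_min,
		                     ratio_range_2, vid_range_2));
	return 0;
}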
Example 2: init_secondary
/*
* AP cpu's call this to sync up protected mode.
*
* WARNING! %gs is not set up on entry. This routine sets up %gs.
*/
void
init_secondary(void)
{
int gsel_tss;
int x, myid = bootAP;
u_int64_t msr, cr0;
struct mdglobaldata *md;
struct privatespace *ps;
ps = CPU_prvspace[myid];
gdt_segs[GPROC0_SEL].ssd_base =
(long) &ps->mdglobaldata.gd_common_tss;
ps->mdglobaldata.mi.gd_prvspace = ps;
/* We fill the 32-bit segment descriptors */
for (x = 0; x < NGDT; x++) {
if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
}
/* And now a 64-bit one */
ssdtosyssd(&gdt_segs[GPROC0_SEL],
(struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);
r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
r_gdt.rd_base = (long) &gdt[myid * NGDT];
lgdt(&r_gdt); /* does magic intra-segment return */
/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
wrmsr(MSR_FSBASE, 0); /* User value */
wrmsr(MSR_GSBASE, (u_int64_t)ps);
wrmsr(MSR_KGSBASE, 0); /* XXX User value while we're in the kernel */
lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]);
#if 0
lldt(_default_ldt);
mdcpu->gd_currentldt = _default_ldt;
#endif
gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;
md = mdcpu; /* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/
md->gd_common_tss.tss_rsp0 = 0; /* not used until after switch */
#if 0 /* JG XXX */
md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
#endif
md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
md->gd_common_tssd = *md->gd_tss_gdt;
/* double fault stack */
md->gd_common_tss.tss_ist1 =
(long)&md->mi.gd_prvspace->idlestack[
sizeof(md->mi.gd_prvspace->idlestack)];
ltr(gsel_tss);
/*
* Set to a known state:
* Set by mpboot.s: CR0_PG, CR0_PE
* Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
*/
cr0 = rcr0();
cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
load_cr0(cr0);
/* Set up the fast syscall stuff */
msr = rdmsr(MSR_EFER) | EFER_SCE;
wrmsr(MSR_EFER, msr);
wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
wrmsr(MSR_STAR, msr);
wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL);
pmap_set_opt(); /* PSE/4MB pages, etc */
pmap_init_pat(); /* Page Attribute Table */
/* set up CPU registers and state */
cpu_setregs();
/* set up SSE/NX registers */
initializecpu(myid);
/* set up FPU state on the AP */
npxinit();
/* disable the APIC, just to be SURE */
lapic->svr &= ~APIC_SVR_ENABLE;
}
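One detail worth calling out from the fast-syscall setup above: MSR_STAR packs two selector pairs into a single 64-bit register, bits 47:32 giving the kernel CS/SS base consumed by SYSCALL and bits 63:48 the user base consumed by SYSRET. A sketch of that packing with placeholder selector values (not DragonFly's actual GDT layout):
#include <stdint.h>
#include <stdio.h>
int main(void)
{
	/* Placeholder selectors; real values come from the GDT layout. */
	uint16_t kernel_base = 0x08;	/* SYSCALL: CS = base, SS = base + 8 */
	uint16_t user_base   = 0x1b;	/* SYSRET: 32-bit CS = base, SS = base + 8 */
	uint64_t star = ((uint64_t)kernel_base << 32) |
	                ((uint64_t)user_base << 48);
	printf("MSR_STAR = %#018llx\n", (unsigned long long)star);
	/* In the kernel this would be: wrmsr(MSR_STAR, star); */
	return 0;
}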
Example 3: SetDelayControl
/**
* This is Black Magic DRAM timing juju[1].
*
* DRAM delay depends on CPU clock, memory bus clock, memory bus loading,
* memory bus termination, your middle initial (ha! caught you!), GeodeLink
* clock rate, and DRAM timing specifications.
*
* From this the code computes a number which is "known to work". No,
* hardware is not an exact science. And, finally, if an FS2 (JTAG debugger)
* is hooked up, then just don't do anything. This code was written by a master
* of the Dark Arts at AMD and should not be modified in any way.
*
* [1] (http://www.thefreedictionary.com/juju)
*
* @param dimm0 The SMBus address of DIMM 0 (mainboard dependent).
* @param dimm1 The SMBus address of DIMM 1 (mainboard dependent).
* @param terminated The bus is terminated. (mainboard dependent).
*/
static void SetDelayControl(u8 dimm0, u8 dimm1, int terminated)
{
u32 glspeed;
u8 spdbyte0, spdbyte1, dimms, i;
msr_t msr;
glspeed = GeodeLinkSpeed();
/* Fix delay controls for DM and IM arrays. */
for (i = 0; i < ARRAY_SIZE(delay_msr_table); i++)
wrmsr(delay_msr_table[i].index, delay_msr_table[i].msr);
msr = rdmsr(GLCP_FIFOCTL);
msr.hi = 0x00000005;
wrmsr(GLCP_FIFOCTL, msr);
/* Enable setting. */
msr.hi = 0;
msr.lo = 0x00000001;
wrmsr(CPU_BC_MSS_ARRAY_CTL_ENA, msr);
/* Debug Delay Control setup check.
* Leave it alone if it has been setup. FS2 or something is here.
*/
msr = rdmsr(GLCP_DELAY_CONTROLS);
if (msr.lo & ~(DELAY_LOWER_STATUS_MASK))
return;
/* Delay Controls based on DIMM loading. UGH!
* Number of devices = module width (SPD 6) / device width (SPD 13)
* * physical banks (SPD 5)
*
* Note: We only support a module width of 64.
*/
dimms = 0;
spdbyte0 = spd_read_byte(dimm0, SPD_PRIMARY_SDRAM_WIDTH);
if (spdbyte0 != 0xFF) {
dimms++;
spdbyte0 = (u8)64 / spdbyte0 *
(u8)(spd_read_byte(dimm0, SPD_NUM_DIMM_BANKS));
} else {
spdbyte0 = 0;
}
spdbyte1 = spd_read_byte(dimm1, SPD_PRIMARY_SDRAM_WIDTH);
if (spdbyte1 != 0xFF) {
dimms++;
spdbyte1 = (u8)64 / spdbyte1 *
(u8)(spd_read_byte(dimm1, SPD_NUM_DIMM_BANKS));
} else {
spdbyte1 = 0;
}
/* Zero GLCP_DELAY_CONTROLS MSR */
msr.hi = msr.lo = 0;
/* Save some power, disable clock to second DIMM if it is empty. */
if (spdbyte1 == 0)
msr.hi |= DELAY_UPPER_DISABLE_CLK135;
spdbyte0 += spdbyte1;
if ((dimms == 1) && (terminated == DRAM_TERMINATED)) {
msr.hi = 0xF2F100FF;
msr.lo = 0x56960004;
} else for (i = 0; i < ARRAY_SIZE(delay_control_table); i++) {
if ((dimms == delay_control_table[i].dimms) &&
(spdbyte0 <= delay_control_table[i].devices)) {
if (glspeed < 334) {
msr.hi |= delay_control_table[i].slow_hi;
msr.lo |= delay_control_table[i].slow_low;
} else {
msr.hi |= delay_control_table[i].fast_hi;
msr.lo |= delay_control_table[i].fast_low;
}
break;
}
}
wrmsr(GLCP_DELAY_CONTROLS, msr);
}
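The SPD computation in SetDelayControl() reduces each DIMM to a device count: (module width 64 / device width from SPD byte 13) × physical banks from SPD byte 5, with 0xFF from spd_read_byte() meaning no DIMM present. A host-side sketch of that formula using hypothetical SPD values:
#include <stdio.h>
typedef unsigned char u8;
/* Device count per DIMM, as computed in SetDelayControl() above. */
static u8 dimm_devices(u8 sdram_width, u8 banks)
{
	if (sdram_width == 0xFF)	/* no DIMM present */
		return 0;
	return (u8)(64 / sdram_width) * banks;
}
int main(void)
{
	/* Hypothetical SPD readings: x8 devices, 2 physical banks. */
	printf("devices = %u\n", dimm_devices(8, 2));	/* (64/8) * 2 = 16 */
	return 0;
}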
Example 4: via_nano_setup
void
via_nano_setup(struct cpu_info *ci)
{
u_int32_t regs[4], val;
u_int64_t msreg;
int model = (ci->ci_signature >> 4) & 15;
if (model >= 9) {
CPUID(0xC0000000, regs[0], regs[1], regs[2], regs[3]);
val = regs[0];
if (val >= 0xC0000001) {
CPUID(0xC0000001, regs[0], regs[1], regs[2], regs[3]);
val = regs[3];
} else
val = 0;
if (val & (C3_CPUID_HAS_RNG | C3_CPUID_HAS_ACE))
printf("%s:", ci->ci_dev->dv_xname);
/* Enable RNG if present and disabled */
if (val & C3_CPUID_HAS_RNG) {
extern int viac3_rnd_present;
if (!(val & C3_CPUID_DO_RNG)) {
msreg = rdmsr(0x110B);
msreg |= 0x40;
wrmsr(0x110B, msreg);
}
viac3_rnd_present = 1;
printf(" RNG");
}
/* Enable AES engine if present and disabled */
if (val & C3_CPUID_HAS_ACE) {
#ifdef CRYPTO
if (!(val & C3_CPUID_DO_ACE)) {
msreg = rdmsr(0x1107);
msreg |= (0x01 << 28);
wrmsr(0x1107, msreg);
}
amd64_has_xcrypt |= C3_HAS_AES;
#endif /* CRYPTO */
printf(" AES");
}
/* Enable ACE2 engine if present and disabled */
if (val & C3_CPUID_HAS_ACE2) {
#ifdef CRYPTO
if (!(val & C3_CPUID_DO_ACE2)) {
msreg = rdmsr(0x1107);
msreg |= (0x01 << 28);
wrmsr(0x1107, msreg);
}
amd64_has_xcrypt |= C3_HAS_AESCTR;
#endif /* CRYPTO */
printf(" AES-CTR");
}
/* Enable SHA engine if present and disabled */
if (val & C3_CPUID_HAS_PHE) {
#ifdef CRYPTO
if (!(val & C3_CPUID_DO_PHE)) {
msreg = rdmsr(0x1107);
msreg |= (0x01 << 28/**/);
wrmsr(0x1107, msreg);
}
amd64_has_xcrypt |= C3_HAS_SHA;
#endif /* CRYPTO */
printf(" SHA1 SHA256");
}
/* Enable MM engine if present and disabled */
if (val & C3_CPUID_HAS_PMM) {
#ifdef CRYPTO
if (!(val & C3_CPUID_DO_PMM)) {
msreg = rdmsr(0x1107);
msreg |= (0x01 << 28/**/);
wrmsr(0x1107, msreg);
}
amd64_has_xcrypt |= C3_HAS_MM;
#endif /* CRYPTO */
printf(" RSA");
}
printf("\n");
}
}
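All five engine enables in via_nano_setup() are the same read-modify-write: read a VIA feature-control MSR, OR in an enable bit if it is clear, and write it back (MSR 0x1107 bit 28 for the crypto engines, MSR 0x110B bit 6 for the RNG). A hedged helper capturing that pattern, assuming the BSD-style 64-bit rdmsr/wrmsr primitives (kernel context only):
#include <stdint.h>
/* BSD-style MSR primitives, available only in the kernel. */
extern uint64_t rdmsr(uint32_t msr);
extern void wrmsr(uint32_t msr, uint64_t value);
/* Set an enable bit in a feature-control MSR if it is not already set,
 * mirroring the repeated pattern in via_nano_setup() above. */
static void via_msr_enable(uint32_t msr, uint64_t bit)
{
	uint64_t v = rdmsr(msr);
	if (!(v & bit))
		wrmsr(msr, v | bit);
}
/* Usage matching the example:
 *   via_msr_enable(0x1107, 1ULL << 28);   // ACE/PHE/PMM engines
 *   via_msr_enable(0x110B, 1ULL << 6);    // RNG
 */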
Example 5: early_init_intel
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
/* Unmask CPUID levels if masked: */
if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
c->cpuid_level = cpuid_eax(0);
get_cpu_cap(c);
}
}
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
unsigned lower_word;
wrmsr(MSR_IA32_UCODE_REV, 0, 0);
/* Required by the SDM */
sync_core();
rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
}
/*
* Atom erratum AAE44/AAF40/AAG38/AAH41:
*
* A race condition between speculative fetches and invalidating
* a large page. This is worked around in microcode, but we
* need the microcode to have already been loaded... so if it is
* not, recommend a BIOS update and disable large pages.
*/
if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
c->microcode < 0x20e) {
printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
clear_cpu_cap(c, X86_FEATURE_PSE);
}
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
if (c->x86 == 15 && c->x86_cache_alignment == 64)
c->x86_cache_alignment = 128;
#endif
/* CPUID workaround for 0F33/0F34 CPU */
if (c->x86 == 0xF && c->x86_model == 0x3
&& (c->x86_mask == 0x3 || c->x86_mask == 0x4))
c->x86_phys_bits = 36;
/*
* c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
* with P/T states and does not stop in deep C-states.
*
* It is also reliable across cores and sockets. (but not across
* cabinets - we turn it off in that case explicitly.)
*/
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
if (!check_tsc_unstable())
sched_clock_stable = 1;
}
/*
* There is a known erratum on Pentium III and Core Solo
* and Core Duo CPUs.
* " Page with PAT set to WC while associated MTRR is UC
* may consolidate to UC "
* Because of this erratum, it is better to stick with
* setting WC in MTRR rather than using PAT on these CPUs.
*
* Enable PAT WC only on P4, Core 2 or later CPUs.
*/
if (c->x86 == 6 && c->x86_model < 15)
clear_cpu_cap(c, X86_FEATURE_PAT);
#ifdef CONFIG_KMEMCHECK
/*
* P4s have a "fast strings" feature which causes single-
* stepping REP instructions to only generate a #DB on
* cache-line boundaries.
*
* Ingo Molnar reported a Pentium D (model 6) and a Xeon
* (model 2) with the same problem.
*/
if (c->x86 == 15) {
rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
//......... part of the code omitted here .........
Example 6: cache_as_ram_main
void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
{
struct sys_info *sysinfo = &sysinfo_car;
static const u8 spd_addr[] = {RC00, DIMM0, DIMM2, 0, 0, DIMM1, DIMM3, 0, 0, };
u32 bsp_apicid = 0, val;
msr_t msr;
timestamp_init(timestamp_get());
timestamp_add_now(TS_START_ROMSTAGE);
if (!cpu_init_detectedx && boot_cpu()) {
/* Nothing special needs to be done to find bus 0 */
/* Allow the HT devices to be found */
/* mov bsp to bus 0xff when > 8 nodes */
set_bsp_node_CHtExtNodeCfgEn();
enumerate_ht_chain();
sb7xx_51xx_pci_port80();
}
post_code(0x30);
if (bist == 0) {
bsp_apicid = init_cpus(cpu_init_detectedx, sysinfo); /* mmconf is inited in init_cpus */
/* All cores run this but the BSP(node0,core0) is the only core that returns. */
}
post_code(0x32);
enable_rs780_dev8();
sb7xx_51xx_lpc_init();
ite_enable_serial(SERIAL_DEV, CONFIG_TTYS0_BASE);
it8718f_disable_reboot(GPIO_DEV);
console_init();
// dump_mem(CONFIG_DCACHE_RAM_BASE+CONFIG_DCACHE_RAM_SIZE-0x200, CONFIG_DCACHE_RAM_BASE+CONFIG_DCACHE_RAM_SIZE);
/* Halt if there was a built in self test failure */
report_bist_failure(bist);
// Load MPB
val = cpuid_eax(1);
printk(BIOS_DEBUG, "BSP Family_Model: %08x\n", val);
printk(BIOS_DEBUG, "*sysinfo range: [%p,%p]\n",sysinfo,sysinfo+1);
printk(BIOS_DEBUG, "bsp_apicid = %02x\n", bsp_apicid);
printk(BIOS_DEBUG, "cpu_init_detectedx = %08lx\n", cpu_init_detectedx);
/* Setup sysinfo defaults */
set_sysinfo_in_ram(0);
update_microcode(val);
post_code(0x33);
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);
post_code(0x35);
/* Setup nodes PCI space and start core 0 AP init. */
finalize_node_setup(sysinfo);
/* Setup any mainboard PCI settings etc. */
setup_mb_resource_map();
post_code(0x36);
/* wait for all the APs core0 started by finalize_node_setup. */
/* FIXME: A bunch of cores are going to start output to serial at once.
It would be nice to fix up the printk spinlocks for ROM XIP mode.
I think it could be done by putting the spinlock flag in the cache
of the BSP located right after sysinfo.
*/
wait_all_core0_started();
#if CONFIG_LOGICAL_CPUS
/* Core0 on each node is configured. Now setup any additional cores. */
printk(BIOS_DEBUG, "start_other_cores()\n");
start_other_cores();
post_code(0x37);
wait_all_other_cores_started(bsp_apicid);
#endif
post_code(0x38);
/* run _early_setup before soft-reset. */
rs780_early_setup();
sb7xx_51xx_early_setup();
#if CONFIG_SET_FIDVID
msr = rdmsr(0xc0010071);
printk(BIOS_DEBUG, "\nBegin FIDVID MSR 0xc0010071 0x%08x 0x%08x\n", msr.hi, msr.lo);
/* FIXME: The sb fid change may survive the warm reset and only
need to be done once.*/
enable_fid_change_on_sb(sysinfo->sbbusn, sysinfo->sbdn);
post_code(0x39);
if (!warm_reset_detect(0)) { // BSP is node 0
//......... part of the code omitted here .........
Example 7: init_amd
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
u32 l, h;
int mbytes = num_physpages >> (20-PAGE_SHIFT);
int r;
#ifdef CONFIG_SMP
unsigned long long value;
/* Disable TLB flush filter by setting HWCR.FFDIS on K8
* bit 6 of msr C001_0015
*
* Errata 63 for SH-B3 steppings
* Errata 122 for all steppings (F+ have it disabled by default)
*/
if (c->x86 == 15) {
rdmsrl(MSR_K7_HWCR, value);
value |= 1 << 6;
wrmsrl(MSR_K7_HWCR, value);
}
#endif
early_init_amd(c);
/*
* FIXME: We should handle the K5 here. Set up the write
* range and also turn on MSR 83 bits 4 and 31 (write alloc,
* no bus pipeline)
*/
/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
clear_bit(0*32+31, c->x86_capability);
r = get_model_name(c);
switch(c->x86)
{
case 4:
/*
* General Systems BIOSen alias the cpu frequency registers
* of the Elan at 0x000df000. Unfortunately, one of the Linux
* drivers subsequently pokes it, and changes the CPU speed.
* Workaround : Remove the unneeded alias.
*/
#define CBAR (0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB (0x80000000)
#define CBAR_KEY (0X000000CB)
if (c->x86_model==9 || c->x86_model == 10) {
if (inl (CBAR) & CBAR_ENB)
outl (0 | CBAR_KEY, CBAR);
}
break;
case 5:
if( c->x86_model < 6 )
{
/* Based on AMD doc 20734R - June 2000 */
if ( c->x86_model == 0 ) {
clear_bit(X86_FEATURE_APIC, c->x86_capability);
set_bit(X86_FEATURE_PGE, c->x86_capability);
}
break;
}
if ( c->x86_model == 6 && c->x86_mask == 1 ) {
const int K6_BUG_LOOP = 1000000;
int n;
void (*f_vide)(void);
unsigned long d, d2;
printk(KERN_INFO "AMD K6 stepping B detected - ");
/*
* It looks like AMD fixed the 2.6.2 bug and improved indirect
* calls at the same time.
*/
n = K6_BUG_LOOP;
f_vide = vide;
rdtscl(d);
while (n--)
f_vide();
rdtscl(d2);
d = d2-d;
if (d > 20*K6_BUG_LOOP)
printk("system stability may be impaired when more than 32 MB are used.\n");
else
printk("probably OK (after B9730xxxx).\n");
printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
}
/* K6 with old style WHCR */
if (c->x86_model < 8 ||
(c->x86_model== 8 && c->x86_mask < 8)) {
/* We can only write allocate on the low 508Mb */
if(mbytes>508)
mbytes=508;
rdmsr(MSR_K6_WHCR, l, h);
//......... part of the code omitted here .........
Example 8: detect_init_APIC
static int __init detect_init_APIC (void)
{
u32 h, l, features;
extern void get_cpu_vendor(struct cpuinfo_x86*);
/* Disabled by DMI scan or kernel option? */
if (dont_enable_local_apic)
return -1;
/* Workaround for us being called before identify_cpu(). */
get_cpu_vendor(&boot_cpu_data);
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1)
break;
if (boot_cpu_data.x86 == 15 && cpu_has_apic)
break;
goto no_apic;
case X86_VENDOR_INTEL:
if (boot_cpu_data.x86 == 6 ||
(boot_cpu_data.x86 == 15 && cpu_has_apic) ||
(boot_cpu_data.x86 == 5 && cpu_has_apic))
break;
goto no_apic;
default:
goto no_apic;
}
if (!cpu_has_apic) {
/*
* Some BIOSes disable the local APIC in the
* APIC_BASE MSR. This can only be done in
* software for Intel P6 and AMD K7 (Model > 1).
*/
rdmsr(MSR_IA32_APICBASE, l, h);
if (!(l & MSR_IA32_APICBASE_ENABLE)) {
printk("Local APIC disabled by BIOS -- reenabling.\n");
l &= ~MSR_IA32_APICBASE_BASE;
l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
wrmsr(MSR_IA32_APICBASE, l, h);
}
}
/*
* The APIC feature bit should now be enabled
* in `cpuid'
*/
features = cpuid_edx(1);
if (!(features & (1 << X86_FEATURE_APIC))) {
printk("Could not enable APIC!\n");
return -1;
}
set_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability);
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
if (nmi_watchdog != NMI_NONE)
nmi_watchdog = NMI_LOCAL_APIC;
printk("Found and enabled local APIC!\n");
apic_pm_init1();
return 0;
no_apic:
printk("No local APIC present or hardware disabled\n");
return -1;
}
Example 9: trap
/*
* All traps come here. It is slower to have all traps call trap()
* rather than directly vectoring the handler. However, this avoids a
* lot of code duplication and possible bugs. The only exception is
* VectorSYSCALL.
* Trap is called with interrupts disabled via interrupt-gates.
*/
void
trap(Ureg* ureg)
{
int clockintr, vno, user;
// cache the previous vno to see what might be causing
// trouble
vno = ureg->type;
uint64_t gsbase = rdmsr(GSbase);
//if (sce > scx) iprint("====================");
lastvno = vno;
if (gsbase < 1ULL<<63)
die("bogus gsbase");
Proc *up = externup();
char buf[ERRMAX];
Vctl *ctl, *v;
machp()->perf.intrts = perfticks();
user = userureg(ureg);
if(user && (machp()->NIX.nixtype == NIXTC)) {
up->dbgreg = ureg;
cycles(&up->kentry);
}
clockintr = 0;
//_pmcupdate(machp());
if(ctl = vctl[vno]) {
if(ctl->isintr) {
machp()->intr++;
if(vno >= VectorPIC && vno != VectorSYSCALL)
machp()->lastintr = ctl->Vkey.irq;
} else if(up)
up->nqtrap++;
if(ctl->isr) {
ctl->isr(vno);
if(islo())print("trap %d: isr %p enabled interrupts\n", vno, ctl->isr);
}
for(v = ctl; v != nil; v = v->next) {
if(v->f) {
v->f(ureg, v->a);
if(islo())print("trap %d: ctlf %p enabled interrupts\n", vno, v->f);
}
}
if(ctl->eoi) {
ctl->eoi(vno);
if(islo())print("trap %d: eoi %p enabled interrupts\n", vno, ctl->eoi);
}
intrtime(vno);
if(ctl->isintr) {
if(ctl->Vkey.irq == IrqCLOCK || ctl->Vkey.irq == IrqTIMER)
clockintr = 1;
if (ctl->Vkey.irq == IrqTIMER)
oprof_alarm_handler(ureg);
if(up && !clockintr)
preempted();
}
}
else if(vno < nelem(excname) && user) {
spllo();
snprint(buf, sizeof buf, "sys: trap: %s", excname[vno]);
postnote(up, 1, buf, NDebug);
}
else if(vno >= VectorPIC && vno != VectorSYSCALL) {
/*
* An unknown interrupt.
* Check for a default IRQ7. This can happen when
* the IRQ input goes away before the acknowledge.
* In this case, a 'default IRQ7' is generated, but
* the corresponding bit in the ISR isn't set.
* In fact, just ignore all such interrupts.
*/
/* clear the interrupt */
i8259isr(vno);
iprint("cpu%d: spurious interrupt %d, last %d\n",
machp()->machno, vno, machp()->lastintr);
intrtime(vno);
if(user)
kexit(ureg);
return;
}
else {
if(vno == VectorNMI) {
nmienable();
if(machp()->machno != 0) {
iprint("cpu%d: PC %#llx\n",
machp()->machno, ureg->ip);
//......... part of the code omitted here .........
Example 10: reg_script_read_res
//......... part of the code omitted here .........
iosf_port5a_write(step->reg, step->value);
break;
case IOSF_PORT_USHPHY:
iosf_ushphy_write(step->reg, step->value);
break;
case IOSF_PORT_SCC:
iosf_scc_write(step->reg, step->value);
break;
case IOSF_PORT_LPSS:
iosf_lpss_write(step->reg, step->value);
break;
case IOSF_PORT_0xa2:
iosf_porta2_write(step->reg, step->value);
break;
case IOSF_PORT_CCU:
iosf_ccu_write(step->reg, step->value);
break;
case IOSF_PORT_SSUS:
iosf_ssus_write(step->reg, step->value);
break;
default:
printk(BIOS_DEBUG, "No write support for IOSF port 0x%x.\n",
step->id);
break;
}
}
#endif
static uint64_t reg_script_read_msr(struct reg_script_context *ctx)
{
#if CONFIG_ARCH_X86
const struct reg_script *step = reg_script_get_step(ctx);
msr_t msr = rdmsr(step->reg);
uint64_t value = msr.hi;
value <<= 32;
value |= msr.lo;
return value;
#endif
}
static void reg_script_write_msr(struct reg_script_context *ctx)
{
#if CONFIG_ARCH_X86
const struct reg_script *step = reg_script_get_step(ctx);
msr_t msr;
msr.hi = step->value >> 32;
msr.lo = step->value & 0xffffffff;
wrmsr(step->reg, msr);
#endif
}
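reg_script_read_msr() and reg_script_write_msr() above are inverse packings between a 64-bit script value and coreboot's msr_t halves. A small host-side sketch verifying that the split and join round-trip (the struct layout mirrors coreboot's msr_t):
#include <assert.h>
#include <stdint.h>
typedef struct { uint32_t lo, hi; } msr_t;	/* coreboot layout */
static uint64_t msr_to_u64(msr_t m)
{
	return ((uint64_t)m.hi << 32) | m.lo;	/* as in reg_script_read_msr() */
}
static msr_t u64_to_msr(uint64_t v)
{
	msr_t m = { .lo = (uint32_t)(v & 0xffffffff), .hi = (uint32_t)(v >> 32) };
	return m;				/* as in reg_script_write_msr() */
}
int main(void)
{
	uint64_t v = 0x123456789abcdef0ULL;
	assert(msr_to_u64(u64_to_msr(v)) == v);
	return 0;
}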
#ifndef __PRE_RAM__
/* Default routine provided for systems without platform specific busses */
const struct reg_script_bus_entry *__attribute__((weak))
platform_bus_table(size_t *table_entries)
{
/* No platform bus type table supplied */
*table_entries = 0;
return NULL;
}
/* Locate the structure containing the platform specific bus access routines */
static const struct reg_script_bus_entry
*find_bus(const struct reg_script *step)
Example 11: p4_setup_ctrs
static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
unsigned int i;
unsigned int low, high;
unsigned int addr;
unsigned int stag;
stag = get_stagger();
rdmsr(MSR_IA32_MISC_ENABLE, low, high);
if (! MISC_PMC_ENABLED_P(low)) {
printk(KERN_ERR "oprofile: P4 PMC not available\n");
return;
}
/* clear the cccrs we will use */
for (i = 0 ; i < num_counters ; i++) {
rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
CCCR_CLEAR(low);
CCCR_SET_REQUIRED_BITS(low);
wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
}
/* clear cccrs outside our concern */
for (i = stag ; i < NUM_UNUSED_CCCRS ; i += addr_increment()) {
rdmsr(p4_unused_cccr[i], low, high);
CCCR_CLEAR(low);
CCCR_SET_REQUIRED_BITS(low);
wrmsr(p4_unused_cccr[i], low, high);
}
/* clear all escrs (including those outside our concern) */
for (addr = MSR_P4_BSU_ESCR0 + stag;
addr <= MSR_P4_SSU_ESCR0; addr += addr_increment()) {
wrmsr(addr, 0, 0);
}
for (addr = MSR_P4_MS_ESCR0 + stag;
addr <= MSR_P4_TC_ESCR1; addr += addr_increment()){
wrmsr(addr, 0, 0);
}
for (addr = MSR_P4_IX_ESCR0 + stag;
addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()){
wrmsr(addr, 0, 0);
}
if (num_counters == NUM_COUNTERS_NON_HT) {
wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
} else if (stag == 0) {
wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
} else {
wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
}
/* setup all counters */
for (i = 0 ; i < num_counters ; ++i) {
if (counter_config[i].event) {
reset_value[i] = counter_config[i].count;
pmc_setup_one_p4_counter(i);
CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
} else {
reset_value[i] = 0;
}
}
}
Example 12: getctr
static uint64_t
getctr(uint32_t regno)
{
return rdmsr(regno + PerfCtrbase);
}
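getctr() simply offsets the virtual register number into the performance-counter MSR block. A hedged sketch of typical use, measuring the event delta across a workload (the workload function is a placeholder; kernel context assumed):
#include <stdint.h>
extern uint64_t getctr(uint32_t regno);	/* as defined above; kernel only */
extern void workload(void);		/* placeholder code to measure */
/* Callers usually want a delta, since the counter is free-running. */
static uint64_t count_events(uint32_t regno)
{
	uint64_t before = getctr(regno);
	workload();
	return getctr(regno) - before;
}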
Example 13: pn_decode_acpi
static int
pn_decode_acpi(device_t dev, device_t perf_dev)
{
int i, j, n;
uint64_t status;
uint32_t ctrl;
u_int cpuid;
u_int regs[4];
struct pn_softc *sc;
struct powernow_state state;
struct cf_setting sets[POWERNOW_MAX_STATES];
int count = POWERNOW_MAX_STATES;
int type;
int rv;
if (perf_dev == NULL)
return (ENXIO);
rv = CPUFREQ_DRV_SETTINGS(perf_dev, sets, &count);
if (rv)
return (ENXIO);
rv = CPUFREQ_DRV_TYPE(perf_dev, &type);
if (rv || (type & CPUFREQ_FLAG_INFO_ONLY) == 0)
return (ENXIO);
sc = device_get_softc(dev);
do_cpuid(0x80000001, regs);
cpuid = regs[0];
if ((cpuid & 0xfff) == 0x760)
sc->errata |= A0_ERRATA;
ctrl = 0;
sc->sgtc = 0;
for (n = 0, i = 0; i < count; ++i) {
ctrl = sets[i].spec[PX_SPEC_CONTROL];
switch (sc->pn_type) {
case PN7_TYPE:
state.fid = ACPI_PN7_CTRL_TO_FID(ctrl);
state.vid = ACPI_PN7_CTRL_TO_VID(ctrl);
if ((sc->errata & A0_ERRATA) &&
(pn7_fid_to_mult[state.fid] % 10) == 5)
continue;
break;
case PN8_TYPE:
state.fid = ACPI_PN8_CTRL_TO_FID(ctrl);
state.vid = ACPI_PN8_CTRL_TO_VID(ctrl);
break;
}
state.freq = sets[i].freq * 1000;
state.power = sets[i].power;
j = n;
while (j > 0 && sc->powernow_states[j - 1].freq < state.freq) {
memcpy(&sc->powernow_states[j],
&sc->powernow_states[j - 1],
sizeof(struct powernow_state));
--j;
}
memcpy(&sc->powernow_states[j], &state,
sizeof(struct powernow_state));
++n;
}
sc->powernow_max_states = n;
state = sc->powernow_states[0];
status = rdmsr(MSR_AMDK7_FIDVID_STATUS);
switch (sc->pn_type) {
case PN7_TYPE:
sc->sgtc = ACPI_PN7_CTRL_TO_SGTC(ctrl);
/*
* XXX Some BIOSes forget the max frequency!
* This may indicate we have the wrong tables; therefore,
* don't implement a quirk, but fall back to the BIOS legacy
* tables instead.
*/
if (PN7_STA_MFID(status) != state.fid) {
device_printf(dev, "ACPI MAX frequency not found\n");
return (EINVAL);
}
sc->fsb = state.freq / 100 / pn7_fid_to_mult[state.fid];
break;
case PN8_TYPE:
sc->vst = ACPI_PN8_CTRL_TO_VST(ctrl),
sc->mvs = ACPI_PN8_CTRL_TO_MVS(ctrl),
sc->pll = ACPI_PN8_CTRL_TO_PLL(ctrl),
sc->rvo = ACPI_PN8_CTRL_TO_RVO(ctrl),
sc->irt = ACPI_PN8_CTRL_TO_IRT(ctrl);
sc->low = 0; /* XXX */
/*
* powernow k8 supports only one low frequency.
*/
if (sc->powernow_max_states >= 2 &&
(sc->powernow_states[sc->powernow_max_states - 2].fid < 8))
return (EINVAL);
sc->fsb = state.freq / 100 / pn8_fid_to_mult[state.fid];
break;
}
//......... part of the code omitted here .........
Example 14: cache_as_ram_main
void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
{
struct sys_info *sysinfo = (struct sys_info *)(CONFIG_DCACHE_RAM_BASE + CONFIG_DCACHE_RAM_SIZE - CONFIG_DCACHE_RAM_GLOBAL_VAR_SIZE);
u32 bsp_apicid = 0;
u32 val;
msr_t msr;
if (!cpu_init_detectedx && boot_cpu()) {
/* Nothing special needs to be done to find bus 0 */
/* Allow the HT devices to be found */
/* mov bsp to bus 0xff when > 8 nodes */
set_bsp_node_CHtExtNodeCfgEn();
enumerate_ht_chain();
/* Setup the rom access for 4M */
amd8111_enable_rom();
}
post_code(0x30);
if (bist == 0) {
bsp_apicid = init_cpus(cpu_init_detectedx, sysinfo); /* mmconf is inited in init_cpus */
/* All cores run this but the BSP(node0,core0) is the only core that returns. */
}
post_code(0x32);
w83627hf_enable_serial(SERIAL_DEV, CONFIG_TTYS0_BASE);
uart_init();
console_init();
printk(BIOS_DEBUG, "\n");
// dump_mem(CONFIG_DCACHE_RAM_BASE+CONFIG_DCACHE_RAM_SIZE-0x200, CONFIG_DCACHE_RAM_BASE+CONFIG_DCACHE_RAM_SIZE);
/* Halt if there was a built in self test failure */
report_bist_failure(bist);
// Load MPB
val = cpuid_eax(1);
printk(BIOS_DEBUG, "BSP Family_Model: %08x \n", val);
printk(BIOS_DEBUG, "*sysinfo range: [%p,%p]\n",sysinfo,sysinfo+1);
printk(BIOS_DEBUG, "bsp_apicid = %02x \n", bsp_apicid);
printk(BIOS_DEBUG, "cpu_init_detectedx = %08lx \n", cpu_init_detectedx);
/* Setup sysinfo defaults */
set_sysinfo_in_ram(0);
update_microcode(val);
post_code(0x33);
cpuSetAMDMSR();
post_code(0x34);
amd_ht_init(sysinfo);
post_code(0x35);
/* Setup nodes PCI space and start core 0 AP init. */
finalize_node_setup(sysinfo);
/* Setup any mainboard PCI settings etc. */
setup_mb_resource_map();
post_code(0x36);
/* wait for all the APs core0 started by finalize_node_setup. */
/* FIXME: A bunch of cores are going to start output to serial at once.
It would be nice to fix up the printk spinlocks for ROM XIP mode.
I think it could be done by putting the spinlock flag in the cache
of the BSP located right after sysinfo.
*/
wait_all_core0_started();
#if CONFIG_LOGICAL_CPUS==1
/* Core0 on each node is configured. Now setup any additional cores. */
printk(BIOS_DEBUG, "start_other_cores()\n");
start_other_cores();
post_code(0x37);
wait_all_other_cores_started(bsp_apicid);
#endif
post_code(0x38);
#if SET_FIDVID == 1
msr = rdmsr(0xc0010071);
printk(BIOS_DEBUG, "\nBegin FIDVID MSR 0xc0010071 0x%08x 0x%08x \n", msr.hi, msr.lo);
/* FIXME: The sb fid change may survive the warm reset and only
need to be done once.*/
enable_fid_change_on_sb(sysinfo->sbbusn, sysinfo->sbdn);
post_code(0x39);
if (!warm_reset_detect(0)) { // BSP is node 0
init_fidvid_bsp(bsp_apicid, sysinfo->nodes);
} else {
init_fidvid_stage2(bsp_apicid, 0); // BSP is node 0
}
post_code(0x3A);
//......... part of the code omitted here .........
Example 15: cache_as_ram_main
void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
{
static const u16 spd_addr[] = { DIMM0, 0, 0, 0, DIMM1, 0, 0, 0, };
int needs_reset = 0;
u32 bsp_apicid = 0;
msr_t msr;
struct cpuid_result cpuid1;
struct sys_info *sysinfo = &sysinfo_car;
if (!cpu_init_detectedx && boot_cpu()) {
/* Nothing special needs to be done to find bus 0 */
/* Allow the HT devices to be found */
enumerate_ht_chain();
/* sb600_lpc_port80(); */
sb600_pci_port80();
}
if (bist == 0)
bsp_apicid = init_cpus(cpu_init_detectedx, sysinfo);
enable_rs690_dev8();
sb600_lpc_init();
ite_enable_serial(SERIAL_DEV, CONFIG_TTYS0_BASE);
it8712f_kill_watchdog();
console_init();
/* Halt if there was a built in self test failure */
report_bist_failure(bist);
printk(BIOS_DEBUG, "bsp_apicid=0x%x\n", bsp_apicid);
setup_tim8690_resource_map();
setup_coherent_ht_domain();
#if CONFIG_LOGICAL_CPUS
/* It is said that we should start core1 only after all core0s have launched */
wait_all_core0_started();
start_other_cores();
#endif
wait_all_aps_started(bsp_apicid);
ht_setup_chains_x(sysinfo);
/* run _early_setup before soft-reset. */
rs690_early_setup();
sb600_early_setup();
/* Check to see if processor is capable of changing FIDVID */
/* otherwise it will throw a GP# when reading FIDVID_STATUS */
cpuid1 = cpuid(0x80000007);
if ((cpuid1.edx & 0x6) == 0x6 ) {
/* Read FIDVID_STATUS */
msr=rdmsr(0xc0010042);
printk(BIOS_DEBUG, "begin msr fid, vid: hi=0x%x, lo=0x%x\n", msr.hi, msr.lo);
enable_fid_change();
enable_fid_change_on_sb(sysinfo->sbbusn, sysinfo->sbdn);
init_fidvid_bsp(bsp_apicid);
/* show final fid and vid */
msr=rdmsr(0xc0010042);
printk(BIOS_DEBUG, "end msr fid, vid: hi=0x%x, lo=0x%x\n", msr.hi, msr.lo);
} else {
printk(BIOS_DEBUG, "Changing FIDVID not supported\n");
}
needs_reset = optimize_link_coherent_ht();
needs_reset |= optimize_link_incoherent_ht(sysinfo);
rs690_htinit();
printk(BIOS_DEBUG, "needs_reset=0x%x\n", needs_reset);
if (needs_reset) {
print_info("ht reset -\n");
soft_reset();
}
allow_all_aps_stop(bsp_apicid);
/* It's time to set ctrl now. */
printk(BIOS_DEBUG, "sysinfo->nodes: %2x sysinfo->ctrl: %p spd_addr: %p\n",
sysinfo->nodes, sysinfo->ctrl, spd_addr);
fill_mem_ctrl(sysinfo->nodes, sysinfo->ctrl, spd_addr);
sdram_initialize(sysinfo->nodes, sysinfo->ctrl, sysinfo);
rs690_before_pci_init();
sb600_before_pci_init();
post_cache_as_ram();
}