This article collects typical usage examples of the C/C++ read_c0_entryhi function. If you have been wondering how read_c0_entryhi is used in practice, or what real code that calls it looks like, the hand-picked examples below should help.
The following shows 15 code examples of the read_c0_entryhi function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better code examples.
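Most of the examples below follow the same pattern: read_c0_entryhi() saves the current EntryHi register (which holds the VPN2 and ASID of the running context) before the TLB is probed or rewritten, and write_c0_entryhi() restores it afterwards. The minimal sketch here is not taken from any one of the examples; it assumes the Linux/MIPS accessor macros (read_c0_entryhi, write_c0_entryhi, local_irq_save/restore from <asm/mipsregs.h> and <linux/irqflags.h>), and the function name entryhi_save_restore_sketch is made up for illustration.

/* Minimal sketch of the save/restore pattern used throughout the examples. */
static void entryhi_save_restore_sketch(void)
{
    unsigned long flags;
    unsigned long old_entryhi;

    local_irq_save(flags);              /* TLB registers are per-CPU state */
    old_entryhi = read_c0_entryhi();    /* VPN2 + ASID of the current context */

    /* ... load EntryHi/EntryLo, tlb_probe(), tlb_write_indexed(), etc. ... */

    write_c0_entryhi(old_entryhi);      /* put the original context back */
    local_irq_restore(flags);
}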
Example 1: arch_vcpu_regs_init
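/*
 * Xvisor: initialize a VCPU's register file. read_c0_entryhi() seeds the
 * saved EntryHi (VPN2 + ASID) with the host's current value; for normal
 * VCPUs running guests the ASID field is then overridden for the guest.
 */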
int arch_vcpu_regs_init(struct vmm_vcpu *vcpu)
{
    vmm_memset(mips_uregs(vcpu), 0, sizeof(arch_regs_t));

    if (!vcpu->is_normal) {
        /* For orphan vcpu */
        mips_uregs(vcpu)->cp0_epc = vcpu->start_pc;
        mips_uregs(vcpu)->regs[SP_IDX] = vcpu->start_sp;
        mips_uregs(vcpu)->regs[S8_IDX] = mips_uregs(vcpu)->regs[SP_IDX];
        mips_uregs(vcpu)->cp0_status = read_c0_status();
        mips_uregs(vcpu)->cp0_entryhi = read_c0_entryhi();
    } else {
        /* For normal vcpu running guests */
        mips_sregs(vcpu)->cp0_regs[CP0_CAUSE_IDX] = 0x400;
        mips_sregs(vcpu)->cp0_regs[CP0_STATUS_IDX] = 0x40004;
        mips_uregs(vcpu)->cp0_status = read_c0_status() | (0x01UL << CP0_STATUS_UM_SHIFT);
        mips_uregs(vcpu)->cp0_entryhi = read_c0_entryhi();
        mips_uregs(vcpu)->cp0_entryhi &= ASID_MASK;
        mips_uregs(vcpu)->cp0_entryhi |= (0x2 << ASID_SHIFT);
        mips_uregs(vcpu)->cp0_epc = vcpu->start_pc;
        /* All guest run from 0 and fault */
        mips_sregs(vcpu)->cp0_regs[CP0_EPC_IDX] = vcpu->start_pc;
        /* Give guest the same CPU cap as we have */
        mips_sregs(vcpu)->cp0_regs[CP0_PRID_IDX] = read_c0_prid();
    }

    return VMM_OK;
}
Example 2: local_flush_tlb_all
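/*
 * Flush the whole TLB: save the current ASID from EntryHi, load an
 * impossible VPN2 (CKSEG0) with zeroed EntryLo registers, rewrite every
 * non-wired entry, then restore the saved ASID.
 */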
void local_flush_tlb_all(void)
{
    unsigned long flags;
    unsigned long old_ctx;
    unsigned long entry;

#ifdef DEBUG_TLB
    printk("[tlball]");
#endif

    local_irq_save(flags);
    /* Save old context and create impossible VPN2 value */
    old_ctx = read_c0_entryhi() & ASID_MASK;
    write_c0_entryhi(CKSEG0);
    write_c0_entrylo0(0);
    write_c0_entrylo1(0);

    entry = read_c0_wired();

    /* Blast 'em all away. */
    while (entry < NTLB_ENTRIES) {
        write_c0_index(entry);
        tlb_write_indexed();
        entry++;
    }

    write_c0_entryhi(old_ctx);
    local_irq_restore(flags);
}
Example 3: local_flush_tlb_all
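/*
 * Flush the whole TLB (R4000-style): save EntryHi, invalidate every
 * non-wired entry by writing a unique impossible VPN2 per index
 * (UNIQUE_ENTRYHI), then restore EntryHi and flush the ITLB.
 */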
void local_flush_tlb_all(void)
{
    unsigned long flags;
    unsigned long old_ctx;
    int entry;

    ENTER_CRITICAL(flags);
    /* Save old context and create impossible VPN2 value */
    old_ctx = read_c0_entryhi();
    write_c0_entrylo0(0);
    write_c0_entrylo1(0);

    entry = read_c0_wired();

    /* Blast 'em all away. */
    while (entry < current_cpu_data.tlbsize) {
        /* Make sure all entries differ. */
        write_c0_entryhi(UNIQUE_ENTRYHI(entry));
        write_c0_index(entry);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        entry++;
    }
    tlbw_use_hazard();

    write_c0_entryhi(old_ctx);
    FLUSH_ITLB;
    EXIT_CRITICAL(flags);
}
Example 4: tx4939_proc_show_cp0
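/*
 * TX4939 /proc handler: dump the CP0 register set, including EntryHi as
 * returned by read_c0_entryhi(), into a text buffer.
 */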
static int
tx4939_proc_show_cp0(char *sysbuf, char **start, off_t off,
                     int count, int *eof, void *data)
{
    int len = 0;

    len += sprintf(sysbuf + len, "INDEX :0x%08x\n", read_c0_index());
    len += sprintf(sysbuf + len, "ENTRYLO0:0x%08lx\n", read_c0_entrylo0());
    len += sprintf(sysbuf + len, "ENTRYLO1:0x%08lx\n", read_c0_entrylo1());
    len += sprintf(sysbuf + len, "CONTEXT :0x%08lx\n", read_c0_context());
    len += sprintf(sysbuf + len, "PAGEMASK:0x%08x\n", read_c0_pagemask());
    len += sprintf(sysbuf + len, "WIRED :0x%08x\n", read_c0_wired());
    len += sprintf(sysbuf + len, "COUNT :0x%08x\n", read_c0_count());
    len += sprintf(sysbuf + len, "ENTRYHI :0x%08lx\n", read_c0_entryhi());
    len += sprintf(sysbuf + len, "COMPARE :0x%08x\n", read_c0_compare());
    len += sprintf(sysbuf + len, "STATUS :0x%08x\n", read_c0_status());
    len += sprintf(sysbuf + len, "CAUSE :0x%08x\n", read_c0_cause());
    len += sprintf(sysbuf + len, "PRId :0x%08x\n", read_c0_prid());
    len += sprintf(sysbuf + len, "CONFIG :0x%08x\n", read_c0_config());
    len += sprintf(sysbuf + len, "XCONTEXT:0x%08lx\n", read_c0_xcontext());
    len += sprintf(sysbuf + len, "TagLo :0x%08x\n", read_c0_taglo());
    len += sprintf(sysbuf + len, "TagHi :0x%08x\n", read_c0_taghi());
    len += sprintf(sysbuf + len, "ErrorEPC:0x%08lx\n", read_c0_errorepc());
    *eof = 1;

    return len;
}
Example 5: local_flush_tlb_page
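/*
 * Invalidate the TLB entry for one page of a given mm: save the current
 * EntryHi, probe for the page under the mm's ASID, and if a matching entry
 * exists overwrite it with an unusable mapping; EntryHi is restored at the end.
 */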
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
    int cpu = smp_processor_id();
    unsigned long flags;
    int oldpid, newpid;
    signed long idx;

    if (!cpu_context(cpu, vma->vm_mm))
        return;

    newpid = cpu_asid(cpu, vma->vm_mm);
    page &= PAGE_MASK;

    local_irq_save(flags);
    oldpid = read_c0_entryhi();
    write_c0_vaddr(page);
    write_c0_entryhi(newpid);
    tlb_probe();
    idx = read_c0_tlbset();
    if (idx < 0)
        goto finish;

    write_c0_entrylo(0);
    write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
    tlb_write();

finish:
    write_c0_entryhi(oldpid);
    local_irq_restore(flags);
}
Example 6: __update_tlb
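/*
 * Refill/update the TLB entry for a faulting address: the current ASID is
 * taken from EntryHi and used to probe for the address, then the new PTE
 * value is written back through EntryLo into the matching entry.
 */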
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needy.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
    unsigned long flags;
    pgd_t *pgdp;
    pmd_t *pmdp;
    pte_t *ptep;
    int pid;

    /*
     * Handle debugger faulting in for debugee.
     */
    if (current->active_mm != vma->vm_mm)
        return;

    pid = read_c0_entryhi() & ASID_MASK;

    local_irq_save(flags);
    address &= PAGE_MASK;
    write_c0_vaddr(address);
    write_c0_entryhi(pid);
    pgdp = pgd_offset(vma->vm_mm, address);
    pmdp = pmd_offset(pgdp, address);
    ptep = pte_offset_map(pmdp, address);
    tlb_probe();
    write_c0_entrylo(pte_val(*ptep++) >> 6);
    tlb_write();
    write_c0_entryhi(pid);
    local_irq_restore(flags);
}
Example 7: local_flush_tlb_all
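/*
 * Flush the whole TLB on a 64-bit CPU: save the ASID from EntryHi, point
 * every non-wired entry at a unique XKPHYS address so all entries differ,
 * then restore the saved ASID.
 */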
void local_flush_tlb_all(void)
{
    unsigned long flags;
    unsigned long old_ctx;
    int entry;

#ifdef DEBUG_TLB
    printk("[tlball]");
#endif

    local_irq_save(flags);
    /* Save old context and create impossible VPN2 value */
    old_ctx = (read_c0_entryhi() & ASID_MASK);
    write_c0_entryhi(XKPHYS);
    write_c0_entrylo0(0);
    write_c0_entrylo1(0);
    BARRIER;

    entry = read_c0_wired();

    /* Blast 'em all away. */
    while(entry < current_cpu_data.tlbsize) {
        /* Make sure all entries differ. */
        write_c0_entryhi(XKPHYS+entry*0x2000);
        write_c0_index(entry);
        BARRIER;
        tlb_write_indexed();
        BARRIER;
        entry++;
    }
    BARRIER;
    write_c0_entryhi(old_ctx);
    local_irq_restore(flags);
}
Example 8: do_general_exception
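/*
 * Xvisor general-exception handler: read_c0_entryhi() provides the ASID of
 * the context that took the exception, which is then used to decide how to
 * handle coprocessor-unusable and TLB-load faults coming from a guest.
 */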
u32 do_general_exception(arch_regs_t *uregs)
{
    u32 cp0_cause = read_c0_cause();
    u32 cp0_status = read_c0_status();
    mips32_entryhi_t ehi;
    u32 victim_asid;
    u32 victim_inst;
    struct vmm_vcpu *c_vcpu;
    u8 delay_slot_exception = IS_BD_SET(cp0_cause);

    ehi._entryhi = read_c0_entryhi();
    victim_asid = ehi._s_entryhi.asid >> ASID_SHIFT;
    c_vcpu = vmm_scheduler_current_vcpu();

    /*
     * When exception is happening in the delay slot. We need to emulate
     * the corresponding branch instruction first. If its one of the "likely"
     * instructions, we don't need to emulate the faulting instruction since
     * "likely" instructions don't allow slot to be executed if branch is not
     * taken.
     */
    if (delay_slot_exception) {
        victim_inst = *((u32 *)(uregs->cp0_epc + 4));

        /*
         * If this function returns zero, the branch instruction was a
         * "likely" instruction and the branch wasn't taken. So don't
         * execute the delay slot, just return. The correct EPC to return
         * to will be programmed under our feet.
         */
        if (!cpu_vcpu_emulate_branch_and_jump_inst(c_vcpu, *((u32 *)uregs->cp0_epc), uregs)) {
            return VMM_OK;
        }
    } else {
        victim_inst = *((u32 *)uregs->cp0_epc);
    }

    switch (EXCEPTION_CAUSE(cp0_cause)) {
    case EXEC_CODE_COPU:
        cpu_vcpu_emulate_cop_inst(c_vcpu, victim_inst, uregs);
        if (!delay_slot_exception)
            uregs->cp0_epc += 4;
        break;

    case EXEC_CODE_TLBL:
        if (CPU_IN_USER_MODE(cp0_status) && is_vmm_asid(ehi._s_entryhi.asid)) {
            ehi._s_entryhi.asid = (0x1 << ASID_SHIFT);
            write_c0_entryhi(ehi._entryhi);
            vmm_panic("CPU is in user mode and ASID is pointing to VMM!!\n");
        }
        break;
    }

    return VMM_OK;
}
Example 9: print_registers
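/*
 * Panic helper: print the saved CP0 Status/Cause/EPC together with the
 * live BadVAddr and EntryHi registers.
 */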
static void print_registers(struct pt_regs *regs)
{
    printk("Panic status %p, cause %p, epc %p,bd %p, entry hi %x\n",
           regs->cp0_status,
           regs->cp0_cause,
           regs->cp0_epc,
           read_c0_badvaddr(),
           read_c0_entryhi());
}
Example 10: dump_tlb
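/*
 * Dump TLB entries first..last: the current ASID is read from EntryHi,
 * each entry is read back with tlbr, and only in-use entries whose ASID
 * matches are printed; the original EntryHi is written back afterwards.
 */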
void
dump_tlb(int first, int last)
{
    int i;
    unsigned int asid;
    unsigned long entryhi, entrylo0;

    asid = read_c0_entryhi() & 0xfc0;

    for(i=first;i<=last;i++)
    {
        write_c0_index(i<<8);
        __asm__ __volatile__(
            ".set\tnoreorder\n\t"
            "tlbr\n\t"
            "nop\n\t"
            ".set\treorder");
        entryhi = read_c0_entryhi();
        entrylo0 = read_c0_entrylo0();

        /* Unused entries have a virtual address of KSEG0. */
        if ((entryhi & 0xffffe000) != 0x80000000
            && (entryhi & 0xfc0) == asid) {
            /*
             * Only print entries in use
             */
            printk("Index: %2d ", i);
            printk("va=%08lx asid=%08lx"
                   " [pa=%06lx n=%d d=%d v=%d g=%d]",
                   (entryhi & 0xffffe000),
                   entryhi & 0xfc0,
                   entrylo0 & PAGE_MASK,
                   (entrylo0 & (1 << 11)) ? 1 : 0,
                   (entrylo0 & (1 << 10)) ? 1 : 0,
                   (entrylo0 & (1 << 9)) ? 1 : 0,
                   (entrylo0 & (1 << 8)) ? 1 : 0);
        }
    }
    printk("\n");

    write_c0_entryhi(asid);
}
Example 11: dump_tlb
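/*
 * Same TLB dump using the R3K_ENTRYLO_* bit definitions: global entries,
 * or entries whose ASID matches the one currently in EntryHi, are printed.
 */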
static void dump_tlb(int first, int last)
{
    int i;
    unsigned int asid;
    unsigned long entryhi, entrylo0;

    asid = read_c0_entryhi() & ASID_MASK;

    for (i = first; i <= last; i++) {
        write_c0_index(i<<8);
        __asm__ __volatile__(
            ".set\tnoreorder\n\t"
            "tlbr\n\t"
            "nop\n\t"
            ".set\treorder");
        entryhi = read_c0_entryhi();
        entrylo0 = read_c0_entrylo0();

        /* Unused entries have a virtual address of KSEG0. */
        if ((entryhi & PAGE_MASK) != KSEG0 &&
            (entrylo0 & R3K_ENTRYLO_G ||
             (entryhi & ASID_MASK) == asid)) {
            /*
             * Only print entries in use
             */
            printk("Index: %2d ", i);
            printk("va=%08lx asid=%08lx"
                   " [pa=%06lx n=%d d=%d v=%d g=%d]",
                   entryhi & PAGE_MASK,
                   entryhi & ASID_MASK,
                   entrylo0 & PAGE_MASK,
                   (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
                   (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
                   (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
                   (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
        }
    }
    printk("\n");

    write_c0_entryhi(asid);
}
Example 12: ipu_add_wired_entry
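/*
 * Lock one wired TLB entry mapping the reserved IPU memory: the current
 * ASID (low bits of EntryHi) and PageMask are saved, a new wired entry is
 * written with the target process's ASID, then EntryHi and PageMask are
 * restored and the TLB is flushed.
 */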
static void ipu_add_wired_entry(unsigned long pid,
                                unsigned long entrylo0, unsigned long entrylo1,
                                unsigned long entryhi, unsigned long pagemask)
{
    unsigned long flags;
    unsigned long wired;
    unsigned long old_pagemask;
    unsigned long old_ctx;
    struct task_struct *g, *p;

    /* We will lock an 4MB page size entry to map the 4MB reserved IPU memory */
    wired = read_c0_wired();
    if (wired) return;

    do_each_thread(g, p) {
        if (p->pid == pid )
            g_asid = p->mm->context[0];
    } while_each_thread(g, p);

    local_irq_save(flags);

    entrylo0 = entrylo0 >> 6;   /* PFN */
    entrylo0 |= 0x6 | (0 << 3); /* Write-through cacheable, dirty, valid */

    /* Save old context and create impossible VPN2 value */
    old_ctx = read_c0_entryhi() & 0xff;
    old_pagemask = read_c0_pagemask();
    wired = read_c0_wired();
    write_c0_wired(wired + 1);
    write_c0_index(wired);
    BARRIER;
    entryhi &= ~0xff;           /* new add, 20070906 */
    entryhi |= g_asid;          /* new add, 20070906 */
    // entryhi |= old_ctx;      /* new add, 20070906 */
    write_c0_pagemask(pagemask);
    write_c0_entryhi(entryhi);
    write_c0_entrylo0(entrylo0);
    write_c0_entrylo1(entrylo1);
    BARRIER;
    tlb_write_indexed();
    BARRIER;
    write_c0_entryhi(old_ctx);
    BARRIER;
    write_c0_pagemask(old_pagemask);
    local_flush_tlb_all();
    local_irq_restore(flags);

#if defined(DEBUG)
    printk("\nold_ctx=%03d\n", old_ctx);
    show_tlb();
#endif
}
Example 13: local_flush_tlb_range
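/*
 * Flush a virtual address range for one mm: if the range is small enough,
 * probe each page pair under the mm's ASID and invalidate any matching
 * entries, restoring the old EntryHi afterwards; otherwise drop the whole
 * MMU context.
 */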
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
    struct mm_struct *mm = vma->vm_mm;
    int cpu = smp_processor_id();

    if (cpu_context(cpu, mm) != 0) {
        unsigned long size, flags;
        unsigned long config6_flags;

        ENTER_CRITICAL(flags);
        disable_pgwalker(config6_flags);

        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= current_cpu_data.tlbsize/2) {
            int oldpid = read_c0_entryhi();
            int newpid = cpu_asid(cpu, mm);

            start &= (PAGE_MASK << 1);
            end += ((PAGE_SIZE << 1) - 1);
            end &= (PAGE_MASK << 1);
            while (start < end) {
                int idx;

                write_c0_entryhi(start | newpid);
                start += (PAGE_SIZE << 1);
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                    continue;
                /* Make sure all entries differ. */
#ifndef CONFIG_NLM_VMIPS
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
#else
                __write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(idx)));
#endif
                mtc0_tlbw_hazard();
                tlb_write_indexed();
            }
            tlbw_use_hazard();
            write_c0_entryhi(oldpid);
        } else {
            drop_mmu_context(mm, cpu);
        }
        FLUSH_ITLB;
        enable_pgwalker(config6_flags);
        EXIT_CRITICAL(flags);
    }
}
Example 14: local_flush_tlb_all
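/*
 * Modern local_flush_tlb_all(): save EntryHi, then either use tlbinvf to
 * invalidate the VTLB and each FTLB set (when there are no wired entries)
 * or fall back to writing unique EntryHi values entry by entry.
 */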
void local_flush_tlb_all(void)
{
    unsigned long flags;
    unsigned long old_ctx;
    int entry, ftlbhighset;

    local_irq_save(flags);
    /* Save old context and create impossible VPN2 value */
    old_ctx = read_c0_entryhi();
    htw_stop();
    write_c0_entrylo0(0);
    write_c0_entrylo1(0);

    entry = read_c0_wired();

    /*
     * Blast 'em all away.
     * If there are any wired entries, fall back to iterating
     */
    if (cpu_has_tlbinv && !entry) {
        if (current_cpu_data.tlbsizevtlb) {
            write_c0_index(0);
            mtc0_tlbw_hazard();
            tlbinvf();  /* invalidate VTLB */
        }
        ftlbhighset = current_cpu_data.tlbsizevtlb +
                      current_cpu_data.tlbsizeftlbsets;
        for (entry = current_cpu_data.tlbsizevtlb;
             entry < ftlbhighset;
             entry++) {
            write_c0_index(entry);
            mtc0_tlbw_hazard();
            tlbinvf();  /* invalidate one FTLB set */
        }
    } else {
        while (entry < current_cpu_data.tlbsize) {
            /* Make sure all entries differ. */
            write_c0_entryhi(UNIQUE_ENTRYHI(entry));
            write_c0_index(entry);
            mtc0_tlbw_hazard();
            tlb_write_indexed();
            entry++;
        }
    }
    tlbw_use_hazard();
    write_c0_entryhi(old_ctx);
    htw_start();
    flush_micro_tlb();
    local_irq_restore(flags);
}
Example 15: local_flush_tlb_range
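/*
 * Older 64-bit local_flush_tlb_range(): the same small-range strategy,
 * using XKPHYS-based unique EntryHi values to invalidate matched entries
 * before restoring the previous ASID.
 */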
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                           unsigned long end)
{
    int cpu = smp_processor_id();

    if (cpu_context(cpu, mm) != 0) {
        unsigned long flags;
        int size;

#ifdef DEBUG_TLB
        printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & ASID_MASK),
               start, end);
#endif
        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if(size <= current_cpu_data.tlbsize/2) {
            int oldpid = read_c0_entryhi() & ASID_MASK;
            int newpid = cpu_asid(cpu, mm);

            start &= (PAGE_MASK << 1);
            end += ((PAGE_SIZE << 1) - 1);
            end &= (PAGE_MASK << 1);
            while(start < end) {
                int idx;

                write_c0_entryhi(start | newpid);
                start += (PAGE_SIZE << 1);
                BARRIER;
                tlb_probe();
                BARRIER;
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if(idx < 0)
                    continue;
                /* Make sure all entries differ. */
                write_c0_entryhi(XKPHYS+idx*0x2000);
                BARRIER;
                tlb_write_indexed();
                BARRIER;
            }
            write_c0_entryhi(oldpid);
        } else {
            drop_mmu_context(mm, cpu);
        }
        local_irq_restore(flags);
    }
}