This page collects typical usage examples of the C++ function write_aux_reg. If you have been wondering what write_aux_reg does and how to use it, the curated examples below may help.
It shows 15 code examples of write_aux_reg, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
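For context, on ARC cores write_aux_reg and its counterpart read_aux_reg are thin wrappers around the sr/lr auxiliary-register-space instructions. A minimal sketch of how such wrappers are commonly defined (the exact form in a given kernel tree may differ, e.g. it may use compiler builtins instead of inline asm):

static inline void write_aux_reg(unsigned int reg, unsigned int val)
{
    /* sr: store val into the auxiliary register addressed by reg */
    __asm__ __volatile__("    sr %0, [%1]\n"
                 :
                 : "r"(val), "r"(reg));
}

static inline unsigned int read_aux_reg(unsigned int reg)
{
    unsigned int val;

    /* lr: load from the auxiliary register addressed by reg */
    __asm__ __volatile__("    lr %0, [%1]\n"
                 : "=r"(val)
                 : "r"(reg));
    return val;
}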
Example 1: eznps_init_core
static void eznps_init_core(unsigned int cpu)
{
    u32 sync_value;
    struct nps_host_reg_aux_hw_comply hw_comply;
    struct nps_host_reg_aux_lpc lpc;

    if (NPS_CPU_TO_THREAD_NUM(cpu) != 0)
        return;

    hw_comply.value = read_aux_reg(AUX_REG_HW_COMPLY);
    hw_comply.me = 1;
    hw_comply.le = 1;
#ifdef CONFIG_EZNPS_SHARED_TIMER
    hw_comply.te = 1;
#endif
    write_aux_reg(AUX_REG_HW_COMPLY, hw_comply.value);

    /* Enable MMU clock */
    lpc.mep = 1;
    write_aux_reg(CTOP_AUX_LPC, lpc.value);

    /* Boot CPU only */
    if (!cpu) {
        /* Write to general purpose register in CRG */
        sync_value = ioread32be(REG_GEN_PURP_0);
        sync_value |= NPS_CRG_SYNC_BIT;
        iowrite32be(sync_value, REG_GEN_PURP_0);
    }
}
Example 2: arc_timer_event_setup
/*
 * Arm the timer to interrupt after @limit cycles
 * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
 */
static void arc_timer_event_setup(unsigned int limit)
{
    write_aux_reg(ARC_REG_TIMER0_LIMIT, limit);
    write_aux_reg(ARC_REG_TIMER0_CNT, 0);    /* start from 0 */

    write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
}
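The arc_event_timer_ack() referenced in the comment is not part of this listing. As a hedged sketch of what such an ack plausibly does, assuming the same TIMER0 register names and TIMER_CTRL_* flags (the irq_reenable parameter is an assumption here): rewriting TIMER0_CTRL clears the pending interrupt, and IE is re-asserted only for periodic operation.

static void arc_event_timer_ack(unsigned int irq_reenable)
{
    /*
     * Rewriting CTRL acks the pending timer interrupt; keep counting
     * only while not halted (NH), and re-enable IE for periodic mode.
     */
    write_aux_reg(ARC_REG_TIMER0_CTRL,
              (irq_reenable ? TIMER_CTRL_IE : 0) | TIMER_CTRL_NH);
}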
Example 3: tlb_entry_insert
static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
{
    unsigned int idx;

    /*
     * First verify if an entry for this vaddr+ASID already exists.
     * This also sets up PD0 (vaddr, ASID...) for the final commit.
     */
    idx = tlb_entry_lkup(pd0);

    /*
     * If not already present, get a free slot from the MMU.
     * Otherwise, Probe would have located the entry and set the INDEX
     * reg to the existing location, causing the Write CMD to overwrite
     * the existing entry with the new PD0 and PD1.
     */
    if (likely(idx & TLB_LKUP_ERR))
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

    /* setup the other half of the TLB entry (pfn, rwx...) */
    write_aux_reg(ARC_REG_TLBPD1, pd1);

    /*
     * Commit the entry to the MMU.
     * It doesn't sound safe to use the TLBWriteNI cmd here,
     * which doesn't flush uTLBs. I'd rather be safe than sorry.
     */
    write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}
Example 4: arc_mmu_init
void arc_mmu_init(void)
{
    char str[256];
    struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

    printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));

    /*
     * For efficiency's sake, the kernel is built at compile time for one
     * MMU version, which must match the hardware it runs on.
     * Linux built for MMU v2, if run on MMU v1, will break because v1
     * hardware doesn't understand cmds such as WriteNI or IVUTLB.
     * Conversely, Linux built for v1, if run on MMU v2, will apply
     * unneeded workarounds to prevent memcpy thrashing.
     * Similarly, MMU v3 has new features which won't work on older MMUs.
     */
    if (mmu->ver != CONFIG_ARC_MMU_VER) {
        panic("MMU ver %d doesn't match kernel built for %d...\n",
              mmu->ver, CONFIG_ARC_MMU_VER);
    }

    if (mmu->pg_sz != PAGE_SIZE)
        panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

    /* Enable the MMU */
    write_aux_reg(ARC_REG_PID, MMU_ENABLE);

    /* In SMP we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
    /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
    write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
}
Example 5: arc_init_IRQ
/*
 * Early hardware-specific interrupt setup
 * -Called very early (start_kernel -> setup_arch -> setup_processor)
 * -Platform independent (a must for any ARC core)
 * -Needed for each CPU (hence not foldable into init_IRQ)
 */
void arc_init_IRQ(void)
{
    unsigned int tmp, irq_prio, i;
    struct bcr_irq_arcv2 irq_bcr;

    struct aux_irq_ctrl {
#ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int res3:18, save_idx_regs:1, res2:1,
                 save_u_to_u:1, save_lp_regs:1, save_blink:1,
                 res:4, save_nr_gpr_pairs:5;
#else
        unsigned int save_nr_gpr_pairs:5, res:4,
                 save_blink:1, save_lp_regs:1, save_u_to_u:1,
                 res2:1, save_idx_regs:1, res3:18;
#endif
    } ictrl;

    *(unsigned int *)&ictrl = 0;

    ictrl.save_nr_gpr_pairs = 6;    /* r0 to r11 (r12 saved manually) */
    ictrl.save_blink = 1;
    ictrl.save_lp_regs = 1;         /* LP_COUNT, LP_START, LP_END */
    ictrl.save_u_to_u = 0;          /* user ctxt saved on kernel stack */
    ictrl.save_idx_regs = 1;        /* JLI, LDI, EI */

    WRITE_AUX(AUX_IRQ_CTRL, ictrl);

    /*
     * The ARCv2 core intc provides multiple interrupt priorities (up to 16).
     * Typical builds though have only two levels (0-high, 1-low).
     * Linux by default uses the lower prio 1 for most irqs, reserving 0 for
     * NMI-style interrupts in the future (say perf).
     */
    READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);

    irq_prio = irq_bcr.prio;    /* encoded as N-1 for N levels */
    pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
        irq_prio + 1, ARCV2_IRQ_DEF_PRIO,
        irq_bcr.firq ? " FIRQ (not used)" : "");

    /*
     * Set a default priority for all available interrupts to prevent
     * switching of register banks if Fast IRQs and multiple register banks
     * are supported by the CPU.
     * Also disable all IRQ lines so faulty external hardware won't
     * trigger an interrupt that the kernel is not ready to handle.
     */
    for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
        write_aux_reg(AUX_IRQ_SELECT, i);
        write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
        write_aux_reg(AUX_IRQ_ENABLE, 0);
    }

    /* setup status32; don't enable intr yet as the kernel doesn't want it */
    tmp = read_aux_reg(ARC_REG_STATUS32);
    tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1);
    tmp &= ~STATUS_IE_MASK;
    asm volatile("kflag %0\n" : : "r"(tmp));
}
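The WRITE_AUX macro used above is how the ARC port typically pushes a whole bitfield struct through write_aux_reg in one go. A minimal sketch of the idea (the exact macro in a given tree may differ slightly):

/* reinterpret the bitfield struct as a raw 32-bit value and write it */
#define WRITE_AUX(auxreg, val) \
    write_aux_reg(auxreg, *((unsigned int *)&(val)))

This is why arc_init_IRQ can fill in named fields like save_blink and save_lp_regs and still commit them with a single auxiliary-register write.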
Example 6: utlb_invalidate
static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
    /*
     * MMU v2 introduced the uTLB Flush command.
     * There was however an obscure hardware bug, where a uTLB flush would
     * fail when a prior probe for the J-TLB (totally unrelated) had
     * returned a lookup err - because the entry didn't exist in the MMU.
     * The workaround was to set the Index reg to some valid value prior
     * to the flush. This was fixed in MMU v3, so it is no longer needed.
     */
    unsigned int idx;

    /* make sure the INDEX reg is valid */
    idx = read_aux_reg(ARC_REG_TLBINDEX);

    /* If not, write some dummy value */
    if (unlikely(idx & TLB_LKUP_ERR))
        write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

    write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif
}
Example 7: __cache_line_loop
/*
 * Common helper for line operations on the {I,D}-cache
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
                      unsigned long sz, const int cacheop)
{
    unsigned int aux_cmd, aux_tag;
    int num_lines;
    const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

    if (cacheop == OP_INV_IC) {
        aux_cmd = ARC_REG_IC_IVIL;
#if (CONFIG_ARC_MMU_VER > 2)
        aux_tag = ARC_REG_IC_PTAG;
#endif
    } else {
        /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
        aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
#if (CONFIG_ARC_MMU_VER > 2)
        aux_tag = ARC_REG_DC_PTAG;
#endif
    }

    /*
     * Ensure we properly floor/ceil the non-line-aligned/sized requests
     * and have @paddr aligned to a cache line and @num_lines integral.
     * This however can be avoided for page-sized ops, since:
     *  -@paddr will be cache-line aligned already (being page aligned)
     *  -@sz will be an integral multiple of the line size (being page sized).
     */
    if (!full_page_op) {
        sz += paddr & ~CACHE_LINE_MASK;
        paddr &= CACHE_LINE_MASK;
        vaddr &= CACHE_LINE_MASK;
    }

    num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

#if (CONFIG_ARC_MMU_VER <= 2)
    /* MMU v2 and before: paddr contains stuffed vaddr bits */
    paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#else
    /* if V-P is constant for the loop, PTAG can be written once outside it */
    if (full_page_op)
        write_aux_reg(aux_tag, paddr);
#endif

    while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
        /* MMU v3: cache ops require paddr separately */
        if (!full_page_op) {
            write_aux_reg(aux_tag, paddr);
            paddr += L1_CACHE_BYTES;
        }

        write_aux_reg(aux_cmd, vaddr);
        vaddr += L1_CACHE_BYTES;
#else
        write_aux_reg(aux_cmd, paddr);
        paddr += L1_CACHE_BYTES;
#endif
    }
}
Example 8: __dc_entire_op
/*
 * Operation on the entire D-cache
 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in the generated code.
 */
static inline void __dc_entire_op(const int cacheop)
{
    unsigned int tmp = tmp;    /* self-init silences "may be used uninitialized" */
    int aux;

    if (cacheop == OP_FLUSH_N_INV) {
        /*
         * The D-cache provides 2 cmds: FLUSH or INV.
         * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE.
         * flush-n-inv is achieved by the INV cmd but with IM=1.
         * The default INV sub-mode is DISCARD, which needs to be toggled.
         */
        tmp = read_aux_reg(ARC_REG_DC_CTRL);
        write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
    }

    if (cacheop & OP_INV)    /* Inv or flush-n-inv use the same cmd reg */
        aux = ARC_REG_DC_IVDC;
    else
        aux = ARC_REG_DC_FLSH;

    write_aux_reg(aux, 0x1);

    if (cacheop & OP_FLUSH)    /* flush / flush-n-inv both wait */
        wait_for_flush();

    /* Switch back to the DISCARD-only invalidate mode */
    if (cacheop == OP_FLUSH_N_INV)
        write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
}
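wait_for_flush() is not shown in this listing. A minimal sketch of what it plausibly does, assuming a flush-in-progress status bit named DC_CTRL_FLUSH_STATUS in ARC_REG_DC_CTRL (the flag name is an assumption here):

static inline void wait_for_flush(void)
{
    /* spin until the D-cache controller clears its flush-in-progress bit */
    while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
        ;
}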
Example 9: __dc_line_op
/*
 * D-cache: per-line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
                unsigned long sz, const int cacheop)
{
    unsigned long flags, tmp = tmp;    /* self-init silences the warning */

    local_irq_save(flags);

    if (cacheop == OP_FLUSH_N_INV) {
        /*
         * The D-cache provides 2 cmds: FLUSH or INV.
         * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE.
         * flush-n-inv is achieved by the INV cmd but with IM=1.
         * The default INV sub-mode is DISCARD, which needs to be toggled.
         */
        tmp = read_aux_reg(ARC_REG_DC_CTRL);
        write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
    }

    __cache_line_loop(paddr, vaddr, sz, cacheop);

    if (cacheop & OP_FLUSH)    /* flush / flush-n-inv both wait */
        wait_for_flush();

    /* Switch back to the DISCARD-only invalidate mode */
    if (cacheop == OP_FLUSH_N_INV)
        write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

    local_irq_restore(flags);
}
Example 10: arc_counter_setup
/*
 * Set the 32-bit TIMER1 to keep counting monotonically and wrap around.
 */
int __cpuinit arc_counter_setup(void)
{
    write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
    write_aux_reg(ARC_REG_TIMER1_CNT, 0);
    write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);

    return is_usable_as_clocksource();
}
Example 11: arc_counter_setup
/*
 * Set the 32-bit TIMER1 to keep counting monotonically and wrap around.
 */
int arc_counter_setup(void)
{
    write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
    write_aux_reg(ARC_REG_TIMER1_CNT, 0);
    write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);

    /* Not usable in SMP */
    return !IS_ENABLED(CONFIG_SMP);
}
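Once TIMER1 free-runs like this, reading it back as a clocksource is a single read_aux_reg of the count register. A sketch of such a read hook (the function name and clocksource wiring are assumptions, not part of this listing; older trees type the return as cycle_t, newer ones use u64):

static cycle_t arc_counter_read(struct clocksource *cs)
{
    /* TIMER1 free-runs from 0 to ARC_TIMER_MAX, wrapping naturally */
    return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
}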
Example 12: tlb_entry_lkup
static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
    unsigned int idx;

    /* present the vaddr+ASID key in PD0, then ask the MMU to probe */
    write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
    write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);

    /* INDEX now holds the matching slot, or TLB_LKUP_ERR on a miss */
    idx = read_aux_reg(ARC_REG_TLBINDEX);

    return idx;
}
Example 13: arcv2_irq_enable
void arcv2_irq_enable(struct irq_data *data)
{
    /* set the default priority */
    write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
    write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);

    /*
     * hw auto-enables (linux unmask) all by default,
     * so there is no need to do IRQ_ENABLE here.
     * XXX: However OSCI LAN needs it.
     */
    write_aux_reg(AUX_IRQ_ENABLE, 1);
}
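The matching disable path follows the same select-then-write pattern against the banked per-IRQ registers. A minimal sketch of what the counterpart plausibly looks like (the function name mirrors the one above and is an assumption here):

void arcv2_irq_disable(struct irq_data *data)
{
    /* select the line, then mask it at the core intc */
    write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
    write_aux_reg(AUX_IRQ_ENABLE, 0);
}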
Example 14: iss_model_smp_wakeup_cpu
/*
 * Master kick-starting another CPU
 */
static void iss_model_smp_wakeup_cpu(int cpu, unsigned long pc)
{
    /* setup the start PC */
    write_aux_reg(ARC_AUX_XTL_REG_PARAM, pc);

    /* Trigger the WRITE_PC cmd for this cpu */
    write_aux_reg(ARC_AUX_XTL_REG_CMD,
              (ARC_XTL_CMD_WRITE_PC | (cpu << 8)));

    /* Take the cpu out of Halt */
    write_aux_reg(ARC_AUX_XTL_REG_CMD,
              (ARC_XTL_CMD_CLEAR_HALT | (cpu << 8)));
}
Example 15: __cache_line_loop_v4
/*
 * In HS38x (MMU v4), although the icache is VIPT, only paddr is needed for
 * cache maintenance ops (in the IVIL reg), as long as the icache doesn't
 * alias.
 *
 * For an aliasing icache, vaddr is also needed (in IVIL), while paddr is
 * specified in PTAG (similar to MMU v3).
 */
static inline
void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
              unsigned long sz, const int cacheop)
{
    unsigned int aux_cmd;
    int num_lines;
    const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

    if (cacheop == OP_INV_IC) {
        aux_cmd = ARC_REG_IC_IVIL;
    } else {
        /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
        aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
    }

    /*
     * Ensure we properly floor/ceil the non-line-aligned/sized requests
     * and have @paddr aligned to a cache line and @num_lines integral.
     * This however can be avoided for page-sized ops, since:
     *  -@paddr will be cache-line aligned already (being page aligned)
     *  -@sz will be an integral multiple of the line size (being page sized).
     */
    if (!full_page_op) {
        sz += paddr & ~CACHE_LINE_MASK;
        paddr &= CACHE_LINE_MASK;
    }

    num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

    while (num_lines-- > 0) {
        write_aux_reg(aux_cmd, paddr);
        paddr += L1_CACHE_BYTES;
    }
}
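Examples 7 and 15 are per-MMU-version variants of the same helper, so callers never pick a variant by hand. A plausible sketch of how the right one gets selected at build time (the _v2/_v3 names are assumptions, inferred from the naming of __cache_line_loop_v4):

/* bind the generic name to the variant matching the configured MMU */
#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop    __cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop    __cache_line_loop_v3
#else
#define __cache_line_loop    __cache_line_loop_v4
#endif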