This article collects typical usage examples of the MIPS_PHYS_TO_KSEG0 function in C++. If you have been wondering how MIPS_PHYS_TO_KSEG0 is used in C++, what exactly it does, or what real calls to it look like, the curated examples below should help.
The following presents 15 code examples of MIPS_PHYS_TO_KSEG0, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
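Background: MIPS_PHYS_TO_KSEG0 converts a physical address into a kernel virtual address in KSEG0, the fixed, cached, unmapped 512 MB window of the 32-bit MIPS address space. Because the mapping is a constant offset, the conversion is pure bit arithmetic. A minimal sketch of the idea, assuming the conventional MIPS memory map (the real definitions live in the platform headers, e.g. NetBSD's <mips/cpuregs.h>):

#include <stdint.h>

/* Segment bases of the classic 32-bit MIPS address map. */
#define MIPS_KSEG0_START        0x80000000u     /* unmapped, cached */
#define MIPS_KSEG1_START        0xa0000000u     /* unmapped, uncached */
#define MIPS_KSEG2_START        0xc0000000u     /* mapped through the TLB */

/* KSEG0 aliases physical 0-512 MB at a fixed offset, so converting
 * is just setting (or masking off) the segment bits. */
#define MIPS_PHYS_TO_KSEG0(pa)  ((uintptr_t)(pa) | MIPS_KSEG0_START)
#define MIPS_KSEG0_TO_PHYS(va)  ((uintptr_t)(va) & 0x1fffffffu)

For example, MIPS_PHYS_TO_KSEG0(0) yields 0x80000000, the cached alias of physical address 0; the cache routines below rely on this to turn cache-index bits into addresses that are always valid kernel virtual addresses.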
Example 1: tx3900_icache_sync_all_16
void
tx3900_icache_sync_all_16(void)
{

        tx3900_icache_do_inv_index_16(MIPS_PHYS_TO_KSEG0(0),
            MIPS_PHYS_TO_KSEG0(mips_cache_info.mci_picache_size));
}
Example 2: tx3920_icache_sync_all_16wb
void
tx3920_icache_sync_all_16wb(void)
{

        mips_dcache_wbinv_all();
        __asm volatile(".set push; .set mips2; sync; .set pop");
        tx3920_icache_do_inv_16(MIPS_PHYS_TO_KSEG0(0),
            MIPS_PHYS_TO_KSEG0(mips_cache_info.mci_picache_size));
}
Example 3: tx3900_icache_sync_range_16
void
tx3900_icache_sync_range_16(register_t va, vsize_t size)
{
        vaddr_t eva = round_line(va + size);

        va = trunc_line(va);

        if ((eva - va) >= mips_cache_info.mci_picache_size) {
                /* Just hit the whole thing. */
                va = MIPS_PHYS_TO_KSEG0(0);
                eva = MIPS_PHYS_TO_KSEG0(mips_cache_info.mci_picache_size);
        }

        tx3900_icache_do_inv_index_16(va, eva);
}
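The round_line()/trunc_line() helpers used here are per-file macros that align a range outward to the cache line size; for the 16-byte TX3900 lines handled in this example, plausible definitions (an assumption, mirroring the usual pattern in the cache_*.c sources) would be:

/* Align a virtual address range to 16-byte cache lines. */
#define round_line(x)   (((x) + 15) & ~(vaddr_t)15)     /* round end up */
#define trunc_line(x)   ((x) & ~(vaddr_t)15)            /* round start down */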
Example 4: r10k_icache_sync_range_index
void
r10k_icache_sync_range_index(vaddr_t va, vsize_t size)
{
        vaddr_t eva, orig_va;

        orig_va = va;

        eva = round_line(va + size);
        va = trunc_line(va);

        mips_dcache_wbinv_range_index(va, (eva - va));

        __asm volatile("sync");

        /*
         * Since we're doing Index ops, we expect to not be able
         * to access the address we've been given.  So, get the
         * bits that determine the cache index, and make a KSEG0
         * address out of them.
         */
        va = MIPS_PHYS_TO_KSEG0(orig_va & mips_picache_way_mask);

        eva = round_line(va + size);
        va = trunc_line(va);

        while (va < eva) {
                /* va+0/va+1: the low bit selects the way of the
                 * two-way set-associative R10000 icache. */
                cache_op_r4k_line(va+0, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
                cache_op_r4k_line(va+1, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
                va += 64;
        }
}
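cache_op_r4k_line() expands to a single MIPS cache instruction applied to the line at va. A hedged sketch of such a wrapper (the real macro is an asm macro in the MIPS cache headers; this version only illustrates the shape):

/* Issue one CACHE instruction of the given op on the line at va.
 * Illustrative only: op must be a compile-time constant ("i"). */
#define cache_op_r4k_line(va, op)                                       \
do {                                                                    \
        __asm volatile(                                                 \
                ".set push; .set noreorder;"                            \
                "cache %1, 0(%0);"                                      \
                ".set pop"                                              \
                :                                                       \
                : "r" (va), "i" (op)                                    \
                : "memory");                                            \
} while (0)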
Example 5: tx3900_pdcache_wbinv_all_4
void
tx3900_pdcache_wbinv_all_4(void)
{
        vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
        vaddr_t eva = va + mips_cache_info.mci_pdcache_size;
        volatile int *p;

        /*
         * No Index Invalidate for the TX3900 -- have to execute a
         * series of load instructions from the dummy buffer, instead.
         */
        p = tx3900_dummy_buffer;
        while (va < eva) {
                (void) *p++; (void) *p++; (void) *p++; (void) *p++;
                (void) *p++; (void) *p++; (void) *p++; (void) *p++;
                (void) *p++; (void) *p++; (void) *p++; (void) *p++;
                (void) *p++; (void) *p++; (void) *p++; (void) *p++;
                (void) *p++; (void) *p++; (void) *p++; (void) *p++;
                (void) *p++; (void) *p++; (void) *p++; (void) *p++;
                (void) *p++; (void) *p++; (void) *p++; (void) *p++;
                (void) *p++; (void) *p++; (void) *p++; (void) *p++;
                va += (32 * 4);
        }
}
Example 6: dmac3_start
void
dmac3_start(struct dmac3_softc *sc, vaddr_t addr, int len, int direction)
{
        struct dmac3reg *reg = sc->sc_reg;
        paddr_t pa;
        vaddr_t start, end, v;
        volatile uint32_t *p;

        if (reg->csr & DMAC3_CSR_ENABLE)
                dmac3_reset(sc);

        start = mips_trunc_page(addr);
        end = mips_round_page(addr + len);

        p = sc->sc_dmamap;
        for (v = start; v < end; v += PAGE_SIZE) {
                pa = kvtophys(v);
                /* Flush the cached page to RAM before the device reads it. */
                mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(pa), PAGE_SIZE);
                *p++ = 0;
                *p++ = (pa >> PGSHIFT) | 0xc0000000;
        }
        *p++ = 0;
        *p++ = 0x003fffff;

        addr &= PGOFSET;
        addr += sc->sc_dmaaddr;

        reg->len = len;
        reg->addr = addr;
        reg->intr = DMAC3_INTR_EOPIE | DMAC3_INTR_INTEN;
        reg->csr = DMAC3_CSR_ENABLE | direction | BURST_MODE | APAD_MODE;
}
Example 7: r4k_sdcache_wbinv_range_index_128
void
r4k_sdcache_wbinv_range_index_128(vaddr_t va, vsize_t size)
{
        vaddr_t eva;

        /*
         * Since we're doing Index ops, we expect to not be able
         * to access the address we've been given.  So, get the
         * bits that determine the cache index, and make a KSEG0
         * address out of them.
         */
        va = MIPS_PHYS_TO_KSEG0(va & (mips_cache_info.mci_sdcache_size - 1));

        eva = round_line(va + size);
        va = trunc_line(va);

        while ((eva - va) >= (32 * 128)) {
                cache_r4k_op_32lines_128(va,
                    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
                va += (32 * 128);
        }

        while (va < eva) {
                cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
                va += 128;
        }
}
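cache_r4k_op_32lines_128() is presumably an unrolled helper that applies the same cache op to 32 consecutive 128-byte lines, amortizing loop overhead. A compact sketch of the equivalent effect, built on the cache_op_r4k_line() sketch above (the real macro is fully unrolled asm; this loop only shows the intent):

/* Apply op to 32 consecutive 128-byte secondary-cache lines. */
#define cache_r4k_op_32lines_128(va, op)                                \
do {                                                                    \
        for (unsigned i_ = 0; i_ < 32; i_++)                            \
                cache_op_r4k_line((va) + i_ * 128, (op));               \
} while (0)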
Example 8: arc_bus_space_compose_handle
int
arc_bus_space_compose_handle(bus_space_tag_t bst, bus_addr_t addr,
    bus_size_t size, int flags, bus_space_handle_t *bshp)
{
        bus_space_handle_t bsh = bst->bs_vbase + (addr - bst->bs_start);

        /*
         * Since all buses are linearly mappable, we don't have to check
         * BUS_SPACE_MAP_LINEAR and BUS_SPACE_MAP_PREFETCHABLE.
         */
        if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0) {
                *bshp = bsh;
                return 0;
        }

        if (bsh < MIPS_KSEG1_START)     /* KUSEG or KSEG0 */
                panic("arc_bus_space_compose_handle: bad address 0x%x", bsh);

        if (bsh < MIPS_KSEG2_START) {   /* KSEG1 */
                *bshp = MIPS_PHYS_TO_KSEG0(MIPS_KSEG1_TO_PHYS(bsh));
                return 0;
        }

        /*
         * KSEG2:
         * Do not make the page cacheable in this case, since:
         * - the page to which this bus_space belongs might include
         *   other bus_spaces; or
         * - this bus might be mapped by a wired TLB entry, in which
         *   case we cannot manipulate the cacheable attribute with
         *   page granularity.
         */
#ifdef DIAGNOSTIC
        printf("arc_bus_space_compose_handle: ignore cacheable 0x%x\n", bsh);
#endif
        *bshp = bsh;
        return 0;
}
Example 9: platform_reset
/*
 * Perform a board-level soft-reset.
 * Note that this is not emulated by gxemul.
 */
void
platform_reset(void)
{
        char *c;

        c = (char *)MIPS_PHYS_TO_KSEG0(MALTA_SOFTRES);
        *c = MALTA_GORESET;
}
Example 10: r3k_icache_sync_all
void
r3k_icache_sync_all(void)
{
        vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
        vaddr_t eva = va + mips_cache_info.mci_picache_size;

        r3k_picache_do_inv(va, eva);
}
Example 11: cpu_uarea_alloc
void *
cpu_uarea_alloc(bool system)
{
        struct pglist pglist;
#ifdef _LP64
        const paddr_t high = mips_avail_end;
#else
        const paddr_t high = MIPS_KSEG1_START - MIPS_KSEG0_START;

        /*
         * Don't allocate a direct-mapped uarea if we aren't allocating
         * for a system lwp and we have memory that can't be mapped via
         * KSEG0.
         */
        if (!system && high > mips_avail_end)
                return NULL;
#endif
        int error;

        /*
         * Allocate a new physically contiguous uarea which can be
         * direct-mapped.
         */
        error = uvm_pglistalloc(USPACE, mips_avail_start, high,
            USPACE_ALIGN, 0, &pglist, 1, 1);
        if (error) {
#ifdef _LP64
                if (!system)
                        return NULL;
#endif
                panic("%s: uvm_pglistalloc failed: %d", __func__, error);
        }

        /*
         * Get the physical address from the first page.
         */
        const struct vm_page * const pg = TAILQ_FIRST(&pglist);
        KASSERT(pg != NULL);
        const paddr_t pa = VM_PAGE_TO_PHYS(pg);
        KASSERTMSG(pa >= mips_avail_start,
            "pa (%#"PRIxPADDR") < mips_avail_start (%#"PRIxPADDR")",
            pa, mips_avail_start);
        KASSERTMSG(pa < mips_avail_end,
            "pa (%#"PRIxPADDR") >= mips_avail_end (%#"PRIxPADDR")",
            pa, mips_avail_end);

        /*
         * We need to return a direct-mapped VA for the pa.
         */
#ifdef _LP64
        const vaddr_t va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
#else
        const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
#endif

        return (void *)va;
}
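On _LP64 kernels the direct map used above is XKPHYS rather than KSEG0: a 64-bit segment whose bits 61:59 encode the cache coherency attribute, so it can cover all of physical memory instead of KSEG0's 512 MB. A rough sketch of the cached variant, under the assumption of the standard MIPS64 encoding (not copied from the real headers):

#include <stdint.h>

#define MIPS_XKPHYS_START       0x8000000000000000ULL
#define CCA_CACHEABLE           3ULL    /* cacheable, noncoherent */

/* Direct-map a physical address with the given cache attribute. */
#define MIPS_PHYS_TO_XKPHYS(cca, pa) \
        (MIPS_XKPHYS_START | ((uint64_t)(cca) << 59) | (uint64_t)(pa))
#define MIPS_PHYS_TO_XKPHYS_CACHED(pa) \
        MIPS_PHYS_TO_XKPHYS(CCA_CACHEABLE, (pa))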
Example 12: r3k_pdcache_wbinv_all
void
r3k_pdcache_wbinv_all(void)
{
        vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
        vaddr_t eva = va + mips_cache_info.mci_pdcache_size;

        /* Cache is write-through. */
        r3k_pdcache_do_inv(va, eva);
}
Example 13: mipsNN_icache_sync_range_index_32
void
mipsNN_icache_sync_range_index_32(vaddr_t va, vsize_t size)
{
        struct mips_cache_info * const mci = &mips_cache_info;
        vaddr_t eva, tmpva;
        int i, stride, loopcount;

        /*
         * Since we're doing Index ops, we expect to not be able
         * to access the address we've been given.  So, get the
         * bits that determine the cache index, and make a KSEG0
         * address out of them.
         */
        va = MIPS_PHYS_TO_KSEG0(va & mci->mci_picache_way_mask);

        eva = round_line32(va + size);
        va = trunc_line32(va);

        /*
         * If we are going to flush more than is in a way, we are flushing
         * everything.
         */
        if (eva - va >= mci->mci_picache_way_size) {
                mipsNN_icache_sync_all_32();
                return;
        }

        /*
         * GCC generates better code in the loops if we reference local
         * copies of these global variables.
         */
        stride = picache_stride;
        loopcount = picache_loopcount;

        mips_intern_dcache_wbinv_range_index(va, (eva - va));

        while ((eva - va) >= (8 * 32)) {
                tmpva = va;
                for (i = 0; i < loopcount; i++, tmpva += stride) {
                        cache_r4k_op_8lines_32(tmpva,
                            CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
                }
                va += 8 * 32;
        }

        while (va < eva) {
                tmpva = va;
                for (i = 0; i < loopcount; i++, tmpva += stride) {
                        cache_op_r4k_line(tmpva,
                            CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
                }
                va += 32;
        }
}
Example 14: mipsNN_pdcache_wbinv_range_index_32_intern
static void
mipsNN_pdcache_wbinv_range_index_32_intern(vaddr_t va, vaddr_t eva)
{

        /*
         * Since we're doing Index ops, we expect to not be able
         * to access the address we've been given.  So, get the
         * bits that determine the cache index, and make a KSEG0
         * address out of them.
         */
        va = MIPS_PHYS_TO_KSEG0(va);
        eva = MIPS_PHYS_TO_KSEG0(eva);

        for (; (eva - va) >= (8 * 32); va += 8 * 32) {
                cache_r4k_op_8lines_32(va,
                    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
        }

        for (; va < eva; va += 32) {
                cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
        }
}
Example 15: r4k_pdcache_wbinv_all_32
void
r4k_pdcache_wbinv_all_32(void)
{
        vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
        vaddr_t eva = va + mips_pdcache_size;

        while (va < eva) {
                cache_r4k_op_32lines_32(va,
                    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
                va += (32 * 32);
        }
}