This article collects typical usage examples of the C++ function PHYS_TO_VM_PAGE. If you have been wondering what exactly PHYS_TO_VM_PAGE does, how to call it, or what real uses of it look like, the curated code examples below may help.
Fifteen code examples of PHYS_TO_VM_PAGE are shown below, sorted by popularity by default.
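All of the examples share one pattern: PHYS_TO_VM_PAGE() translates a physical address into a pointer to the struct vm_page that describes the page containing it, or NULL when the address is not managed RAM. As a minimal illustrative sketch of that pattern (not taken from any example below; pa is hypothetical, and the NetBSD/OpenBSD spellings paddr_t/uvm_pagefree() are used, where the FreeBSD-derived examples use vm_paddr_t/vm_page_free()):

    paddr_t pa = /* e.g. obtained from pmap_extract() */;
    struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
    if (pg == NULL)
        return;            /* device range or memory hole: no vm_page */
    uvm_pagefree(pg);      /* or any other per-page operation */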

Example 1: _bus_dmamem_free
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
    struct vm_page *m;
    bus_addr_t addr;
    struct pglist mlist;
    int curseg;

#ifdef DEBUG_DMA
    printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif  /* DEBUG_DMA */

    /*
     * Build a list of pages to free back to the VM system.
     */
    TAILQ_INIT(&mlist);
    for (curseg = 0; curseg < nsegs; curseg++) {
        for (addr = segs[curseg].ds_addr;
            addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
            addr += PAGE_SIZE) {
            m = PHYS_TO_VM_PAGE(addr);
            TAILQ_INSERT_TAIL(&mlist, m, pageq);
        }
    }
    uvm_pglistfree(&mlist);
}
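
This free routine pairs with bus_dmamem_alloc() in the machine-independent bus_dma API. A minimal usage sketch, assuming a valid DMA tag t (hypothetical driver code, not part of the excerpt above):

    bus_dma_segment_t seg;
    int rseg;

    /* allocate one page-sized, page-aligned DMA-safe segment */
    if (bus_dmamem_alloc(t, PAGE_SIZE, PAGE_SIZE, 0,
        &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
        return;
    /* ... bus_dmamem_map(), dmamap load, I/O ... */

    /* hand the pages back; this ends up in the code above */
    bus_dmamem_free(t, &seg, rseg);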

Example 2: cpu_uarea_free
/*
 * Return true if we freed it, false if we didn't.
 */
bool
cpu_uarea_free(void *va)
{
#ifdef _LP64
    if (!MIPS_XKPHYS_P(va))
        return false;
    paddr_t pa = MIPS_XKPHYS_TO_PHYS(va);
#else
    if (!MIPS_KSEG0_P(va))
        return false;
    paddr_t pa = MIPS_KSEG0_TO_PHYS(va);
#endif

#ifdef MIPS3_PLUS
    if (MIPS_CACHE_VIRTUAL_ALIAS)
        mips_dcache_inv_range((vaddr_t)va, USPACE);
#endif

    for (const paddr_t epa = pa + USPACE; pa < epa; pa += PAGE_SIZE) {
        struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
        KASSERT(pg != NULL);
        uvm_pagefree(pg);
    }
    return true;
}
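
The guards at the top work because KSEG0 (and XKPHYS under _LP64) are direct-mapped windows: virtual and physical addresses differ by a fixed transformation, so no page tables are consulted. A small illustrative check under the same NetBSD/MIPS assumptions (hypothetical va; 32-bit spelling):

    vaddr_t va = /* some kernel virtual address */;
    if (MIPS_KSEG0_P(va)) {
        paddr_t pa = MIPS_KSEG0_TO_PHYS(va);
        /* the mapping is a constant offset, so it round-trips */
        KASSERT(MIPS_PHYS_TO_KSEG0(pa) == va);
    }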

Example 3: _bus_dmamem_free
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
    struct vm_page *m;
    bus_addr_t addr;
    struct pglist mlist;
    int curseg;

    DPRINTF(("bus_dmamem_free: t = %p, segs = %p, nsegs = %d\n",
        t, segs, nsegs));

    /*
     * Build a list of pages to free back to the VM system.
     */
    TAILQ_INIT(&mlist);
    for (curseg = 0; curseg < nsegs; curseg++) {
        DPRINTF(("bus_dmamem_free: segs[%d]: ds_addr = 0x%08lx, "
            "ds_len = %ld\n", curseg, segs[curseg].ds_addr,
            segs[curseg].ds_len));
        for (addr = segs[curseg].ds_addr;
            addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
            addr += PAGE_SIZE) {
            m = PHYS_TO_VM_PAGE(addr);
            DPRINTF(("bus_dmamem_free: m = %p\n", m));
            TAILQ_INSERT_TAIL(&mlist, m, pageq);
        }
    }
    uvm_pglistfree(&mlist);
}

Example 4: _dmamap_sync
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{
    int i;
    bus_size_t minlen, wlen;
    bus_addr_t pa, addr;
    struct vm_page *pg;

    for (i = 0; i < map->dm_nsegs && len != 0; i++) {
        /* Find the beginning segment. */
        if (offset >= map->dm_segs[i].ds_len) {
            offset -= map->dm_segs[i].ds_len;
            continue;
        }

        minlen = len < map->dm_segs[i].ds_len - offset ?
            len : map->dm_segs[i].ds_len - offset;
        addr = map->dm_segs[i].ds_addr + offset;

        switch (op) {
        case BUS_DMASYNC_POSTWRITE:
            for (pa = trunc_page(addr), wlen = 0;
                pa < round_page(addr + minlen);
                pa += PAGE_SIZE) {
                pg = PHYS_TO_VM_PAGE(pa);
                if (pg != NULL)
                    atomic_clearbits_int(&pg->pg_flags,
                        PG_PMAP_EXE);
            }
        }
    }
}
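
Drivers reach this code through the bus_dmamap_sync() interface rather than calling it directly. A hedged usage sketch (hypothetical tag and map; the op value is the one the switch above handles):

    /* synchronize the whole map once the DMA operation completes;
     * the POSTWRITE case above strips PG_PMAP_EXE from each page */
    bus_dmamap_sync(t, map, 0, map->dm_mapsize, BUS_DMASYNC_POSTWRITE);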

Example 5: __mm_mem_addr
/*
 * bool __mm_mem_addr(paddr_t pa):
 *	Check whether the specified physical address belongs to a
 *	memory (RAM) device.
 */
bool
__mm_mem_addr(paddr_t pa)
{
    return (atop(pa) < vm_physmem[0].start || PHYS_TO_VM_PAGE(pa) != NULL);
}

Example 6: uma_small_free
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
    vm_page_t m;

    m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)mem));
    m->wire_count--;
    vm_page_free(m);
    atomic_subtract_int(&cnt.v_wire_count, 1);
}
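
For contrast, the allocation side of this sparc64 small-page allocator typically grabs a wired page and hands out its direct-mapped address. A minimal sketch under the same assumptions (a fragment of a hypothetical uma_small_alloc(), with TLB_PHYS_TO_DIRECT as the inverse of TLB_DIRECT_TO_PHYS; wait-flag handling omitted):

    vm_page_t m;

    /* grab a wired page with no backing object */
    m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
    if (m == NULL)
        return (NULL);
    /* return its address inside the direct-mapped window */
    return ((void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)));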

Example 7: XX_PhysToVirt
void *
XX_PhysToVirt(physAddress_t addr)
{
    struct pv_entry *pv;
    vm_page_t page;
    int cpu;

    /* Check CCSR */
    if (addr >= ccsrbar_pa && addr < ccsrbar_pa + ccsrbar_size)
        return ((void *)((vm_offset_t)(addr - ccsrbar_pa) +
            ccsrbar_va));

    cpu = PCPU_GET(cpuid);

    /* Handle BMAN mappings */
    if ((addr >= XX_PInfo.portal_ce_pa[BM_PORTAL][cpu]) &&
        (addr < XX_PInfo.portal_ce_pa[BM_PORTAL][cpu] +
        XX_PInfo.portal_ce_size[BM_PORTAL][cpu]))
        return ((void *)(XX_PInfo.portal_ce_va[BM_PORTAL] +
            (vm_offset_t)(addr - XX_PInfo.portal_ce_pa[BM_PORTAL][cpu])));
    if ((addr >= XX_PInfo.portal_ci_pa[BM_PORTAL][cpu]) &&
        (addr < XX_PInfo.portal_ci_pa[BM_PORTAL][cpu] +
        XX_PInfo.portal_ci_size[BM_PORTAL][cpu]))
        return ((void *)(XX_PInfo.portal_ci_va[BM_PORTAL] +
            (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[BM_PORTAL][cpu])));

    /* Handle QMAN mappings */
    if ((addr >= XX_PInfo.portal_ce_pa[QM_PORTAL][cpu]) &&
        (addr < XX_PInfo.portal_ce_pa[QM_PORTAL][cpu] +
        XX_PInfo.portal_ce_size[QM_PORTAL][cpu]))
        return ((void *)(XX_PInfo.portal_ce_va[QM_PORTAL] +
            (vm_offset_t)(addr - XX_PInfo.portal_ce_pa[QM_PORTAL][cpu])));
    if ((addr >= XX_PInfo.portal_ci_pa[QM_PORTAL][cpu]) &&
        (addr < XX_PInfo.portal_ci_pa[QM_PORTAL][cpu] +
        XX_PInfo.portal_ci_size[QM_PORTAL][cpu]))
        return ((void *)(XX_PInfo.portal_ci_va[QM_PORTAL] +
            (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[QM_PORTAL][cpu])));

    page = PHYS_TO_VM_PAGE(addr);
    pv = TAILQ_FIRST(&page->md.pv_list);
    if (pv != NULL)
        return ((void *)(pv->pv_va + ((vm_offset_t)addr & PAGE_MASK)));

    if (PMAP_HAS_DMAP)
        return ((void *)(uintptr_t)PHYS_TO_DMAP(addr));

    printf("NetCommSW: "
        "Unable to translate physical address 0x%09jx!\n", (uintmax_t)addr);
    return (NULL);
}

Example 8: db_page_cmd
void
db_page_cmd(db_expr_t addr, bool have_addr, db_expr_t count, const char *modif)
{
    if (!have_addr) {
        db_printf("Need paddr for page\n");
        return;
    }

    db_printf("pa %llx pg %p\n", (unsigned long long)addr,
        PHYS_TO_VM_PAGE(addr));
}

Example 9: uma_small_free
void
uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
    vm_page_t m;
    vm_paddr_t pa;

    pa = DMAP_TO_PHYS((vm_offset_t)mem);
    m = PHYS_TO_VM_PAGE(pa);
    m->wire_count--;
    vm_page_free(m);
    atomic_subtract_int(&vm_cnt.v_wire_count, 1);
}

Example 10: vm_pgmoveco
/*
 * Identify the physical page mapped at the given kernel virtual
 * address.  Insert this physical page into the given address space at
 * the given virtual address, replacing the physical page, if any,
 * that already exists there.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
    vm_map_t map = mapa;
    vm_page_t kern_pg, user_pg;
    vm_object_t uobject;
    vm_map_entry_t entry;
    vm_pindex_t upindex;
    vm_prot_t prot;
    boolean_t wired;

    KASSERT((uaddr & PAGE_MASK) == 0,
        ("vm_pgmoveco: uaddr is not page aligned"));

    /*
     * Herein the physical page is validated and dirtied.  It is
     * unwired in sf_buf_mext().
     */
    kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
    kern_pg->valid = VM_PAGE_BITS_ALL;
    KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
        ("vm_pgmoveco: kern_pg is not correctly wired"));

    if ((vm_map_lookup(&map, uaddr,
        VM_PROT_WRITE, &entry, &uobject,
        &upindex, &prot, &wired)) != KERN_SUCCESS) {
        return (EFAULT);
    }
    VM_OBJECT_LOCK(uobject);
retry:
    if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
        if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
            goto retry;
        vm_page_lock_queues();
        pmap_remove_all(user_pg);
        vm_page_free(user_pg);
    } else {
        /*
         * Even if a physical page does not exist in the
         * object chain's first object, a physical page from a
         * backing object may be mapped read only.
         */
        if (uobject->backing_object != NULL)
            pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
        vm_page_lock_queues();
    }
    vm_page_insert(kern_pg, uobject, upindex);
    vm_page_dirty(kern_pg);
    vm_page_unlock_queues();
    VM_OBJECT_UNLOCK(uobject);
    vm_map_lookup_done(map, entry);
    return (KERN_SUCCESS);
}

Example 11: vsunlock
int
vsunlock(
    user_addr_t addr,
    user_size_t len,
    __unused int dirtied)
{
#if FIXME /* [ */
    pmap_t pmap;
    vm_page_t pg;
    vm_map_offset_t vaddr;
    ppnum_t paddr;
#endif  /* FIXME ] */
    kern_return_t kret;
    vm_map_t map;

    map = current_map();

#if FIXME /* [ */
    if (dirtied) {
        pmap = get_task_pmap(current_task());
        for (vaddr = vm_map_trunc_page(addr, PAGE_MASK);
            vaddr < vm_map_round_page(addr + len, PAGE_MASK);
            vaddr += PAGE_SIZE) {
            paddr = pmap_extract(pmap, vaddr);
            pg = PHYS_TO_VM_PAGE(paddr);
            vm_page_set_modified(pg);
        }
    }
#endif  /* FIXME ] */
#ifdef lint
    dirtied++;
#endif  /* lint */

    kret = vm_map_unwire(map,
        vm_map_trunc_page(addr,
            vm_map_page_mask(map)),
        vm_map_round_page(addr + len,
            vm_map_page_mask(map)),
        FALSE);
    switch (kret) {
    case KERN_SUCCESS:
        return (0);
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return (ENOMEM);
    case KERN_PROTECTION_FAILURE:
        return (EACCES);
    default:
        return (EINVAL);
    }
}
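
vsunlock() undoes vslock(), which wires the same user range beforehand. A hypothetical round trip, assuming the usual vslock() counterpart with an errno-style return (not shown in the excerpt above):

    if (vslock(addr, len) != 0)      /* wire the user range first */
        return (EFAULT);
    /* ... safely access the wired user memory ... */
    (void)vsunlock(addr, len, 1);    /* 1: we dirtied it (currently unused) */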

Example 12: v2sizev
static u_long *
v2sizev(vm_offset_t va)
{
    vm_paddr_t pa;
    struct vm_page *p;

    pa = pmap_kextract(va);
    if (pa == 0)
        panic("MemGuard detected double-free of %p", (void *)va);
    p = PHYS_TO_VM_PAGE(pa);
    KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
        ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
    return (&p->plinks.memguard.v);
}

Example 13: uma_small_free
void
uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
    vm_page_t m;

    if (!hw_direct_map)
        pmap_remove(kernel_pmap, (vm_offset_t)mem,
            (vm_offset_t)mem + PAGE_SIZE);

    m = PHYS_TO_VM_PAGE((vm_offset_t)mem);
    m->wire_count--;
    vm_page_free(m);
    atomic_subtract_int(&vm_cnt.v_wire_count, 1);
    atomic_subtract_int(&hw_uma_mdpages, 1);
}

Example 14: uvm_km_pgremove_intrsafe
void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
    struct vm_page *pg;
    vaddr_t va;
    paddr_t pa;

    for (va = start; va < end; va += PAGE_SIZE) {
        if (!pmap_extract(pmap_kernel(), va, &pa))
            continue;
        pg = PHYS_TO_VM_PAGE(pa);
        if (pg == NULL)
            panic("uvm_km_pgremove_intrsafe: no page");
        uvm_pagefree(pg);
    }
}
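
Callers tear down an interrupt-safe kernel mapping in two steps: free the physical pages (the routine above, which still needs the mappings in place for pmap_extract()), then remove the mappings themselves. A hedged sketch in the same uvm style (hypothetical page-aligned va and size):

    uvm_km_pgremove_intrsafe(va, va + size);   /* free the backing pages */
    pmap_kremove(va, size);                    /* drop the kernel mappings */
    pmap_update(pmap_kernel());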

Example 15: contigfree
/*
 * No requirements.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
    vm_paddr_t pa;
    vm_page_t m;

    if (size == 0)
        panic("contigfree: size must not be 0");
    size = round_page(size);

    pa = pmap_extract(&kernel_pmap, (vm_offset_t)addr);
    pmap_qremove((vm_offset_t)addr, size / PAGE_SIZE);
    kmem_free(&kernel_map, (vm_offset_t)addr, size);

    m = PHYS_TO_VM_PAGE(pa);
    vm_page_free_contig(m, size);
}
}