This page collects typical usage examples of the C++ VM_PAGE_TO_PHYS function. If you are wondering what VM_PAGE_TO_PHYS does, how to call it, or what real-world uses look like, the hand-picked code examples below may help.
Fifteen VM_PAGE_TO_PHYS code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
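Before the examples, a quick orientation: VM_PAGE_TO_PHYS(pg) returns the physical address backing a struct vm_page, which is then handed to pmap, DMA, or page-table code. A minimal sketch of the recurring pattern, assuming a NetBSD/OpenBSD-style UVM kernel like several of the examples below (map_one_kernel_page is a hypothetical helper written for illustration, not taken from any of them):

/*
 * Hypothetical helper, for illustration only: allocate one zeroed page
 * and wire it into the kernel map at va.  Mirrors the pattern used in
 * gdt_init()/gdt_grow() below; not part of any real kernel source.
 */
static void
map_one_kernel_page(vaddr_t va)
{
    struct vm_page *pg;

    /* Grab a zero-filled physical page from UVM. */
    pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
    if (pg == NULL)
        panic("map_one_kernel_page: out of pages");

    /* VM_PAGE_TO_PHYS() converts the vm_page to its physical address. */
    pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ | VM_PROT_WRITE);
    pmap_update(pmap_kernel());
}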
Example 1: _hpcmips_bd_mem_alloc_range
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_hpcmips_bd_mem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary,
    bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
    vaddr_t curaddr, lastaddr;
    struct vm_page *m;
    struct pglist mlist;
    int curseg, error;

#ifdef DIAGNOSTIC
    extern paddr_t avail_start, avail_end;    /* XXX */

    high = high < (avail_end - PAGE_SIZE) ? high : (avail_end - PAGE_SIZE);
    low = low > avail_start ? low : avail_start;
#endif

    /* Always round the size. */
    size = round_page(size);

    /*
     * Allocate pages from the VM system.
     */
    error = uvm_pglistalloc(size, low, high, alignment, boundary,
        &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
    if (error)
        return (error);

    /*
     * Compute the location, size, and number of segments actually
     * returned by the VM code.
     */
    m = mlist.tqh_first;
    curseg = 0;
    lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
    segs[curseg].ds_len = PAGE_SIZE;
    m = m->pageq.queue.tqe_next;

    for (; m != NULL; m = m->pageq.queue.tqe_next) {
        curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
        if (curaddr < low || curaddr >= high) {
            printf("uvm_pglistalloc returned non-sensical"
                " address 0x%lx\n", curaddr);
            panic("_hpcmips_bd_mem_alloc");
        }
#endif
        if (curaddr == (lastaddr + PAGE_SIZE))
            segs[curseg].ds_len += PAGE_SIZE;
        else {
            curseg++;
            segs[curseg].ds_addr = curaddr;
            segs[curseg].ds_len = PAGE_SIZE;
        }
        lastaddr = curaddr;
    }

    *rsegs = curseg + 1;

    return (0);
}
Example 2: uma_small_alloc
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
    void *va;
    vm_page_t m;
    int pflags;

    *flags = UMA_SLAB_PRIV;
    pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;

    for (;;) {
        m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
        if (m == NULL) {
            if (wait & M_NOWAIT)
                return (NULL);
            VM_WAIT;
        } else
            break;
    }

    va = (void *) VM_PAGE_TO_PHYS(m);
    if (!hw_direct_map)
        pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
    if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
        bzero(va, PAGE_SIZE);
    atomic_add_int(&hw_uma_mdpages, 1);
    return (va);
}
Example 3: _dmamem_alloc_range
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, vaddr_t low, vaddr_t high)
{
    vaddr_t curaddr, lastaddr;
    struct vm_page *m;
    struct pglist mlist;
    int curseg, error, plaflag;

    /* Always round the size. */
    size = round_page(size);

    /*
     * Allocate pages from the VM system.
     */
    plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
    if (flags & BUS_DMA_ZERO)
        plaflag |= UVM_PLA_ZERO;

    TAILQ_INIT(&mlist);
    error = uvm_pglistalloc(size, low, high,
        alignment, boundary, &mlist, nsegs, plaflag);
    if (error)
        return (error);

    /*
     * Compute the location, size, and number of segments actually
     * returned by the VM code.
     */
    m = TAILQ_FIRST(&mlist);
    curseg = 0;
    lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
    segs[curseg].ds_len = PAGE_SIZE;
    m = TAILQ_NEXT(m, pageq);

    for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
        curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
        if (curaddr < low || curaddr >= high) {
            printf("vm_page_alloc_memory returned non-sensical"
                " address 0x%lx\n", curaddr);
            panic("dmamem_alloc_range");
        }
#endif
        if (curaddr == (lastaddr + PAGE_SIZE))
            segs[curseg].ds_len += PAGE_SIZE;
        else {
            curseg++;
            segs[curseg].ds_addr = curaddr;
            segs[curseg].ds_len = PAGE_SIZE;
        }
        lastaddr = curaddr;
    }

    *rsegs = curseg + 1;

    return (0);
}
Example 4: _bus_dmamem_alloc
/*
 * Allocate memory safe for DMA.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
    struct pglist mlist;
    paddr_t curaddr, lastaddr;
    struct vm_page *m;
    int curseg, error, plaflag;

    DPRINTF(("bus_dmamem_alloc: t = %p, size = %ld, alignment = %ld, boundary = %ld, segs = %p, nsegs = %d, rsegs = %p, flags = %x\n", t, size, alignment, boundary, segs, nsegs, rsegs, flags));

    /* Always round the size. */
    size = round_page(size);

    /*
     * Allocate the pages from the VM system.
     */
    plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
    if (flags & BUS_DMA_ZERO)
        plaflag |= UVM_PLA_ZERO;

    TAILQ_INIT(&mlist);
    error = uvm_pglistalloc(size, 0, -1, alignment, boundary,
        &mlist, nsegs, plaflag);
    if (error)
        return (error);

    /*
     * Compute the location, size, and number of segments actually
     * returned by the VM code.
     */
    m = mlist.tqh_first;
    curseg = 0;
    lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
    segs[curseg].ds_len = PAGE_SIZE;
    DPRINTF(("bus_dmamem_alloc: m = %p, lastaddr = 0x%08lx\n", m, lastaddr));

    while ((m = TAILQ_NEXT(m, pageq)) != NULL) {
        curaddr = VM_PAGE_TO_PHYS(m);
        DPRINTF(("bus_dmamem_alloc: m = %p, curaddr = 0x%08lx, lastaddr = 0x%08lx\n", m, curaddr, lastaddr));
        if (curaddr == (lastaddr + PAGE_SIZE)) {
            segs[curseg].ds_len += PAGE_SIZE;
        } else {
            DPRINTF(("bus_dmamem_alloc: new segment\n"));
            curseg++;
            segs[curseg].ds_addr = curaddr;
            segs[curseg].ds_len = PAGE_SIZE;
        }
        lastaddr = curaddr;
    }

    *rsegs = curseg + 1;
    DPRINTF(("bus_dmamem_alloc: curseg = %d, *rsegs = %d\n", curseg, *rsegs));

    return (0);
}
Example 5: _bus_dmamem_alloc_range
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
    paddr_t curaddr, lastaddr;
    struct vm_page *m;
    struct pglist mlist;
    int curseg, error;

    /* Always round the size. */
    size = round_page(size);

    high = avail_end - PAGE_SIZE;

    /*
     * Allocate pages from the VM system.
     */
    error = uvm_pglistalloc(size, low, high, alignment, boundary,
        &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
    if (error)
        return error;

    /*
     * Compute the location, size, and number of segments actually
     * returned by the VM code.
     */
    m = TAILQ_FIRST(&mlist);
    curseg = 0;
    lastaddr = segs[curseg]._ds_paddr = VM_PAGE_TO_PHYS(m);
    segs[curseg].ds_addr = segs[curseg]._ds_paddr + t->dma_offset;
    segs[curseg].ds_len = PAGE_SIZE;
    m = TAILQ_NEXT(m, pageq.queue);

    for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
        curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
        if (curaddr < avail_start || curaddr >= high) {
            printf("uvm_pglistalloc returned non-sensical"
                " address 0x%llx\n", (long long)curaddr);
            panic("_bus_dmamem_alloc_range");
        }
#endif
        if (curaddr == (lastaddr + PAGE_SIZE))
            segs[curseg].ds_len += PAGE_SIZE;
        else {
            curseg++;
            segs[curseg].ds_addr = curaddr + t->dma_offset;
            segs[curseg].ds_len = PAGE_SIZE;
            segs[curseg]._ds_paddr = curaddr;
        }
        lastaddr = curaddr;
    }

    *rsegs = curseg + 1;

    return 0;
}
Example 6: _bus_dmamem_alloc_range_common
/*
 * _bus_dmamem_alloc_range_common --
 *    Allocate physical memory from the specified physical address range.
 */
int
_bus_dmamem_alloc_range_common(bus_dma_tag_t t,
    bus_size_t size,
    bus_size_t alignment,
    bus_size_t boundary,
    bus_dma_segment_t *segs,
    int nsegs,
    int *rsegs,
    int flags,
    paddr_t low,
    paddr_t high)
{
    paddr_t curaddr, lastaddr;
    struct vm_page *m;
    struct pglist mlist;
    int curseg, error;

    /* Always round the size. */
    size = round_page(size);

    /* Allocate pages from the VM system. */
    error = uvm_pglistalloc(size, low, high, alignment, boundary,
        &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
    if (__predict_false(error != 0))
        return (error);

    /*
     * Compute the location, size, and number of segments actually
     * returned by the VM system.
     */
    m = TAILQ_FIRST(&mlist);
    curseg = 0;
    lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
    segs[curseg].ds_len = PAGE_SIZE;
    m = TAILQ_NEXT(m, pageq.queue);

    for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
        curaddr = VM_PAGE_TO_PHYS(m);
        KASSERT(curaddr >= low);
        KASSERT(curaddr < high);
        if (curaddr == (lastaddr + PAGE_SIZE))
            segs[curseg].ds_len += PAGE_SIZE;
        else {
            curseg++;
            segs[curseg].ds_addr = curaddr;
            segs[curseg].ds_len = PAGE_SIZE;
        }
        lastaddr = curaddr;
    }

    *rsegs = curseg + 1;

    return (0);
}
Example 7: efi_1t1_pte
static pt_entry_t *
efi_1t1_pte(vm_offset_t va)
{
    pml4_entry_t *pml4e;
    pdp_entry_t *pdpe;
    pd_entry_t *pde;
    pt_entry_t *pte;
    vm_page_t m;
    vm_pindex_t pml4_idx, pdp_idx, pd_idx;
    vm_paddr_t mphys;

    pml4_idx = pmap_pml4e_index(va);
    pml4e = &efi_pml4[pml4_idx];
    if (*pml4e == 0) {
        m = efi_1t1_page(1 + pml4_idx);
        mphys = VM_PAGE_TO_PHYS(m);
        *pml4e = mphys | X86_PG_RW | X86_PG_V;
    } else {
        mphys = *pml4e & ~PAGE_MASK;
    }

    pdpe = (pdp_entry_t *)PHYS_TO_DMAP(mphys);
    pdp_idx = pmap_pdpe_index(va);
    pdpe += pdp_idx;
    if (*pdpe == 0) {
        m = efi_1t1_page(1 + NPML4EPG + (pml4_idx + 1) * (pdp_idx + 1));
        mphys = VM_PAGE_TO_PHYS(m);
        *pdpe = mphys | X86_PG_RW | X86_PG_V;
    } else {
        mphys = *pdpe & ~PAGE_MASK;
    }

    pde = (pd_entry_t *)PHYS_TO_DMAP(mphys);
    pd_idx = pmap_pde_index(va);
    pde += pd_idx;
    if (*pde == 0) {
        m = efi_1t1_page(1 + NPML4EPG + NPML4EPG * NPDPEPG +
            (pml4_idx + 1) * (pdp_idx + 1) * (pd_idx + 1));
        mphys = VM_PAGE_TO_PHYS(m);
        *pde = mphys | X86_PG_RW | X86_PG_V;
    } else {
        mphys = *pde & ~PAGE_MASK;
    }

    pte = (pt_entry_t *)PHYS_TO_DMAP(mphys);
    pte += pmap_pte_index(va);
    KASSERT(*pte == 0, ("va %#jx *pt %#jx", va, *pte));

    return (pte);
}
Example 8: efi_1t1_l3
static pt_entry_t *
efi_1t1_l3(vm_offset_t va)
{
    pd_entry_t *l0, *l1, *l2;
    pt_entry_t *l3;
    vm_pindex_t l0_idx, l1_idx, l2_idx;
    vm_page_t m;
    vm_paddr_t mphys;

    l0_idx = pmap_l0_index(va);
    l0 = &efi_l0[l0_idx];
    if (*l0 == 0) {
        m = efi_1t1_page(1 + l0_idx);
        mphys = VM_PAGE_TO_PHYS(m);
        *l0 = mphys | L0_TABLE;
    } else {
        mphys = *l0 & ~ATTR_MASK;
    }

    l1 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
    l1_idx = pmap_l1_index(va);
    l1 += l1_idx;
    if (*l1 == 0) {
        m = efi_1t1_page(1 + L0_ENTRIES + (l0_idx + 1) * (l1_idx + 1));
        mphys = VM_PAGE_TO_PHYS(m);
        *l1 = mphys | L1_TABLE;
    } else {
        mphys = *l1 & ~ATTR_MASK;
    }

    l2 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
    l2_idx = pmap_l2_index(va);
    l2 += l2_idx;
    if (*l2 == 0) {
        m = efi_1t1_page(1 + L0_ENTRIES + L0_ENTRIES * Ln_ENTRIES +
            (l0_idx + 1) * (l1_idx + 1) * (l2_idx + 1));
        mphys = VM_PAGE_TO_PHYS(m);
        *l2 = mphys | L2_TABLE;
    } else {
        mphys = *l2 & ~ATTR_MASK;
    }

    l3 = (pt_entry_t *)PHYS_TO_DMAP(mphys);
    l3 += pmap_l3_index(va);
    KASSERT(*l3 == 0, ("%s: Already mapped: va %#jx *pt %#jx", __func__,
        va, *l3));

    return (l3);
}
Example 9: gdt_grow
/*
 * Grow the GDT.
 */
void
gdt_grow(int which)
{
    size_t old_len, new_len;
    CPU_INFO_ITERATOR cii;
    struct cpu_info *ci;
    struct vm_page *pg;
    vaddr_t va;

    old_len = gdt_size[which] * sizeof(gdt[0]);
    gdt_size[which] <<= 1;
    new_len = old_len << 1;

#ifdef XEN
    if (which != 0) {
        size_t max_len = MAXGDTSIZ * sizeof(gdt[0]);

        if (old_len == 0) {
            gdt_size[which] = MINGDTSIZ;
            new_len = gdt_size[which] * sizeof(gdt[0]);
        }
        for (va = (vaddr_t)(cpu_info_primary.ci_gdt) + old_len + max_len;
            va < (vaddr_t)(cpu_info_primary.ci_gdt) + new_len + max_len;
            va += PAGE_SIZE) {
            while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
                == NULL) {
                uvm_wait("gdt_grow");
            }
            pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                VM_PROT_READ | VM_PROT_WRITE);
        }
        return;
    }
#endif

    for (CPU_INFO_FOREACH(cii, ci)) {
        for (va = (vaddr_t)(ci->ci_gdt) + old_len;
            va < (vaddr_t)(ci->ci_gdt) + new_len;
            va += PAGE_SIZE) {
            while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO)) ==
                NULL) {
                uvm_wait("gdt_grow");
            }
            pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                VM_PROT_READ | VM_PROT_WRITE);
        }
    }
    pmap_update(pmap_kernel());
}
Example 10: uma_small_alloc
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
    static vm_pindex_t color;
    void *va;
    vm_page_t m;
    int pflags;

    *flags = UMA_SLAB_PRIV;
    if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
        pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
    else
        pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
    if (wait & M_ZERO)
        pflags |= VM_ALLOC_ZERO;

    for (;;) {
        m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ);
        if (m == NULL) {
            if (wait & M_NOWAIT)
                return (NULL);
            VM_WAIT;
        } else
            break;
    }

    va = (void *)IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
    if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
        bzero(va, PAGE_SIZE);
    return (va);
}
Example 11: uma_small_alloc
void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
{
    vm_paddr_t pa;
    vm_page_t m;
    int pflags;
    void *va;

    *flags = UMA_SLAB_PRIV;
    pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;

    for (;;) {
        m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, pflags);
        if (m == NULL) {
            if (wait & M_NOWAIT)
                return (NULL);
            else
                pmap_grow_direct_page_cache();
        } else
            break;
    }

    pa = VM_PAGE_TO_PHYS(m);
    va = (void *)MIPS_PHYS_TO_DIRECT(pa);
    if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
        bzero(va, PAGE_SIZE);
    return (va);
}
Example 12: _bus_dmamap_load_bio
/*
 * Load from block io.
 */
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    int *nsegs, int flags)
{
    vm_paddr_t paddr;
    bus_size_t len, tlen;
    int error, i, ma_offs;

    if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
        error = _bus_dmamap_load_buffer(dmat, map, bio->bio_data,
            bio->bio_bcount, kernel_pmap, flags, NULL, nsegs);
        return (error);
    }

    error = 0;
    tlen = bio->bio_bcount;
    ma_offs = bio->bio_ma_offset;
    for (i = 0; tlen > 0; i++, tlen -= len) {
        len = min(PAGE_SIZE - ma_offs, tlen);
        paddr = VM_PAGE_TO_PHYS(bio->bio_ma[i]) + ma_offs;
        error = _bus_dmamap_load_phys(dmat, map, paddr, len,
            flags, NULL, nsegs);
        if (error != 0)
            break;
        ma_offs = 0;
    }
    return (error);
}
Example 13: gdt_init
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init(void)
{
    struct vm_page *pg;
    vaddr_t va;
    struct cpu_info *ci = &cpu_info_primary;

    gdt_next = NGDT;
    gdt_free = GNULL_SEL;

    gdt = (union descriptor *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
    for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + MAXGDTSIZ;
        va += PAGE_SIZE) {
        pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
        if (pg == NULL)
            panic("gdt_init: no pages");
        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
            PROT_READ | PROT_WRITE);
    }
    bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
    ci->ci_gdt = gdt;
    setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
        SDT_MEMRWA, SEL_KPL, 0, 0);

    gdt_init_cpu(ci);
}
Example 14: efi_arch_enter
/*
 * Create an environment for the EFI runtime code call.  The most
 * important part is creating the required 1:1 physical->virtual
 * mappings for the runtime segments.  To do that, we manually create
 * a page table that unmaps userspace but provides the correct kernel
 * mappings.  The 1:1 mappings for runtime segments usually occupy the
 * low 4G of the physical address map.
 *
 * The 1:1 mappings were chosen over the SetVirtualAddressMap() EFI RT
 * service, because some BIOSes fail to correctly relocate themselves
 * on that call, requiring both the 1:1 and the virtual mapping.  As a
 * result, we must provide the 1:1 mapping anyway, so there is no
 * reason to bother with the virtual map, and no need to add
 * complexity to the loader.
 *
 * The fpu_kern_enter() call allows the firmware to use the FPU, as
 * mandated by the specification.  In particular, the CR0.TS bit is
 * cleared.  It also enters a critical section, giving us the
 * necessary protection against a context switch.
 *
 * There is no need to disable interrupts around the change of %cr3:
 * the kernel mappings remain correct, since we only replaced the
 * userspace portion of the VA space, and interrupt handlers must not
 * access userspace.  Keeping interrupts enabled also avoids problems
 * with long firmware/SMM operations, which would otherwise negatively
 * affect IPIs, esp. TLB shootdown requests.
 */
int
efi_arch_enter(void)
{
    pmap_t curpmap;

    curpmap = PCPU_GET(curpmap);
    PMAP_LOCK_ASSERT(curpmap, MA_OWNED);

    /*
     * The IPI TLB shootdown handler invltlb_pcid_handler() reloads
     * %cr3 from curpmap->pm_cr3, which would disable the runtime
     * segments mappings.  Block the handler's action by setting
     * curpmap to an impossible value.  See also the comment in
     * pmap.c:pmap_activate_sw().
     */
    if (pmap_pcid_enabled && !invpcid_works)
        PCPU_SET(curpmap, NULL);

    load_cr3(VM_PAGE_TO_PHYS(efi_pml4_page) | (pmap_pcid_enabled ?
        curpmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid : 0));

    /*
     * If PCID is enabled, the CR3_PCID_SAVE bit is clear in the
     * loaded %cr3, which itself causes TLB invalidation.
     */
    if (!pmap_pcid_enabled)
        invltlb();
    return (0);
}
Example 15: cpu_uarea_alloc
void *
cpu_uarea_alloc(bool system)
{
    struct pglist pglist;
    int error;

    /*
     * Allocate a new physically contiguous uarea which can be
     * direct-mapped.
     */
    error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1);
    if (error) {
        return NULL;
    }

    /*
     * Get the physical address from the first page.
     */
    const struct vm_page * const pg = TAILQ_FIRST(&pglist);
    KASSERT(pg != NULL);
    const paddr_t pa = VM_PAGE_TO_PHYS(pg);

    /*
     * We need to return a direct-mapped VA for the pa.
     */
    return (void *)PMAP_MAP_POOLPAGE(pa);
}