This article collects typical usage examples of the pmap_kenter_pa function in C/C++. If you have been wondering what exactly pmap_kenter_pa does, how to call it, or where to find real examples of it, then you are in luck: the curated examples here should help. pmap_kenter_pa() is the BSD kernel pmap interface for entering a wired, unmanaged kernel mapping of a single physical page; the examples below are drawn from BSD-derived kernel sources.
The following shows 15 code examples of the pmap_kenter_pa function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: hibernate_quiesce_cpus
/*
 * Quiesce CPUs in a multiprocessor machine before resuming. We need to do
 * this since the APs will be hatched (but waiting for CPUF_GO), and we don't
 * want the APs to be executing code and causing side effects during the
 * unpack operation.
 */
void
hibernate_quiesce_cpus(void)
{
    struct cpu_info *ci;
    u_long i;

    KASSERT(CPU_IS_PRIMARY(curcpu()));

    pmap_kenter_pa(ACPI_TRAMPOLINE, ACPI_TRAMPOLINE,
        PROT_READ | PROT_EXEC);
    pmap_kenter_pa(ACPI_TRAMP_DATA, ACPI_TRAMP_DATA,
        PROT_READ | PROT_WRITE);

    for (i = 0; i < MAXCPUS; i++) {
        ci = cpu_info[i];
        if (ci == NULL)
            continue;
        if (ci->ci_idle_pcb == NULL)
            continue;
        if ((ci->ci_flags & CPUF_PRESENT) == 0)
            continue;
        if (ci->ci_flags & (CPUF_BSP | CPUF_SP | CPUF_PRIMARY))
            continue;
        atomic_setbits_int(&ci->ci_flags, CPUF_GO | CPUF_PARK);
    }

    /* Wait a bit for the APs to park themselves */
    delay(500000);

    pmap_kremove(ACPI_TRAMPOLINE, PAGE_SIZE);
    pmap_kremove(ACPI_TRAMP_DATA, PAGE_SIZE);
}
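Note that both pmap_kenter_pa() calls above map the trampoline pages at va == pa: the freshly woken APs start executing at a known physical address, so that code must be reachable through an identity mapping. A minimal sketch of the same identity-map/use/unmap pattern (the helper and its use are hypothetical, not OpenBSD code):

/* Sketch: identity-map one page, execute it, tear the mapping down. */
void
run_identity_mapped(paddr_t pa)
{
    pmap_kenter_pa((vaddr_t)pa, pa, PROT_READ | PROT_EXEC); /* va == pa */
    ((void (*)(void))pa)();             /* jump to the page */
    pmap_kremove((vaddr_t)pa, PAGE_SIZE);   /* undo the mapping */
}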
Example 2: gdt_grow
/*
 * Grow the GDT.
 */
void
gdt_grow(int which)
{
    size_t old_len, new_len;
    CPU_INFO_ITERATOR cii;
    struct cpu_info *ci;
    struct vm_page *pg;
    vaddr_t va;

    old_len = gdt_size[which] * sizeof(gdt[0]);
    gdt_size[which] <<= 1;
    new_len = old_len << 1;

#ifdef XEN
    if (which != 0) {
        size_t max_len = MAXGDTSIZ * sizeof(gdt[0]);
        if (old_len == 0) {
            gdt_size[which] = MINGDTSIZ;
            new_len = gdt_size[which] * sizeof(gdt[0]);
        }
        for (va = (vaddr_t)(cpu_info_primary.ci_gdt) + old_len + max_len;
            va < (vaddr_t)(cpu_info_primary.ci_gdt) + new_len + max_len;
            va += PAGE_SIZE) {
            while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
                == NULL) {
                uvm_wait("gdt_grow");
            }
            pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                VM_PROT_READ | VM_PROT_WRITE);
        }
        return;
    }
#endif

    for (CPU_INFO_FOREACH(cii, ci)) {
        for (va = (vaddr_t)(ci->ci_gdt) + old_len;
            va < (vaddr_t)(ci->ci_gdt) + new_len;
            va += PAGE_SIZE) {
            while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
                == NULL) {
                uvm_wait("gdt_grow");
            }
            pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                VM_PROT_READ | VM_PROT_WRITE);
        }
    }

    pmap_update(pmap_kernel());
}
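gdt_grow() is meant to be driven by the slot allocator once the dynamic descriptor area fills up. A hedged sketch of such a caller (the bookkeeping shown is illustrative only, not the actual NetBSD gdt_get_slot()):

/* Sketch: hand out the next dynamic GDT slot, growing on demand. */
int
gdt_get_slot_sketch(int which)
{
    if (gdt_next[which] >= gdt_size[which]) {
        if (gdt_size[which] >= MAXGDTSIZ)
            panic("gdt_get_slot_sketch: out of GDT descriptors");
        gdt_grow(which);
    }
    return gdt_next[which]++;
}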
Example 3: gdt_init
/*
 * Initialize the GDT.
 */
void
gdt_init(void)
{
    char *old_gdt;
    struct vm_page *pg;
    vaddr_t va;
    struct cpu_info *ci = &cpu_info_primary;

    gdt_next = 0;
    gdt_free = GNULL_SEL;

    old_gdt = gdtstore;
    gdtstore = (char *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
    for (va = (vaddr_t)gdtstore; va < (vaddr_t)gdtstore + MAXGDTSIZ;
        va += PAGE_SIZE) {
        pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
        if (pg == NULL) {
            panic("gdt_init: no pages");
        }
        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
            VM_PROT_READ | VM_PROT_WRITE);
    }
    bcopy(old_gdt, gdtstore, DYNSEL_START);
    ci->ci_gdt = gdtstore;
    set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore,
        LDT_SIZE - 1, SDT_SYSLDT, SEL_KPL, 0);

    gdt_init_cpu(ci);
}
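The shape of gdt_init() recurs throughout these examples: reserve virtual address space first, then back it page by page with uvm_pagealloc() plus pmap_kenter_pa(). A condensed, hedged sketch of that pattern (the helper name is ours, not a kernel API):

/* Sketch: reserve VA space, then wire len bytes of it to fresh pages. */
vaddr_t
valloc_wired(size_t len)
{
    struct vm_page *pg;
    vaddr_t base, va;

    base = uvm_km_valloc(kernel_map, len);
    if (base == 0)
        return 0;
    for (va = base; va < base + len; va += PAGE_SIZE) {
        pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
        if (pg == NULL)
            panic("valloc_wired: no pages");
        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
            VM_PROT_READ | VM_PROT_WRITE);
    }
    pmap_update(pmap_kernel());
    return base;
}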
Example 4: obio_bs_map
int
obio_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
    const struct pmap_devmap *pd;
    paddr_t startpa, endpa, pa, offset;
    vaddr_t va;
    pt_entry_t *pte;

    if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
        /* Device was statically mapped. */
        *bshp = pd->pd_va + (bpa - pd->pd_pa);
        return (0);
    }

    endpa = round_page(bpa + size);
    offset = bpa & PAGE_MASK;
    startpa = trunc_page(bpa);

    va = uvm_km_valloc(kernel_map, endpa - startpa);
    if (va == 0)
        return (ENOMEM);

    *bshp = va + offset;

    for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
        pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
        pte = vtopte(va);
        *pte &= ~L2_S_CACHE_MASK;
        PTE_SYNC(pte);
    }
    pmap_update(pmap_kernel());

    return (0);
}
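The unmap side has to distinguish the two cases above: statically mapped (pmap_devmap) regions must be left alone, while dynamically created mappings are torn down and their VA range returned. A hedged sketch of what such a counterpart could look like (not the verbatim obio_bs_unmap):

/* Sketch: release a mapping made by obio_bs_map() above. */
void
obio_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
    vaddr_t va, endva;

    if (pmap_devmap_find_va(bsh, size) != NULL)
        return;     /* static mapping; nothing to undo */

    va = trunc_page(bsh);
    endva = round_page(bsh + size);
    pmap_kremove(va, endva - va);
    pmap_update(pmap_kernel());
    uvm_km_free(kernel_map, va, endva - va);
}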
Example 5: mainbus_bs_map
int
mainbus_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
    u_long startpa, endpa, pa;
    vaddr_t va;

    if ((u_long)bpa > (u_long)KERNEL_BASE) {
        /* XXX This is a temporary hack to aid transition. */
        *bshp = bpa;
        return (0);
    }

    startpa = trunc_page(bpa);
    endpa = round_page(bpa + size);

    /* XXX use extent manager to check duplicate mapping */

    va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
        UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
    if (va == 0)
        return (ENOMEM);

    *bshp = (bus_space_handle_t)(va + (bpa - startpa));

    const int pmapflags =
        (flags & (BUS_SPACE_MAP_CACHEABLE|BUS_SPACE_MAP_PREFETCHABLE))
        ? 0
        : PMAP_NOCACHE;
    for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
        pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, pmapflags);
    }
    pmap_update(pmap_kernel());

    return (0);
}
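The interesting decision here is the flags translation: unless the caller explicitly asks for a cacheable or prefetchable mapping, device memory is entered with PMAP_NOCACHE, since cached access to device registers can be reordered and coalesced in ways drivers cannot tolerate. A hedged usage sketch (the device base is a hypothetical variable, and we exploit the fact that on this implementation the handle is simply a VA):

extern bus_addr_t dev_pa;   /* hypothetical device register base */
bus_space_handle_t ioh;

if (mainbus_bs_map(NULL, dev_pa, PAGE_SIZE, 0, &ioh) != 0)
    panic("cannot map device registers");
/* The handle is a VA here, so a volatile load reads a register. */
uint32_t rev = *(volatile uint32_t *)ioh;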
Example 6: acpi_map
int
acpi_map(paddr_t pa, size_t len, struct acpi_mem_map *handle)
{
    paddr_t pgpa = trunc_page(pa);
    paddr_t endpa = round_page(pa + len);
    vaddr_t va = (vaddr_t)km_alloc(endpa - pgpa, &kv_any, &kp_none,
        &kd_nowait);

    if (va == 0)
        return (ENOMEM);

    handle->baseva = va;
    handle->va = (u_int8_t *)(va + (pa & PGOFSET));
    handle->vsize = endpa - pgpa;
    handle->pa = pa;

    do {
        pmap_kenter_pa(va, pgpa, VM_PROT_READ | VM_PROT_WRITE);
        va += NBPG;
        pgpa += NBPG;
    } while (pgpa < endpa);
    pmap_update(pmap_kernel());

    return 0;
}
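The handle records baseva and vsize precisely so the mapping can be undone later. The matching release path would look roughly like this (a hedged sketch, close to but not guaranteed to match OpenBSD's acpi_unmap):

/* Sketch: undo an acpi_map() using the bookkeeping in the handle. */
void
acpi_unmap(struct acpi_mem_map *handle)
{
    pmap_kremove(handle->baseva, handle->vsize);
    pmap_update(pmap_kernel());
    km_free((void *)handle->baseva, handle->vsize, &kv_any, &kp_none);
}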
Example 7: vmapbuf
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
    struct pmap *upmap;
    vaddr_t uva;    /* User VA (map from) */
    vaddr_t kva;    /* Kernel VA (new to) */
    paddr_t pa;     /* physical address */
    vsize_t off;

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vmapbuf");

    bp->b_saveaddr = bp->b_data;
    uva = trunc_page((vaddr_t)bp->b_data);
    off = (vaddr_t)bp->b_data - uva;
    len = round_page(off + len);
    kva = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
    bp->b_data = (void *)(kva + off);

    upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
    do {
        if (pmap_extract(upmap, uva, &pa) == FALSE)
            panic("vmapbuf: null page frame");

        /* Now map the page into kernel space. */
        pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);

        uva += PAGE_SIZE;
        kva += PAGE_SIZE;
        len -= PAGE_SIZE;
    } while (len);
    pmap_update(pmap_kernel());

    return 0;
}
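Every vmapbuf() needs a matching vunmapbuf() once the I/O completes, dropping the kernel alias and restoring b_data. A hedged sketch of the usual counterpart:

/* Sketch: unmap what vmapbuf() mapped and restore the buffer. */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
    vaddr_t kva;
    vsize_t off;

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vunmapbuf");

    kva = trunc_page((vaddr_t)bp->b_data);
    off = (vaddr_t)bp->b_data - kva;
    len = round_page(off + len);
    pmap_kremove(kva, len);
    pmap_update(pmap_kernel());
    uvm_km_free(kernel_map, kva, len, UVM_KMF_VAONLY);
    bp->b_data = bp->b_saveaddr;
    bp->b_saveaddr = NULL;
}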
Example 8: vmapbuf
/*
 * Map a user I/O request into kernel virtual address space.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
    vaddr_t kva;    /* Kernel VA (new to) */

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vmapbuf");

    vaddr_t uva = mips_trunc_page(bp->b_data);
    const vaddr_t off = (vaddr_t)bp->b_data - uva;
    len = mips_round_page(off + len);
    kva = uvm_km_alloc(phys_map, len, atop(uva) & uvmexp.colormask,
        UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
    KASSERT((atop(kva ^ uva) & uvmexp.colormask) == 0);
    bp->b_saveaddr = bp->b_data;
    bp->b_data = (void *)(kva + off);

    struct pmap * const upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
    do {
        paddr_t pa; /* physical address */
        if (pmap_extract(upmap, uva, &pa) == false)
            panic("vmapbuf: null page frame");
        pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE,
            PMAP_WIRED);
        uva += PAGE_SIZE;
        kva += PAGE_SIZE;
        len -= PAGE_SIZE;
    } while (len);
    pmap_update(pmap_kernel());

    return 0;
}
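The UVM_KMF_COLORMATCH request is what distinguishes this MIPS variant from Example 7: on a virtually indexed cache, the kernel alias must land on the same cache color as the user VA, or stores through one alias may be invisible through the other. A small sketch of the invariant the KASSERT enforces (the helper is ours):

/*
 * Sketch: two VAs are alias-safe in a virtually indexed cache only
 * if their page colors match, i.e. the low page-number bits agree.
 */
static inline bool
same_cache_color(vaddr_t a, vaddr_t b)
{
    return (atop(a ^ b) & uvmexp.colormask) == 0;
}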
Example 9: gdt_init
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init(void)
{
    size_t max_len, min_len;
    struct vm_page *pg;
    vaddr_t va;
    struct cpu_info *ci = &cpu_info_primary;

    simple_lock_init(&gdt_simplelock);
    lockinit(&gdt_lock_store, PZERO, "gdtlck", 0, 0);

    max_len = MAXGDTSIZ * sizeof(union descriptor);
    min_len = MINGDTSIZ * sizeof(union descriptor);

    gdt_size = MINGDTSIZ;
    gdt_count = NGDT;
    gdt_next = NGDT;
    gdt_free = GNULL_SEL;

    gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
    for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
        pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
        if (pg == NULL)
            panic("gdt_init: no pages");
        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
            VM_PROT_READ | VM_PROT_WRITE);
    }
    bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
    ci->ci_gdt = gdt;
    setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
        SDT_MEMRWA, SEL_KPL, 0, 0);

    gdt_init_cpu(ci);
}
Example 10: gdt_grow
/*
 * Grow the GDT.
 */
void
gdt_grow(void)
{
    size_t old_len, new_len;
    CPU_INFO_ITERATOR cii;
    struct cpu_info *ci;
    struct vm_page *pg;
    vaddr_t va;

    old_len = gdt_size * sizeof(union descriptor);
    gdt_size <<= 1;
    new_len = old_len << 1;

    CPU_INFO_FOREACH(cii, ci) {
        for (va = (vaddr_t)(ci->ci_gdt) + old_len;
            va < (vaddr_t)(ci->ci_gdt) + new_len;
            va += PAGE_SIZE) {
            while ((pg = uvm_pagealloc(NULL, 0, NULL,
                UVM_PGA_ZERO)) == NULL) {
                uvm_wait("gdt_grow");
            }
            pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                VM_PROT_READ | VM_PROT_WRITE);
        }
    }
}
Example 11: vmm_km_zalloc
vaddr_t vmm_km_zalloc(size_t size) {
    // Pre-kernel-heap unmanaged memory allocator.
    // This should only be used before kheap_init has been called.
    static vaddr_t placement_addr = 0;

    if (placement_addr == 0) {
        pmap_virtual_space(NULL, &kernel_vend);
        placement_addr = kernel_vend;
    }

    // Make sure enough memory is left!
    kassert((UINT32_MAX - placement_addr) >= size);

    vaddr_t start = placement_addr;
    vaddr_t end = placement_addr + size;

    // Allocate new pages if there is not enough memory
    if (end >= kernel_vend) {
        // Loop through and allocate pages until we have enough
        // memory to serve the requested size
        for ( ; kernel_vend < end; kernel_vend += PAGESIZE) {
            paddr_t pa = pmm_alloc();
            pmap_kenter_pa(kernel_vend, pa, VM_PROT_DEFAULT,
                PMAP_WRITE_BACK);
        }
    }

    // Zero the memory
    memset((void *)placement_addr, 0x0, size);

    placement_addr = end;
    return start;
}
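A bump allocator like this is typically used during early bring-up, before the real heap exists, for structures that are never freed. A hedged usage sketch (the variables and sizes are hypothetical):

// Sketch: carve early, never-freed kernel structures out of the
// bump allocator. Memory comes back already zeroed.
uint32_t *frame_bitmap = (uint32_t *)vmm_km_zalloc(bitmap_size);
vaddr_t boot_page_tables = vmm_km_zalloc(16 * PAGESIZE);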
Example 12: vmm_km_heap_extend
// TODO: This function shouldn't need to exist. Find another way.
vaddr_t vmm_km_heap_extend(size_t size) {
    vregion_t *region = &vmap_kernel()->regions[2];

    kassert((UINT32_MAX - region->vend) > ROUND_PAGE(size));

    vaddr_t prev_vend = region->vend;
    region->vend += ROUND_PAGE(size);

    for (vaddr_t va = prev_vend; va < region->vend; va += PAGESIZE) {
        // Allocate a free page if one is available, else panic
        paddr_t pa = pmm_alloc();
        kassert(pa != UINTPTR_MAX);

        // TODO: Use pmap_enter here instead
        pmap_kenter_pa(va, pa, region->vm_prot,
            PMAP_WIRED | PMAP_WRITE_COMBINE);

        // Enter the information into the amap
        region->aref.amap->aslots[(uint32_t)((double)(va -
            region->vstart) / (double)PAGESIZE)]->page->vaddr = va;
    }

    memset((vaddr_t *)prev_vend, 0, PAGESIZE);

    vmap_kernel()->heap_end = region->vend;

    uint32_t new_size = region->vend - region->vstart;
    region->aref.amap->maxslots = region->aref.amap->nslots =
        (uint32_t)((double)new_size / (double)PAGESIZE);

    return prev_vend;
}
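The heap allocator would fall back to this routine when its arena is exhausted; a hedged sketch of that call site (the kheap internals are hypothetical):

// Sketch: grow the kernel heap arena when an allocation won't fit.
void* kheap_alloc(size_t size) {
    void *p = kheap_try_alloc(size);    // hypothetical fast path
    if (p == NULL) {
        vmm_km_heap_extend(size);       // map more pages into the heap
        p = kheap_try_alloc(size);      // retry from the larger arena
    }
    return p;
}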
Example 13: vme_map_r
int
vme_map_r(const struct vme_range *r, paddr_t pa, psize_t len, int flags,
    vm_prot_t prot, vaddr_t *rva)
{
    vaddr_t ova, va;
    u_int pg;

    ova = va = uvm_km_valloc(kernel_map, len);
    if (va == 0)
        return ENOMEM;

    pa += r->vr_base;
    for (pg = atop(len); pg != 0; pg--) {
        pmap_kenter_pa(va, pa, prot);
        va += PAGE_SIZE;
        pa += PAGE_SIZE;
    }
    if (flags & BUS_SPACE_MAP_CACHEABLE)
        pmap_cache_ctrl(ova, ova + len, CACHE_GLOBAL);
    pmap_update(pmap_kernel());

    *rva = ova;
    return 0;
}
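Since vme_map_r() hands back only a VA, the caller is expected to unmap by address and length. A hedged sketch of the reverse path (not taken from the OpenBSD VME driver):

/* Sketch: release a VME mapping established by vme_map_r(). */
void
vme_unmap_r(vaddr_t va, psize_t len)
{
    pmap_kremove(va, len);
    pmap_update(pmap_kernel());
    uvm_km_free(kernel_map, va, len);
}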
Example 14: pmap_steal_memory
vaddr_t pmap_steal_memory(size_t vsize) {
    // pmap_init must be called before this function can be used,
    // otherwise kernel_vend will hold an incorrect value.
    // kernel_vend and kernel_pend should be page aligned.
    // This function should only be used before pmm_init is called.
    static vaddr_t placement_addr = 0;
    placement_addr = (placement_addr == 0) ? kernel_vend : placement_addr;

    // Make sure enough memory is left!
    kassert((UINT32_MAX - placement_addr) >= vsize);

    vaddr_t start = placement_addr;
    vaddr_t end = placement_addr + vsize;

    // Allocate new pages if there is not enough memory
    if (end >= kernel_vend) {
        // Map the pages with pmap_kenter_pa while incrementing
        // kernel_pend and kernel_vend
        for ( ; kernel_vend < end;
            kernel_vend += PAGESIZE, kernel_pend += PAGESIZE) {
            pmap_kenter_pa(kernel_vend, kernel_pend,
                VM_PROT_DEFAULT, PMAP_WRITE_BACK);
        }
    }

    // Zero the memory
    memset((void *)placement_addr, 0x0, vsize);

    placement_addr = end;
    return start;
}
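In BSD-derived designs, pmap_steal_memory() is the hook the VM bootstrap leans on before the page allocator exists; a hedged usage sketch in this codebase's style (the callers and sizes are hypothetical):

// Sketch: boot-time allocations made before pmm_init runs.
vaddr_t boot_stack = pmap_steal_memory(4 * PAGESIZE);
vaddr_t page_array = pmap_steal_memory(npages * sizeof(page_t));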
Example 15: hibernate_prepare_resume_machdep
/*
 * MD-specific resume preparation (creating resume time pagetables,
 * stacks, etc).
 */
void
hibernate_prepare_resume_machdep(union hibernate_info *hib_info)
{
    paddr_t pa, piglet_end;
    vaddr_t va;

    /*
     * At this point, we are sure that the piglet's phys space is going to
     * have been unused by the suspending kernel, but the vaddrs used by
     * the suspending kernel may or may not be available to us here in the
     * resuming kernel, so we allocate a new range of VAs for the piglet.
     * Those VAs will be temporary and will cease to exist as soon as we
     * switch to the resume PT, so we need to ensure that any VAs required
     * during inflate are also entered into that map.
     */
    hib_info->piglet_va = (vaddr_t)km_alloc(HIBERNATE_CHUNK_SIZE * 3,
        &kv_any, &kp_none, &kd_nowait);
    if (!hib_info->piglet_va)
        panic("Unable to allocate vaddr for hibernate resume piglet\n");

    piglet_end = hib_info->piglet_pa + HIBERNATE_CHUNK_SIZE * 3;

    for (pa = hib_info->piglet_pa, va = hib_info->piglet_va;
        pa <= piglet_end; pa += PAGE_SIZE, va += PAGE_SIZE)
        pmap_kenter_pa(va, pa, VM_PROT_ALL);

    pmap_activate(curproc);
}