本文整理汇总了C++中round_page函数的典型用法代码示例。如果您正苦于以下问题:C++ round_page函数的具体用法?C++ round_page怎么用?C++ round_page使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了round_page函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: memrw
/* ARGSUSED */
/*
 * Read/write handler for the /dev/mem and /dev/kmem character devices
 * (PowerPC flavor).  /dev/mem interprets uio_offset as a physical
 * address, /dev/kmem as a kernel virtual address.  Transfers are done
 * at most one page at a time.  Returns 0 or an errno value.
 */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;	/* fake vm_page used for physical copies */
	vm_page_t marr;
	vm_size_t cnt;

	cnt = 0;
	error = 0;

	/* Process one page-sized chunk per iteration until done or error. */
	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* This iovec is exhausted; step to the next one. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			/* /dev/mem: v is a physical address. */
			v = uio->uio_offset;
kmem_direct_mapped:	off = v & PAGE_MASK;
			/*
			 * Clamp the transfer so it does not cross a page
			 * boundary in either the user buffer or the
			 * source/target page, nor exceed the iovec.
			 */
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

			/*
			 * NOTE(review): mem_valid() appears to return
			 * nonzero when the range is NOT accessible memory
			 * (EFAULT path below) -- confirm against its
			 * definition.
			 */
			if (mem_valid(v, cnt)) {
				error = EFAULT;
				break;
			}
			if (hw_direct_map && !pmap_dev_direct_mapped(v, cnt)) {
				/* Regular RAM: copy through the direct map. */
				error = uiomove((void *)PHYS_TO_DMAP(v), cnt,
				    uio);
			} else {
				/*
				 * Device memory or no direct map: use a fake
				 * vm_page so uiomove_fromphys() can map it.
				 */
				m.phys_addr = trunc_page(v);
				marr = &m;
				error = uiomove_fromphys(&marr, off, cnt, uio);
			}
		}
		else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			/* /dev/kmem: va is a kernel virtual address. */
			va = uio->uio_offset;

			/*
			 * Addresses outside the kernel VA range are treated
			 * as direct-map addresses: translate to physical and
			 * reuse the /dev/mem path above.
			 */
			if ((va < VM_MIN_KERNEL_ADDRESS) || (va > virtual_end)) {
				v = DMAP_TO_PHYS(va);
				goto kmem_direct_mapped;
			}

			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset
			    + iov->iov_len);

			/*
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */
			for (; va < eva; va += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, va) == 0)
					return (EFAULT);

			prot = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;

			/* Verify access rights for the whole iovec, then copy. */
			va = uio->uio_offset;
			if (kernacc((void *) va, iov->iov_len, prot)
			    == FALSE)
				return (EFAULT);
			error = uiomove((void *)va, iov->iov_len, uio);
			continue;
		}
	}

	return (error);
}
示例2: db_write_bytes
/*
 * Write bytes to kernel address space for debugger.
 *
 * If the target range overlaps the read-only kernel text, the covering
 * page table entries are temporarily made writable (PG_RW) for the
 * duration of the copy and restored afterwards.  A fault during the
 * copy is caught via the kdb jmp_buf; the setjmp() return value (0 on
 * success, nonzero after a fault) is what this function returns.
 */
int
db_write_bytes(vm_offset_t addr, size_t size, char *data)
{
	jmp_buf jb;
	void *prev_jb;
	char *dst;
	pt_entry_t *ptep0 = NULL;	/* PTE of first touched text page */
	pt_entry_t oldmap0 = 0;		/* its original contents */
	vm_offset_t addr1;
	pt_entry_t *ptep1 = NULL;	/* PTE of second page, if crossing */
	pt_entry_t oldmap1 = 0;
	int ret;

	/* Arrange for a fault during the copy to longjmp back here. */
	prev_jb = kdb_jmpbuf(jb);
	ret = setjmp(jb);
	if (ret == 0) {
		/* Does the write land inside [btext, etext)? */
		if (addr > trunc_page((vm_offset_t)btext) - size &&
		    addr < round_page((vm_offset_t)etext)) {
			ptep0 = vtopte(addr);
			oldmap0 = *ptep0;
			*ptep0 |= PG_RW;
			/*
			 * Map another page if the data crosses a page
			 * boundary.
			 */
			if ((*ptep0 & PG_PS) == 0) {
				/* 4K mapping: check for a 4K page crossing. */
				addr1 = trunc_page(addr + size - 1);
				if (trunc_page(addr) != addr1) {
					ptep1 = vtopte(addr1);
					oldmap1 = *ptep1;
					*ptep1 |= PG_RW;
				}
			} else {
				/* 2M superpage: check for a 2M crossing. */
				addr1 = trunc_2mpage(addr + size - 1);
				if (trunc_2mpage(addr) != addr1) {
					ptep1 = vtopte(addr1);
					oldmap1 = *ptep1;
					*ptep1 |= PG_RW;
				}
			}
			/* Flush stale translations before writing. */
			invltlb();
		}

		dst = (char *)addr;
		while (size-- > 0)
			*dst++ = *data++;
	}

	/* Restore the previous debugger jmp_buf. */
	(void)kdb_jmpbuf(prev_jb);

	/* Undo the temporary PG_RW mappings, if any, and flush again. */
	if (ptep0) {
		*ptep0 = oldmap0;
		if (ptep1)
			*ptep1 = oldmap1;
		invltlb();
	}
	return (ret);
}
示例3: netbsd32_vm_default_addr
vaddr_t
netbsd32_vm_default_addr(struct proc *p, vaddr_t base, vsize_t size)
{
	/*
	 * Default mmap() hint for 32-bit processes: place mappings just
	 * above the largest possible 32-bit data segment, rounded up to
	 * a page boundary.  The proc and size arguments are unused here.
	 */
	vaddr_t hint;

	hint = (vaddr_t)(base) + (vsize_t)MAXDSIZ32;
	return round_page(hint);
}
示例4: am335x_lcd_attach
static int
am335x_lcd_attach(device_t dev)
{
struct am335x_lcd_softc *sc;
int rid;
int div;
struct panel_info panel;
uint32_t reg, timing0, timing1, timing2;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *tree;
uint32_t burst_log;
int err;
size_t dma_size;
uint32_t hbp, hfp, hsw;
uint32_t vbp, vfp, vsw;
uint32_t width, height;
phandle_t root, panel_node;
sc = device_get_softc(dev);
sc->sc_dev = dev;
root = OF_finddevice("/");
if (root == 0) {
device_printf(dev, "failed to get FDT root node\n");
return (ENXIO);
}
panel_node = fdt_find_compatible(root, "ti,tilcdc,panel", 1);
if (panel_node == 0) {
device_printf(dev, "failed to find compatible panel in FDT blob\n");
return (ENXIO);
}
if (am335x_read_panel_info(dev, panel_node, &panel)) {
device_printf(dev, "failed to read panel info\n");
return (ENXIO);
}
if (am335x_read_timing(dev, panel_node, &panel)) {
device_printf(dev, "failed to read timings\n");
return (ENXIO);
}
int ref_freq = 0;
ti_prcm_clk_enable(LCDC_CLK);
if (ti_prcm_clk_get_source_freq(LCDC_CLK, &ref_freq)) {
device_printf(dev, "Can't get reference frequency\n");
return (ENXIO);
}
rid = 0;
sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (!sc->sc_mem_res) {
device_printf(dev, "cannot allocate memory window\n");
return (ENXIO);
}
rid = 0;
sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE);
if (!sc->sc_irq_res) {
bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
device_printf(dev, "cannot allocate interrupt\n");
return (ENXIO);
}
if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
NULL, am335x_lcd_intr, sc,
&sc->sc_intr_hl) != 0) {
bus_release_resource(dev, SYS_RES_IRQ, rid,
sc->sc_irq_res);
bus_release_resource(dev, SYS_RES_MEMORY, rid,
sc->sc_mem_res);
device_printf(dev, "Unable to setup the irq handler.\n");
return (ENXIO);
}
LCD_LOCK_INIT(sc);
/* Panel initialization */
dma_size = round_page(panel.panel_width*panel.panel_height*panel.bpp/8);
/*
* Now allocate framebuffer memory
*/
err = bus_dma_tag_create(
bus_get_dma_tag(dev),
4, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
dma_size, 1, /* maxsize, nsegments */
dma_size, 0, /* maxsegsize, flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->sc_dma_tag);
if (err)
goto fail;
err = bus_dmamem_alloc(sc->sc_dma_tag, (void **)&sc->sc_fb_base,
//.........这里部分代码省略.........
示例5: do_posix_fadvise
/*
 * Common implementation of posix_fadvise(2): apply access-pattern
 * advice to the open file `fd` over [offset, offset+len) (len == 0
 * means "to end of file").  Returns 0 or an errno value.
 */
int
do_posix_fadvise(int fd, off_t offset, off_t len, int advice)
{
	file_t *fp;
	vnode_t *vp;
	off_t endoffset;
	int error;

	/* The POSIX advice values are stored directly as UVM advice. */
	CTASSERT(POSIX_FADV_NORMAL == UVM_ADV_NORMAL);
	CTASSERT(POSIX_FADV_RANDOM == UVM_ADV_RANDOM);
	CTASSERT(POSIX_FADV_SEQUENTIAL == UVM_ADV_SEQUENTIAL);

	/* Compute the end of the range; reject negative/overflowing lengths. */
	if (len == 0) {
		endoffset = INT64_MAX;
	} else if (len > 0 && (INT64_MAX - offset) >= len) {
		endoffset = offset + len;
	} else {
		return EINVAL;
	}
	if ((fp = fd_getfile(fd)) == NULL) {
		return EBADF;
	}
	/* Only vnodes take advice; pipes/sockets get ESPIPE per POSIX. */
	if (fp->f_type != DTYPE_VNODE) {
		if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
			error = ESPIPE;
		} else {
			error = EOPNOTSUPP;
		}
		fd_putfile(fd);
		return error;
	}
	/*
	 * WILLNEED/DONTNEED only make sense on regular files and block
	 * devices; silently succeed on anything else.
	 */
	switch (advice) {
	case POSIX_FADV_WILLNEED:
	case POSIX_FADV_DONTNEED:
		vp = fp->f_vnode;
		if (vp->v_type != VREG && vp->v_type != VBLK) {
			fd_putfile(fd);
			return 0;
		}
		break;
	}
	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_RANDOM:
	case POSIX_FADV_SEQUENTIAL:
		/*
		 * We ignore offset and size. Must lock the file to
		 * do this, as f_advice is sub-word sized.
		 */
		mutex_enter(&fp->f_lock);
		fp->f_advice = (u_char)advice;
		mutex_exit(&fp->f_lock);
		error = 0;
		break;
	case POSIX_FADV_WILLNEED:
		/* Kick off read-ahead for the requested range. */
		vp = fp->f_vnode;
		error = uvm_readahead(&vp->v_uobj, offset, endoffset - offset);
		break;
	case POSIX_FADV_DONTNEED:
		vp = fp->f_vnode;
		/*
		 * Align the region to page boundaries as VOP_PUTPAGES expects
		 * by shrinking it. We shrink instead of expand because we
		 * do not want to deactivate cache outside of the requested
		 * region. It means that if the specified region is smaller
		 * than PAGE_SIZE, we do nothing.
		 */
		/*
		 * NOTE(review): the second test guards against
		 * round_page(offset) wrapping past the end of off_t --
		 * presumably overflow protection; confirm.
		 */
		if (round_page(offset) < trunc_page(endoffset) &&
		    offset <= round_page(offset)) {
			mutex_enter(vp->v_interlock);
			error = VOP_PUTPAGES(vp,
			    round_page(offset), trunc_page(endoffset),
			    PGO_DEACTIVATE | PGO_CLEANIT);
		} else {
			error = 0;
		}
		break;
	case POSIX_FADV_NOREUSE:
		/* Not implemented yet. */
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	fd_putfile(fd);
	return error;
}
示例6: initarm
//.........这里部分代码省略.........
/*
* Allocate a page for the system page mapped to V0x00000000
* This page will just contain the system vectors and can be
* shared by all processes.
*/
alloc_pages(systempage.pv_pa, 1);
/* Allocate stacks for all modes */
valloc_pages(irqstack, IRQ_STACK_SIZE);
valloc_pages(abtstack, ABT_STACK_SIZE);
valloc_pages(undstack, UND_STACK_SIZE);
valloc_pages(kernelstack, UPAGES);
/* Allocate enough pages for cleaning the Mini-Data cache. */
KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
valloc_pages(minidataclean, 1);
#ifdef VERBOSE_INIT_ARM
printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
irqstack.pv_va);
printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
abtstack.pv_va);
printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
undstack.pv_va);
printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
kernelstack.pv_va);
#endif
/*
* XXX Defer this to later so that we can reclaim the memory
* XXX used by the RedBoot page tables.
*/
alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);
/*
* Ok we have allocated physical pages for the primary kernel
* page tables
*/
#ifdef VERBOSE_INIT_ARM
printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif
/*
* Now we start construction of the L1 page table
* We start by mapping the L2 page tables into the L1.
* This means that we can replace L1 mappings later on if necessary
*/
l1pagetable = kernel_l1pt.pv_pa;
/* Map the L2 pages tables in the L1 page table */
pmap_link_l2pt(l1pagetable, 0x00000000,
&kernel_pt_table[KERNEL_PT_SYS]);
for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
&kernel_pt_table[KERNEL_PT_KERNEL + loop]);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
&kernel_pt_table[KERNEL_PT_VMDATA + loop]);
/* update the top of the kernel VM */
pmap_curmaxkvaddr =
KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);
#ifdef VERBOSE_INIT_ARM
示例7: memrw
/* ARGSUSED */
/*
 * Read/write handler for /dev/mem and /dev/kmem (variant that always
 * copies physical memory through a fake vm_page).  /dev/mem interprets
 * uio_offset as a physical address, /dev/kmem as a kernel virtual
 * address.  Returns 0 or an errno value.
 */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;	/* fake vm_page used for physical copies */
	vm_page_t marr;
	vm_size_t cnt;

	cnt = 0;
	error = 0;

	/* Initialize the fake page's pmap-private fields before use. */
	pmap_page_init(&m);
	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* This iovec is exhausted; step to the next one. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			/* /dev/mem: v is a physical address. */
			v = uio->uio_offset;
			off = uio->uio_offset & PAGE_MASK;
			/*
			 * Clamp the transfer to a single page of both the
			 * user buffer and the physical page, and to the
			 * iovec length.
			 */
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);
			m.phys_addr = trunc_page(v);
			marr = &m;
			error = uiomove_fromphys(&marr, off, cnt, uio);
		}
		else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			/* /dev/kmem: va is a kernel virtual address. */
			va = uio->uio_offset;
			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset
			    + iov->iov_len);

			/*
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */
			/*
			 * NOTE(review): residency and access checks are only
			 * performed when the range lies inside the kernel VA
			 * window; addresses outside it go straight to
			 * uiomove() below -- confirm that is intended.
			 */
			if (va >= VM_MIN_KERNEL_ADDRESS &&
			    eva <= VM_MAX_KERNEL_ADDRESS) {
				for (; va < eva; va += PAGE_SIZE)
					if (pmap_extract(kernel_pmap, va) == 0)
						return (EFAULT);

				prot = (uio->uio_rw == UIO_READ)
				    ? VM_PROT_READ : VM_PROT_WRITE;

				va = uio->uio_offset;
				if (kernacc((void *) va, iov->iov_len, prot)
				    == FALSE)
					return (EFAULT);
			}

			va = uio->uio_offset;
			error = uiomove((void *)va, iov->iov_len, uio);
			continue;
		}
	}

	return (error);
}
示例8: initarm
void *
initarm(struct arm_boot_params *abp)
{
struct pv_addr kernel_l1pt;
int loop;
u_int l1pagetable;
vm_offset_t freemempos;
vm_offset_t afterkern;
vm_offset_t lastaddr;
int i;
uint32_t memsize;
boothowto = 0; /* Likely not needed */
lastaddr = parse_boot_param(abp);
arm_physmem_kernaddr = abp->abp_physaddr;
i = 0;
set_cpufuncs();
cpufuncs.cf_sleep = s3c24x0_sleep;
pcpu0_init();
/* Do basic tuning, hz etc */
init_param1();
#define KERNEL_TEXT_BASE (KERNBASE)
freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;
/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np) \
alloc_pages((var).pv_va, (np)); \
(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);
#define alloc_pages(var, np) \
(var) = freemempos; \
freemempos += (np * PAGE_SIZE); \
memset((char *)(var), 0, ((np) * PAGE_SIZE));
while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
freemempos += PAGE_SIZE;
valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
valloc_pages(kernel_pt_table[loop],
L2_TABLE_SIZE / PAGE_SIZE);
} else {
kernel_pt_table[loop].pv_va = freemempos -
(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
L2_TABLE_SIZE_REAL;
kernel_pt_table[loop].pv_pa =
kernel_pt_table[loop].pv_va - KERNVIRTADDR +
abp->abp_physaddr;
}
}
/*
* Allocate a page for the system page mapped to V0x00000000
* This page will just contain the system vectors and can be
* shared by all processes.
*/
valloc_pages(systempage, 1);
/* Allocate stacks for all modes */
valloc_pages(irqstack, IRQ_STACK_SIZE);
valloc_pages(abtstack, ABT_STACK_SIZE);
valloc_pages(undstack, UND_STACK_SIZE);
valloc_pages(kernelstack, KSTACK_PAGES);
valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
/*
* Now we start construction of the L1 page table
* We start by mapping the L2 page tables into the L1.
* This means that we can replace L1 mappings later on if necessary
*/
l1pagetable = kernel_l1pt.pv_va;
/* Map the L2 pages tables in the L1 page table */
pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
&kernel_pt_table[KERNEL_PT_SYS]);
for (i = 0; i < KERNEL_PT_KERN_NUM; i++)
pmap_link_l2pt(l1pagetable, KERNBASE + i * L1_S_SIZE,
&kernel_pt_table[KERNEL_PT_KERN + i]);
pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR,
(((uint32_t)(lastaddr) - KERNBASE) + PAGE_SIZE) & ~(PAGE_SIZE - 1),
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
afterkern = round_page((lastaddr + L1_S_SIZE) & ~(L1_S_SIZE
- 1));
for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
pmap_link_l2pt(l1pagetable, afterkern + i * L1_S_SIZE,
&kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
}
/* Map the vector page. */
pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/* Map the stack pages */
pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
//.........这里部分代码省略.........
示例9: AllocateExecutableMemory
/*
 * Allocate `size` bytes of read/write/execute memory for the JIT.
 * When `low` is true, try to place the allocation below the 4 GB
 * boundary (required for rip-relative addressing on x86-64).
 * Returns the mapping, or NULL after raising a panic on failure.
 */
void* AllocateExecutableMemory(size_t size, bool low)
{
#if defined(_WIN32)
	void* ptr = VirtualAlloc(0, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
#elif defined(__SYMBIAN32__)
	// On Symbian, we will need to create an RChunk and allocate with ->CreateLocalCode(size, size);
	static char *map_hint = 0;
	void* ptr = mmap(map_hint, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, -1, 0);
#else
	static char *map_hint = 0;
#if defined(__x86_64__) && !defined(MAP_32BIT)
	// This OS has no flag to enforce allocation below the 4 GB boundary,
	// but if we hint that we want a low address it is very likely we will
	// get one.
	// An older version of this code used MAP_FIXED, but that has the side
	// effect of discarding already mapped pages that happen to be in the
	// requested virtual memory range (such as the emulated RAM, sometimes).
	if (low && (!map_hint))
		map_hint = (char*)round_page(512*1024*1024); /* 0.5 GB rounded up to the next page */
#endif
	void* ptr = mmap(map_hint, size, PROT_READ | PROT_WRITE | PROT_EXEC,
		MAP_ANON | MAP_PRIVATE
#if defined(__x86_64__) && defined(MAP_32BIT)
		| (low ? MAP_32BIT : 0)
#endif
		, -1, 0);
#endif /* defined(_WIN32) */

	// BUG FIX: on every POSIX system, mmap() reports failure by returning
	// MAP_FAILED ((void*)-1), never NULL.  The old code only normalized
	// MAP_FAILED on FreeBSD, so a failed mmap on Linux/OS X slipped past
	// the NULL check and (void*)-1 was returned as a "valid" pointer.
#if defined(_WIN32)
	if (ptr == NULL)
	{
		PanicAlert("Failed to allocate executable memory");
	}
#else
	if (ptr == MAP_FAILED)
	{
		ptr = NULL;
		PanicAlert("Failed to allocate executable memory");
	}
#if defined(__x86_64__) && !defined(MAP_32BIT)
	else if (low)
	{
		// Bump the hint past this allocation so the next one lands
		// just above it (still likely below 4 GB).
		map_hint += size;
		map_hint = (char*)round_page(map_hint); /* round up to the next page */
	}
#endif
#endif /* defined(_WIN32) */
#if defined(_M_X64)
	if ((u64)ptr >= 0x80000000 && low == true)
		PanicAlert("Executable memory ended up above 2GB!");
#endif
	return ptr;
}
/*
 * Allocate `size` bytes of plain read/write memory pages.
 * Returns the mapping, or NULL after raising a panic on failure.
 */
void* AllocateMemoryPages(size_t size)
{
#ifdef _WIN32
	void* ptr = VirtualAlloc(0, size, MEM_COMMIT, PAGE_READWRITE);
	if (ptr == NULL)
		PanicAlert("Failed to allocate raw memory");
#else
	void* ptr = mmap(0, size, PROT_READ | PROT_WRITE,
#ifndef __SYMBIAN32__
		MAP_ANON |
#endif
		MAP_PRIVATE, -1, 0);
	// BUG FIX: mmap() signals failure with MAP_FAILED ((void*)-1), not
	// NULL, so the old `ptr == NULL` check never fired and a failed
	// allocation was returned to the caller as (void*)-1.
	if (ptr == MAP_FAILED)
	{
		ptr = NULL;
		PanicAlert("Failed to allocate raw memory");
	}
#endif
	return ptr;
}
示例10: zreaderr
out_buf_offs += nwrite;
}
/*
 * Read-error callback for the decompression engine: record EIO and
 * abort the decompressor via longjmp (value 1 = read error).
 * NOTE(review): zerr and zerr_jmp_buf live in the enclosing scope --
 * presumably this is a GCC nested function; confirm in context.
 */
void zreaderr (void)
{
zerr = EIO;
longjmp (zerr_jmp_buf, 1);
}
/*
 * Generic error callback for the decompression engine: record EINVAL
 * and abort via longjmp (value 2 = format error).  The message text
 * is ignored; only the error condition is reported.
 */
void zerror (const char *msg)
{
zerr = EINVAL;
longjmp (zerr_jmp_buf, 2);
}
/* Try to guess a reasonable output buffer size. */
*buf_len = round_page (from->f_size * 2);
zerr = vm_allocate (mach_task_self (), (vm_address_t *)buf, *buf_len, 1);
if (zerr)
return zerr;
mutex_lock (&unzip_lock);
unzip_read = zread;
unzip_write = zwrite;
unzip_read_error = zreaderr;
unzip_error = zerror;
if (! setjmp (zerr_jmp_buf))
{
if (get_method (0) != 0)
/* Not a happy gzip file. */
示例11: _dmamem_alloc_range
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 *
 * On success, fills in `segs` with up to `nsegs` physically contiguous
 * runs (device addresses, translated via the tag's _pa_to_device hook),
 * stores the segment count in *rsegs, and returns 0; otherwise returns
 * an errno from the page allocator.
 */
int
_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	vm_page_t m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	/* Seed the first segment with the first page. */
	lastaddr = segs[curseg].ds_addr =
	    (*t->_pa_to_device)(VM_PAGE_TO_PHYS(m));
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	/*
	 * Walk the remaining pages, merging physically adjacent pages
	 * into the current segment and starting a new segment whenever
	 * there is a gap.
	 */
	for (; m != TAILQ_END(&mlist); m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_dmamem_alloc_range");
		}
#endif
		curaddr = (*t->_pa_to_device)(curaddr);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}
	*rsegs = curseg + 1;

	return (0);
}
示例12: _dmamem_map
/*
 * Common function for mapping DMA-safe memory. May be called by
 * bus-specific DMA memory map functions.
 *
 * Maps the physical segments in `segs` into kernel virtual space and
 * returns the address via *kvap.  A single segment is mapped through
 * the XKPHYS direct-map window (no page tables needed); multiple
 * segments get a fresh KVA range with pmap entries.  Returns 0 or an
 * errno value.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error, pmap_flags;

	/*
	 * Fast path: one segment can be addressed directly through
	 * XKPHYS, choosing the uncached or cached attribute as asked.
	 */
	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_NC);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	/* Multiple segments: carve out KVA and enter mappings manually. */
	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	/* Remember the range so it can be freed on a pmap_enter failure. */
	sva = va;
	ssize = size;
	pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
	if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
		pmap_flags |= PMAP_NOCACHE;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | pmap_flags);
			if (error) {
				/* Unwind: flush and release the whole range. */
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}

			/*
			 * This is redundant with what pmap_enter() did
			 * above, but will take care of forcing other
			 * mappings of the same page (if any) to be
			 * uncached.
			 * If there are no multiple mappings of that
			 * page, this amounts to a noop.
			 */
			if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PV_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
示例13: macho_trim_linkedit
//.........这里部分代码省略.........
}
}
/* was a LINKEDIT segment found? (it damned well better be there!) */
if (linkedit_segment == NULL)
goto finish; /* yowza! */
/* if no DYSYMTAB command was found, just remove the entire LINKEDIT segment */
if (dysymtab == NULL) {
if (swap) macho_unswap(macho);
return (macho_remove_linkedit(macho, amount_trimmed));
}
else {
/* Calculate size of symbol table (including strings):
* # of symbols * sizeof (nlist | nlist_64)...
* + size of string table...
* aligned to 8-byte boundary
*/
u_long symtab_size = (((symtab->nsyms
* (is32bit ? sizeof(struct nlist) : sizeof(struct nlist_64)))
+ symtab->strsize) + 7 ) & ~7;
/* calculate size of relocation entries */
u_long reloc_size = dysymtab->nlocrel * sizeof(struct relocation_info);
/* cache old vmsize */
u_long old_vmsize =
(is32bit
? ((struct segment_command *) linkedit_segment)->vmsize
: ((struct segment_command_64 *) linkedit_segment)->vmsize);
/* calculate new segment size after removal of symtab/stringtab data */
u_long new_vmsize = round_page(reloc_size);
/* If the relocation entries are positioned within the LINKEDIT segment AFTER
* the symbol table, those entries must be moved within the segment. Otherwise,
* the segment can simply be truncated to remove the symbol table.
*/
if (symtab->symoff < dysymtab->locreloff) {
/* move them up within the segment, overwriting the existing symbol table */
memmove(macho + symtab->symoff, macho + dysymtab->locreloff, reloc_size);
/* update the header field */
dysymtab->locreloff = symtab->symoff;
/* clear now-unused data within the segment */
bzero(macho + dysymtab->locreloff + reloc_size, symtab_size);
}
else {
/* symtab/stringtab entries are located after the relocation entries
* in the segment. Therefore, we just have to truncate the segment
* appropriately
*/
bzero(macho + symtab->symoff, symtab_size); /* wipe any existing data */
}
/* update LINKEDIT segment command with new size */
if (is32bit) {
((struct segment_command *) linkedit_segment)->vmsize =
((struct segment_command *) linkedit_segment)->filesize = new_vmsize;
}
else {
((struct segment_command_64 *) linkedit_segment)->vmsize =
((struct segment_command_64 *) linkedit_segment)->filesize = new_vmsize;
}
示例14: cpu_startup
/*
 * Machine-dependent startup for the VAX: announce the CPU, size and
 * allocate kernel tables and the buffer cache, create the exec and
 * (on some boards) physio submaps, then run autoconfiguration hooks.
 * Called once from main() early in boot.
 */
void
cpu_startup()
{
	caddr_t v;
	int sz;
	vaddr_t minaddr, maxaddr;
	extern unsigned int avail_end;
	extern char cpu_model[];

	/*
	 * Initialize error message buffer.
	 */
	initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE));

	/*
	 * Good {morning,afternoon,evening,night}.
	 * Also call CPU init on systems that need that.
	 */
	printf("%s%s [%08X %08X]\n", version, cpu_model, vax_cpudata, vax_siedata);
	if (dep_call->cpu_conf)
		(*dep_call->cpu_conf)();

	printf("real mem = %u (%uMB)\n", avail_end,
	    avail_end/1024/1024);
	physmem = btoc(avail_end);
	mtpr(AST_NO, PR_ASTLVL);
	spl0();

	/*
	 * Find out how much space we need, allocate it, and then give
	 * everything true virtual addresses.
	 */
	/* allocsys(NULL) only sizes; allocsys(v) actually assigns. */
	sz = (int) allocsys((caddr_t)0);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (((unsigned long)allocsys(v) - (unsigned long)v) != sz)
		panic("startup: table size inconsistency");

	/*
	 * Determine how many buffers to allocate.
	 * We allocate bufcachepercent% of memory for buffer space.
	 */
	if (bufpages == 0)
		bufpages = physmem * bufcachepercent / 100;

	/* Restrict to at most 25% filled kvm */
	if (bufpages >
	    (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4)
		bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
		    PAGE_SIZE / 4;

	/*
	 * Allocate a submap for exec arguments. This map effectively limits
	 * the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

#if VAX46 || VAX48 || VAX49 || VAX53
	/*
	 * Allocate a submap for physio. This map effectively limits the
	 * number of processes doing physio at any one time.
	 *
	 * Note that machines on which all mass storage I/O controllers
	 * can perform address translation, do not need this.
	 */
	if (vax_boardtype == VAX_BTYP_46 || vax_boardtype == VAX_BTYP_48 ||
	    vax_boardtype == VAX_BTYP_49 || vax_boardtype == VAX_BTYP_1303)
		phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
		    VM_PHYS_SIZE, 0, FALSE, NULL);
#endif

	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024/1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

#ifdef DDB
	/* Drop into the kernel debugger if requested at boot. */
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/*
	 * Configure the system.
	 */
	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
}
示例15: initarm
//.........这里部分代码省略.........
/*
* Allocate a page for the system page mapped to V0x00000000
* This page will just contain the system vectors and can be
* shared by all processes.
*/
alloc_pages(systempage.pv_pa, 1);
/* Allocate stacks for all modes */
valloc_pages(irqstack, IRQ_STACK_SIZE);
valloc_pages(abtstack, ABT_STACK_SIZE);
valloc_pages(undstack, UND_STACK_SIZE);
valloc_pages(kernelstack, UPAGES);
/* Allocate enough pages for cleaning the Mini-Data cache. */
KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
valloc_pages(minidataclean, 1);
#ifdef VERBOSE_INIT_ARM
printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
irqstack.pv_va);
printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
abtstack.pv_va);
printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
undstack.pv_va);
printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
kernelstack.pv_va);
#endif
/*
* XXX Defer this to later so that we can reclaim the memory
* XXX used by the RedBoot page tables.
*/
alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);
/*
* Ok we have allocated physical pages for the primary kernel
* page tables
*/
#ifdef VERBOSE_INIT_ARM
printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif
/*
* Now we start construction of the L1 page table
* We start by mapping the L2 page tables into the L1.
* This means that we can replace L1 mappings later on if necessary
*/
l1pagetable = kernel_l1pt.pv_pa;
/* Map the L2 pages tables in the L1 page table */
pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
&kernel_pt_table[KERNEL_PT_SYS]);
for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
&kernel_pt_table[KERNEL_PT_KERNEL + loop]);
pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
&kernel_pt_table[KERNEL_PT_IOPXS]);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
&kernel_pt_table[KERNEL_PT_VMDATA + loop]);
/* update the top of the kernel VM */
pmap_curmaxkvaddr =
KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);