This page collects typical usage examples of the ROUNDDOWN function in C++. If you have been wondering what exactly ROUNDDOWN does, how to use it, or what calling it looks like in practice, the hand-picked code examples below may help.
The following shows 15 code examples of the ROUNDDOWN function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
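Before the examples, it helps to see what ROUNDDOWN usually expands to. The sketch below is a representative definition in the style of the JOS/ucore teaching kernels most of these examples come from; it is an illustration, not necessarily the exact macro in every code base shown here (32-bit JOS, for instance, uses uint32_t where uintptr_t appears below).

// Round down to the nearest multiple of n.
#define ROUNDDOWN(a, n)                                     \
({                                                          \
    uintptr_t __a = (uintptr_t) (a);                        \
    (typeof(a)) (__a - __a % (n));                          \
})

// Round up to the nearest multiple of n.
#define ROUNDUP(a, n)                                       \
({                                                          \
    uintptr_t __n = (uintptr_t) (n);                        \
    (typeof(a)) (ROUNDDOWN((uintptr_t) (a) + __n - 1, __n)); \
})

With PGSIZE == 4096, ROUNDDOWN(0x12345, PGSIZE) evaluates to 0x12000 and ROUNDUP(0x12345, PGSIZE) to 0x13000.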
Example 1: dumbfork
envid_t
dumbfork(void)
{
    envid_t envid;
    uint8_t *addr;
    int r;
    extern unsigned char end[];

    // Allocate a new child environment.
    // The kernel will initialize it with a copy of our register state,
    // so that the child will appear to have called sys_exofork() too -
    // except that in the child, this "fake" call to sys_exofork()
    // will return 0 instead of the envid of the child.
    envid = sys_exofork();
    if (envid < 0)
        panic("sys_exofork: %e", envid);
    if (envid == 0) {
        // We're the child.
        // The copied value of the global variable 'thisenv'
        // is no longer valid (it refers to the parent!).
        // Fix it and return 0.
        cprintf("reaching in child....\n");
        thisenv = &envs[ENVX(sys_getenvid())];
        return 0;
    }

    // We're the parent.
    // Eagerly copy our entire address space into the child.
    // This is NOT what you should do in your fork implementation.
    for (addr = (uint8_t*) UTEXT; addr < end; addr += PGSIZE)
        duppage(envid, addr);

    // Also copy the stack we are currently running on.
    duppage(envid, ROUNDDOWN(&addr, PGSIZE));

    // Start the child environment running
    if ((r = sys_env_set_status(envid, ENV_RUNNABLE)) < 0)
        panic("sys_env_set_status: %e", r);

    return envid;
}
Example 2: flush_block
// Flush the contents of the block containing VA out to disk if
// necessary, then clear the PTE_D bit using sys_page_map.
// If the block is not in the block cache or is not dirty, does
// nothing.
// Hint: Use va_is_mapped, va_is_dirty, and ide_write.
// Hint: Use the PTE_SYSCALL constant when calling sys_page_map.
// Hint: Don't forget to round addr down.
void
flush_block(void *addr)
{
    uint64_t blockno = ((uint64_t)addr - DISKMAP) / BLKSIZE;
    int r;

    if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
        panic("flush_block of bad va %08x", addr);

    // LAB 5: Your code here.
    void *dst_addr = (void *)(ROUNDDOWN(addr, PGSIZE));

    // Project addition --> Transparent disk encryption.
    if (va_is_mapped(dst_addr))
    {
        if (va_is_dirty(dst_addr))
        {
            if ((blockno == 0) || (blockno == 2))
                ide_write(blockno * BLKSECTS, dst_addr, BLKSECTS);
            else if (blockno == 1)
            {
                if (!s_encrypted)
                    ide_write(blockno * BLKSECTS, dst_addr, BLKSECTS);
                else
                {
                    r = transparent_disk_encrypt(blockno, dst_addr);
                    if (r)
                        return;
                }
            }
            else
            {
                r = transparent_disk_encrypt(blockno, dst_addr);
                if (r)
                    return;
            }
            sys_page_map(thisenv->env_id, dst_addr, thisenv->env_id, dst_addr, PTE_SYSCALL);
        }
    }
    // panic("flush_block not implemented");
}
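For context, flush_block is normally driven from the file layer. Below is a minimal sketch of such a caller, assuming the usual JOS file-system helpers file_block_walk and diskaddr; it is simplified from the stock fs/fs.c, so treat the details as approximate rather than as the exact code shipped with any of the solutions above.

// Flush every cached data block of 'f', plus the block holding the
// struct File itself and its indirect block (if any).
void
file_flush(struct File *f)
{
    int i;
    uint32_t *pdiskbno;

    for (i = 0; i < (f->f_size + BLKSIZE - 1) / BLKSIZE; i++) {
        if (file_block_walk(f, i, &pdiskbno, 0) < 0 ||
            pdiskbno == NULL || *pdiskbno == 0)
            continue;
        flush_block(diskaddr(*pdiskbno));
    }
    flush_block(f);
    if (f->f_indirect)
        flush_block(diskaddr(f->f_indirect));
}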
Example 3: pgfault
//
// Custom page fault handler - if faulting page is copy-on-write,
// map in our own private writable copy.
//
static void
pgfault(struct UTrapframe *utf)
{
    void *addr = (void *) utf->utf_fault_va;
    uint32_t err = utf->utf_err;
    int r;

    // Check that the faulting access was (1) a write, and (2) to a
    // copy-on-write page.  If not, panic.
    // Hint:
    //   Use the read-only page table mappings at vpt
    //   (see <inc/memlayout.h>).
    //cprintf("pgfault: do page fault here %x\n", utf->utf_eflags);

    // LAB 4: Your code here.
    if ((err & FEC_WR) == 0)
        panic("pgfault: fault is not a write (err: %08x va: %08x ip: %08x)", err, addr, utf->utf_eip);
    if ((vpd[PDX(addr)] & PTE_P) == 0 || (vpt[PGNUM(addr)] & PTE_COW) == 0)
        panic("pgfault: page is not present or is not a copy-on-write page");

    // Allocate a new page, map it at a temporary location (PFTEMP),
    // copy the data from the old page to the new page, then move the new
    // page to the old page's address.
    // Hint:
    //   You should make three system calls.
    //   No need to explicitly delete the old page's mapping.
    // LAB 4: Your code here.
    if ((r = sys_page_alloc(0, (void *)PFTEMP, PTE_U|PTE_P|PTE_W)) < 0)
        panic("pgfault: page allocation failed : %e", r);
    addr = ROUNDDOWN(addr, PGSIZE);
    memmove(PFTEMP, addr, PGSIZE);
    if ((r = sys_page_map(0, PFTEMP, 0, addr, PTE_U|PTE_P|PTE_W)) < 0)
        panic("pgfault: page mapping failed : %e", r);
    if ((r = sys_page_unmap(0, PFTEMP)) < 0)
        panic("pgfault: page unmapping failed : %e", r);
    //cprintf("pgfault: finish\n");

    /* int gaga = 0; */
    /* __asm__ volatile("movl %%esp, %0\n\t" */
    /*                  :"=r"(gaga) */
    /*                  ::); */
    /* cprintf("gaga----------%x\n", gaga); */
    //panic("pgfault not implemented");
}
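A handler like this does nothing until it is installed. In JOS lab 4 that happens at the top of the copy-on-write fork(); the sketch below shows the shape of that caller, with the duppage loop and the child's upcall setup abbreviated, so read it as an outline rather than a complete fork implementation.

envid_t
fork(void)
{
    envid_t envid;

    // Install the handler above; set_pgfault_handler() also allocates the
    // user exception stack the first time it is called.
    set_pgfault_handler(pgfault);

    envid = sys_exofork();
    if (envid < 0)
        panic("sys_exofork: %e", envid);
    if (envid == 0) {
        thisenv = &envs[ENVX(sys_getenvid())];
        return 0;
    }

    // Parent: duppage() every mapped page below USTACKTOP as copy-on-write,
    // give the child its own exception stack and page-fault upcall via
    // sys_env_set_pgfault_upcall(), then mark it ENV_RUNNABLE.
    // ...
    return envid;
}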
Example 4: check_pgfault
// check_pgfault - check correctness of pgfault handler
static void
check_pgfault(void) {
    size_t nr_free_pages_store = nr_free_pages();

    check_mm_struct = mm_create();
    assert(check_mm_struct != NULL);

    struct mm_struct *mm = check_mm_struct;
    pde_t *pgdir = mm->pgdir = boot_pgdir;
    assert(pgdir[0] == 0);

    struct vma_struct *vma = vma_create(0, PTSIZE, VM_WRITE);
    assert(vma != NULL);

    insert_vma_struct(mm, vma);

    uintptr_t addr = 0x100;
    assert(find_vma(mm, addr) == vma);

    int i, sum = 0;
    for (i = 0; i < 100; i ++) {
        *(char *)(addr + i) = i;
        sum += i;
    }
    for (i = 0; i < 100; i ++) {
        sum -= *(char *)(addr + i);
    }
    assert(sum == 0);

    page_remove(pgdir, ROUNDDOWN(addr, PGSIZE));
    free_page(pde2page(pgdir[0]));
    pgdir[0] = 0;

    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;

    assert(nr_free_pages_store == nr_free_pages());

    cprintf("check_pgfault() succeeded!\n");
}
Example 5: sys_copy_mem
int // Only copies 1024 bytes! server and client call
sys_copy_mem(envid_t env_id, void* addr, void* buf, int perm, bool frombuf)
{
    void *pgva = (void *) ROUNDDOWN(addr, PGSIZE);

    if (sys_page_map(env_id, pgva, curenv->env_id, (void *) UTEMP,
                     perm) < 0)
        return -E_INVAL;

    if (frombuf) {
        memmove((void *) (UTEMP + PGOFF(addr)), buf, 1024);
    }
    else {
        memmove(buf, (void *) (UTEMP + PGOFF(addr)), 1024);
    }

    if (sys_page_unmap(curenv->env_id, (void *) UTEMP) < 0)
        return -E_INVAL;

    return 0;
}
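This helper relies on ROUNDDOWN and PGOFF splitting an arbitrary address into a page base and an in-page offset. The standalone program below demonstrates that split with simplified, integer-only macros (the real JOS macros are the ones sketched at the top of this page):

#include <stdio.h>
#include <stdint.h>

#define PGSIZE           4096
#define ROUNDDOWN(a, n)  ((a) - (a) % (n))       /* simplified, integers only */
#define PGOFF(va)        ((va) & (PGSIZE - 1))

int main(void)
{
    uintptr_t addr = 0x00801234;

    /* sys_copy_mem maps ROUNDDOWN(addr, PGSIZE) at UTEMP, then indexes
     * into that mapping with PGOFF(addr). */
    printf("page base = %#lx, offset = %#lx\n",
           (unsigned long) ROUNDDOWN(addr, PGSIZE),   /* 0x801000 */
           (unsigned long) PGOFF(addr));              /* 0x234    */
    return 0;
}

Because the mapping covers a single page and the copy is a fixed 1024 bytes, callers of sys_copy_mem presumably keep PGOFF(addr) + 1024 within PGSIZE or split the transfer across pages.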
Example 6: AlignMemoryRange
UINT32
AlignMemoryRange (
  IN UINT32 Addr,
  IN OUT UINTN *Size,
  OUT UINTN *AddrOffset,
  IN UINTN Alignment
  )
{
  // align range
  UINT32 AddrAligned = ROUNDDOWN(Addr, Alignment);

  // calculate offset
  UINTN Offset = Addr - AddrAligned;
  if (AddrOffset != NULL)
    *AddrOffset = Offset;

  // round and return size
  *Size = ROUNDUP(Offset + (*Size), Alignment);

  return AddrAligned;
}
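A hypothetical call site is sketched below; the address, the length, and the use of EFI_PAGE_SIZE (assumed to be 0x1000 here) are made up for illustration and are not part of the original code.

// Hypothetical caller: align an unaligned register window to page granularity.
VOID
MapDeviceWindowExample (VOID)
{
  UINTN  Size       = 0x1234;   // unaligned length (illustrative)
  UINTN  AddrOffset = 0;
  UINT32 Base;

  Base = AlignMemoryRange (0x80001040, &Size, &AddrOffset, EFI_PAGE_SIZE);
  // Base       == 0x80001000  (address rounded down to the 4 KiB boundary)
  // AddrOffset == 0x40        (original address minus the aligned base)
  // Size       == 0x2000      (offset + length, rounded up to whole pages)
}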
Example 7: flush_block
// Flush the contents of the block containing VA out to disk if
// necessary, then clear the PTE_D bit using sys_page_map.
// If the block is not in the block cache or is not dirty, does
// nothing.
// Hint: Use va_is_mapped, va_is_dirty, and ide_write.
// Hint: Use the PTE_USER constant when calling sys_page_map.
// Hint: Don't forget to round addr down.
void
flush_block(void *addr)
{
    uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE;

    if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
        panic("flush_block of bad va %08x", addr);

    // LAB 5: Your code here.
    int r;
    void *blkva;

    blkva = ROUNDDOWN(addr, BLKSIZE);
    if (va_is_mapped(addr) && va_is_dirty(addr))
    {
        ide_write(blockno * BLKSECTS, blkva, BLKSECTS);
        if ((r = sys_page_map(0, blkva, 0, blkva, PTE_USER)) < 0)
            panic("page mapping failed:%e\n", r);
    }
    //panic("flush_block not implemented");
}
Example 8: flush_block
// Flush the contents of the block containing VA out to disk if
// necessary, then clear the PTE_D bit using sys_page_map.
// If the block is not in the block cache or is not dirty, does
// nothing.
// Hint: Use va_is_mapped, va_is_dirty, and ide_write.
// Hint: Use the PTE_SYSCALL constant when calling sys_page_map.
// Hint: Don't forget to round addr down.
void
flush_block(void *addr)
{
    uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE;

    if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
        panic("flush_block of bad va %08x", addr);

    // LAB 5: Your code here.
    if (!va_is_mapped(addr) || !(uvpt[PGNUM(addr)] & PTE_D)) { /* no need to flush */
        return;
    }

    int r;
    addr = ROUNDDOWN(addr, PGSIZE);
    if ((r = ide_write(blockno * BLKSECTS, addr, (PGSIZE/SECTSIZE))) != 0) {
        panic("in flush_block, ide_write: %e", r);
    }
    if ((r = sys_page_map(0, addr, 0, addr, uvpt[PGNUM(addr)] & PTE_SYSCALL)) < 0)
        panic("in flush_block, sys_page_map: %e", r);
}
Example 9: unmap_range_pud
static void
unmap_range_pud(pgd_t *pgdir, pud_t *pud, uintptr_t base, uintptr_t start, uintptr_t end) {
#if PUXSHIFT == PGXSHIFT
    unmap_range_pmd (pgdir, pud, base, start, end);
#else
    assert(start >= 0 && start < end && end <= PUSIZE);
    size_t off, size;
    uintptr_t la = ROUNDDOWN(start, PMSIZE);
    do {
        off = start - la, size = PMSIZE - off;
        if (size > end - start) {
            size = end - start;
        }
        pud_t *pudp = &pud[PUX(la)];
        if (ptep_present(pudp)) {
            unmap_range_pmd(pgdir, KADDR(PUD_ADDR(*pudp)), base + la, off, off + size);
        }
        start += size, la += PMSIZE;
    } while (start != 0 && start < end);
#endif
}
Example 10: segment_alloc
//
// Allocate len bytes of physical memory for environment env,
// and map it at virtual address va in the environment's address space.
// Does not zero or otherwise initialize the mapped pages in any way.
// Pages should be writable by user and kernel.
// Panic if any allocation attempt fails.
//
static void
segment_alloc(struct Env *e, void *va, size_t len)
{
    // LAB 3: Your code here.
    // (But only if you need it for load_icode.)
    //
    // Hint: It is easier to use segment_alloc if the caller can pass
    //   'va' and 'len' values that are not page-aligned.
    //   You should round va down, and round len up.

    // DEC 7,2010,sunus
    uint32_t align_va = ROUNDDOWN((uint32_t)va, PGSIZE);
    // Round the *end* of the range up (not just len), so that an unaligned
    // (va, len) pair still covers its final page, and map at the
    // rounded-down addresses.
    size_t i, align_len = ROUNDUP((uint32_t)va + len, PGSIZE) - align_va;
    struct Page *pmem;

    for (i = 0; i < align_len; i += PGSIZE)
    {
        assert(page_alloc(&pmem) == 0);
        assert(page_insert(e->env_pgdir, pmem, (void *)(align_va + i), PTE_W|PTE_U) == 0);
    }
    return;
}
Example 11: user_mem_check
//
// Check that an environment is allowed to access the range of memory
// [va, va+len) with permissions 'perm | PTE_P'.
// Normally 'perm' will contain PTE_U at least, but this is not required.
// 'va' and 'len' need not be page-aligned; you must test every page that
// contains any of that range.  You will test either 'len/PGSIZE',
// 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages.
//
// A user program can access a virtual address if (1) the address is below
// ULIM, and (2) the page table gives it permission.  These are exactly
// the tests you should implement here.
//
// If there is an error, set the 'user_mem_check_addr' variable to the first
// erroneous virtual address.
//
// Returns 0 if the user program can access this range of addresses,
// and -E_FAULT otherwise.
//
int
user_mem_check(struct Env *env, const void *va, size_t len, int perm)
{
    // LAB 3: Your code here.
    struct PageInfo *pg;
    pte_t *pte;
    pte_t **pte_store = &pte;
    uintptr_t end = ROUNDUP((uintptr_t)va + len, PGSIZE);
    uintptr_t start_addr = ROUNDDOWN((uintptr_t)va, PGSIZE);

    for (; start_addr < end; start_addr += PGSIZE) {
        pg = page_lookup(env->env_pgdir, (void *)start_addr, pte_store);
        if ((!pg) || ((**pte_store & perm) != perm) ||
            start_addr >= ULIM) {
            user_mem_check_addr = start_addr < (uintptr_t)va ? (uintptr_t)va : start_addr;
            return -E_FAULT;
        }
    }
    return 0;
}
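In the JOS skeleton this check is normally reached through user_mem_assert, which destroys the offending environment instead of returning an error. The stock wrapper looks roughly like the sketch below (quoted from memory, so treat it as an approximation of kern/pmap.c rather than a verbatim copy):

//
// Checks that environment 'env' is allowed to access the range
// of memory [va, va+len) with permissions 'perm | PTE_U | PTE_P'.
// If it can, the function simply returns.
// If it cannot, 'env' is destroyed and, if env is the current
// environment, this function will not return.
//
void
user_mem_assert(struct Env *env, const void *va, size_t len, int perm)
{
    if (user_mem_check(env, va, len, perm | PTE_U) < 0) {
        cprintf("[%08x] user_mem_assert assertion failure for "
                "va %08x\n", env->env_id, user_mem_check_addr);
        env_destroy(env);   // may not return
    }
}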
Example 12: segment_alloc
//
// Allocate len bytes of physical memory for environment env,
// and map it at virtual address va in the environment's address space.
// Does not zero or otherwise initialize the mapped pages in any way.
// Pages should be writable by user and kernel.
// Panic if any allocation attempt fails.
//
static void
segment_alloc(struct Env *e, void *va, size_t len)
{
    // LAB 3: Your code here.
    // (But only if you need it for load_icode.)
    //
    // Hint: It is easier to use segment_alloc if the caller can pass
    //   'va' and 'len' values that are not page-aligned.
    //   You should round va down, and round len up.

    // Round the end of the range up before rounding 'va' down, so that an
    // unaligned (va, len) pair still covers its final page.
    len = ROUNDUP((uintptr_t)va + len, PGSIZE) - ROUNDDOWN((uintptr_t)va, PGSIZE);
    va = ROUNDDOWN(va, PGSIZE);   // round va down

    int i, r;
    pte_t *pgtb;
    struct Page *p = NULL;

    for (i = 0; i < len; i += PGSIZE) {
        if ((r = page_alloc(&p)) < 0)
            panic("segment_alloc : No more free memory.\n");
        page_insert(e->env_pgdir, p, (void *)(va + i), PTE_USER);
    }
    //cprintf("segment_alloc is over.\n");
}
Example 13: pgfault
//
// Custom page fault handler - if faulting page is copy-on-write,
// map in our own private writable copy.
//
static void
pgfault(struct UTrapframe *utf)
{
    void *addr = (void *) utf->utf_fault_va;
    uint32_t err = utf->utf_err;

    // Round the address down to the nearest page
    addr = ROUNDDOWN(addr, PGSIZE);

    // Check that the faulting access was (1) a write, and (2) to a
    // copy-on-write page.  If not, panic.
    // Hint:
    //   Use the read-only page table mappings at uvpt
    //   (see <inc/memlayout.h>).
    //
    // If the write bit of err (FEC_WR, value 2) is not set, the fault was a read.
    if ((err & FEC_WR) == 0)
        panic("fault was not caused by a write\n");
    if ((uvpt[PGNUM(addr)] & PTE_COW) == 0)
        panic("faulting page was not copy-on-write");

    // Allocate a new page, map it at a temporary location (PFTEMP),
    // copy the data from the old page to the new page, then move the new
    // page to the old page's address.
    // Hint:
    //   You should make three system calls.
    //   No need to explicitly delete the old page's mapping.
    //
    // First attempt to allocate a new page at the temporary address.
    // Note: sys_page_alloc/sys_page_map require PTE_P in perm, so it is
    // included here alongside PTE_U|PTE_W.
    if (sys_page_alloc(0, PFTEMP, PTE_P|PTE_U|PTE_W) != 0)
        panic("couldn't allocate a new page for copy-on-write");
    // Next, copy the memory from the old page to the new
    memcpy(PFTEMP, addr, PGSIZE);
    // Finally, remap the new page to the old address.  Don't bother
    // cleaning up PFTEMP.
    if (sys_page_map(0, PFTEMP, 0, addr, PTE_P|PTE_U|PTE_W) != 0)
        panic("couldn't remap the temporary page for copy-on-write");
}
Example 14: core_mmu_populate_user_map
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
                                struct tee_mmu_info *mmu)
{
    struct core_mmu_table_info pg_info;
    struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
    struct pgt *pgt;
    size_t n;
    vaddr_t base;
    vaddr_t end;

    if (!mmu->size)
        return; /* Nothing to map */

    /* Find the last valid entry */
    n = mmu->size;
    while (true) {
        n--;
        if (mmu->table[n].size)
            break;
        if (!n)
            return; /* Nothing to map */
    }

    /*
     * Allocate all page tables in advance.
     */
    base = ROUNDDOWN(mmu->table[0].va, CORE_MMU_PGDIR_SIZE);
    end = ROUNDUP(mmu->table[n].va + mmu->table[n].size,
                  CORE_MMU_PGDIR_SIZE);
    pgt_alloc(pgt_cache, (end - base) >> CORE_MMU_PGDIR_SHIFT);
    pgt = SLIST_FIRST(pgt_cache);

    core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);

    for (n = 0; n < mmu->size; n++) {
        if (!mmu->table[n].size)
            continue;
        set_pg_region(dir_info, mmu->table + n, &pgt, &pg_info);
    }
}
Example 15: sys_page_alloc
// Allocate a page of memory and map it at 'va' with permission
// 'perm' in the address space of 'envid'.
// The page's contents are set to 0.
// If a page is already mapped at 'va', that page is unmapped as a
// side effect.
//
// perm -- PTE_U | PTE_P must be set, PTE_AVAIL | PTE_W may or may not be set,
// but no other bits may be set.
//
// Return 0 on success, < 0 on error.  Errors are:
//   -E_BAD_ENV if environment envid doesn't currently exist,
//     or the caller doesn't have permission to change envid.
//   -E_INVAL if va >= UTOP, or va is not page-aligned.
//   -E_INVAL if perm is inappropriate (see above).
//   -E_NO_MEM if there's no memory to allocate the new page,
//     or to allocate any necessary page tables.
static int
sys_page_alloc(envid_t envid, void *va, int perm)
{
    // Hint: This function is a wrapper around page_alloc() and
    //   page_insert() from kern/pmap.c.
    //   Most of the new code you write should be to check the
    //   parameters for correctness.
    //   If page_insert() fails, remember to free the page you
    //   allocated!

    // LAB 4: Your code here.
    struct Env *task;
    struct Page *page;

    //cprintf("sys_page_alloc: [%08x] .\n", envid);
    if (envid2env(envid, &task, 1) < 0)
        return -E_BAD_ENV;
    // Validate 'va' and 'perm' before allocating, so that a bad argument
    // cannot leak the freshly allocated page.
    if ((unsigned int)va >= UTOP || va != ROUNDDOWN(va, PGSIZE))
        return -E_INVAL;
    // PTE_U and PTE_P must be set
    if (!(perm & PTE_U) || !(perm & PTE_P))
        return -E_INVAL;
    // other bits than PTE_{U,P,W,AVAIL} are set
    if (perm & ((~(PTE_U | PTE_P | PTE_W | PTE_AVAIL)) & 0xfff))
        return -E_INVAL;
    if (page_alloc(&page) < 0)
        return -E_NO_MEM;
    memset(page2kva(page), 0, PGSIZE);
    if (page_insert(task->env_pgdir, page, va, perm) < 0) {
        page_free(page);
        return -E_NO_MEM;
    }
    //cprintf("allocated page: [%08x].\n", page2pa(page));
    return 0;
}
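From user space this kernel routine is reached through the lib/syscall.c wrapper of the same name. A minimal usage sketch for grabbing and releasing a scratch page is shown below; it assumes the standard JOS user library and the UTEMP placeholder address from inc/memlayout.h, and is an illustration rather than code from any of the solutions above.

static void
scratch_page_demo(void)
{
    int r;

    // envid 0 means "the calling environment"; the page comes back zero-filled.
    if ((r = sys_page_alloc(0, (void *) UTEMP, PTE_P | PTE_U | PTE_W)) < 0)
        panic("sys_page_alloc: %e", r);

    memset((void *) UTEMP, 0xAB, PGSIZE);   // scribble on the scratch page

    if ((r = sys_page_unmap(0, (void *) UTEMP)) < 0)
        panic("sys_page_unmap: %e", r);
}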