This article collects typical usage examples of the C++ round_down function. If you have been wondering what round_down actually does, how to call it, or what real-world uses of it look like, the hand-picked code examples below may help.
The following shows 15 code examples of round_down, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better C++ code examples.
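Before the examples themselves, a quick orientation: round_down and its companion round_up are the power-of-two alignment helpers used throughout the Linux kernel, where most of the snippets below come from. The following is a minimal user-space sketch of how they are commonly defined (the kernel's real macros in include/linux/math.h go through a __round_mask helper but compute the same thing; in both cases the second argument must be a power of two):
#include <stdio.h>

/* Sketch only; y must be a power of two, as at every call site below. */
#define round_down(x, y) ((x) & ~((__typeof__(x))((y) - 1)))
#define round_up(x, y)   ((((x) - 1) | ((__typeof__(x))((y) - 1))) + 1)

int main(void)
{
	/* 4097 rounded down/up to a 4096-byte page boundary */
	printf("%lu %lu\n", round_down(4097UL, 4096UL), round_up(4097UL, 4096UL));
	/* prints: 4096 8192 */
	return 0;
}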
Example 1: truncate_pagecache_range
/**
* truncate_pagecache_range - unmap and remove pagecache that is hole-punched
* @inode: inode
* @lstart: offset of beginning of hole
* @lend: offset of last byte of hole
*
* This function should typically be called before the filesystem
* releases resources associated with the freed range (e.g. deallocates
* blocks). This way, pagecache will always stay logically coherent
* with on-disk format, and the filesystem would not have to deal with
* situations such as writepage being called for a page that has already
* had its underlying blocks deallocated.
*/
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
struct address_space *mapping = inode->i_mapping;
loff_t unmap_start = round_up(lstart, PAGE_SIZE);
loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
/*
* This rounding is currently just for example: unmap_mapping_range
* expands its hole outwards, whereas we want it to contract the hole
* inwards. However, existing callers of truncate_pagecache_range are
* doing their own page rounding first. Note that unmap_mapping_range
* allows holelen 0 for all, and we allow lend -1 for end of file.
*/
/*
* Unlike in truncate_pagecache, unmap_mapping_range is called only
* once (before truncating pagecache), and without "even_cows" flag:
* hole-punching should not remove private COWed pages from the hole.
*/
if ((u64)unmap_end > (u64)unmap_start)
unmap_mapping_range(mapping, unmap_start,
1 + unmap_end - unmap_start, 0);
truncate_inode_pages_range(mapping, lstart, lend);
}
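A quick worked example of why the hole is contracted inwards here (hypothetical numbers, 4 KiB pages): punching a hole over bytes 1000..9999 must only unmap pages that lie entirely inside the hole, leaving the partial pages at either end to truncate_inode_pages_range().
#include <stdio.h>

#define round_down(x, y) ((x) & ~((__typeof__(x))((y) - 1)))
#define round_up(x, y)   ((((x) - 1) | ((__typeof__(x))((y) - 1))) + 1)

int main(void)
{
	long long lstart = 1000, lend = 9999;                  /* hole: bytes 1000..9999 */
	long long unmap_start = round_up(lstart, 4096);        /* 4096 */
	long long unmap_end = round_down(1 + lend, 4096) - 1;  /* 8191 */
	/* Only the fully covered pages [4096, 8191] are unmapped. */
	printf("unmap [%lld, %lld]\n", unmap_start, unmap_end);
	return 0;
}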
Example 2: add_identity_map
/*
* Adds the specified range to what will become the new identity mappings.
* Once all ranges have been added, the new mapping is activated by calling
* finalize_identity_maps() below.
*/
void add_identity_map(unsigned long start, unsigned long size)
{
struct x86_mapping_info mapping_info = {
.alloc_pgt_page = alloc_pgt_page,
.context = &pgt_data,
.pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
};
unsigned long end = start + size;
/* Make sure we have a top level page table ready to use. */
if (!level4p)
prepare_level4();
/* Align boundary to 2M. */
start = round_down(start, PMD_SIZE);
end = round_up(end, PMD_SIZE);
if (start >= end)
return;
/* Build the mapping. */
kernel_ident_mapping_init(&mapping_info, (pgd_t *)level4p,
start, end);
}
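Note the contrast with the previous example: here the range is expanded outwards, so the identity mapping covers every 2 MiB page the range touches, and callers can therefore pass unaligned ranges. A rough usage sketch with made-up addresses (not runnable on its own; it only illustrates the add-then-finalize call pattern the comment above describes):
/* Hypothetical ranges, for illustration only. */
add_identity_map(0x01234567, 0x1000);   /* widened to the 2 MiB page [0x01200000, 0x01400000) */
add_identity_map(0x7ffd0000, 0x10000);  /* e.g. a boot_params-sized structure */
finalize_identity_maps();               /* install the new identity mapping */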
Example 3: print_shadow_for_address
static void print_shadow_for_address(const void *addr)
{
int i;
const void *shadow = kasan_mem_to_shadow(addr);
const void *shadow_row;
shadow_row = (void *)round_down((unsigned long)shadow,
SHADOW_BYTES_PER_ROW)
- SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;
pr_err("Memory state around the buggy address:\n");
for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
const void *kaddr = kasan_shadow_to_mem(shadow_row);
char buffer[4 + (BITS_PER_LONG/8)*2];
char shadow_buf[SHADOW_BYTES_PER_ROW];
snprintf(buffer, sizeof(buffer),
(i == 0) ? ">%p: " : " %p: ", kaddr);
/*
* We should not pass a shadow pointer to generic
* function, because generic functions may try to
* access kasan mapping for the passed address.
*/
memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
print_hex_dump(KERN_ERR, buffer,
DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
shadow_buf, SHADOW_BYTES_PER_ROW, 0);
if (row_is_guilty(shadow_row, shadow))
pr_err("%*c\n",
shadow_pointer_offset(shadow_row, shadow),
'^');
shadow_row += SHADOW_BYTES_PER_ROW;
}
}
Example 4: chacha_stream_xor
static int chacha_stream_xor(struct skcipher_request *req,
struct chacha_ctx *ctx, u8 *iv)
{
struct skcipher_walk walk;
u32 state[16];
int err;
err = skcipher_walk_virt(&walk, req, false);
crypto_chacha_init(state, ctx, iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
if (nbytes < walk.total)
nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
nbytes, ctx->nrounds);
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
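The round_down here keeps each pass of the walk working on whole ChaCha blocks: every chunk except the last is trimmed to a multiple of CHACHA_BLOCK_SIZE (64 bytes), and the trimmed-off remainder is handed back via skcipher_walk_done() to be processed on a later iteration. A small standalone illustration of that bookkeeping, with assumed numbers:
#include <stdio.h>

#define CHACHA_BLOCK_SIZE 64
#define round_down(x, y) ((x) & ~((__typeof__(x))((y) - 1)))

int main(void)
{
	unsigned int total = 4096, avail = 1000;  /* bytes mapped by this walk step */
	unsigned int nbytes = avail;
	if (nbytes < total)                       /* not the final chunk */
		nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
	/* process 960 bytes now; 40 bytes carry over to the next step */
	printf("process %u, carry over %u\n", nbytes, avail - nbytes);
	return 0;
}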
Example 5: _find_next_bit
/*
* This is a common helper function for find_next_bit and
* find_next_zero_bit. The difference is the "invert" argument, which
* is XORed with each fetched word before searching it for one bits.
*/
static unsigned long _find_next_bit(const unsigned long *addr,
unsigned long nbits, unsigned long start, unsigned long invert)
{
unsigned long tmp;
if (!nbits || start >= nbits)
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
/* Handle 1st word. */
tmp &= BITMAP_FIRST_WORD_MASK(start);
start = round_down(start, BITS_PER_LONG);
while (!tmp) {
start += BITS_PER_LONG;
if (start >= nbits)
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
}
return min(start + __ffs(tmp), nbits);
}
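The mask-then-round pattern is the heart of this helper: the first fetched word is masked so bits below start are ignored, and start is then rounded down to that word's base so that start + __ffs(tmp) yields an absolute bit index. A simplified user-space version (without the invert argument, and assuming BITMAP_FIRST_WORD_MASK keeps the bits at and above start, which matches the kernel's definition):
#include <stdio.h>

#define BITS_PER_LONG ((int)(8 * sizeof(unsigned long)))
/* Keeps bit (start % BITS_PER_LONG) and everything above it in a word. */
#define FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
#define round_down(x, y) ((x) & ~((__typeof__(x))((y) - 1)))

static unsigned long find_next_set_bit(const unsigned long *addr,
		unsigned long nbits, unsigned long start)
{
	unsigned long tmp;
	if (!nbits || start >= nbits)
		return nbits;
	tmp = addr[start / BITS_PER_LONG] & FIRST_WORD_MASK(start);
	/* Align start to the word just fetched, so start + ctz(tmp) is absolute. */
	start = round_down(start, BITS_PER_LONG);
	while (!tmp) {
		start += BITS_PER_LONG;
		if (start >= nbits)
			return nbits;
		tmp = addr[start / BITS_PER_LONG];
	}
	start += (unsigned long)__builtin_ctzl(tmp);  /* __ffs() equivalent */
	return start < nbits ? start : nbits;
}

int main(void)
{
	unsigned long map[2] = { 0, 1UL << 5 };  /* only bit 69 set on a 64-bit machine */
	printf("%lu\n", find_next_set_bit(map, 128, 3));  /* prints 69 */
	return 0;
}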
Example 6: page_to_pfn
struct page_ext *lookup_page_ext(struct page *page)
{
unsigned long pfn = page_to_pfn(page);
unsigned long index;
struct page_ext *base;
base = NODE_DATA(page_to_nid(page))->node_page_ext;
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
/*
* The sanity checks the page allocator does upon freeing a
* page can reach here before the page_ext arrays are
* allocated when feeding a range of pages to the allocator
* for the first time during bootup or memory hotplug.
*
* This check is also necessary for ensuring page poisoning
* works as expected when enabled
*/
if (unlikely(!base))
return NULL;
#endif
index = pfn - round_down(node_start_pfn(page_to_nid(page)),
MAX_ORDER_NR_PAGES);
return get_entry(base, index);
}
Example 7: ccu_data_offsets_valid
static bool ccu_data_offsets_valid(struct ccu_data *ccu)
{
struct ccu_policy *ccu_policy = &ccu->policy;
u32 limit;
limit = ccu->range - sizeof(u32);
limit = round_down(limit, sizeof(u32));
if (ccu_policy_exists(ccu_policy)) {
if (ccu_policy->enable.offset > limit) {
pr_err("%s: bad policy enable offset for %s "
"(%u > %u)\n", __func__,
ccu->name, ccu_policy->enable.offset, limit);
return false;
}
if (ccu_policy->control.offset > limit) {
pr_err("%s: bad policy control offset for %s "
"(%u > %u)\n", __func__,
ccu->name, ccu_policy->control.offset, limit);
return false;
}
}
return true;
}
Example 8: cxl_afu_read_err_buffer
/*
* afu_eb_read:
* Called from sysfs and reads the afu error info buffer. The h/w only supports
* 4/8-byte aligned access. So in case the requested offset/count aren't 8-byte
* aligned, the function uses a bounce buffer which can be at most PAGE_SIZE.
*/
ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
loff_t off, size_t count)
{
loff_t aligned_start, aligned_end;
size_t aligned_length;
void *tbuf;
const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset;
if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
return 0;
/* calculate aligned read window */
count = min((size_t)(afu->eb_len - off), count);
aligned_start = round_down(off, 8);
aligned_end = round_up(off + count, 8);
aligned_length = aligned_end - aligned_start;
/* max we can copy in one read is PAGE_SIZE */
if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
aligned_length = ERR_BUFF_MAX_COPY_SIZE;
count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
}
/* use bounce buffer for copy */
tbuf = (void *)__get_free_page(GFP_TEMPORARY);
if (!tbuf)
return -ENOMEM;
/* perform aligned read from the mmio region */
memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
memcpy(buf, tbuf + (off & 0x7), count);
free_page((unsigned long)tbuf);
return count;
}
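The round_down/round_up pair builds the smallest 8-byte-aligned window containing the requested range, so the MMIO access stays aligned and the caller's exact bytes are then picked out of the bounce buffer. A worked example with assumed values (off = 5, count = 10):
#include <stdio.h>

#define round_down(x, y) ((x) & ~((__typeof__(x))((y) - 1)))
#define round_up(x, y)   ((((x) - 1) | ((__typeof__(x))((y) - 1))) + 1)

int main(void)
{
	long long off = 5, count = 10;
	long long aligned_start = round_down(off, 8);       /* 0  */
	long long aligned_end = round_up(off + count, 8);   /* 16 */
	/* Read 16 aligned bytes, then copy 10 bytes starting at offset (off & 0x7) = 5. */
	printf("window [%lld, %lld), copy %lld bytes from offset %lld\n",
	       aligned_start, aligned_end, count, off & 0x7);
	return 0;
}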
Example 9: olpc_ofw_detect
/* Detect OLPC (One Laptop per Child) */
void __init olpc_ofw_detect(void)
{
struct olpc_ofw_header *hdr = &boot_params.olpc_ofw_header;
unsigned long start;
/* ensure OFW booted us by checking for "OFW " string */
if (hdr->ofw_magic != OLPC_OFW_SIG)
return;
olpc_ofw_cif = (int (*)(int *))hdr->cif_handler;
if ((unsigned long)olpc_ofw_cif < OFW_MIN) {
printk(KERN_ERR "OFW detected, but cif has invalid address 0x%lx - disabling.\n",
(unsigned long)olpc_ofw_cif);
olpc_ofw_cif = NULL;
return;
}
/* determine where OFW starts in memory */
start = round_down((unsigned long)olpc_ofw_cif, OFW_BOUND);
printk(KERN_INFO "OFW detected in memory, cif @ 0x%lx (reserving top %ldMB)\n",
(unsigned long)olpc_ofw_cif, (-start) >> 20);
reserve_top_address(-start);
}
Example 10: kasan_init
void __init kasan_init(void)
{
u64 kimg_shadow_start, kimg_shadow_end;
u64 mod_shadow_start, mod_shadow_end;
struct memblock_region *reg;
int i;
kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
/*
* We are going to perform proper setup of shadow memory.
* First we should unmap the early shadow (the clear_pgds() call below).
* However, instrumented code can't execute without shadow memory.
* tmp_pg_dir is used to keep the early shadow mapped until the full
* shadow setup is finished.
*/
memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
dsb(ishst);
cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
pfn_to_nid(virt_to_pfn(_text)));
/*
* vmemmap_populate() has populated the shadow region that covers the
* kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
* the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
* kasan_populate_zero_shadow() from replacing the page table entries
* (PMD or PTE) at the edges of the shadow region for the kernel
* image.
*/
kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
(void *)mod_shadow_start);
kasan_populate_zero_shadow((void *)kimg_shadow_end,
kasan_mem_to_shadow((void *)PAGE_OFFSET));
if (kimg_shadow_start > mod_shadow_end)
kasan_populate_zero_shadow((void *)mod_shadow_end,
(void *)kimg_shadow_start);
for_each_memblock(memory, reg) {
void *start = (void *)__phys_to_virt(reg->base);
void *end = (void *)__phys_to_virt(reg->base + reg->size);
if (start >= end)
break;
/*
* end + 1 here is intentional. We check several shadow bytes in
* advance to slightly speed up fastpath. In some rare cases
* we could cross boundary of mapped shadow, so we just map
* some more here.
*/
vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
(unsigned long)kasan_mem_to_shadow(end) + 1,
pfn_to_nid(virt_to_pfn(start)));
}
Example 11: LOG_TRACE
template < typename IN_PORT_TYPE > int file_descriptor_sink_i_base::_forecastAndProcess( bool &eos, typename std::vector< gr_istream< IN_PORT_TYPE > > &istreams )
{
typedef typename std::vector< gr_istream< IN_PORT_TYPE > > _IStreamList;
typename _IStreamList::iterator istream = istreams.begin();
int nout = 0;
bool dataReady = false;
if ( !eos ) {
uint64_t max_items_avail = 0;
for ( int idx=0 ; istream != istreams.end() && serviceThread->threadRunning() ; idx++, istream++ ) {
LOG_TRACE( file_descriptor_sink_i_base, "GET MAX ITEMS: STREAM:" << idx << " NITEMS/SCALARS:"
<< istream->nitems() << "/" << istream->_data.size() );
max_items_avail = std::max( istream->nitems(), max_items_avail );
}
//
// calc number of output items to produce
//
noutput_items = (int) (max_items_avail * gr_sptr->relative_rate ());
noutput_items = round_down (noutput_items, gr_sptr->output_multiple ());
if ( noutput_items <= 0 ) {
LOG_TRACE( file_descriptor_sink_i_base, "DATA CHECK - MAX ITEMS NOUTPUT/MAX_ITEMS:" << noutput_items << "/" << max_items_avail);
return -1;
}
if ( gr_sptr->fixed_rate() ) {
istream = istreams.begin();
for ( int i=0; istream != istreams.end(); i++, istream++ ) {
int t_noutput_items = gr_sptr->fixed_rate_ninput_to_noutput( istream->nitems() );
if ( gr_sptr->output_multiple_set() ) {
t_noutput_items = round_up(t_noutput_items, gr_sptr->output_multiple());
}
if ( t_noutput_items > 0 ) {
if ( noutput_items == 0 ) {
noutput_items = t_noutput_items;
}
if ( t_noutput_items <= noutput_items ) {
noutput_items = t_noutput_items;
}
}
}
LOG_TRACE( file_descriptor_sink_i_base, " FIXED FORECAST NOUTPUT/output_multiple == "
<< noutput_items << "/" << gr_sptr->output_multiple());
}
//
// ask the block how much input they need to produce noutput_items...
// if enough data is available to process then set the dataReady flag
//
int32_t outMultiple = gr_sptr->output_multiple();
while ( !dataReady && noutput_items >= outMultiple ) {
//
// ask the block how much input they need to produce noutput_items...
//
gr_sptr->forecast(noutput_items, _ninput_items_required);
LOG_TRACE( file_descriptor_sink_i_base, "--> FORECAST IN/OUT " << _ninput_items_required[0] << "/" << noutput_items );
istream = istreams.begin();
uint32_t dr_cnt=0;
for ( int idx=0 ; noutput_items > 0 && istream != istreams.end(); idx++, istream++ ) {
// check if buffer has enough elements
_input_ready[idx] = false;
if ( istream->nitems() >= (uint64_t)_ninput_items_required[idx] ) {
_input_ready[idx] = true;
dr_cnt++;
}
LOG_TRACE( file_descriptor_sink_i_base, "ISTREAM DATACHECK NELMS/NITEMS/REQ/READY:" <<
istream->nelems() << "/" << istream->nitems() << "/" <<
_ninput_items_required[idx] << "/" << _input_ready[idx]);
}
if ( dr_cnt < istreams.size() ) {
if ( outMultiple > 1 ) {
noutput_items -= outMultiple;
} else {
noutput_items /= 2;
}
} else {
dataReady = true;
}
LOG_TRACE( file_descriptor_sink_i_base, " TRIM FORECAST NOUTPUT/READY " << noutput_items << "/" << dataReady );
}
// check if data is ready...
if ( !dataReady ) {
LOG_TRACE( file_descriptor_sink_i_base, "DATA CHECK - NOT ENOUGH DATA AVAIL/REQ:"
<< _istreams[0].nitems() << "/" << _ninput_items_required[0] );
return -1;
}
// reset looping variables
int ritems = 0;
int nitems = 0;
// reset caching vectors
_output_items.clear();
_input_items.clear();
_ninput_items.clear();
//......... part of the code is omitted here .........
Example 12: mx_cma_region_reserve
//......... part of the code is omitted here .........
reg->alignment = PAGE_SIZE;
}
if (reg->start) {
if (!memblock_is_region_reserved(reg->start, reg->size)
&& (memblock_reserve(reg->start, reg->size) == 0))
reg->reserved = 1;
else
pr_err("S5P/CMA: Failed to reserve '%s'\n",
reg->name);
continue;
}
paddr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE,
reg->size, reg->alignment);
if (paddr != MEMBLOCK_ERROR) {
if (memblock_reserve(paddr, reg->size)) {
pr_err("S5P/CMA: Failed to reserve '%s'\n",
reg->name);
continue;
}
reg->start = paddr;
reg->reserved = 1;
pr_info("name = %s, paddr = 0x%x, size = %d\n", reg->name, paddr, reg->size);
} else {
pr_err("S5P/CMA: No free space in memory for '%s'\n",
reg->name);
}
if (cma_early_region_register(reg)) {
pr_err("S5P/CMA: Failed to register '%s'\n",
reg->name);
memblock_free(reg->start, reg->size);
} else {
paddr_last = min(paddr, paddr_last);
}
}
if (regions_secure && regions_secure->size) {
size_t size_secure = 0;
size_t align_secure, size_region2, aug_size, order_region2;
for (reg = regions_secure; reg->size != 0; reg++)
size_secure += reg->size;
reg--;
/* Entire secure regions will be merged into 2
* consecutive regions. */
align_secure = 1 <<
(get_order((size_secure + 1) / 2) + PAGE_SHIFT);
/* Calculation of a subregion size */
size_region2 = size_secure - align_secure;
order_region2 = get_order(size_region2) + PAGE_SHIFT;
if (order_region2 < 20)
order_region2 = 20; /* 1MB */
order_region2 -= 3; /* divide by 8 */
size_region2 = ALIGN(size_region2, 1 << order_region2);
aug_size = align_secure + size_region2 - size_secure;
if (aug_size > 0)
reg->size += aug_size;
size_secure = ALIGN(size_secure, align_secure);
if (paddr_last >= memblock.current_limit) {
paddr_last = memblock_find_in_range(0,
MEMBLOCK_ALLOC_ACCESSIBLE,
size_secure, reg->alignment);
} else {
paddr_last -= size_secure;
paddr_last = round_down(paddr_last, align_secure);
}
if (paddr_last) {
while (memblock_reserve(paddr_last, size_secure))
paddr_last -= align_secure;
do {
reg->start = paddr_last;
reg->reserved = 1;
paddr_last += reg->size;
if (cma_early_region_register(reg)) {
memblock_free(reg->start, reg->size);
pr_err("S5P/CMA: "
"Failed to register secure region "
"'%s'\n", reg->name);
} else {
size_secure -= reg->size;
}
} while (reg-- != regions_secure);
if (size_secure > 0)
memblock_free(paddr_last, size_secure);
} else {
pr_err("S5P/CMA: Failed to reserve secure regions\n");
}
}
}
Example 13: _rtld_map_object
//......... part of the code is omitted here .........
dbg(("%s: PT_DYNAMIC %p", obj->path, obj->dynamic));
break;
}
++phdr;
}
phdr = (Elf_Phdr *) ((caddr_t)ehdr + ehdr->e_phoff);
obj->entry = (void *)(uintptr_t)ehdr->e_entry;
if (!obj->dynamic) {
_rtld_error("%s: not dynamically linked", path);
goto bad;
}
if (nsegs != 2) {
_rtld_error("%s: wrong number of segments (%d != 2)", path,
nsegs);
goto bad;
}
/*
* Map the entire address space of the object as a file
* region to stake out our contiguous region and establish a
* base for relocation. We use a file mapping so that
* the kernel will give us whatever alignment is appropriate
* for the platform we're running on.
*
* We map it using the text protection, map the data segment
* into the right place, then map an anon segment for the bss
* and unmap the gaps left by padding to alignment.
*/
#ifdef MAP_ALIGNED
base_alignment = segs[0]->p_align;
#endif
base_offset = round_down(segs[0]->p_offset);
base_vaddr = round_down(segs[0]->p_vaddr);
base_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_memsz);
text_vlimit = round_up(segs[0]->p_vaddr + segs[0]->p_memsz);
text_flags = protflags(segs[0]->p_flags);
data_offset = round_down(segs[1]->p_offset);
data_vaddr = round_down(segs[1]->p_vaddr);
data_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_filesz);
data_flags = protflags(segs[1]->p_flags);
#ifdef RTLD_LOADER
clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
#endif
obj->textsize = text_vlimit - base_vaddr;
obj->vaddrbase = base_vaddr;
obj->isdynamic = ehdr->e_type == ET_DYN;
obj->phdr_loaded = false;
for (i = 0; i < nsegs; i++) {
if (phdr_vaddr != EA_UNDEF &&
segs[i]->p_vaddr <= phdr_vaddr &&
segs[i]->p_memsz >= phdr_memsz) {
obj->phdr_loaded = true;
break;
}
if (segs[i]->p_offset <= ehdr->e_phoff &&
segs[i]->p_memsz >= phsize) {
phdr_vaddr = segs[i]->p_vaddr + ehdr->e_phoff;
phdr_memsz = phsize;
obj->phdr_loaded = true;
break;
}
}
Example 14: init_stack
// Set up the initial stack page for the new child process with envid 'child'
// using the arguments array pointed to by 'argv',
// which is a null-terminated array of pointers to '\0'-terminated strings.
//
// On success, returns 0 and sets *init_esp
// to the initial stack pointer with which the child should start.
// Returns < 0 on failure.
static int
init_stack(envid_t child, const char **argv, uintptr_t *init_esp)
{
size_t string_size;
int argc, i, r;
char *string_store;
uintptr_t *argv_store;
// Count the number of arguments (argc)
// and the total amount of space needed for strings (string_size).
string_size = 0;
for (argc = 0; argv[argc] != 0; argc++)
string_size += strlen(argv[argc]) + 1;
// Determine where to place the strings and the argv array.
// We set up the 'string_store' and 'argv_store' pointers to point
// into the temporary page at UTEMP.
// Later, we'll remap that page into the child environment
// at (USTACKTOP - PGSIZE).
// strings is the topmost thing on the stack.
string_store = (char *) UTEMP + PGSIZE - string_size;
// argv is below that. There's one argument pointer per argument, plus
// a null pointer.
argv_store = (uintptr_t*) (round_down(string_store, 4) - 4 * (argc + 1));
// Make sure that argv, strings, and the 2 words that hold 'argc'
// and 'argv' themselves will all fit in a single stack page.
if ((void*) (argv_store - 2) < (void*) UTEMP)
return -E_NO_MEM;
// Allocate a page at UTEMP.
if ((r = sys_page_alloc(0, (void*) UTEMP, PTE_P|PTE_U|PTE_W)) < 0)
return r;
// Replace this with your code to:
//
// * Initialize 'argv_store[i]' to point to argument string i,
// for all 0 <= i < argc.
// Also, copy the argument strings from 'argv' into the
// newly-allocated stack page.
// Hint: Copy the argument strings into string_store.
// Hint: Make sure that argv_store uses addresses valid in the
// CHILD'S environment! The string_store variable itself
// points into page UTEMP, but the child environment will have
// this page mapped at USTACKTOP - PGSIZE. Check out the
// utemp_addr_to_ustack_addr function defined above.
//
for(i = 0; i < argc; i++){
argv_store[i] = UTEMP2USTACK(string_store);
strcpy(string_store,argv[i]);
string_store += strlen(argv[i])+1;
}
// * Set 'argv_store[argc]' to 0 to null-terminate the args array.
//
argv_store[argc] = 0;
// * Push two more words onto the child's stack below 'args',
// containing the argc and argv parameters to be passed
// to the child's umain() function.
// argv should be below argc on the stack.
// (Again, argv should use an address valid in the child's
// environment.)
//
argv_store[-1] = UTEMP2USTACK(argv_store);
argv_store[-2] = argc;
// * Set *init_esp to the initial stack pointer for the child,
// (Again, use an address valid in the child's environment.)
//
// LAB 4: Your code here.
//*init_esp = USTACKTOP; // Change this!
*init_esp = UTEMP2USTACK(argv_store-2);
// After completing the stack, map it into the child's address space
// and unmap it from ours!
if ((r = sys_page_map(0, (void*) UTEMP, child, (void*) (USTACKTOP - PGSIZE), PTE_P | PTE_U | PTE_W)) < 0)
goto error;
if ((r = sys_page_unmap(0, (void*) UTEMP)) < 0)
goto error;
return 0;
error:
sys_page_unmap(0, (void*) UTEMP);
return r;
}
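For reference, UTEMP2USTACK (the utemp_addr_to_ustack_addr helper mentioned in the comments) is not shown in this excerpt; it translates an address inside the temporary UTEMP page to where that page will be remapped in the child, namely USTACKTOP - PGSIZE. A standalone sketch of the resulting layout, using assumed JOS-like constants (all values here are illustrative, and the 4-byte argv slots reflect JOS being a 32-bit system):
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Assumed constants and helper, for illustration only. */
#define PGSIZE     4096
#define UTEMP      0x00400000u
#define USTACKTOP  0xeebfe000u
#define UTEMP2USTACK(addr) ((uintptr_t)(addr) + (USTACKTOP - PGSIZE) - UTEMP)
#define round_down(x, y) ((x) & ~((__typeof__(x))((y) - 1)))

int main(void)
{
	const char *argv[] = { "ls", "-l", NULL };
	size_t string_size = 0;
	int argc;
	for (argc = 0; argv[argc]; argc++)
		string_size += strlen(argv[argc]) + 1;
	/* Strings sit at the top of the stack page, argv pointers just below,
	 * then argv and argc; *init_esp ends up at argv_store - 8. */
	uintptr_t string_store = UTEMP + PGSIZE - string_size;
	uintptr_t argv_store = round_down(string_store, 4) - 4 * (argc + 1);
	printf("strings at 0x%lx, argv at 0x%lx, esp = 0x%lx (child addresses)\n",
	       (unsigned long)UTEMP2USTACK(string_store),
	       (unsigned long)UTEMP2USTACK(argv_store),
	       (unsigned long)UTEMP2USTACK(argv_store - 8));
	return 0;
}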
Example 15: s5p_cma_region_reserve
//......... part of the code is omitted here .........
size_t aug_size;
align_secure = 1 <<
(get_order((size_secure + 1) / 2) + PAGE_SHIFT);
/* Calculation of a subregion size */
size_region2 = size_secure - align_secure;
order_region2 = get_order(size_region2) + PAGE_SHIFT;
if (order_region2 < 20)
order_region2 = 20; /* 1MB */
order_region2 -= 3; /* divide by 8 */
size_region2 = ALIGN(size_region2, 1 << order_region2);
aug_size = align_secure + size_region2 - size_secure;
if (aug_size > 0) {
reg->size += aug_size;
size_secure += aug_size;
pr_debug("S5P/CMA: "
"Augmented size of '%s' by %#x B.\n",
reg->name, aug_size);
}
} else
size_secure = ALIGN(size_secure, align_secure);
pr_info("S5P/CMA: "
"Reserving %#x for secure region aligned by %#x.\n",
size_secure, align_secure);
if (paddr_last >= memblock.current_limit) {
paddr_last = memblock_find_in_range(0,
MEMBLOCK_ALLOC_ACCESSIBLE,
size_secure, reg->alignment);
} else {
paddr_last -= size_secure;
paddr_last = round_down(paddr_last, align_secure);
}
if (paddr_last) {
pr_info("S5P/CMA: "
"Reserved 0x%08x/0x%08x for 'secure_region'\n",
paddr_last, size_secure);
#ifndef CONFIG_DMA_CMA
while (memblock_reserve(paddr_last, size_secure))
paddr_last -= align_secure;
#else
if (!reg->start) {
while (memblock_reserve(paddr_last,
size_secure))
paddr_last -= align_secure;
}
#endif
do {
#ifndef CONFIG_DMA_CMA
reg->start = paddr_last;
reg->reserved = 1;
paddr_last += reg->size;
#else
if (reg->start) {
reg->reserved = 1;
#if defined(CONFIG_USE_MFC_CMA) && defined(CONFIG_MACH_M0)
if (reg->start == 0x5C100000) {
if (memblock_reserve(0x5C100000,
0x700000))
panic("memblock\n");
if (memblock_reserve(0x5F000000,
0x200000))
panic("memblock\n");