This article collects typical usage examples of the ROUNDUP function (in practice usually a macro) in C and C++. If you are wondering what ROUNDUP does, how it is used, or what real-world ROUNDUP code looks like, the hand-picked examples below may help.
In total, 15 code examples of ROUNDUP are shown below; by default they are ordered by popularity.
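Note that ROUNDUP is not part of the C or C++ standard library: every project quoted below defines its own macro, and the argument order, argument count, and alignment requirements differ from one code base to the next (some projects use a one-argument form with a fixed alignment, as in Examples 2, 7 and 9 below). As a rough orientation, here is a minimal sketch of the most common two-argument form, assuming the alignment is a power of two; the definitions and test values are illustrative only, not taken from any of the projects quoted here.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical reference definitions -- real projects differ in detail. */
#define ROUNDDOWN(x, align) ((uintptr_t)(x) & ~((uintptr_t)(align) - 1))
#define ROUNDUP(x, align)   ROUNDDOWN((uintptr_t)(x) + (uintptr_t)(align) - 1, (align))

int main(void)
{
    /* Round a 4097-byte length to 4 KiB page boundaries. */
    printf("%lu\n", (unsigned long)ROUNDUP(4097, 4096));   /* prints 8192 */
    printf("%lu\n", (unsigned long)ROUNDDOWN(4097, 4096)); /* prints 4096 */
    return 0;
}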
Example 1: do_mprotect
int do_mprotect(void *addr, size_t len, int prot)
{
/*
return 0;
*/
struct mm_struct *mm = current->mm;
assert(mm != NULL);
if (len == 0) {
return -E_INVAL;
}
uintptr_t start = ROUNDDOWN(addr, PGSIZE);
uintptr_t end = ROUNDUP(addr + len, PGSIZE);
int ret = -E_INVAL;
lock_mm(mm);
while (1) {
struct vma_struct *vma = find_vma(mm, start);
uintptr_t last_end;
if (vma != NULL) {
last_end = vma->vm_end;
}
if (vma == NULL) {
goto out;
} else if (vma->vm_start == start && vma->vm_end == end) {
if (prot & PROT_WRITE) {
vma->vm_flags |= VM_WRITE;
} else {
vma->vm_flags &= ~VM_WRITE;
}
} else {
uintptr_t this_end =
(end <= vma->vm_end) ? end : vma->vm_end;
uintptr_t this_start =
(start >= vma->vm_start) ? start : vma->vm_start;
struct mapped_file_struct mfile = vma->mfile;
mfile.offset += this_start - vma->vm_start;
uint32_t flags = vma->vm_flags;
if ((ret =
mm_unmap_keep_pages(mm, this_start,
this_end - this_start)) != 0) {
goto out;
}
if (prot & PROT_WRITE) {
flags |= VM_WRITE;
} else {
flags &= ~VM_WRITE;
}
if ((ret =
mm_map(mm, this_start, this_end - this_start,
flags, &vma)) != 0) {
goto out;
}
vma->mfile = mfile;
if (vma->mfile.file != NULL) {
filemap_acquire(mfile.file);
}
}
ret = 0;
if (end <= last_end)
break;
start = last_end;
}
out:
unlock_mm(mm);
return ret;
}
Example 2: sizeof
// HEADSIGNATURE + BUFFERSIZE + BUFFERID + PFILENAME + LINENUMBER + BUFFER + TAILSIGNATURE
//All the header info must be ULONG-sized,
//so that the user buffer falls on a word boundary.
//The tail must be a byte: if it were a ULONG it would
//also require a word boundary, but the user's buffer could
//be an odd number of bytes, so instead of rounding up, just use a BYTE.
const ULONG HEADSIZE = sizeof(ULONG); //HEADSIGNATURE
const SIZE_T LENGTHSIZE = sizeof(SIZE_T); //BUFFERSIZE
const ULONG IDSIZE = sizeof(ULONG); //BUFFERID
const ULONG FILENAMESIZE = sizeof(WCHAR*); //PFILENAME
const ULONG LINENUMBERSIZE = sizeof(ULONG); //LINENUMBER
const ULONG TAILSIZE = sizeof(BYTE); //TAILSIGNATURE
const ULONG HEADERSIZE = (ULONG)ROUNDUP(HEADSIZE + LENGTHSIZE + IDSIZE + FILENAMESIZE + LINENUMBERSIZE);
const ULONG FOOTERSIZE = TAILSIZE;
const BYTE HEADSIGN = '{';
const BYTE TAILSIGN = '}';
const BYTE ALLOCSIGN = '$';
const BYTE FREESIGN = 'Z';
#define HEAD_OFFSET(pActual) ((BYTE*)pActual)
#define TAIL_OFFSET(pActual) (USERS_OFFSET(pActual)+BUFFER_LENGTH(pActual))
#define USERS_OFFSET(pActual) (HEAD_OFFSET(pActual) + HEADERSIZE)
#define HEADER_OFFSET(pRequest) ((BYTE*)(pRequest) - HEADERSIZE)
#define LENGTH_OFFSET(pActual) (HEAD_OFFSET(pActual) + HEADSIZE)
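To make the role of ROUNDUP here concrete: the header is padded to an aligned size so that the pointer returned to the caller lands on a proper boundary, while the single tail byte needs no padding at all. A minimal sketch of how a debug allocation could be laid out with these sizes follows; DebugAlloc is hypothetical and not part of the original source, and it assumes the Windows typedefs (BYTE, ULONG, SIZE_T) together with the constants and macros above.

#include <windows.h>
#include <stdlib.h>

BYTE* DebugAlloc(SIZE_T cb)
{
    /* One block: [ header | user buffer of cb bytes | tail byte ]. */
    BYTE* pActual = (BYTE*)malloc(HEADERSIZE + cb + FOOTERSIZE);
    if (pActual == NULL)
        return NULL;
    *(ULONG*)HEAD_OFFSET(pActual)    = HEADSIGN;   /* head signature '{' */
    *(SIZE_T*)LENGTH_OFFSET(pActual) = cb;         /* remember the requested size */
    BYTE* pUser = USERS_OFFSET(pActual);           /* aligned, because HEADERSIZE went through ROUNDUP */
    pUser[cb] = TAILSIGN;                          /* the single '}' byte directly after the user buffer */
    return pUser;
}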
Example 3: asmmeminit
void
asmmeminit(void)
{
Proc *up = externup();
int i, l;
Asm* assem;
PTE *pte, *pml4;
uintptr va;
uintmem hi, lo, mem, nextmem, pa;
#ifdef ConfCrap
int cx;
#endif /* ConfCrap */
assert(!((sys->vmunmapped|sys->vmend) & machp()->pgszmask[1]));
if((pa = mmuphysaddr(sys->vmunused)) == ~0)
panic("asmmeminit 1");
pa += sys->vmunmapped - sys->vmunused;
mem = asmalloc(pa, sys->vmend - sys->vmunmapped, 1, 0);
if(mem != pa)
panic("asmmeminit 2");
DBG("pa %#llux mem %#llux\n", pa, mem);
/* assume already 2MiB aligned*/
assert(ALIGNED(sys->vmunmapped, 2*MiB));
pml4 = UINT2PTR(machp()->pml4->va);
while(sys->vmunmapped < sys->vmend) {
l = mmuwalk(pml4, sys->vmunmapped, 1, &pte, asmwalkalloc);
DBG("%#p l %d\n", sys->vmunmapped, l);
*pte = pa|PtePS|PteRW|PteP;
sys->vmunmapped += 2*MiB;
pa += 2*MiB;
}
#ifdef ConfCrap
cx = 0;
#endif /* ConfCrap */
for(assem = asmlist; assem != nil; assem = assem->next) {
if(assem->type != AsmMEMORY)
continue;
va = KSEG2+assem->addr;
print("asm: addr %#P end %#P type %d size %P\n",
assem->addr, assem->addr+assem->size,
assem->type, assem->size);
lo = assem->addr;
hi = assem->addr+assem->size;
/* Convert a range into pages */
for(mem = lo; mem < hi; mem = nextmem) {
nextmem = (mem + PGLSZ(0)) & ~machp()->pgszmask[0];
/* Try large pages first */
for(i = m->npgsz - 1; i >= 0; i--) {
if((mem & machp()->pgszmask[i]) != 0)
continue;
if(mem + PGLSZ(i) > hi)
continue;
/* This page fits entirely within the range. */
/* Mark it as usable */
if((l = mmuwalk(pml4, va, i, &pte, asmwalkalloc)) < 0)
panic("asmmeminit 3");
*pte = mem|PteRW|PteP;
if(l > 0)
*pte |= PtePS;
nextmem = mem + PGLSZ(i);
va += PGLSZ(i);
npg[i]++;
break;
}
}
#ifdef ConfCrap
/*
* Fill in conf crap.
*/
if(cx >= nelem(conf.mem))
continue;
lo = ROUNDUP(assem->addr, PGSZ);
//if(lo >= 600ull*MiB)
// continue;
conf.mem[cx].base = lo;
hi = ROUNDDN(hi, PGSZ);
//if(hi > 600ull*MiB)
// hi = 600*MiB;
conf.mem[cx].npage = (hi - lo)/PGSZ;
conf.npage += conf.mem[cx].npage;
print("cm %d: addr %#llux npage %lud\n",
cx, conf.mem[cx].base, conf.mem[cx].npage);
cx++;
#endif /* ConfCrap */
}
print("%d %d %d\n", npg[0], npg[1], npg[2]);
#ifdef ConfCrap
/*
* Fill in more conf crap.
* This is why I hate Plan 9.
//......... part of the code omitted here .........
Example 4: nic_alloc
Packet* nic_alloc(NIC* nic, uint16_t size) {
uint8_t* bitmap = (void*)nic + nic->pool.bitmap;
uint32_t count = nic->pool.count;
void* pool = (void*)nic + nic->pool.pool;
uint32_t size2 = sizeof(Packet) + nic->padding_head + size + nic->padding_tail;
uint8_t req = (ROUNDUP(size2, NIC_CHUNK_SIZE)) / NIC_CHUNK_SIZE;
uint32_t index = nic->pool.index;
lock_lock(&nic->pool.lock);
// Find tail
uint32_t idx = 0;
for(idx = index; idx <= count - req; idx++) {
for(uint32_t j = 0; j < req; j++) {
if(bitmap[idx + j] == 0) {
continue;
} else {
idx += j + bitmap[idx + j];
goto next;
}
}
goto found;
next:
;
}
// Find head
for(idx = 0; idx < index - req; idx++) {
for(uint32_t j = 0; j < req; j++) {
if(bitmap[idx + j] == 0) {
continue;
} else {
idx += j + bitmap[idx + j];
goto notfound;
}
}
goto found;
}
notfound:
// Not found
lock_unlock(&nic->pool.lock);
return NULL;
found:
nic->pool.index = idx + req;
for(uint32_t k = 0; k < req; k++) {
bitmap[idx + k] = req - k;
}
nic->pool.used += req;
lock_unlock(&nic->pool.lock);
Packet* packet = pool + (idx * NIC_CHUNK_SIZE);
packet->time = 0;
packet->start = 0;
packet->end = 0;
packet->size = (req * NIC_CHUNK_SIZE) - sizeof(Packet);
return packet;
}
Example 5: arch_setup_signal_frame
int
arch_setup_signal_frame(struct thread *t, struct sigaction *sa, int sig, int sig_mask)
{
#warning implement arch_setup_signal_frame
PANIC_UNIMPLEMENTED();
#if 0
struct iframe *frame = x86_64_get_curr_iframe();
uint32 *stack_ptr;
uint32 *code_ptr;
uint32 *regs_ptr;
struct vregs regs;
uint32 stack_buf[6];
int err;
/* do some quick sanity checks */
ASSERT(frame);
ASSERT(is_user_address(frame->user_sp));
// dprintf("arch_setup_signal_frame: thread 0x%x, frame %p, user_esp 0x%x, sig %d\n",
// t->id, frame, frame->user_esp, sig);
if ((int)frame->orig_eax >= 0) {
// we're coming from a syscall
if (((int)frame->eax == ERR_INTERRUPTED) && (sa->sa_flags & SA_RESTART)) {
dprintf("### restarting syscall %d after signal %d\n", frame->orig_eax, sig);
frame->eax = frame->orig_eax;
frame->edx = frame->orig_edx;
frame->eip -= 2;
}
}
// start stuffing stuff on the user stack
stack_ptr = (uint32 *)frame->user_sp;
// store the saved regs onto the user stack
stack_ptr -= ROUNDUP(sizeof(struct vregs)/4, 4);
regs_ptr = stack_ptr;
regs.eip = frame->eip;
regs.eflags = frame->flags;
regs.eax = frame->eax;
regs.ecx = frame->ecx;
regs.edx = frame->edx;
regs.esp = frame->esp;
regs._reserved_1 = frame->user_sp;
regs._reserved_2[0] = frame->edi;
regs._reserved_2[1] = frame->esi;
regs._reserved_2[2] = frame->ebp;
x86_64_fsave((void *)(&regs.xregs));
err = user_memcpy(stack_ptr, &regs, sizeof(regs));
if(err < 0)
return err;
// now store a code snippet on the stack
stack_ptr -= ((uint32)x86_64_end_return_from_signal - (uint32)x86_64_return_from_signal)/4;
code_ptr = stack_ptr;
err = user_memcpy(code_ptr, x86_64_return_from_signal,
((uint32)x86_64_end_return_from_signal - (uint32)x86_64_return_from_signal));
if(err < 0)
return err;
// now set up the final part
stack_buf[0] = (uint32)code_ptr; // return address when sa_handler done
stack_buf[1] = sig; // first argument to sa_handler
stack_buf[2] = (uint32)sa->sa_userdata;// second argument to sa_handler
stack_buf[3] = (uint32)regs_ptr; // third argument to sa_handler
stack_buf[4] = sig_mask; // Old signal mask to restore
stack_buf[5] = (uint32)regs_ptr; // Int frame + extra regs to restore
stack_ptr -= sizeof(stack_buf)/4;
err = user_memcpy(stack_ptr, stack_buf, sizeof(stack_buf));
if(err < 0)
return err;
frame->user_esp = (uint32)stack_ptr;
frame->eip = (uint32)sa->sa_handler;
return NO_ERROR;
#endif
}
Example 6: ROUNDUP
void OSystem_Wii::setMouseCursor(const void *buf, uint w, uint h, int hotspotX,
int hotspotY, uint32 keycolor,
bool dontScale,
const Graphics::PixelFormat *format) {
gfx_tex_format_t tex_format = GFX_TF_PALETTE_RGB5A3;
uint tw, th;
bool tmpBuf = false;
uint32 oldKeycolor = _mouseKeyColor;
#ifdef USE_RGB_COLOR
if (!format)
_pfCursor = Graphics::PixelFormat::createFormatCLUT8();
else
_pfCursor = *format;
if (_pfCursor.bytesPerPixel > 1) {
tex_format = GFX_TF_RGB5A3;
_mouseKeyColor = keycolor & 0xffff;
tw = ROUNDUP(w, 4);
th = ROUNDUP(h, 4);
if (_pfCursor != _pfRGB3444)
tmpBuf = true;
} else {
#endif
_mouseKeyColor = keycolor & 0xff;
tw = ROUNDUP(w, 8);
th = ROUNDUP(h, 4);
#ifdef USE_RGB_COLOR
}
#endif
if (!gfx_tex_init(&_texMouse, tex_format, TLUT_MOUSE, tw, th)) {
printf("could not init the mouse texture\n");
::abort();
}
gfx_tex_set_bilinear_filter(&_texMouse, _bilinearFilter);
if ((tw != w) || (th != h))
tmpBuf = true;
if (!tmpBuf) {
gfx_tex_convert(&_texMouse, (const byte *)buf);
} else {
u8 bpp = _texMouse.bpp >> 3;
byte *tmp = (byte *) malloc(tw * th * bpp);
if (!tmp) {
printf("could not alloc temp cursor buffer\n");
::abort();
}
if (bpp > 1)
memset(tmp, 0, tw * th * bpp);
else
memset(tmp, _mouseKeyColor, tw * th);
#ifdef USE_RGB_COLOR
if (bpp > 1) {
if (!Graphics::crossBlit(tmp, (const byte *)buf,
tw * _pfRGB3444.bytesPerPixel,
w * _pfCursor.bytesPerPixel,
tw, th, _pfRGB3444, _pfCursor)) {
printf("crossBlit failed (cursor)\n");
::abort();
}
// nasty, shouldn't the frontend set the alpha channel?
u16 *s = (u16 *) buf;
u16 *d = (u16 *) tmp;
for (u16 y = 0; y < h; ++y) {
for (u16 x = 0; x < w; ++x) {
if (*s++ == _mouseKeyColor)
*d++ &= ~(7 << 12);
else
d++;
}
d += tw - w;
}
} else {
#endif
byte *dst = tmp;
const byte *src = (const byte *)buf;
do {
memcpy(dst, src, w * bpp);
src += w * bpp;
dst += tw * bpp;
} while (--h);
#ifdef USE_RGB_COLOR
}
#endif
gfx_tex_convert(&_texMouse, tmp);
free(tmp);
}
_mouseHotspotX = hotspotX;
_mouseHotspotY = hotspotY;
//......... part of the code omitted here .........
Example 7: getifinfo
//......... part of the code omitted here .........
size_t needed;
int mib[6];
struct sockaddr *sa;
struct sockaddr_in *sin;
mib[0] = CTL_NET;
mib[1] = PF_ROUTE;
mib[2] = 0;
mib[3] = AF_INET;
mib[4] = NET_RT_DUMP;
mib[5] = 0;
if (sysctl(mib, 6, NULL, &needed, NULL, 0) == -1) {
perror("route-sysctl-estimate");
exit(1);
}
if (needed > 0) {
if ((buf = malloc(needed)) == 0) {
printf("out of space\n");
exit(1);
}
if (sysctl(mib, 6, buf, &needed, NULL, 0) == -1) {
perror("sysctl of routing table");
exit(1);
}
lim = buf + needed;
}
if (buf) {
for (next = buf; next < lim; next += rtm->rtm_msglen) {
rtm = (struct rt_msghdr *)next;
sa = (struct sockaddr *)(rtm + 1);
sin = (struct sockaddr_in *)sa;
if (sin->sin_addr.s_addr == 0) {
sa = (struct sockaddr *)(ROUNDUP(sa->sa_len) + (char *)sa);
sin = (struct sockaddr_in *)sa;
info->gw = sin->sin_addr.s_addr;
break;
}
}
free(buf);
}
}
#endif
// Get wireless link status if wireless
info->sl = 0;
#ifdef linux
if(fwireless != NULL) {
fseek(fwireless, 0, 0);
while(fgets(buf, sizeof(buf), fwireless)) {
r = sscanf(buf, "%s %d %d ", a, &b, &c);
if(strchr(a, ':')) *(strchr(a, ':')) = 0;
if(strcmp(a, parent) == 0) {
info->sl = c;
}
}
}
#ifdef ENABLE_NWN_SUPPORT
if (info->sl == 0) {
info->sl = nwn_get_link(parent);
}
#endif
#elif defined(__OpenBSD__)
{
struct wi_req wreq;
struct ifreq ifr;
wreq.wi_len = WI_MAX_DATALEN;
wreq.wi_type = WI_RID_COMMS_QUALITY;
strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
ifr.ifr_data = (caddr_t)&wreq;
if (ioctl(fd, SIOCGWAVELAN, &ifr) != -1)
info->sl = letoh16(wreq.wi_val[0]);
}
#endif
// Get Total tx/rx bytes
#ifdef linux
if(fdev != NULL) {
fseek(fdev, 0, 0);
while(fgets(buf, sizeof(buf), fdev)) {
r = sscanf(buf, "%s %d %d %d %d %d %d %d %d %d", a, &b, &d,&d,&d,&d,&d,&d,&d, &c);
if(strchr(a, ':')) *(strchr(a, ':')) = 0;
if(strcmp(a, parent) == 0) {
info->bytes = b + c;
}
}
}
#endif
return(0);
}
Example 8: ROUNDDOWN
status_t
M68KVMTranslationMap040::Unmap(addr_t start, addr_t end)
{
start = ROUNDDOWN(start, B_PAGE_SIZE);
if (start >= end)
return B_OK;
TRACE("M68KVMTranslationMap040::Unmap: asked to free pages 0x%lx to 0x%lx\n", start, end);
page_root_entry *pr = fPagingStructures->pgroot_virt;
page_directory_entry *pd;
page_table_entry *pt;
int index;
do {
index = VADDR_TO_PRENT(start);
if (PRE_TYPE(pr[index]) != DT_ROOT) {
// no pagedir here, move the start up to access the next page
// dir group
start = ROUNDUP(start + 1, kPageDirAlignment);
continue;
}
Thread* thread = thread_get_current_thread();
ThreadCPUPinner pinner(thread);
pd = (page_directory_entry*)MapperGetPageTableAt(
PRE_TO_PA(pr[index]));
// we want the table at rindex, not at rindex%(tbl/page)
//pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
index = VADDR_TO_PDENT(start);
if (PDE_TYPE(pd[index]) != DT_DIR) {
// no pagedir here, move the start up to access the next page
// table group
start = ROUNDUP(start + 1, kPageTableAlignment);
continue;
}
pt = (page_table_entry*)MapperGetPageTableAt(
PDE_TO_PA(pd[index]));
// we want the table at rindex, not at rindex%(tbl/page)
//pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
for (index = VADDR_TO_PTENT(start);
(index < NUM_PAGEENT_PER_TBL) && (start < end);
index++, start += B_PAGE_SIZE) {
if (PTE_TYPE(pt[index]) != DT_PAGE
&& PTE_TYPE(pt[index]) != DT_INDIRECT) {
// page mapping not valid
continue;
}
TRACE("::Unmap: removing page 0x%lx\n", start);
page_table_entry oldEntry
= M68KPagingMethod040::ClearPageTableEntry(&pt[index]);
fMapCount--;
if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
// Note, that we only need to invalidate the address, if the
// accessed flags was set, since only then the entry could have
// been in any TLB.
InvalidatePage(start);
}
}
} while (start != 0 && start < end);
return B_OK;
}
Example 9: _ASSERTE
BOOL COledbRecordset::_BindColumns()
{
_ASSERTE(m_rgBindings==NULL);
if( !IsOpen() ) return FALSE;
HRESULT Hr;
m_nCols = 0;
m_pwstrNameBuffer = NULL;
CComQIPtr<IColumnsInfo> spColInfo = m_spRowset;
if( spColInfo == NULL ) return FALSE;
DBCOLUMNINFO* rgColumnInfo = NULL;
ULONG nCols = 0;
Hr = spColInfo->GetColumnInfo(&nCols, &rgColumnInfo, &m_pwstrNameBuffer);
if( FAILED(Hr) ) return _Error(Hr);
// Allocate memory for the bindings array; there is a one-to-one
// mapping between the columns returned from GetColumnInfo() and our
// bindings.
long cbAlloc = nCols * sizeof(DBBINDING);
m_rgBindings = (DBBINDING*) ::CoTaskMemAlloc(cbAlloc);
if( m_rgBindings == NULL ) return FALSE;
::ZeroMemory(m_rgBindings, cbAlloc);
m_iAdjustIndex = 0;
// Construct the binding array element for each column.
ULONG dwOffset = 0;
for( ULONG iCol = 0; iCol < nCols; iCol++ ) {
DBBINDING& b = m_rgBindings[iCol];
b.iOrdinal = rgColumnInfo[iCol].iOrdinal;
b.dwPart = DBPART_VALUE | DBPART_LENGTH | DBPART_STATUS;
b.obStatus = dwOffset;
b.obLength = dwOffset + sizeof(DBSTATUS);
b.obValue = dwOffset + sizeof(DBSTATUS) + sizeof(ULONG);
b.dwMemOwner = DBMEMOWNER_CLIENTOWNED;
b.eParamIO = DBPARAMIO_NOTPARAM;
b.bPrecision = rgColumnInfo[iCol].bPrecision;
b.bScale = rgColumnInfo[iCol].bScale;
// Ignore bookmark column
if( (rgColumnInfo[iCol].dwFlags & DBCOLUMNFLAGS_ISBOOKMARK) != 0 ) m_iAdjustIndex++;
WORD wType = rgColumnInfo[iCol].wType;
switch( wType ) {
case DBTYPE_CY:
case DBTYPE_DECIMAL:
case DBTYPE_NUMERIC:
b.wType = DBTYPE_STR;
b.cbMaxLen = 50; // Allow 50 characters for conversion
break;
case DBTYPE_STR:
case DBTYPE_WSTR:
#ifdef _UNICODE
b.wType = DBTYPE_WSTR;
#else
b.wType = DBTYPE_STR;
#endif
b.cbMaxLen = max(min((rgColumnInfo[iCol].ulColumnSize + 1UL) * sizeof(TCHAR), 1024UL), 0UL);
break;
default:
b.wType = wType;
b.cbMaxLen = max(min(rgColumnInfo[iCol].ulColumnSize, 1024UL), 0UL);
}
// ROUNDUP: on all platforms, pointers must be aligned properly
#define ROUNDUP_AMOUNT 8
#define ROUNDUP_(size,amount) (((ULONG)(size)+((amount)-1))&~((amount)-1))
#define ROUNDUP(size) ROUNDUP_(size, ROUNDUP_AMOUNT)
// Update the offset past the end of this column's data
dwOffset = b.cbMaxLen + b.obValue;
dwOffset = ROUNDUP(dwOffset);
}
m_nCols = (short) nCols;
m_dwBufferSize = dwOffset;
::CoTaskMemFree(rgColumnInfo);
// Create accessor
CComQIPtr<IAccessor> spAccessor = m_spRowset;
if( spAccessor == NULL ) return FALSE;
Hr = spAccessor->CreateAccessor(DBACCESSOR_ROWDATA, m_nCols, m_rgBindings, 0, &m_hAccessor, NULL);
if( FAILED(Hr) ) return _Error(Hr);
m_pData = ::CoTaskMemAlloc(m_dwBufferSize);
if( m_pData == NULL ) return FALSE;
return TRUE;
}
Example 10: LTRACEF
void *heap_alloc(size_t size, unsigned int alignment)
{
void *ptr;
#if DEBUG_HEAP
size_t original_size = size;
#endif
LTRACEF("size %zd, align %d\n", size, alignment);
// deal with the pending free list
if (unlikely(!list_is_empty(&theheap.delayed_free_list))) {
heap_free_delayed_list();
}
// alignment must be power of 2
if (alignment & (alignment - 1))
return NULL;
// we always put a size field + base pointer + magic in front of the allocation
size += sizeof(struct alloc_struct_begin);
#if DEBUG_HEAP
size += PADDING_SIZE;
#endif
// make sure we allocate at least the size of a struct free_heap_chunk so that
// when we free it, we can create a struct free_heap_chunk struct and stick it
// in the spot
if (size < sizeof(struct free_heap_chunk))
size = sizeof(struct free_heap_chunk);
// round up size to a multiple of native pointer size
size = ROUNDUP(size, sizeof(void *));
// deal with nonzero alignments
if (alignment > 0) {
if (alignment < 16)
alignment = 16;
// add alignment for worst case fit
size += alignment;
}
mutex_acquire(&theheap.lock);
// walk through the list
ptr = NULL;
struct free_heap_chunk *chunk;
list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) {
DEBUG_ASSERT((chunk->len % sizeof(void *)) == 0); // len should always be a multiple of pointer size
// is it big enough to service our allocation?
if (chunk->len >= size) {
ptr = chunk;
// remove it from the list
struct list_node *next_node = list_next(&theheap.free_list, &chunk->node);
list_delete(&chunk->node);
if (chunk->len > size + sizeof(struct free_heap_chunk)) {
// there's enough space in this chunk to create a new one after the allocation
struct free_heap_chunk *newchunk = heap_create_free_chunk((uint8_t *)ptr + size, chunk->len - size);
// truncate this chunk
chunk->len -= chunk->len - size;
// add the new one where chunk used to be
if (next_node)
list_add_before(next_node, &newchunk->node);
else
list_add_tail(&theheap.free_list, &newchunk->node);
}
// the allocated size is actually the length of this chunk, not the size requested
DEBUG_ASSERT(chunk->len >= size);
size = chunk->len;
#if DEBUG_HEAP
memset(ptr, ALLOC_FILL, size);
#endif
ptr = (void *)((addr_t)ptr + sizeof(struct alloc_struct_begin));
// align the output if requested
if (alignment > 0) {
ptr = (void *)ROUNDUP((addr_t)ptr, alignment);
}
struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr;
as--;
as->magic = HEAP_MAGIC;
as->ptr = (void *)chunk;
as->size = size;
#if DEBUG_HEAP
as->padding_start = ((uint8_t *)ptr + original_size);
as->padding_size = (((addr_t)chunk + size) - ((addr_t)ptr + original_size));
//printf("padding start %p, size %u, chunk %p, size %u\n", as->padding_start, as->padding_size, chunk, size);
memset(as->padding_start, PADDING_FILL, as->padding_size);
#endif
break;
//......... part of the code omitted here .........
Example 11: write_sparse_image
//......... part of the code omitted here .........
"%s: Request would exceed partition size!\n",
__func__);
fastboot_fail(
"Request would exceed partition size!");
return;
}
blks = info->write(info, blk, blkcnt, data);
/* blks might be > blkcnt (eg. NAND bad-blocks) */
if (blks < blkcnt) {
printf("%s: %s" LBAFU " [" LBAFU "]\n",
__func__, "Write failed, block #",
blk, blks);
fastboot_fail(
"flash write failure");
return;
}
blk += blks;
bytes_written += blkcnt * info->blksz;
total_blocks += chunk_header->chunk_sz;
data += chunk_data_sz;
break;
case CHUNK_TYPE_FILL:
if (chunk_header->total_sz !=
(sparse_header->chunk_hdr_sz + sizeof(uint32_t))) {
fastboot_fail(
"Bogus chunk size for chunk type FILL");
return;
}
fill_buf = (uint32_t *)
memalign(ARCH_DMA_MINALIGN,
ROUNDUP(
info->blksz * fill_buf_num_blks,
ARCH_DMA_MINALIGN));
if (!fill_buf) {
fastboot_fail(
"Malloc failed for: CHUNK_TYPE_FILL");
return;
}
fill_val = *(uint32_t *)data;
data = (char *)data + sizeof(uint32_t);
for (i = 0;
i < (info->blksz * fill_buf_num_blks /
sizeof(fill_val));
i++)
fill_buf[i] = fill_val;
if (blk + blkcnt > info->start + info->size) {
printf(
"%s: Request would exceed partition size!\n",
__func__);
fastboot_fail(
"Request would exceed partition size!");
return;
}
for (i = 0; i < blkcnt;) {
j = blkcnt - i;
if (j > fill_buf_num_blks)
j = fill_buf_num_blks;
blks = info->write(info, blk, j, fill_buf);
/* blks might be > j (eg. NAND bad-blocks) */
Example 12: nvram_commit
int
nvram_commit(void)
{
char *buf;
size_t erasesize, len, magic_len;
unsigned int i;
int ret;
struct nvram_header *header;
unsigned long flags;
u_int32_t offset;
DECLARE_WAITQUEUE(wait, current);
wait_queue_head_t wait_q;
struct erase_info erase;
u_int32_t magic_offset = 0; /* Offset for writing MAGIC # */
if (!nvram_mtd) {
printk("nvram_commit: NVRAM not found\n");
return -ENODEV;
}
if (in_interrupt()) {
printk("nvram_commit: not committing in interrupt\n");
return -EINVAL;
}
/* Backup sector blocks to be erased */
erasesize = ROUNDUP(NVRAM_SPACE, nvram_mtd->erasesize);
if (!(buf = kmalloc(erasesize, GFP_KERNEL))) {
printk("nvram_commit: out of memory\n");
return -ENOMEM;
}
down(&nvram_sem);
if ((i = erasesize - NVRAM_SPACE) > 0) {
offset = nvram_mtd->size - erasesize;
len = 0;
ret = MTD_READ(nvram_mtd, offset, i, &len, buf);
if (ret || len != i) {
printk("nvram_commit: read error ret = %d, len = %d/%d\n", ret, len, i);
ret = -EIO;
goto done;
}
header = (struct nvram_header *)(buf + i);
magic_offset = i + ((void *)&header->magic - (void *)header);
} else {
offset = nvram_mtd->size - NVRAM_SPACE;
header = (struct nvram_header *)buf;
/* assign header first, then derive the offset of its magic field */
magic_offset = ((void *)&header->magic - (void *)header);
}
/* clear the existing magic # to mark the NVRAM as unusable
we can pull MAGIC bits low without erase */
header->magic = NVRAM_CLEAR_MAGIC; /* All zeros magic */
/* Unlock sector blocks (for Intel 28F320C3B flash) , 20060309 */
if(nvram_mtd->unlock)
nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);
ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic),
&magic_len, (char *)&header->magic);
if (ret || magic_len != sizeof(header->magic)) {
printk("nvram_commit: clear MAGIC error\n");
ret = -EIO;
goto done;
}
header->magic = NVRAM_MAGIC; /* reset MAGIC before we regenerate the NVRAM,
otherwise we'll have an incorrect CRC */
/* Regenerate NVRAM */
spin_lock_irqsave(&nvram_lock, flags);
ret = _nvram_commit(header);
spin_unlock_irqrestore(&nvram_lock, flags);
if (ret)
goto done;
/* Erase sector blocks */
init_waitqueue_head(&wait_q);
for (; offset < nvram_mtd->size - NVRAM_SPACE + header->len; offset += nvram_mtd->erasesize) {
erase.mtd = nvram_mtd;
erase.addr = offset;
erase.len = nvram_mtd->erasesize;
erase.callback = erase_callback;
erase.priv = (u_long) &wait_q;
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
/* Unlock sector blocks */
if (nvram_mtd->unlock)
nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);
if ((ret = MTD_ERASE(nvram_mtd, &erase))) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
printk("nvram_commit: erase error\n");
goto done;
}
/* Wait for erase to finish */
//......... part of the code omitted here .........
Example 13: load_icode
//......... part of the code omitted here .........
end = ph->p_va + ph->p_memsz;
if (start < la) {
// ph->p_memsz == ph->p_filesz
if (start == end) {
continue ;
}
off = start + PGSIZE - la, size = PGSIZE - off;
if (end < la) {
size -= la - end;
}
memset(page2kva(page) + off, 0, size);
start += size;
assert((end < la && start == end) || (end >= la && start == la));
}
while (start < end) {
if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
ret = -E_NO_MEM;
goto bad_cleanup_mmap;
}
off = start - la, size = PGSIZE - off, la += PGSIZE;
if (end < la) {
size -= la - end;
}
memset(page2kva(page) + off, 0, size);
start += size;
}
*/
/**************************************/
}
mm->brk_start = mm->brk = ROUNDUP(mm->brk_start, PGSIZE);
/* setup user stack */
vm_flags = VM_READ | VM_WRITE | VM_STACK;
if ((ret =
mm_map(mm, USTACKTOP - USTACKSIZE, USTACKSIZE, vm_flags,
NULL)) != 0) {
goto bad_cleanup_mmap;
}
if (is_dynamic) {
elf->e_entry += bias;
bias = 0;
off_t phoff =
elf->e_phoff + sizeof(struct proghdr) * interp_idx;
if ((ret =
load_icode_read(fd, ph, sizeof(struct proghdr),
phoff)) != 0) {
goto bad_cleanup_mmap;
}
char *interp_path = (char *)kmalloc(ph->p_filesz);
load_icode_read(fd, interp_path, ph->p_filesz, ph->p_offset);
int interp_fd = sysfile_open(interp_path, O_RDONLY);
assert(interp_fd >= 0);
struct elfhdr interp___elf, *interp_elf = &interp___elf;
assert((ret =
load_icode_read(interp_fd, interp_elf,
sizeof(struct elfhdr), 0)) == 0);
assert(interp_elf->e_magic == ELF_MAGIC);
Example 14: map_ph
//#ifdef UCONFIG_BIONIC_LIBC
static int
map_ph(int fd, struct proghdr *ph, struct mm_struct *mm, uint32_t * pbias,
uint32_t linker)
{
int ret = 0;
struct Page *page;
uint32_t vm_flags = 0;
uint32_t bias = 0;
pte_perm_t perm = 0;
ptep_set_u_read(&perm);
if (ph->p_flags & ELF_PF_X)
vm_flags |= VM_EXEC;
if (ph->p_flags & ELF_PF_W)
vm_flags |= VM_WRITE;
if (ph->p_flags & ELF_PF_R)
vm_flags |= VM_READ;
if (vm_flags & VM_WRITE)
ptep_set_u_write(&perm);
if (pbias) {
bias = *pbias;
}
if (!bias && !ph->p_va) {
bias = get_unmapped_area(mm, ph->p_memsz + PGSIZE);
bias = ROUNDUP(bias, PGSIZE);
if (pbias)
*pbias = bias;
}
if ((ret =
mm_map(mm, ph->p_va + bias, ph->p_memsz, vm_flags, NULL)) != 0) {
goto bad_cleanup_mmap;
}
if (!linker && mm->brk_start < ph->p_va + bias + ph->p_memsz) {
mm->brk_start = ph->p_va + bias + ph->p_memsz;
}
off_t offset = ph->p_offset;
size_t off, size;
uintptr_t start = ph->p_va + bias, end, la = ROUNDDOWN(start, PGSIZE);
end = ph->p_va + bias + ph->p_filesz;
while (start < end) {
if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
ret = -E_NO_MEM;
goto bad_cleanup_mmap;
}
off = start - la, size = PGSIZE - off, la += PGSIZE;
if (end < la) {
size -= la - end;
}
if ((ret =
load_icode_read(fd, page2kva(page) + off, size,
offset)) != 0) {
goto bad_cleanup_mmap;
}
start += size, offset += size;
}
end = ph->p_va + bias + ph->p_memsz;
if (start < la) {
if (start == end) {
goto normal_exit;
}
off = start + PGSIZE - la, size = PGSIZE - off;
if (end < la) {
size -= la - end;
}
memset(page2kva(page) + off, 0, size);
start += size;
assert((end < la && start == end)
|| (end >= la && start == la));
}
while (start < end) {
if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
ret = -E_NO_MEM;
goto bad_cleanup_mmap;
}
off = start - la, size = PGSIZE - off, la += PGSIZE;
if (end < la) {
size -= la - end;
}
memset(page2kva(page) + off, 0, size);
start += size;
}
normal_exit:
return 0;
bad_cleanup_mmap:
return ret;
}
Example 15: pfkey_send
static int
pfkey_send(int sd, uint8_t satype, uint8_t mtype, uint8_t dir,
int af, union ldpd_addr *src, union ldpd_addr *dst, uint32_t spi,
uint8_t aalg, int alen, char *akey, uint8_t ealg, int elen, char *ekey,
uint16_t sport, uint16_t dport)
{
struct sadb_msg smsg;
struct sadb_sa sa;
struct sadb_address sa_src, sa_dst;
struct sadb_key sa_akey, sa_ekey;
struct sadb_spirange sa_spirange;
struct iovec iov[IOV_CNT];
ssize_t n;
int len = 0;
int iov_cnt;
struct sockaddr_storage smask, dmask;
union sockunion su_src, su_dst;
if (!pid)
pid = getpid();
/* we need clean sockaddr... no ports set */
memset(&smask, 0, sizeof(smask));
addr2sa(af, src, 0, &su_src);
switch (af) {
case AF_INET:
memset(&((struct sockaddr_in *)&smask)->sin_addr, 0xff, 32/8);
break;
case AF_INET6:
memset(&((struct sockaddr_in6 *)&smask)->sin6_addr, 0xff,
128/8);
break;
default:
return (-1);
}
smask.ss_family = su_src.sa.sa_family;
smask.ss_len = sockaddr_len(&su_src.sa);
memset(&dmask, 0, sizeof(dmask));
addr2sa(af, dst, 0, &su_dst);
switch (af) {
case AF_INET:
memset(&((struct sockaddr_in *)&dmask)->sin_addr, 0xff, 32/8);
break;
case AF_INET6:
memset(&((struct sockaddr_in6 *)&dmask)->sin6_addr, 0xff,
128/8);
break;
default:
return (-1);
}
dmask.ss_family = su_dst.sa.sa_family;
dmask.ss_len = sockaddr_len(&su_dst.sa);
memset(&smsg, 0, sizeof(smsg));
smsg.sadb_msg_version = PF_KEY_V2;
smsg.sadb_msg_seq = ++sadb_msg_seq;
smsg.sadb_msg_pid = pid;
smsg.sadb_msg_len = sizeof(smsg) / 8;
smsg.sadb_msg_type = mtype;
smsg.sadb_msg_satype = satype;
switch (mtype) {
case SADB_GETSPI:
memset(&sa_spirange, 0, sizeof(sa_spirange));
sa_spirange.sadb_spirange_exttype = SADB_EXT_SPIRANGE;
sa_spirange.sadb_spirange_len = sizeof(sa_spirange) / 8;
sa_spirange.sadb_spirange_min = 0x100;
sa_spirange.sadb_spirange_max = 0xffffffff;
sa_spirange.sadb_spirange_reserved = 0;
break;
case SADB_ADD:
case SADB_UPDATE:
case SADB_DELETE:
memset(&sa, 0, sizeof(sa));
sa.sadb_sa_exttype = SADB_EXT_SA;
sa.sadb_sa_len = sizeof(sa) / 8;
sa.sadb_sa_replay = 0;
sa.sadb_sa_spi = htonl(spi);
sa.sadb_sa_state = SADB_SASTATE_MATURE;
break;
}
memset(&sa_src, 0, sizeof(sa_src));
sa_src.sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
sa_src.sadb_address_len =
(sizeof(sa_src) + ROUNDUP(sockaddr_len(&su_src.sa))) / 8;
memset(&sa_dst, 0, sizeof(sa_dst));
sa_dst.sadb_address_exttype = SADB_EXT_ADDRESS_DST;
sa_dst.sadb_address_len =
(sizeof(sa_dst) + ROUNDUP(sockaddr_len(&su_dst.sa))) / 8;
sa.sadb_sa_auth = aalg;
sa.sadb_sa_encrypt = SADB_X_EALG_AES; /* XXX */
//......... part of the code omitted here .........