This article collects typical usage examples of the roundup2 function in C++. If you have been wondering what exactly roundup2 does, how to call it, or what real uses of it look like, the hand-picked code examples below should help.
A total of 15 roundup2 code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
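All of the examples come from BSD-derived kernels and system libraries, where roundup2 is usually not a function at all but a macro from <sys/param.h> that rounds an integer up to the next multiple of a power of two. The following standalone program is a minimal sketch of the common definition and its behavior; it is illustrative only, is not taken from any of the examples below, and the exact definition may differ slightly between systems.

#include <assert.h>

/* Typical BSD-style definition: only valid when align is a power of two. */
#define roundup2(x, align) (((x) + ((align) - 1)) & ~((align) - 1))

int
main(void)
{
    assert(roundup2(0, 4) == 0);            /* already aligned values are unchanged */
    assert(roundup2(1, 4) == 4);            /* 1 byte padded up to a 4-byte boundary */
    assert(roundup2(13, 8) == 16);          /* next multiple of 8 above 13 */
    assert(roundup2(4096, 4096) == 4096);   /* page-sized values stay page-sized */
    return 0;
}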
Example 1: cpy_frm_elfnote
/*
**
** Get the contents of a note.
**
*/
int cpy_frm_elfnote(elfobj* obj, char* note_name, int note_type, void* buf, int len)
{
    int i;
    Elf_Note* note;
    char* start, * end, * cur_name, * data;

    for (i = 0; i < obj->header->e_phnum; i++)
    {
        if (obj->program_headers[i]->p_type == PT_NOTE)
        {
            start = obj->data + obj->program_headers[i]->p_offset;
            end = start + obj->program_headers[i]->p_filesz;
            for (; start < end; )
            {
                note = (Elf_Note*) start;
                start += sizeof(Elf_Note);
                cur_name = start;
                start += roundup2(note->n_namesz, 4);
                data = start;
                start += roundup2(note->n_descsz, 4);
                if (!strcmp(note_name, cur_name) && note->n_type == note_type)
                {
                    memcpy(buf, data, len);
                    return (len);
                    /* *buf = data; */
                    /* *len = note->n_descsz; */
                }
            }
        }
    }
    return (-1);
}
Example 2: tmpfs_strname_neqlen
bool
tmpfs_strname_neqlen(struct componentname *fcnp, struct componentname *tcnp)
{
    const size_t fln = roundup2(fcnp->cn_namelen, TMPFS_NAME_QUANTUM);
    const size_t tln = roundup2(tcnp->cn_namelen, TMPFS_NAME_QUANTUM);

    return (fln != tln) || memcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fln);
}
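Example 2 relies on tmpfs storing each name in a buffer whose size is rounded up to TMPFS_NAME_QUANTUM and whose padding is zero-filled, so a memcmp over the rounded length stays inside the allocated, zero-padded buffers. The small sketch below illustrates that invariant; the quantum value of 16 is a hypothetical stand-in and nothing here is taken from the actual tmpfs sources.

#include <assert.h>
#include <string.h>

#define NAME_QUANTUM 16   /* hypothetical stand-in for TMPFS_NAME_QUANTUM */
#define roundup2(x, y) (((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
    /* Names live in zero-padded buffers sized to the quantum. */
    char a[NAME_QUANTUM] = "foo";     /* remaining bytes are zero */
    char b[NAME_QUANTUM] = "foobar";

    size_t fln = roundup2(strlen("foo"), NAME_QUANTUM);
    size_t tln = roundup2(strlen("foobar"), NAME_QUANTUM);

    /* Both lengths round up to one quantum, so the memcmp covers the padding too. */
    assert(fln == tln && fln == NAME_QUANTUM);
    assert(memcmp(a, b, fln) != 0);   /* the names differ */
    return 0;
}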
Example 3: parsereq
/*
* Return a pointer to the first file handle in the packet.
* If the packet was truncated, return 0.
*/
static const uint32_t *
parsereq(netdissect_options *ndo,
         const struct sunrpc_msg *rp, u_int length)
{
    const uint32_t *dp;
    u_int len, rounded_len;

    /*
     * Find the start of the req data (if we captured it).
     * First, get the length of the credentials, and make sure
     * we have all of the opaque part of the credentials.
     */
    dp = (const uint32_t *)&rp->rm_call.cb_cred;
    if (length < 2 * sizeof(*dp))
        goto trunc;
    ND_TCHECK_4(dp + 1);
    len = EXTRACT_BE_U_4(dp + 1);
    rounded_len = roundup2(len, 4);
    ND_TCHECK_LEN(dp + 2, rounded_len);
    if (2 * sizeof(*dp) + rounded_len <= length) {
        /*
         * We have all of the credentials. Skip past them; they
         * consist of 4 bytes of flavor, 4 bytes of length,
         * and len-rounded-up-to-a-multiple-of-4 bytes of
         * data.
         */
        dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp);
        length -= 2 * sizeof(*dp) + rounded_len;

        /*
         * Now get the length of the verifier, and make sure
         * we have all of the opaque part of the verifier.
         */
        if (length < 2 * sizeof(*dp))
            goto trunc;
        ND_TCHECK_4(dp + 1);
        len = EXTRACT_BE_U_4(dp + 1);
        rounded_len = roundup2(len, 4);
        ND_TCHECK_LEN(dp + 2, rounded_len);
        if (2 * sizeof(*dp) + rounded_len < length) {
            /*
             * We have all of the verifier. Skip past it;
             * it consists of 4 bytes of flavor, 4 bytes of
             * length, and len-rounded-up-to-a-multiple-of-4
             * bytes of data.
             */
            dp += (len + (2 * sizeof(*dp) + 3)) / sizeof(*dp);
            return (dp);
        }
    }
trunc:
    return (NULL);
}
Example 4: _rtld_tls_offset_allocate
int
_rtld_tls_offset_allocate(Obj_Entry *obj)
{
    size_t offset, next_offset;

    if (obj->tls_done)
        return 0;
    if (obj->tlssize == 0) {
        obj->tlsoffset = 0;
        obj->tls_done = 1;
        return 0;
    }

#ifdef __HAVE_TLS_VARIANT_I
    offset = roundup2(_rtld_tls_static_offset, obj->tlsalign);
    next_offset = offset + obj->tlssize;
#else
    offset = roundup2(_rtld_tls_static_offset + obj->tlssize,
        obj->tlsalign);
    next_offset = offset;
#endif

    /*
     * Check if the static allocation was already done.
     * This happens if dynamically loaded modules want to use
     * static TLS space.
     *
     * XXX Keep an actual free list and callbacks for initialisation.
     */
    if (_rtld_tls_static_space) {
        if (obj->tlsinitsize) {
            _rtld_error("%s: Use of initialized "
                "Thread Local Storage with model initial-exec "
                "and dlopen is not supported",
                obj->path);
            return -1;
        }
        if (next_offset > _rtld_tls_static_space) {
            _rtld_error("%s: No space available "
                "for static Thread Local Storage",
                obj->path);
            return -1;
        }
    }
    obj->tlsoffset = offset;
    _rtld_tls_static_offset = next_offset;
    obj->tls_done = 1;
    return 0;
}
Example 5: _rtld_tls_allocate
struct tls_tcb *
_rtld_tls_allocate(void)
{
    struct tls_tcb *tcb;
    uint8_t *p;

    if (initial_thread_tcb == NULL) {
#ifdef __HAVE_TLS_VARIANT_II
        tls_size = roundup2(tls_size, sizeof(void *));
#endif
        tls_allocation = tls_size + sizeof(*tcb);

        initial_thread_tcb = p = mmap(NULL, tls_allocation,
            PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
    } else {
        p = calloc(1, tls_allocation);
    }
    if (p == NULL) {
        static const char msg[] = "TLS allocation failed, terminating\n";
        write(STDERR_FILENO, msg, sizeof(msg));
        _exit(127);
    }
#ifdef __HAVE_TLS_VARIANT_I
    /* LINTED */
    tcb = (struct tls_tcb *)p;
    p += sizeof(struct tls_tcb);
#else
    /* LINTED tls_size is rounded above */
    tcb = (struct tls_tcb *)(p + tls_size);
    tcb->tcb_self = tcb;
#endif
    memcpy(p, tls_initaddr, tls_initsize);
    return tcb;
}
Example 6: ieee80211_getmgtframe
/*
* Allocate and setup a management frame of the specified
* size. We return the mbuf and a pointer to the start
* of the contiguous data area that's been reserved based
* on the packet length. The data area is forced to 32-bit
* alignment and the buffer length to a multiple of 4 bytes.
* This is done mainly so beacon frames (that require this)
* can use this interface too.
*/
struct mbuf *
ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen)
{
    struct mbuf *m;
    u_int len;

    /*
     * NB: we know the mbuf routines will align the data area
     * so we don't need to do anything special.
     */
    len = roundup2(headroom + pktlen, 4);
    KASSERT(len <= MCLBYTES, ("802.11 mgt frame too large: %u", len));
    if (len < MINCLSIZE) {
        m = m_gethdr(M_NOWAIT, MT_DATA);
        /*
         * Align the data in case additional headers are added.
         * This should only happen when a WEP header is added
         * which only happens for shared key authentication mgt
         * frames which all fit in MHLEN.
         */
        if (m != NULL)
            MH_ALIGN(m, len);
    } else {
        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m != NULL)
            MC_ALIGN(m, len);
    }
    if (m != NULL) {
        m->m_data += headroom;
        *frm = m->m_data;
    }
    return m;
}
Example 7: set_render_target
static void
set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
{
    u32 cb_color_info;
    int pitch, slice;
    RING_LOCALS;
    DRM_DEBUG("\n");

    h = roundup2(h, 8);
    if (h < 8)
        h = 8;

    cb_color_info = ((format << 2) | (1 << 27));
    pitch = (w / 8) - 1;
    slice = ((w * h) / 64) - 1;
    if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) &&
        ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) {
        BEGIN_RING(21 + 2);
        OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
        OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
        OUT_RING(gpu_addr >> 8);
        OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        OUT_RING(2 << 0);
    } else {
Example 8: efi_copy_init
int
efi_copy_init(void)
{
    EFI_STATUS status;

    status = BS->AllocatePages(AllocateAnyPages, EfiLoaderData,
        STAGE_PAGES, &staging);
    if (EFI_ERROR(status)) {
        printf("failed to allocate staging area: %lu\n",
            EFI_ERROR_CODE(status));
        return (status);
    }
    staging_end = staging + STAGE_PAGES * EFI_PAGE_SIZE;

#if defined(__aarch64__) || defined(__arm__)
    /*
     * Round the kernel load address to a 2MiB value. This is needed
     * because the kernel builds a page table based on where it has
     * been loaded in physical address space. As the kernel will use
     * either a 1MiB or 2MiB page for this we need to make sure it
     * is correctly aligned for both cases.
     */
    staging = roundup2(staging, 2 * 1024 * 1024);
#endif

    return (0);
}
Example 9: i915_gem_context_init
void i915_gem_context_init(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t ctx_size;

    if (!HAS_HW_CONTEXTS(dev)) {
        dev_priv->hw_contexts_disabled = true;
        return;
    }

    /* If called from reset, or thaw... we've been here already */
    if (dev_priv->hw_contexts_disabled ||
        dev_priv->ring[RCS].default_context)
        return;

    ctx_size = get_context_size(dev);
    dev_priv->hw_context_size = get_context_size(dev);
    dev_priv->hw_context_size = roundup2(dev_priv->hw_context_size, 4096);
    if (ctx_size <= 0 || ctx_size > (1<<20)) {
        dev_priv->hw_contexts_disabled = true;
        return;
    }

    if (create_default_context(dev_priv)) {
        dev_priv->hw_contexts_disabled = true;
        return;
    }

    DRM_DEBUG_DRIVER("HW context support initialized\n");
}
Example 10: workqueue_create
int
workqueue_create(struct workqueue **wqp, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl, int flags)
{
    struct workqueue *wq;
    struct workqueue_queue *q;
    void *ptr;
    int error = 0;

    CTASSERT(sizeof(work_impl_t) <= sizeof(struct work));

    ptr = kmem_zalloc(workqueue_size(flags), KM_SLEEP);
    wq = (void *)roundup2((uintptr_t)ptr, coherency_unit);
    wq->wq_ptr = ptr;
    wq->wq_flags = flags;

    workqueue_init(wq, name, callback_func, callback_arg, prio, ipl);

    if (flags & WQ_PERCPU) {
        struct cpu_info *ci;
        CPU_INFO_ITERATOR cii;

        /* create the work-queue for each CPU */
        for (CPU_INFO_FOREACH(cii, ci)) {
            q = workqueue_queue_lookup(wq, ci);
            error = workqueue_initqueue(wq, q, ipl, ci);
            if (error) {
                break;
            }
        }
    } else {
Example 11: radeon_mode_dumb_create
int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
    struct radeon_device *rdev = dev->dev_private;
    struct drm_gem_object *gobj;
    uint32_t handle;
    int r;

    args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
    args->size = args->pitch * args->height;
    args->size = roundup2(args->size, PAGE_SIZE);

    r = radeon_gem_object_create(rdev, args->size, 0,
                                 RADEON_GEM_DOMAIN_VRAM,
                                 false, ttm_bo_type_device,
                                 &gobj);
    if (r)
        return -ENOMEM;

    handle = 0;
    r = drm_gem_handle_create(file_priv, gobj, &handle);
    /* drop reference from allocate - handle holds it now */
    drm_gem_object_unreference_unlocked(gobj);
    if (r) {
        return r;
    }
    args->handle = handle;
    return 0;
}
Example 12: snd_unit_init
/*
* This *must* be called first before any of the functions above!!!
*/
void
snd_unit_init(void)
{
    int i;

    if (snd_unit_initialized != 0)
        return;
    snd_unit_initialized = 1;

    if (getenv_int("hw.snd.maxunit", &i) != 0) {
        if (i < SND_UNIT_UMIN)
            i = SND_UNIT_UMIN;
        else if (i > SND_UNIT_UMAX)
            i = SND_UNIT_UMAX;
        else
            i = roundup2(i, 2);

        for (snd_u_shift = 0; (i >> (snd_u_shift + 1)) != 0;
            snd_u_shift++)
            ;

        /*
         * Make room for channels/clones allocation unit
         * to fit within 24bit MAXMINOR limit.
         */
        snd_c_shift = 24 - snd_u_shift - snd_d_shift;
    }

    if (bootverbose != 0)
        printf("%s() u=0x%08x [%d] d=0x%08x [%d] c=0x%08x [%d]\n",
            __func__, SND_U_MASK, snd_max_u() + 1,
            SND_D_MASK, snd_max_d() + 1, SND_C_MASK, snd_max_c() + 1);
}
Example 13: alloc_buf1_ppods
/*
* Allocate page pods for DDP buffer 1 (the user buffer) and set up the tag in
* the TCB. We allocate page pods in multiples of PPOD_CLUSTER_SIZE. First we
* try to allocate enough page pods to accommodate the whole buffer, subject to
* the MAX_PPODS limit. If that fails we try to allocate PPOD_CLUSTER_SIZE page
* pods before failing entirely.
*/
static int
alloc_buf1_ppods(struct toepcb *toep, struct ddp_state *p,
                 unsigned long addr, unsigned int len)
{
    int err, tag, npages, nppods;
    struct tom_data *d = TOM_DATA(toep->tp_toedev);

#if 0
    SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
    npages = ((addr & PAGE_MASK) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    nppods = min(pages2ppods(npages), MAX_PPODS);
    nppods = roundup2(nppods, PPOD_CLUSTER_SIZE);
    err = t3_alloc_ppods(d, nppods, &tag);
    if (err && nppods > PPOD_CLUSTER_SIZE) {
        nppods = PPOD_CLUSTER_SIZE;
        err = t3_alloc_ppods(d, nppods, &tag);
    }
    if (err)
        return (ENOMEM);

    p->ubuf_nppods = nppods;
    p->ubuf_tag = tag;
#if NUM_DDP_KBUF == 1
    t3_set_ddp_tag(toep, 1, tag << 6);
#endif
    return (0);
}
Example 14: finalize_pdu
static struct mbuf *
finalize_pdu(struct icl_cxgbei_conn *icc, struct icl_cxgbei_pdu *icp)
{
    struct icl_pdu *ip = &icp->ip;
    uint8_t ulp_submode, padding;
    struct mbuf *m, *last;
    struct iscsi_bhs *bhs;

    /*
     * Fix up the data segment mbuf first.
     */
    m = ip->ip_data_mbuf;
    ulp_submode = icc->ulp_submode;
    if (m) {
        last = m_last(m);

        /*
         * Round up the data segment to a 4B boundary. Pad with 0 if
         * necessary. There will definitely be room in the mbuf.
         */
        padding = roundup2(ip->ip_data_len, 4) - ip->ip_data_len;
        if (padding) {
            bzero(mtod(last, uint8_t *) + last->m_len, padding);
            last->m_len += padding;
        }
    } else {
Example 15: _rtld_tls_initial_allocation
void
_rtld_tls_initial_allocation(void)
{
    struct tls_tcb *tcb;

    _rtld_tls_static_space = _rtld_tls_static_offset +
        RTLD_STATIC_TLS_RESERVATION;
#ifndef __HAVE_TLS_VARIANT_I
    _rtld_tls_static_space = roundup2(_rtld_tls_static_space,
        sizeof(void *));
#endif
    dbg(("_rtld_tls_static_space %zu", _rtld_tls_static_space));

    tcb = _rtld_tls_allocate_locked();
#ifdef __HAVE___LWP_SETTCB
    __lwp_settcb(tcb);
#ifdef __powerpc__
    /*
     * Save the tcb pointer so that libc can retrieve it. Older
     * crt0 will obliterate r2 so there is code in libc to restore it.
     */
    _lwp_setprivate(tcb);
#endif
#else
    _lwp_setprivate(tcb);
#endif
}