This article collects typical usage examples of the sizeofW function in C++. If you have been wondering what exactly sizeofW does, how it is used, or where to find examples of it in real code, the hand-picked examples below may help.
A total of 15 code examples of the sizeofW function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
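The examples below all come from the GHC runtime system (RTS), where sizeofW(t) is the size of type t measured in machine words (rounded up), as opposed to sizeof, which measures bytes. Heap allocation in the RTS is word-based, so closure sizes are always computed with sizeofW. As a minimal sketch of the recurring pattern (modelled on Example 6 below; mkMyClosure and MyConstr_con_info are hypothetical names, and the usual RTS declarations such as allocate and SET_HDR are assumed to be in scope):

#include "Rts.h"

// Hypothetical single-field constructor closure, sketched after rts_mkFunPtr.
StgClosure *
mkMyClosure (Capability *cap, StgClosure *field)
{
    // allocate() takes a size in words, so the closure size is computed with
    // sizeofW() rather than sizeof(): one header plus one payload word.
    StgClosure *p = (StgClosure *)allocate(cap, sizeofW(StgHeader) + 1);
    SET_HDR(p, MyConstr_con_info, CCS_SYSTEM);  // set info pointer and cost centre
    p->payload[0] = field;                      // fill the single payload word
    return p;
}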
Example 1: throwTo
MessageThrowTo *
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception (or NULL)
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception) // the exception closure
{
    MessageThrowTo *msg;

    msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
    // the message starts locked; see below
    SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
    msg->source = source;
    msg->target = target;
    msg->exception = exception;

    switch (throwToMsg(cap, msg))
    {
    case THROWTO_SUCCESS:
        // unlock the message now, otherwise we leave a WHITEHOLE in
        // the heap (#6103)
        SET_HDR(msg, &stg_MSG_THROWTO_info, CCS_SYSTEM);
        return NULL;

    case THROWTO_BLOCKED:
    default:
        // the caller will unlock the message when it is ready. We
        // cannot unlock it yet, because the calling thread will need
        // to tidy up its state first.
        return msg;
    }
}
Example 2: compactAppendBlock
static StgCompactNFDataBlock *
compactAppendBlock (Capability       *cap,
                    StgCompactNFData *str,
                    StgWord           aligned_size)
{
    StgCompactNFDataBlock *block;
    bdescr *bd;

    block = compactAllocateBlockInternal(cap, aligned_size,
                                         compactGetFirstBlock(str),
                                         ALLOCATE_APPEND);
    block->owner = str;
    block->next = NULL;

    ASSERT(str->last->next == NULL);
    str->last->next = block;
    str->last = block;

    bd = Bdescr((P_)block);
    bd->free = (StgPtr)((W_)block + sizeof(StgCompactNFDataBlock));
    ASSERT(bd->free == (StgPtr)block + sizeofW(StgCompactNFDataBlock));

    str->totalW += bd->blocks * BLOCK_SIZE_W;

    return block;
}
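The final ASSERT in compactAppendBlock checks a small identity that is easy to get wrong: advancing an address by sizeof(T) bytes lands in the same place as advancing a word pointer by sizeofW(T) words, provided T occupies a whole number of words. A self-contained sketch of that identity, using uintptr_t in place of the RTS types W_ and StgPtr and a stand-in block struct:

#include <assert.h>
#include <stdint.h>

// Stand-in for a word-aligned RTS block header such as StgCompactNFDataBlock.
typedef struct { void *owner; void *next; } DemoBlock;

// Mirrors the RTS sizeofW: a byte size rounded up to whole machine words.
#define SIZEOF_W(t) ((sizeof(t) + sizeof(uintptr_t) - 1) / sizeof(uintptr_t))

int main(void)
{
    DemoBlock blk;
    // Byte arithmetic on the raw address ...
    uintptr_t *byte_view = (uintptr_t *)((uintptr_t)&blk + sizeof(DemoBlock));
    // ... and word arithmetic on a word pointer ...
    uintptr_t *word_view = (uintptr_t *)&blk + SIZEOF_W(DemoBlock);
    // ... agree whenever the struct is a whole number of words, which is
    // exactly what the ASSERT in compactAppendBlock relies on.
    assert(byte_view == word_view);
    return 0;
}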
Example 3: thread_AP_STACK
STATIC_INLINE StgPtr
thread_AP_STACK (StgAP_STACK *ap)
{
    thread(&ap->fun);
    thread_stack((P_)ap->payload, (P_)ap->payload + ap->size);
    return (P_)ap + sizeofW(StgAP_STACK) + ap->size;
}
Example 4: LDV_recordDead
void
LDV_recordDead( StgClosure *c, nat size )
{
    void *id;
    nat t;
    counter *ctr;

    if (era > 0 && closureSatisfiesConstraints(c)) {
        size -= sizeofW(StgProfHeader);
        ASSERT(LDVW(c) != 0);
        if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
            t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
            if (t < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t].void_total   += (long)size;
                    censuses[era].void_total -= (long)size;
                    ASSERT(censuses[t].void_total < censuses[t].not_used);
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.void_total += (long)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.void_total -= (long)size;
                }
            }
        } else {
            // ......... (rest of the code omitted) .........
Example 5: rts_mkDouble
HaskellObj
rts_mkDouble (Capability *cap, HsDouble d)
{
    StgClosure *p = (StgClosure *)allocate(cap, CONSTR_sizeW(0,sizeofW(StgDouble)));
    SET_HDR(p, Dzh_con_info, CCS_SYSTEM);
    ASSIGN_DBL((P_)p->payload, (StgDouble)d);
    return p;
}
Example 6: rts_mkFunPtr
HaskellObj
rts_mkFunPtr (Capability *cap, HsFunPtr a)
{
    StgClosure *p = (StgClosure *)allocate(cap, sizeofW(StgHeader) + 1);
    SET_HDR(p, FunPtr_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)a;
    return p;
}
Example 7: evacuate_BLACKHOLE
void
evacuate_BLACKHOLE(StgClosure **p)
{
    bdescr *bd;
    uint32_t gen_no;
    StgClosure *q;
    const StgInfoTable *info;

    q = *p;

    // closure is required to be a heap-allocated BLACKHOLE
    ASSERT(HEAP_ALLOCED_GC(q));
    ASSERT(GET_CLOSURE_TAG(q) == 0);

    bd = Bdescr((P_)q);

    // blackholes can't be in a compact
    ASSERT((bd->flags & BF_COMPACT) == 0);

    // blackholes *can* be in a large object: when raiseAsync() creates an
    // AP_STACK the payload might be large enough to create a large object.
    // See #14497.
    if (bd->flags & BF_LARGE) {
        evacuate_large((P_)q);
        return;
    }
    if (bd->flags & BF_EVACUATED) {
        if (bd->gen_no < gct->evac_gen_no) {
            gct->failed_to_evac = true;
            TICK_GC_FAILED_PROMOTION();
        }
        return;
    }
    if (bd->flags & BF_MARKED) {
        if (!is_marked((P_)q,bd)) {
            mark((P_)q,bd);
            push_mark_stack((P_)q);
        }
        return;
    }

    gen_no = bd->dest_no;
    info = q->header.info;
    if (IS_FORWARDING_PTR(info))
    {
        StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
        *p = e;
        if (gen_no < gct->evac_gen_no) {  // optimisation
            if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
                gct->failed_to_evac = true;
                TICK_GC_FAILED_PROMOTION();
            }
        }
        return;
    }

    ASSERT(INFO_PTR_TO_STRUCT(info)->type == BLACKHOLE);
    copy(p, info, q, sizeofW(StgInd), gen_no);
}
Example 8: checkHeapChunk
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}
Example 9: updateAdjacentFrames
static struct stack_gap *
updateAdjacentFrames (Capability *cap, StgTSO *tso, StgUpdateFrame *upd,
                      uint32_t count, struct stack_gap *next)
{
    StgClosure *updatee;
    struct stack_gap *gap;
    uint32_t i;

    // The first one (highest address) is the frame we take the
    // "master" updatee from; all the others will be made indirections
    // to this one.  It is essential that we do it this way around: we
    // used to make the lowest-addressed frame the "master" frame and
    // shuffle it down, but a bad case cropped up (#5505) where this
    // happened repeatedly, generating a chain of indirections which
    // the GC repeatedly traversed (indirection chains longer than one
    // are not supposed to happen).  So now after identifying a block
    // of adjacent update frames we walk downwards again updating them
    // all to point to the highest one, before squeezing out all but
    // the highest one.
    updatee = upd->updatee;
    count--;

    upd--;
    gap = (struct stack_gap*)upd;

    for (i = count; i > 0; i--, upd--) {
        /*
         * Check two things: that the two update frames
         * don't point to the same object, and that the
         * updatee_bypass isn't already an indirection.
         * Both of these cases only happen when we're in a
         * block hole-style loop (and there are multiple
         * update frames on the stack pointing to the same
         * closure), but they can both screw us up if we
         * don't check.
         */
        if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
            updateThunk(cap, tso, upd->updatee, updatee);
        }
    }

    gap->gap_size = count * sizeofW(StgUpdateFrame);
    gap->next_gap = next;

    return gap;
}
Example 10: lockCAF
STATIC_INLINE StgInd *
lockCAF (StgRegTable *reg, StgIndStatic *caf)
{
    const StgInfoTable *orig_info;
    Capability *cap = regTableToCapability(reg);
    StgInd *bh;

    orig_info = caf->header.info;

#ifdef THREADED_RTS
    const StgInfoTable *cur_info;

    if (orig_info == &stg_IND_STATIC_info ||
        orig_info == &stg_WHITEHOLE_info) {
        // already claimed by another thread; re-enter the CAF
        return NULL;
    }

    cur_info = (const StgInfoTable *)
        cas((StgVolatilePtr)&caf->header.info,
            (StgWord)orig_info,
            (StgWord)&stg_WHITEHOLE_info);

    if (cur_info != orig_info) {
        // already claimed by another thread; re-enter the CAF
        return NULL;
    }

    // successfully claimed by us; overwrite with IND_STATIC
#endif

    // For the benefit of revertCAFs(), save the original info pointer
    caf->saved_info = orig_info;

    // Allocate the blackhole indirection closure
    bh = (StgInd *)allocate(cap, sizeofW(*bh));
    SET_HDR(bh, &stg_CAF_BLACKHOLE_info, caf->header.prof.ccs);
    bh->indirectee = (StgClosure *)cap->r.rCurrentTSO;

    caf->indirectee = (StgClosure *)bh;
    write_barrier();
    SET_INFO((StgClosure*)caf, &stg_IND_STATIC_info);

    return bh;
}
Example 11: checkHeapChain
void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
        if (!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                nat size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}
Example 12: stgAllocStable
static void *
stgAllocStable(size_t size_in_bytes, StgStablePtr *stable)
{
    StgArrWords* arr;
    nat data_size_in_words, total_size_in_words;

    /* round up to a whole number of words */
    data_size_in_words  = ROUNDUP_BYTES_TO_WDS(size_in_bytes);
    total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;

    /* allocate and fill it in */
    arr = (StgArrWords *)allocate(total_size_in_words);
    SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, size_in_bytes);

    /* obtain a stable ptr */
    *stable = getStablePtr((StgPtr)arr);

    /* and return a ptr to the goods inside the array */
    return(&(arr->payload));
}
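The size arithmetic is the part of this example worth dwelling on: the requested byte count is rounded up to whole words with ROUNDUP_BYTES_TO_WDS, and the StgArrWords header (measured with sizeofW) is added on top before calling allocate. A worked sketch of that arithmetic, assuming an 8-byte word and a 2-word header purely for illustration (the real values depend on the platform and on whether profiling is enabled):

#include <stdio.h>

// Illustrative assumptions, not values taken from any particular RTS build.
#define WORD_BYTES        8   // assumed machine word size in bytes
#define ARR_WORDS_HDR_WDS 2   // assumed sizeofW(StgArrWords)

// Mirrors ROUNDUP_BYTES_TO_WDS: round a byte count up to whole words.
static size_t roundup_bytes_to_words(size_t bytes)
{
    return (bytes + WORD_BYTES - 1) / WORD_BYTES;
}

int main(void)
{
    size_t size_in_bytes = 13;                                    // caller asks for 13 bytes
    size_t data_words    = roundup_bytes_to_words(size_in_bytes); // 2 words = 16 bytes of payload
    size_t total_words   = ARR_WORDS_HDR_WDS + data_words;        // 4 words allocated in total
    printf("data words: %zu, total words: %zu\n", data_words, total_words);
    return 0;
}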
Example 13: checkGlobalTSOList
/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso = generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }

            {
                StgStack *stack;
                StgUnderflowFrame *frame;

                stack = tso->stackobj;
                while (1) {
                    if (stack->dirty & 1) {
                        ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                        stack->dirty &= ~TSO_MARKED;
                    }
                    frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                  - sizeofW(StgUnderflowFrame));
                    if (frame->info != &stg_stack_underflow_frame_info
                        || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                    stack = frame->next_chunk;
                }
            }
        }
    }
}
Example 14: searchHeapBlocks
static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
{
    StgPtr p;
    StgInfoTable *info;
    nat size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        if (bd->flags & BF_PINNED) {
            // Assume that objects in PINNED blocks cannot refer to
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case BLACKHOLE:
            case BLOCKING_QUEUE:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                prim = rtsTrue;
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case TVAR:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                prim = rtsTrue;
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                prim = rtsTrue;
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
            {
                StgAP_STACK *ap = (StgAP_STACK *)p;
                // ......... (rest of the code omitted) .........
Example 15: main
int
main(int argc, char *argv[])
{
#ifndef GEN_HASKELL
    printf("/* This file is created automatically.  Do not edit by hand.*/\n\n");

    printf("#define STD_HDR_SIZE %" FMT_SizeT "\n", (size_t)sizeofW(StgHeader) - sizeofW(StgProfHeader));
    /* grrr.. PROFILING is on so we need to subtract sizeofW(StgProfHeader) */
    printf("#define PROF_HDR_SIZE %" FMT_SizeT "\n", (size_t)sizeofW(StgProfHeader));

    printf("#define BLOCK_SIZE %u\n", BLOCK_SIZE);
    printf("#define MBLOCK_SIZE %u\n", MBLOCK_SIZE);
    printf("#define BLOCKS_PER_MBLOCK %" FMT_SizeT "\n", (lnat)BLOCKS_PER_MBLOCK);
    // could be derived, but better to save doing the calculation twice

    printf("\n\n");
#endif

    field_offset(StgRegTable, rR1);
    field_offset(StgRegTable, rR2);
    field_offset(StgRegTable, rR3);
    field_offset(StgRegTable, rR4);
    field_offset(StgRegTable, rR5);
    field_offset(StgRegTable, rR6);
    field_offset(StgRegTable, rR7);
    field_offset(StgRegTable, rR8);
    field_offset(StgRegTable, rR9);
    field_offset(StgRegTable, rR10);
    field_offset(StgRegTable, rF1);
    field_offset(StgRegTable, rF2);
    field_offset(StgRegTable, rF3);
    field_offset(StgRegTable, rF4);
    field_offset(StgRegTable, rD1);
    field_offset(StgRegTable, rD2);
    field_offset(StgRegTable, rL1);
    field_offset(StgRegTable, rSp);
    field_offset(StgRegTable, rSpLim);
    field_offset(StgRegTable, rHp);
    field_offset(StgRegTable, rHpLim);
    field_offset(StgRegTable, rCCCS);
    field_offset(StgRegTable, rCurrentTSO);
    field_offset(StgRegTable, rCurrentNursery);
    field_offset(StgRegTable, rHpAlloc);
    struct_field(StgRegTable, rRet);
    struct_field(StgRegTable, rNursery);

    def_offset("stgEagerBlackholeInfo", FUN_OFFSET(stgEagerBlackholeInfo));
    def_offset("stgGCEnter1", FUN_OFFSET(stgGCEnter1));
    def_offset("stgGCFun", FUN_OFFSET(stgGCFun));

    field_offset(Capability, r);
    field_offset(Capability, lock);
    struct_field(Capability, no);
    struct_field(Capability, mut_lists);
    struct_field(Capability, context_switch);
    struct_field(Capability, interrupt);
    struct_field(Capability, sparks);

    struct_field(bdescr, start);
    struct_field(bdescr, free);
    struct_field(bdescr, blocks);
    struct_field(bdescr, gen_no);
    struct_field(bdescr, link);

    struct_size(generation);
    struct_field(generation, n_new_large_words);

    struct_size(CostCentreStack);
    struct_field(CostCentreStack, ccsID);
    struct_field(CostCentreStack, mem_alloc);
    struct_field(CostCentreStack, scc_count);
    struct_field(CostCentreStack, prevStack);

    struct_field(CostCentre, ccID);
    struct_field(CostCentre, link);

    struct_field(StgHeader, info);
    struct_field_("StgHeader_ccs", StgHeader, prof.ccs);
    struct_field_("StgHeader_ldvw", StgHeader, prof.hp.ldvw);

    struct_size(StgSMPThunkHeader);

    closure_payload(StgClosure, payload);

    struct_field(StgEntCounter, allocs);
    struct_field(StgEntCounter, registeredp);
    struct_field(StgEntCounter, link);
    struct_field(StgEntCounter, entry_count);

    closure_size(StgUpdateFrame);
    closure_size(StgCatchFrame);
    closure_size(StgStopFrame);

    closure_size(StgMutArrPtrs);
    closure_field(StgMutArrPtrs, ptrs);
    closure_field(StgMutArrPtrs, size);

    closure_size(StgArrWords);
    closure_field(StgArrWords, bytes);
    closure_payload(StgArrWords, payload);
    // ......... (rest of the code omitted) .........