This article collects typical usage examples of the Python function pypy.rpython.lltypesystem.llmemory.sizeof. If you are wondering what sizeof does, how to call it, or what real uses of it look like, the curated examples below should help.
The following presents 15 code examples of the sizeof function, sorted by popularity by default.
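As a quick orientation before the examples, here is a minimal sketch of the two ways sizeof is called. This is a sketch under the assumption that the pypy.rpython packages used throughout this page are importable; the variable names are invented here.

from pypy.rpython.lltypesystem import lltype, llmemory

# fixed-size type: sizeof() returns a symbolic size that the
# backend later resolves to a concrete number of bytes
S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed))
fixed_size = llmemory.sizeof(S)

# var-sized type: the second argument supplies the item count,
# so this is the size of an array of 10 Signed items
A = lltype.Array(lltype.Signed)
size_of_10 = llmemory.sizeof(A, 10)

# symbolic sizes support arithmetic, as many examples below show
three_words = llmemory.sizeof(lltype.Signed) * 3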
Example 1: get_size
def get_size(TYPE, translate_support_code):
    if translate_support_code:
        if TYPE._is_varsize():
            return llmemory.sizeof(TYPE, 0)
        return llmemory.sizeof(TYPE)
    ctype = ll2ctypes.get_ctypes_type(TYPE)
    return ctypes.sizeof(ctype)
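For illustration (not part of the original example), an untranslated call takes the ctypes branch and yields a concrete byte count. This assumes lltype.Signed maps to ctypes.c_long on the host, which is what ll2ctypes does on typical platforms:

import ctypes
# untranslated: the size is resolved through ll2ctypes
assert get_size(lltype.Signed, False) == ctypes.sizeof(ctypes.c_long)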
Example 2: test_simple_access
def test_simple_access(self):
    AddressStack = get_address_stack()
    addr0 = raw_malloc(llmemory.sizeof(lltype.Signed))
    addr1 = raw_malloc(llmemory.sizeof(lltype.Signed))
    addr2 = raw_malloc(llmemory.sizeof(lltype.Signed))
    ll = AddressStack()
    ll.append(addr0)
    ll.append(addr1)
    ll.append(addr2)
    assert ll.non_empty()
    a = ll.pop()
    assert a == addr2
    assert ll.non_empty()
    a = ll.pop()
    assert a == addr1
    assert ll.non_empty()
    a = ll.pop()
    assert a == addr0
    assert not ll.non_empty()
    ll.append(addr0)
    ll.delete()
    ll = AddressStack()
    ll.append(addr0)
    ll.append(addr1)
    ll.append(addr2)
    ll.append(NULL)
    a = ll.pop()
    assert a == NULL
    ll.delete()
    raw_free(addr2)
    raw_free(addr1)
    raw_free(addr0)
Example 3: get_array_token
def get_array_token(T, translate_support_code):
    # T can be an array or a var-sized structure
    if translate_support_code:
        basesize = llmemory.sizeof(T, 0)
        if isinstance(T, lltype.Struct):
            SUBARRAY = getattr(T, T._arrayfld)
            itemsize = llmemory.sizeof(SUBARRAY.OF)
            ofs_length = (llmemory.offsetof(T, T._arrayfld) +
                          llmemory.ArrayLengthOffset(SUBARRAY))
        else:
            itemsize = llmemory.sizeof(T.OF)
            ofs_length = llmemory.ArrayLengthOffset(T)
    else:
        if isinstance(T, lltype.Struct):
            assert T._arrayfld is not None, "%r is not variable-sized" % (T,)
            cstruct = ll2ctypes.get_ctypes_type(T)
            cfield = getattr(cstruct, T._arrayfld)
            before_array_part = cfield.offset
            T = getattr(T, T._arrayfld)
        else:
            before_array_part = 0
        carray = ll2ctypes.get_ctypes_type(T)
        assert carray.length.size == WORD
        ofs_length = before_array_part + carray.length.offset
        basesize = before_array_part + carray.items.offset
        carrayitem = ll2ctypes.get_ctypes_type(T.OF)
        itemsize = ctypes.sizeof(carrayitem)
    return basesize, itemsize, ofs_length
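The returned triple fully describes the array layout. As a hypothetical illustration (the array type and the address arithmetic in the comments are invented here, not taken from the original caller):

# decode the layout of a plain GC array, untranslated
A = lltype.GcArray(lltype.Signed)
basesize, itemsize, ofs_length = get_array_token(A, False)
# the length field lives at:  array_addr + ofs_length
# item i lives at:            array_addr + basesize + i * itemsize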
Example 4: identityhash
def identityhash(self, gcobj):
    # The following loop should run at most twice.
    while 1:
        obj = llmemory.cast_ptr_to_adr(gcobj)
        hdr = self.header(obj)
        if hdr.tid & GCFLAG_HASHMASK:
            break
        # It's the first time we ask for a hash, and it's not an
        # external object.  Shrink the top of space by the extra
        # hash word that will be needed after a collect.
        shrunk_top = self.top_of_space - llmemory.sizeof(lltype.Signed)
        if shrunk_top < self.free:
            # Cannot shrink!  Do a collection, asking for at least
            # one word of free space, and try again.  May raise
            # MemoryError.  Obscure: not called directly, but
            # across an llop, to make sure that there is the
            # correct push_roots/pop_roots around the call...
            llop.gc_obtain_free_space(llmemory.Address,
                                      llmemory.sizeof(lltype.Signed))
            continue
        else:
            # Now we can have side-effects: lower the top of space
            # and set one of the GC_HASH_TAKEN_xxx flags.
            self.top_of_space = shrunk_top
            if self.is_in_nursery(obj):
                hdr.tid |= GC_HASH_TAKEN_NURS
            else:
                hdr.tid |= GC_HASH_TAKEN_ADDR
            break
    # Now we can return the result
    objsize = self.get_size(obj)
    return self._get_object_hash(obj, objsize, hdr.tid)
Example 5: walk_marked_objects
def walk_marked_objects(self, callback):
    num = 0
    size_gc_header = self.gcheaderbuilder.size_gc_header
    fromaddr = self.space
    toaddr = self.base_forwarding_addr
    while fromaddr < self.free:
        hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
        obj = fromaddr + size_gc_header
        survives = self.marked(obj)
        if survives:
            typeid = self.get_typeid_from_backup(num)
            num += 1
        else:
            typeid = self.get_type_id(obj)
        baseobjsize = self._get_size_for_typeid(obj, typeid)
        basesize = size_gc_header + baseobjsize
        totalsrcsize = basesize
        #
        if survives:
            grow_hash_field = False
            if hdr.tid & GCFLAG_SAVED_HASHFIELD:
                totalsrcsize += llmemory.sizeof(lltype.Signed)
            totaldstsize = totalsrcsize
            if (hdr.tid & (GCFLAG_SAVED_HASHTAKEN | GCFLAG_SAVED_HASHFIELD)
                        == GCFLAG_SAVED_HASHTAKEN):
                if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr):
                    grow_hash_field = True
                    totaldstsize += llmemory.sizeof(lltype.Signed)
            callback(self, obj, typeid, basesize, toaddr, grow_hash_field)
            toaddr += totaldstsize
        else:
            if hdr.tid & GCFLAG_HASHFIELD:
                totalsrcsize += llmemory.sizeof(lltype.Signed)
        #
        fromaddr += totalsrcsize
Example 6: compute_alive_objects
def compute_alive_objects(self):
    fromaddr = self.space
    addraftercollect = self.space
    num = 1
    while fromaddr < self.free:
        size_gc_header = self.gcheaderbuilder.size_gc_header
        tid = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)).tid
        obj = fromaddr + size_gc_header
        objsize = self.get_size(obj)
        objtotalsize = size_gc_header + objsize
        if self.marked(obj):
            copy_has_hash_field = ((tid & GCFLAG_HASHFIELD) != 0 or
                                   ((tid & GCFLAG_HASHTAKEN) != 0 and
                                    addraftercollect < fromaddr))
            addraftercollect += raw_malloc_usage(objtotalsize)
            if copy_has_hash_field:
                addraftercollect += llmemory.sizeof(lltype.Signed)
        num += 1
        fromaddr += objtotalsize
        if tid & GCFLAG_HASHFIELD:
            fromaddr += llmemory.sizeof(lltype.Signed)
    ll_assert(addraftercollect <= fromaddr,
              "markcompactcollect() is trying to increase memory usage")
    self.totalsize_of_objs = addraftercollect - self.space
    return num
Example 7: ll_arraycopy
def ll_arraycopy(source, dest, source_start, dest_start, length):
    from pypy.rpython.lltypesystem.lloperation import llop
    from pypy.rlib.objectmodel import keepalive_until_here
    # supports non-overlapping copies only
    if not we_are_translated():
        if source == dest:
            assert (source_start + length <= dest_start or
                    dest_start + length <= source_start)
    TP = lltype.typeOf(source).TO
    assert TP == lltype.typeOf(dest).TO
    if isinstance(TP.OF, lltype.Ptr) and TP.OF.TO._gckind == 'gc':
        # perform a write barrier that copies necessary flags from
        # source to dest
        if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest):
            # if the write barrier is not supported, copy by hand
            for i in range(length):
                dest[i + dest_start] = source[i + source_start]
            return
    source_addr = llmemory.cast_ptr_to_adr(source)
    dest_addr = llmemory.cast_ptr_to_adr(dest)
    cp_source_addr = (source_addr + llmemory.itemoffsetof(TP, 0) +
                      llmemory.sizeof(TP.OF) * source_start)
    cp_dest_addr = (dest_addr + llmemory.itemoffsetof(TP, 0) +
                    llmemory.sizeof(TP.OF) * dest_start)
    llmemory.raw_memcopy(cp_source_addr, cp_dest_addr,
                         llmemory.sizeof(TP.OF) * length)
    keepalive_until_here(source)
    keepalive_until_here(dest)
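A minimal usage sketch, untranslated, assuming the function above plus the lltype/llmemory modules it imports (the array type and values are invented here):

# copy three Signed items from src[1:4] into dst[0:3]
A = lltype.GcArray(lltype.Signed)
src = lltype.malloc(A, 5)
dst = lltype.malloc(A, 5)
for i in range(5):
    src[i] = i * 10
ll_arraycopy(src, dst, 1, 0, 3)
assert [dst[i] for i in range(3)] == [10, 20, 30]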
Example 8: test_primitive
def test_primitive(self):
    assert lltype2ctypes(5) == 5
    assert lltype2ctypes('?') == ord('?')
    assert lltype2ctypes('\xE0') == 0xE0
    assert lltype2ctypes(unichr(1234)) == 1234
    assert ctypes2lltype(lltype.Signed, 5) == 5
    assert ctypes2lltype(lltype.Char, ord('a')) == 'a'
    assert ctypes2lltype(lltype.UniChar, ord(u'x')) == u'x'
    assert ctypes2lltype(lltype.Char, 0xFF) == '\xFF'
    assert lltype2ctypes(5.25) == 5.25
    assert ctypes2lltype(lltype.Float, 5.25) == 5.25
    assert lltype2ctypes(u'x') == ord(u'x')
    res = lltype2ctypes(rffi.r_singlefloat(-3.5))
    assert isinstance(res, ctypes.c_float)
    assert res.value == -3.5
    res = ctypes2lltype(lltype.SingleFloat, ctypes.c_float(-3.5))
    assert isinstance(res, rffi.r_singlefloat)
    assert float(res) == -3.5
    assert lltype2ctypes(rffi.r_ulong(-1)) == sys.maxint * 2 + 1
    res = ctypes2lltype(lltype.Unsigned, sys.maxint * 2 + 1)
    assert (res, type(res)) == (rffi.r_ulong(-1), rffi.r_ulong)
    res = lltype2ctypes(llmemory.sizeof(lltype.Signed))
    assert res == struct.calcsize("l")
    S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed))
    res = lltype2ctypes(llmemory.sizeof(S))
    assert res == struct.calcsize("ll")
    p = lltype.nullptr(S)
    cptr = lltype2ctypes(p)
    assert not cptr
    py.test.raises(ValueError, 'cptr.contents')   # NULL pointer access
    res = ctypes2lltype(lltype.Ptr(S), cptr)
    assert res == p
    assert not ALLOCATED     # detects memory leaks in the test
Example 9: encode_type_shape
def encode_type_shape(builder, info, TYPE):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    info.finalizer = builder.make_finalizer_funcptr_for_type(TYPE)
    info.weakptrofs = weakpointer_offset(TYPE)
    if not TYPE._is_varsize():
        #info.isvarsize = False
        #info.gcptrinvarsize = False
        info.fixedsize = llarena.round_up_for_allocation(
            llmemory.sizeof(TYPE))
        info.ofstolength = -1
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        #info.isvarsize = True
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            info.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            info.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            ARRAY = TYPE
            info.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            info.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        info.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        info.varitemsize = llmemory.sizeof(ARRAY.OF)
Example 10: link
def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
    assert step in (1, 2)
    llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
    page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
    if step == 1:
        page.nfree = 0
        nuninitialized = nblocks - nusedblocks
    else:
        page.nfree = nusedblocks
        nuninitialized = nblocks - 2*nusedblocks
    page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
    if nusedblocks < nblocks:
        chainedlists = ac.page_for_size
    else:
        chainedlists = ac.full_page_for_size
    page.nextpage = chainedlists[size_class]
    page.arena = ac.current_arena
    chainedlists[size_class] = page
    if fill_with_objects:
        for i in range(0, nusedblocks*step, step):
            objaddr = pageaddr + hdrsize + i * size_block
            llarena.arena_reserve(objaddr, _dummy_size(size_block))
        if step == 2:
            prev = 'page.freeblock'
            for i in range(1, nusedblocks*step, step):
                holeaddr = pageaddr + hdrsize + i * size_block
                llarena.arena_reserve(holeaddr,
                                      llmemory.sizeof(llmemory.Address))
                exec '%s = holeaddr' % prev in globals(), locals()
                prevhole = holeaddr
                prev = 'prevhole.address[0]'
            endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
            exec '%s = endaddr' % prev in globals(), locals()
    assert ac._nuninitialized(page, size_class) == nuninitialized
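The exec trick above threads the free list through raw memory: page.freeblock points at the first hole, each hole's first word points at the next one, and the last hole points just past the used blocks. A hypothetical equivalent without exec (the function and names are invented here) makes the chaining explicit:

def chain_holes(page, holes, endaddr):
    # the page header points at the first hole
    page.freeblock = holes[0]
    # each hole's first word holds the address of the next hole
    for i in range(len(holes) - 1):
        holes[i].address[0] = holes[i + 1]
    # the last hole points just past the used part of the page
    holes[-1].address[0] = endaddr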
Example 11: identityhash
def identityhash(self, gcobj):
    # The following code should run at most twice.
    while 1:
        obj = llmemory.cast_ptr_to_adr(gcobj)
        hdr = self.header(obj)
        #
        if hdr.tid & GCFLAG_HASHFIELD:   # the hash is in a field at the end
            obj += self.get_size(obj)
            return obj.signed[0]
        #
        if not (hdr.tid & GCFLAG_HASHTAKEN):
            # It's the first time we ask for a hash, and it's not an
            # external object.  Shrink the top of space by the extra
            # hash word that will be needed after a collect.
            shrunk_top = self.top_of_space - llmemory.sizeof(lltype.Signed)
            if shrunk_top < self.free:
                # Cannot shrink!  Do a collection, asking for at least
                # one word of free space, and try again.  May raise
                # MemoryError.  Obscure: not called directly, but
                # across an llop, to make sure that there is the
                # correct push_roots/pop_roots around the call...
                llop.gc_obtain_free_space(llmemory.Address,
                                          llmemory.sizeof(lltype.Signed))
                continue
            # Now we can have side-effects: set GCFLAG_HASHTAKEN
            # and lower the top of space.
            self.top_of_space = shrunk_top
            hdr.tid |= GCFLAG_HASHTAKEN
        #
        return llmemory.cast_adr_to_int(obj)   # direct case
Example 12: varsize_malloc_helper
def varsize_malloc_helper(self, hop, flags, meth, extraargs):
    def intconst(c): return rmodel.inputconst(lltype.Signed, c)
    op = hop.spaceop
    TYPE = op.result.concretetype.TO
    assert TYPE._is_varsize()
    if isinstance(TYPE, lltype.Struct):
        ARRAY = TYPE._flds[TYPE._arrayfld]
    else:
        ARRAY = TYPE
    assert isinstance(ARRAY, lltype.Array)
    c_const_size = intconst(llmemory.sizeof(TYPE, 0))
    c_item_size = intconst(llmemory.sizeof(ARRAY.OF))
    if ARRAY._hints.get("nolength", False):
        c_offset_to_length = None
    else:
        if isinstance(TYPE, lltype.Struct):
            offset_to_length = llmemory.FieldOffset(TYPE, TYPE._arrayfld) + \
                               llmemory.ArrayLengthOffset(ARRAY)
        else:
            offset_to_length = llmemory.ArrayLengthOffset(ARRAY)
        c_offset_to_length = intconst(offset_to_length)
    args = [hop] + extraargs + [flags, TYPE,
            op.args[-1], c_const_size, c_item_size, c_offset_to_length]
    v_raw = meth(*args)
    hop.cast_result(v_raw)
Example 13: gct_malloc_varsize
def gct_malloc_varsize(self, hop):
    def intconst(c): return rmodel.inputconst(lltype.Signed, c)
    op = hop.spaceop
    TYPE = op.result.concretetype.TO
    assert TYPE._is_varsize()
    if isinstance(TYPE, lltype.Struct):
        ARRAY = TYPE._flds[TYPE._arrayfld]
    else:
        ARRAY = TYPE
    assert isinstance(ARRAY, lltype.Array)
    if ARRAY._hints.get('isrpystring', False):
        c_const_size = intconst(llmemory.sizeof(TYPE, 1))
    else:
        c_const_size = intconst(llmemory.sizeof(TYPE, 0))
    c_item_size = intconst(llmemory.sizeof(ARRAY.OF))
    if ARRAY._hints.get("nolength", False):
        v_raw = hop.genop("direct_call",
                          [self.malloc_varsize_no_length_ptr, op.args[-1],
                           c_const_size, c_item_size],
                          resulttype=llmemory.Address)
    else:
        if isinstance(TYPE, lltype.Struct):
            offset_to_length = llmemory.FieldOffset(TYPE, TYPE._arrayfld) + \
                               llmemory.ArrayLengthOffset(ARRAY)
        else:
            offset_to_length = llmemory.ArrayLengthOffset(ARRAY)
        v_raw = hop.genop("direct_call",
                          [self.malloc_varsize_ptr, op.args[-1],
                           c_const_size, c_item_size,
                           intconst(offset_to_length)],
                          resulttype=llmemory.Address)
    hop.cast_result(v_raw)
Example 14: update_forward_pointers
def update_forward_pointers(self, toaddr, maxnum):
    self.base_forwarding_addr = base_forwarding_addr = toaddr
    fromaddr = self.space
    size_gc_header = self.gcheaderbuilder.size_gc_header
    num = 0
    while fromaddr < self.free:
        hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
        obj = fromaddr + size_gc_header
        # compute the original object size, including the
        # optional hash field
        basesize = size_gc_header + self.get_size(obj)
        totalsrcsize = basesize
        if hdr.tid & GCFLAG_HASHFIELD:   # already a hash field, copy it too
            totalsrcsize += llmemory.sizeof(lltype.Signed)
        #
        if self.marked(obj):
            # the object is marked as surviving.  Compute the new
            # object size
            totaldstsize = totalsrcsize
            if (hdr.tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD)
                        == GCFLAG_HASHTAKEN):
                # grow a new hash field -- with the exception: if
                # the object actually doesn't move, don't
                # (otherwise, we get a bogus toaddr > fromaddr)
                if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr):
                    totaldstsize += llmemory.sizeof(lltype.Signed)
            #
            if not translated_to_c():
                llarena.arena_reserve(toaddr, basesize)
                if raw_malloc_usage(totaldstsize) > raw_malloc_usage(basesize):
                    llarena.arena_reserve(toaddr + basesize,
                                          llmemory.sizeof(lltype.Signed))
            #
            # save the field hdr.tid in the array tid_backup
            ll_assert(num < maxnum, "overflow of the tid_backup table")
            self.tid_backup[num] = self.get_type_id(obj)
            num += 1
            # compute forward_offset, the offset to the future copy
            # of this object
            forward_offset = toaddr - base_forwarding_addr
            # copy the first two gc flags in forward_offset
            ll_assert(forward_offset & 3 == 0, "misalignment!")
            forward_offset |= (hdr.tid >> first_gcflag_bit) & 3
            hdr.tid = forward_offset | GCFLAG_MARKBIT
            ll_assert(self.marked(obj), "re-marking object failed!")
            # done
            toaddr += totaldstsize
        #
        fromaddr += totalsrcsize
        if not translated_to_c():
            assert toaddr - base_forwarding_addr <= fromaddr - self.space
    self.num_alive_objs = num
    self.finaladdr = toaddr
    # now update references
    self.root_walker.walk_roots(
        MarkCompactGC._update_ref,   # stack roots
        MarkCompactGC._update_ref,   # static in prebuilt non-gc structures
        MarkCompactGC._update_ref)   # static in prebuilt gc objects
    self.walk_marked_objects(MarkCompactGC.trace_and_update_ref)
Example 15: markcompactcollect
def markcompactcollect(self, needed=0):
    start_time = self.debug_collect_start()
    self.debug_check_consistency()
    self.to_see = self.AddressStack()
    self.mark_roots_recursively()
    if (self.objects_with_finalizers.non_empty() or
        self.run_finalizers.non_empty()):
        self.mark_objects_with_finalizers()
        self._trace_and_mark()
    self.to_see.delete()
    num_of_alive_objs = self.compute_alive_objects()
    size_of_alive_objs = self.totalsize_of_objs
    totalsize = self.new_space_size(size_of_alive_objs, needed +
                                    num_of_alive_objs * BYTES_PER_TID)
    tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                       llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
    used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size)
    if totalsize >= self.space_size or used_space_now >= self.space_size:
        toaddr = self.double_space_size(totalsize)
        llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
        self.tid_backup = llmemory.cast_adr_to_ptr(
            toaddr + size_of_alive_objs,
            lltype.Ptr(self.TID_BACKUP))
        resizing = True
    else:
        toaddr = llarena.arena_new_view(self.space)
        llarena.arena_reserve(self.top_of_space, tid_backup_size)
        self.tid_backup = llmemory.cast_adr_to_ptr(
            self.top_of_space,
            lltype.Ptr(self.TID_BACKUP))
        resizing = False
    self.next_collect_after = totalsize
    weakref_offsets = self.collect_weakref_offsets()
    finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
    if (self.run_finalizers.non_empty() or
        self.objects_with_finalizers.non_empty()):
        self.update_run_finalizers()
    if self.objects_with_weakrefs.non_empty():
        self.invalidate_weakrefs(weakref_offsets)
    self.update_objects_with_id()
    self.compact(resizing)
    if not resizing:
        size = toaddr + self.space_size - finaladdr
        llarena.arena_reset(finaladdr, size, True)
    else:
        if we_are_translated():
            # because we free stuff already in raw_memmove, we
            # would get double free here.  Let's free it anyway
            llarena.arena_free(self.space)
        llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                            True)
    self.space = toaddr
    self.free = finaladdr
    self.top_of_space = toaddr + self.next_collect_after
    self.debug_check_consistency()
    self.tid_backup = lltype.nullptr(self.TID_BACKUP)
    if self.run_finalizers.non_empty():
        self.execute_finalizers()
    self.debug_collect_finish(start_time)