This page collects typical usage examples of the raw_malloc_usage function from the Python module pypy.rpython.lltypesystem.llmemory. If you are wondering what raw_malloc_usage does, how to call it, or what real uses of it look like, the hand-picked code samples below should help.
Fifteen code examples of raw_malloc_usage are shown below, ordered by popularity by default.
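Before the examples, here is a minimal standalone sketch of what raw_malloc_usage is for: it turns a symbolic size (an AddressOffset such as llmemory.sizeof(...)) into a plain integer byte count that can be compared with or added to ordinary counters. The import path assumes an old PyPy checkout in which pypy.rpython.lltypesystem is still importable (newer trees use rpython.rtyper.lltypesystem); the struct is made up for illustration.
from pypy.rpython.lltypesystem import lltype, llmemory

S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed))
symbolic_size = llmemory.sizeof(S)                 # a symbolic AddressOffset, not an int
nbytes = llmemory.raw_malloc_usage(symbolic_size)  # plain integer byte count
# (when running untranslated this is only an estimate; after translation to C
# the sizes are already concrete integers and raw_malloc_usage simply returns them)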
Example 1: allocate_object
def allocate_object(self, offset, size, letter='x'):
self.check()
bytes = llmemory.raw_malloc_usage(size)
if offset + bytes > self.nbytes:
raise ArenaError("object overflows beyond the end of the arena")
zero = True
for c in self.usagemap[offset:offset+bytes]:
if c == '0':
pass
elif c == '#':
zero = False
else:
raise ArenaError("new object overlaps a previous object")
assert offset not in self.objectptrs
addr2 = size._raw_malloc([], zero=zero)
pattern = letter.upper() + letter*(bytes-1)
self.usagemap[offset:offset+bytes] = array.array('c', pattern)
self.setobject(addr2, offset, bytes)
# common case: 'size' starts with a GCHeaderOffset. In this case
# we can also remember that the real object starts after the header.
while isinstance(size, RoundedUpForAllocation):
size = size.basesize
if (isinstance(size, llmemory.CompositeOffset) and
isinstance(size.offsets[0], llmemory.GCHeaderOffset)):
objaddr = addr2 + size.offsets[0]
hdrbytes = llmemory.raw_malloc_usage(size.offsets[0])
objoffset = offset + hdrbytes
self.setobject(objaddr, objoffset, bytes - hdrbytes)
return addr2
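The example above is from llarena's untranslated arena emulation, where a usage map records which bytes of the arena are taken; raw_malloc_usage converts the symbolic 'size' into the number of bytes to mark. A tiny plain-Python illustration of that bookkeeping (the arena size, offset and letter are made up):
import array

usagemap = array.array('c', '0' * 16)          # 16 free bytes, '0' means unallocated
offset, nbytes, letter = 4, 6, 'x'
pattern = letter.upper() + letter * (nbytes - 1)
usagemap[offset:offset + nbytes] = array.array('c', pattern)
assert usagemap.tostring() == '0000Xxxxxx000000'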
Example 2: realloc
def realloc(self, ptr, newlength, fixedsize, itemsize, lengthofs, grow):
size_gc_header = self.size_gc_header()
addr = llmemory.cast_ptr_to_adr(ptr)
tid = self.get_type_id(addr)
nonvarsize = size_gc_header + fixedsize
try:
varsize = ovfcheck(itemsize * newlength)
tot_size = ovfcheck(nonvarsize + varsize)
except OverflowError:
raise MemoryError()
oldlength = (addr + lengthofs).signed[0]
old_tot_size = size_gc_header + fixedsize + oldlength * itemsize
source_addr = addr - size_gc_header
self.gen2_resizable_objects.remove(addr)
if grow:
result = llop.raw_realloc_grow(llmemory.Address, source_addr,
old_tot_size, tot_size)
else:
result = llop.raw_realloc_shrink(llmemory.Address, source_addr,
old_tot_size, tot_size)
if not result:
self.gen2_resizable_objects.append(addr)
raise MemoryError()
if grow:
self.gen2_resizable_objects.append(result + size_gc_header)
else:
self.gen2_rawmalloced_objects.append(result + size_gc_header)
self._check_rawsize_alloced(raw_malloc_usage(tot_size) -
raw_malloc_usage(old_tot_size),
can_collect = not grow)
(result + size_gc_header + lengthofs).signed[0] = newlength
return llmemory.cast_adr_to_ptr(result + size_gc_header, llmemory.GCREF)
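The try/except around ovfcheck in this example is the standard RPython idiom for overflow-safe size arithmetic: each multiplication and addition is checked individually, and overflow is reported as a MemoryError. A condensed, hedged sketch of just that pattern (plain integers, illustrative names; ovfcheck lives in pypy.rlib.rarithmetic in trees of this vintage):
from pypy.rlib.rarithmetic import ovfcheck

def checked_total_size(nonvarsize, itemsize, length):
    try:
        varsize = ovfcheck(itemsize * length)   # may exceed a machine word
        return ovfcheck(nonvarsize + varsize)   # the sum may overflow too
    except OverflowError:
        raise MemoryError()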
Example 3: malloc_fixedsize_clear
def malloc_fixedsize_clear(self, typeid, size, can_collect,
has_finalizer=False, contains_weakptr=False):
if (has_finalizer or not can_collect or
(raw_malloc_usage(size) > self.lb_young_var_basesize and
raw_malloc_usage(size) > self.largest_young_fixedsize)):
# ^^^ we do two size comparisons; the first one appears redundant,
# but it can be constant-folded if 'size' is a constant; then
# it almost always folds down to False, which kills the
# second comparison as well.
ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
# "non-simple" case or object too big: don't use the nursery
return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
can_collect,
has_finalizer,
contains_weakptr)
size_gc_header = self.gcheaderbuilder.size_gc_header
totalsize = size_gc_header + size
result = self.nursery_free
if raw_malloc_usage(totalsize) > self.nursery_top - result:
result = self.collect_nursery()
llarena.arena_reserve(result, totalsize)
# GCFLAG_NO_YOUNG_PTRS is never set on young objs
self.init_gc_object(result, typeid, flags=0)
self.nursery_free = result + totalsize
if contains_weakptr:
self.young_objects_with_weakrefs.append(result + size_gc_header)
return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
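The fast path above is a classic bump-pointer allocation in the nursery: compare the requested size (as an integer, via raw_malloc_usage) against the space left before nursery_top, and fall back to a nursery collection when it does not fit. A stripped-down sketch with plain integers (the field and method names mirror the example, but this is illustrative, not the GC's real implementation):
def nursery_alloc(gc, totalsize):
    # 'totalsize' is assumed to already be an integer, i.e. the result of
    # raw_malloc_usage(size_gc_header + size)
    result = gc.nursery_free
    if totalsize > gc.nursery_top - result:
        result = gc.collect_nursery()      # empties the nursery, returns its start
    gc.nursery_free = result + totalsize
    return result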
Example 4: update_forward_pointers
def update_forward_pointers(self, toaddr, maxnum):
self.base_forwarding_addr = base_forwarding_addr = toaddr
fromaddr = self.space
size_gc_header = self.gcheaderbuilder.size_gc_header
num = 0
while fromaddr < self.free:
hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
obj = fromaddr + size_gc_header
# compute the original object size, including the
# optional hash field
basesize = size_gc_header + self.get_size(obj)
totalsrcsize = basesize
if hdr.tid & GCFLAG_HASHFIELD: # already a hash field, copy it too
totalsrcsize += llmemory.sizeof(lltype.Signed)
#
if self.marked(obj):
# the object is marked as surviving. Compute the new object
# size
totaldstsize = totalsrcsize
if hdr.tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD) == GCFLAG_HASHTAKEN:
# grow a new hash field -- with the exception: if
# the object actually doesn't move, don't
# (otherwise, we get a bogus toaddr > fromaddr)
if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr):
totaldstsize += llmemory.sizeof(lltype.Signed)
#
if not translated_to_c():
llarena.arena_reserve(toaddr, basesize)
if raw_malloc_usage(totaldstsize) > raw_malloc_usage(basesize):
llarena.arena_reserve(toaddr + basesize, llmemory.sizeof(lltype.Signed))
#
# save the field hdr.tid in the array tid_backup
ll_assert(num < maxnum, "overflow of the tid_backup table")
self.tid_backup[num] = self.get_type_id(obj)
num += 1
# compute forward_offset, the offset to the future copy
# of this object
forward_offset = toaddr - base_forwarding_addr
# copy the first two gc flags in forward_offset
ll_assert(forward_offset & 3 == 0, "misalignment!")
forward_offset |= (hdr.tid >> first_gcflag_bit) & 3
hdr.tid = forward_offset | GCFLAG_MARKBIT
ll_assert(self.marked(obj), "re-marking object failed!")
# done
toaddr += totaldstsize
#
fromaddr += totalsrcsize
if not translated_to_c():
assert toaddr - base_forwarding_addr <= fromaddr - self.space
self.num_alive_objs = num
self.finaladdr = toaddr
# now update references
self.root_walker.walk_roots(
MarkCompactGC._update_ref, # stack roots
MarkCompactGC._update_ref, # static in prebuilt non-gc structures
MarkCompactGC._update_ref) # static in prebuilt gc objects
self.walk_marked_objects(MarkCompactGC.trace_and_update_ref)
Example 5: _get_totalsize_var
def _get_totalsize_var(self, nonvarsize, itemsize, length):
try:
varsize = ovfcheck(itemsize * length)
except OverflowError:
raise MemoryError
# Careful to detect overflows. The following works even if varsize
# is almost equal to sys.maxint; moreover, self.space_size is known
# to be at least 4095 bytes smaller than sys.maxint, so this function
# always raises instead of returning an integer >= sys.maxint-4095.
if raw_malloc_usage(varsize) > self.space_size - raw_malloc_usage(nonvarsize):
raise MemoryError
return llarena.round_up_for_allocation(nonvarsize + varsize)
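The comparison in this example is deliberately arranged as varsize > space_size - nonvarsize rather than nonvarsize + varsize > space_size: the subtraction cannot overflow because space_size is documented to stay at least 4095 bytes below sys.maxint, whereas the addition could wrap once translated to C. A small worked illustration with made-up numbers (Python integers do not wrap, so the point is the arithmetic, not the execution):
import sys

space_size = sys.maxint - 4095      # as the comment above guarantees
nonvarsize = 64
varsize = sys.maxint - 1000         # huge but still representable

# unsafe in C/RPython: nonvarsize + varsize would exceed sys.maxint and wrap
# safe: space_size - nonvarsize stays well inside the signed range
assert varsize > space_size - nonvarsize    # -> the GC raises MemoryError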
Example 6: try_obtain_free_space
def try_obtain_free_space(self, needed):
needed = raw_malloc_usage(needed)
while 1:
self.markcompactcollect(needed)
missing = needed - (self.top_of_space - self.free)
if missing < 0:
return True
Example 7: make_a_nonmoving_copy
def make_a_nonmoving_copy(self, obj, objsize):
# NB. the object can have a finalizer or be a weakref, but
# it's not an issue.
totalsize = self.size_gc_header() + objsize
newaddr = self.allocate_external_object(totalsize)
if not newaddr:
return llmemory.NULL # can't raise MemoryError during a collect()
if self.config.gcconfig.debugprint:
self._nonmoving_copy_count += 1
self._nonmoving_copy_size += raw_malloc_usage(totalsize)
llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
newobj = newaddr + self.size_gc_header()
hdr = self.header(newobj)
hdr.tid |= self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
# GCFLAG_UNVISITED is not set
# GCFLAG_NO_HEAP_PTRS is not set either, conservatively. It may be
# set by the next collection's collect_last_generation_roots().
# This old object is immediately put at generation 3.
ll_assert(self.is_last_generation(newobj),
"make_a_nonmoving_copy: object too young")
self.gen3_rawmalloced_objects.append(newobj)
self.last_generation_root_objects.append(newobj)
self.rawmalloced_objects_to_trace.append(newobj) # visit me
return newobj
Example 8: malloc_varsize_slowpath
def malloc_varsize_slowpath(self, typeid, length, force_nonmovable=False):
# For objects that are too large, or when the nursery is exhausted.
# In order to keep malloc_varsize_clear() as compact as possible,
# we recompute what we need in this slow path instead of passing
# it all as function arguments.
size_gc_header = self.gcheaderbuilder.size_gc_header
nonvarsize = size_gc_header + self.fixed_size(typeid)
itemsize = self.varsize_item_sizes(typeid)
offset_to_length = self.varsize_offset_to_length(typeid)
try:
varsize = ovfcheck(itemsize * length)
totalsize = ovfcheck(nonvarsize + varsize)
except OverflowError:
raise MemoryError()
if self.has_gcptr_in_varsize(typeid):
nonlarge_max = self.nonlarge_gcptrs_max
else:
nonlarge_max = self.nonlarge_max
if force_nonmovable or raw_malloc_usage(totalsize) > nonlarge_max:
result = self.malloc_varsize_marknsweep(totalsize)
flags = self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS | GCFLAG_UNVISITED
else:
result = self.malloc_varsize_collecting_nursery(totalsize)
flags = self.GCFLAGS_FOR_NEW_YOUNG_OBJECTS
self.init_gc_object(result, typeid, flags)
(result + size_gc_header + offset_to_length).signed[0] = length
return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example 9: make_a_nonmoving_copy
def make_a_nonmoving_copy(self, obj, objsize):
# NB. the object can have a finalizer or be a weakref, but
# it's not an issue.
totalsize = self.size_gc_header() + objsize
tid = self.header(obj).tid
if tid & GCFLAG_HASHMASK:
totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
else:
totalsize_incl_hash = totalsize
newaddr = self.allocate_external_object(totalsize_incl_hash)
if not newaddr:
return llmemory.NULL # can't raise MemoryError during a collect()
self._nonmoving_copy_count += 1
self._nonmoving_copy_size += raw_malloc_usage(totalsize)
llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
if tid & GCFLAG_HASHMASK:
hash = self._get_object_hash(obj, objsize, tid)
(newaddr + totalsize).signed[0] = hash
tid |= GC_HASH_HASFIELD
#
# GCFLAG_UNVISITED is not set
# GCFLAG_NO_HEAP_PTRS is not set either, conservatively. It may be
# set by the next collection's collect_last_generation_roots().
# This old object is immediately put at generation 3.
newobj = newaddr + self.size_gc_header()
hdr = self.header(newobj)
hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
ll_assert(self.is_last_generation(newobj),
"make_a_nonmoving_copy: object too young")
self.gen3_rawmalloced_objects.append(newobj)
self.last_generation_root_objects.append(newobj)
self.rawmalloced_objects_to_trace.append(newobj) # visit me
return newobj
Example 10: malloc_fixedsize
def malloc_fixedsize(self, typeid16, size, can_collect,
has_finalizer=False, contains_weakptr=False):
if can_collect:
self.maybe_collect()
size_gc_header = self.gcheaderbuilder.size_gc_header
try:
tot_size = size_gc_header + size
usage = raw_malloc_usage(tot_size)
bytes_malloced = ovfcheck(self.bytes_malloced+usage)
ovfcheck(self.heap_usage + bytes_malloced)
except OverflowError:
raise memoryError
result = raw_malloc(tot_size)
if not result:
raise memoryError
hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
hdr.typeid16 = typeid16
hdr.mark = False
hdr.flags = '\x00'
if has_finalizer:
hdr.next = self.malloced_objects_with_finalizer
self.malloced_objects_with_finalizer = hdr
elif contains_weakptr:
hdr.next = self.objects_with_weak_pointers
self.objects_with_weak_pointers = hdr
else:
hdr.next = self.malloced_objects
self.malloced_objects = hdr
self.bytes_malloced = bytes_malloced
result += size_gc_header
#llop.debug_print(lltype.Void, 'malloc typeid', typeid16,
# '->', llmemory.cast_adr_to_int(result))
self.write_malloc_statistics(typeid16, tot_size, result, False)
return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
Example 11: malloc_varsize_clear
def malloc_varsize_clear(self, typeid16, length, size, itemsize,
offset_to_length, can_collect):
if can_collect:
self.maybe_collect()
size_gc_header = self.gcheaderbuilder.size_gc_header
try:
fixsize = size_gc_header + size
varsize = ovfcheck(itemsize * length)
tot_size = ovfcheck(fixsize + varsize)
usage = raw_malloc_usage(tot_size)
bytes_malloced = ovfcheck(self.bytes_malloced+usage)
ovfcheck(self.heap_usage + bytes_malloced)
except OverflowError:
raise memoryError
result = raw_malloc(tot_size)
if not result:
raise memoryError
raw_memclear(result, tot_size)
(result + size_gc_header + offset_to_length).signed[0] = length
hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
hdr.typeid16 = typeid16
hdr.mark = False
hdr.flags = '\x00'
hdr.next = self.malloced_objects
self.malloced_objects = hdr
self.bytes_malloced = bytes_malloced
result += size_gc_header
#llop.debug_print(lltype.Void, 'malloc_varsize length', length,
# 'typeid', typeid16,
# '->', llmemory.cast_adr_to_int(result))
self.write_malloc_statistics(typeid16, tot_size, result, True)
return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
Example 12: compute_alive_objects
def compute_alive_objects(self):
fromaddr = self.space
addraftercollect = self.space
num = 1
while fromaddr < self.free:
size_gc_header = self.gcheaderbuilder.size_gc_header
tid = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)).tid
obj = fromaddr + size_gc_header
objsize = self.get_size(obj)
objtotalsize = size_gc_header + objsize
if self.marked(obj):
copy_has_hash_field = ((tid & GCFLAG_HASHFIELD) != 0 or
((tid & GCFLAG_HASHTAKEN) != 0 and
addraftercollect < fromaddr))
addraftercollect += raw_malloc_usage(objtotalsize)
if copy_has_hash_field:
addraftercollect += llmemory.sizeof(lltype.Signed)
num += 1
fromaddr += objtotalsize
if tid & GCFLAG_HASHFIELD:
fromaddr += llmemory.sizeof(lltype.Signed)
ll_assert(addraftercollect <= fromaddr,
"markcompactcollect() is trying to increase memory usage")
self.totalsize_of_objs = addraftercollect - self.space
return num
Example 13: markcompactcollect
def markcompactcollect(self, needed=0):
start_time = self.debug_collect_start()
self.debug_check_consistency()
self.to_see = self.AddressStack()
self.mark_roots_recursively()
if (self.objects_with_finalizers.non_empty() or
self.run_finalizers.non_empty()):
self.mark_objects_with_finalizers()
self._trace_and_mark()
self.to_see.delete()
num_of_alive_objs = self.compute_alive_objects()
size_of_alive_objs = self.totalsize_of_objs
totalsize = self.new_space_size(size_of_alive_objs, needed +
num_of_alive_objs * BYTES_PER_TID)
tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size)
if totalsize >= self.space_size or used_space_now >= self.space_size:
toaddr = self.double_space_size(totalsize)
llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
self.tid_backup = llmemory.cast_adr_to_ptr(
toaddr + size_of_alive_objs,
lltype.Ptr(self.TID_BACKUP))
resizing = True
else:
toaddr = llarena.arena_new_view(self.space)
llarena.arena_reserve(self.top_of_space, tid_backup_size)
self.tid_backup = llmemory.cast_adr_to_ptr(
self.top_of_space,
lltype.Ptr(self.TID_BACKUP))
resizing = False
self.next_collect_after = totalsize
weakref_offsets = self.collect_weakref_offsets()
finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
if (self.run_finalizers.non_empty() or
self.objects_with_finalizers.non_empty()):
self.update_run_finalizers()
if self.objects_with_weakrefs.non_empty():
self.invalidate_weakrefs(weakref_offsets)
self.update_objects_with_id()
self.compact(resizing)
if not resizing:
size = toaddr + self.space_size - finaladdr
llarena.arena_reset(finaladdr, size, True)
else:
if we_are_translated():
# because we free stuff already in raw_memmove, we
# would get double free here. Let's free it anyway
llarena.arena_free(self.space)
llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
True)
self.space = toaddr
self.free = finaladdr
self.top_of_space = toaddr + self.next_collect_after
self.debug_check_consistency()
self.tid_backup = lltype.nullptr(self.TID_BACKUP)
if self.run_finalizers.non_empty():
self.execute_finalizers()
self.debug_collect_finish(start_time)
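In markcompactcollect above, the tid_backup side table is sized as an array header plus one TID_TYPE entry per surviving object, and raw_malloc_usage turns that symbolic size into bytes before adding it to next_collect_after. A worked example with made-up numbers (BYTES_PER_TID and the header overhead are hypothetical):
num_of_alive_objs = 1000
BYTES_PER_TID = 2                  # hypothetical: 16-bit type ids
array_header = 8                   # hypothetical overhead of sizeof(TID_BACKUP, 0)
tid_backup_bytes = array_header + BYTES_PER_TID * num_of_alive_objs   # 2008 bytes
# this integer is what raw_malloc_usage(tid_backup_size) would produce, so it
# can be added to next_collect_after when computing used_space_now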
Example 14: malloc_varsize_clear
def malloc_varsize_clear(self, typeid, length, size, itemsize,
offset_to_length, can_collect,
has_finalizer=False):
# Only use the nursery if there are not too many items.
if not raw_malloc_usage(itemsize):
too_many_items = False
else:
# The following line is usually constant-folded because both
# min_nursery_size and itemsize are constants (the latter
# due to inlining).
maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
raw_malloc_usage(itemsize))
# The actual maximum length for our nursery depends on how
# many times our nursery is bigger than the minimal size.
# The computation is done in this roundabout way so that
# the only remaining run-time computation is the following
# shift.
maxlength = maxlength_for_minimal_nursery << self.nursery_scale
too_many_items = length > maxlength
if (has_finalizer or not can_collect or
too_many_items or
(raw_malloc_usage(size) > self.lb_young_var_basesize and
raw_malloc_usage(size) > self.largest_young_var_basesize)):
# ^^^ we do two size comparisons; the first one appears redundant,
# but it can be constant-folded if 'size' is a constant; then
# it almost always folds down to False, which kills the
# second comparison as well.
return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
itemsize, offset_to_length,
can_collect, has_finalizer)
# with the above checks we know now that totalsize cannot be more
# than about half of the nursery size; in particular, the + and *
# cannot overflow
size_gc_header = self.gcheaderbuilder.size_gc_header
totalsize = size_gc_header + size + itemsize * length
result = self.nursery_free
if raw_malloc_usage(totalsize) > self.nursery_top - result:
result = self.collect_nursery()
llarena.arena_reserve(result, totalsize)
# GCFLAG_NO_YOUNG_PTRS is never set on young objs
self.init_gc_object(result, typeid, flags=0)
(result + size_gc_header + offset_to_length).signed[0] = length
self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
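The nursery length cap in this example is computed so that everything except a single shift can be constant-folded: min_nursery_size // 4 // itemsize is a constant after inlining, and only << nursery_scale depends on how much larger the real nursery is than the minimal one. A worked example with made-up constants:
min_nursery_size = 128 * 1024      # hypothetical minimal nursery size in bytes
itemsize = 8                       # hypothetical item size (a constant after inlining)
nursery_scale = 2                  # hypothetical: real nursery is 2**2 x the minimal one

maxlength_for_minimal_nursery = min_nursery_size // 4 // itemsize   # 4096, folded
maxlength = maxlength_for_minimal_nursery << nursery_scale          # 16384
too_many_items = 20000 > maxlength                                  # True -> slow path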
Example 15: malloc_varsize_collecting_nursery
def malloc_varsize_collecting_nursery(self, totalsize):
result = self.collect_nursery()
ll_assert(raw_malloc_usage(totalsize) <= self.nursery_top - result,
"not enough room in malloc_varsize_collecting_nursery()")
llarena.arena_reserve(result, totalsize)
self.nursery_free = result + llarena.round_up_for_allocation(
totalsize)
return result