This page collects typical usage examples of the Python function pypy.rpython.lltypesystem.llarena.arena_reserve. If you are unsure what arena_reserve does, how to call it, or what real-world uses look like, the hand-picked examples below should help.
The following shows 15 code examples of the arena_reserve function, sorted by popularity by default. You can vote for the examples you like or find useful; your feedback helps the system recommend better Python code examples.
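Before the examples, here is a minimal sketch of the basic call pattern, distilled from the test cases below (Examples 2, 6, 8 and 12): allocate a raw arena, reserve room for one object at an address inside it, then cast that address to a typed pointer. The struct SX, its field x, and the 64-byte arena size are illustrative choices made for this sketch, not part of the llarena API, and the snippet assumes the old PyPy source tree (where pypy.rpython lives) is importable.

from pypy.rpython.lltypesystem import lltype, llmemory, llarena

SX = lltype.Struct('SX', ('x', lltype.Signed))     # illustrative struct
SPTR = lltype.Ptr(SX)
size = llarena.round_up_for_allocation(llmemory.sizeof(SX))

a = llarena.arena_malloc(64, False)       # raw, uninitialized 64-byte arena
llarena.arena_reserve(a, size)            # declare [a, a+size) as one object
p = llmemory.cast_adr_to_ptr(a, SPTR)     # the reserved address acts as an SX
p.x = 42
assert (a + llmemory.offsetof(SX, 'x')).signed[0] == 42
llarena.arena_free(a)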
Example 1: malloc_fixedsize_clear
def malloc_fixedsize_clear(self, typeid, size, can_collect,
                           has_finalizer=False, contains_weakptr=False):
    if (has_finalizer or not can_collect or
        (raw_malloc_usage(size) > self.lb_young_var_basesize and
         raw_malloc_usage(size) > self.largest_young_fixedsize)):
        # ^^^ we do two size comparisons; the first one appears redundant,
        #     but it can be constant-folded if 'size' is a constant; then
        #     it almost always folds down to False, which kills the
        #     second comparison as well.
        ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
        # "non-simple" case or object too big: don't use the nursery
        return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                  can_collect,
                                                  has_finalizer,
                                                  contains_weakptr)
    size_gc_header = self.gcheaderbuilder.size_gc_header
    totalsize = size_gc_header + size
    result = self.nursery_free
    if raw_malloc_usage(totalsize) > self.nursery_top - result:
        result = self.collect_nursery()
    llarena.arena_reserve(result, totalsize)
    # GCFLAG_NO_YOUNG_PTRS is never set on young objs
    self.init_gc_object(result, typeid, flags=0)
    self.nursery_free = result + totalsize
    if contains_weakptr:
        self.young_objects_with_weakrefs.append(result + size_gc_header)
    return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
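Examples 1, 5, 13 and 15 all share the same bump-pointer pattern around arena_reserve: check whether the object fits between nursery_free and nursery_top, reserve the space, then advance nursery_free. The toy class below isolates that control flow with plain integers standing in for addresses, so it runs without the PyPy toolchain; reserve() only marks where the real code would call llarena.arena_reserve, and the MemoryError stands in for collect_nursery(). It is a sketch of the pattern, not the real GC.

class NurserySketch:
    """Toy bump-pointer allocator mirroring the nursery logic above."""

    def __init__(self, nursery_size):
        self.nursery_free = 0
        self.nursery_top = nursery_size

    def reserve(self, addr, totalsize):
        # The real code calls llarena.arena_reserve(addr, totalsize) here to
        # declare that [addr, addr+totalsize) now holds a single object.
        pass

    def malloc(self, totalsize):
        result = self.nursery_free
        if totalsize > self.nursery_top - result:
            # The real code calls self.collect_nursery() and retries.
            raise MemoryError("nursery full")
        self.reserve(result, totalsize)
        self.nursery_free = result + totalsize
        return result

n = NurserySketch(64)
assert n.malloc(16) == 0
assert n.malloc(16) == 16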
Example 2: test_address_order
def test_address_order():
    a = arena_malloc(20, False)
    assert eq(a, a)
    assert lt(a, a+1)
    assert lt(a+5, a+20)
    b = arena_malloc(20, False)
    if a > b:
        a, b = b, a
    assert lt(a, b)
    assert lt(a+19, b)
    assert lt(a, b+19)
    c = b + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(c, precomputed_size)
    assert lt(b, c)
    assert lt(a, c)
    assert lt(llmemory.NULL, c)
    d = c + llmemory.offsetof(SX, 'x')
    assert lt(c, d)
    assert lt(b, d)
    assert lt(a, d)
    assert lt(llmemory.NULL, d)
    e = c + precomputed_size
    assert lt(d, e)
    assert lt(c, e)
    assert lt(b, e)
    assert lt(a, e)
    assert lt(llmemory.NULL, e)
Example 3: markcompactcollect
def markcompactcollect(self, needed=0):
    start_time = self.debug_collect_start()
    self.debug_check_consistency()
    self.to_see = self.AddressStack()
    self.mark_roots_recursively()
    if (self.objects_with_finalizers.non_empty() or
        self.run_finalizers.non_empty()):
        self.mark_objects_with_finalizers()
        self._trace_and_mark()
    self.to_see.delete()
    num_of_alive_objs = self.compute_alive_objects()
    size_of_alive_objs = self.totalsize_of_objs
    totalsize = self.new_space_size(size_of_alive_objs, needed +
                                    num_of_alive_objs * BYTES_PER_TID)
    tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                       llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
    used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size)
    if totalsize >= self.space_size or used_space_now >= self.space_size:
        toaddr = self.double_space_size(totalsize)
        llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
        self.tid_backup = llmemory.cast_adr_to_ptr(
            toaddr + size_of_alive_objs,
            lltype.Ptr(self.TID_BACKUP))
        resizing = True
    else:
        toaddr = llarena.arena_new_view(self.space)
        llarena.arena_reserve(self.top_of_space, tid_backup_size)
        self.tid_backup = llmemory.cast_adr_to_ptr(
            self.top_of_space,
            lltype.Ptr(self.TID_BACKUP))
        resizing = False
    self.next_collect_after = totalsize
    weakref_offsets = self.collect_weakref_offsets()
    finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
    if (self.run_finalizers.non_empty() or
        self.objects_with_finalizers.non_empty()):
        self.update_run_finalizers()
    if self.objects_with_weakrefs.non_empty():
        self.invalidate_weakrefs(weakref_offsets)
    self.update_objects_with_id()
    self.compact(resizing)
    if not resizing:
        size = toaddr + self.space_size - finaladdr
        llarena.arena_reset(finaladdr, size, True)
    else:
        if we_are_translated():
            # because we free stuff already in raw_memmove, we
            # would get double free here. Let's free it anyway
            llarena.arena_free(self.space)
        llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                            True)
    self.space = toaddr
    self.free = finaladdr
    self.top_of_space = toaddr + self.next_collect_after
    self.debug_check_consistency()
    self.tid_backup = lltype.nullptr(self.TID_BACKUP)
    if self.run_finalizers.non_empty():
        self.execute_finalizers()
    self.debug_collect_finish(start_time)
Example 4: update_forward_pointers
def update_forward_pointers(self, toaddr, maxnum):
    self.base_forwarding_addr = base_forwarding_addr = toaddr
    fromaddr = self.space
    size_gc_header = self.gcheaderbuilder.size_gc_header
    num = 0
    while fromaddr < self.free:
        hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
        obj = fromaddr + size_gc_header
        # compute the original object size, including the
        # optional hash field
        basesize = size_gc_header + self.get_size(obj)
        totalsrcsize = basesize
        if hdr.tid & GCFLAG_HASHFIELD:  # already a hash field, copy it too
            totalsrcsize += llmemory.sizeof(lltype.Signed)
        #
        if self.marked(obj):
            # the object is marked as surviving.  Compute the new object
            # size
            totaldstsize = totalsrcsize
            if hdr.tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD) == GCFLAG_HASHTAKEN:
                # grow a new hash field -- with the exception: if
                # the object actually doesn't move, don't
                # (otherwise, we get a bogus toaddr > fromaddr)
                if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr):
                    totaldstsize += llmemory.sizeof(lltype.Signed)
            #
            if not translated_to_c():
                llarena.arena_reserve(toaddr, basesize)
                if raw_malloc_usage(totaldstsize) > raw_malloc_usage(basesize):
                    llarena.arena_reserve(toaddr + basesize, llmemory.sizeof(lltype.Signed))
            #
            # save the field hdr.tid in the array tid_backup
            ll_assert(num < maxnum, "overflow of the tid_backup table")
            self.tid_backup[num] = self.get_type_id(obj)
            num += 1
            # compute forward_offset, the offset to the future copy
            # of this object
            forward_offset = toaddr - base_forwarding_addr
            # copy the first two gc flags in forward_offset
            ll_assert(forward_offset & 3 == 0, "misalignment!")
            forward_offset |= (hdr.tid >> first_gcflag_bit) & 3
            hdr.tid = forward_offset | GCFLAG_MARKBIT
            ll_assert(self.marked(obj), "re-marking object failed!")
            # done
            toaddr += totaldstsize
        #
        fromaddr += totalsrcsize
        if not translated_to_c():
            assert toaddr - base_forwarding_addr <= fromaddr - self.space
    self.num_alive_objs = num
    self.finaladdr = toaddr
    # now update references
    self.root_walker.walk_roots(
        MarkCompactGC._update_ref,  # stack roots
        MarkCompactGC._update_ref,  # static in prebuilt non-gc structures
        MarkCompactGC._update_ref)  # static in prebuilt gc objects
    self.walk_marked_objects(MarkCompactGC.trace_and_update_ref)
Example 5: malloc_varsize_collecting_nursery
def malloc_varsize_collecting_nursery(self, totalsize):
    result = self.collect_nursery()
    ll_assert(raw_malloc_usage(totalsize) <= self.nursery_top - result,
              "not enough room in malloc_varsize_collecting_nursery()")
    llarena.arena_reserve(result, totalsize)
    self.nursery_free = result + llarena.round_up_for_allocation(
        totalsize)
    return result
Example 6: test_address_eq_as_int
def test_address_eq_as_int():
    a = arena_malloc(50, False)
    arena_reserve(a, precomputed_size)
    p = llmemory.cast_adr_to_ptr(a, SPTR)
    a1 = llmemory.cast_ptr_to_adr(p)
    assert a == a1
    assert not (a != a1)
    assert (a+1) != a1
    assert not ((a+1) == a1)
    py.test.skip("cast_adr_to_int() is hard to get consistent")
    assert llmemory.cast_adr_to_int(a) == llmemory.cast_adr_to_int(a1)
    assert llmemory.cast_adr_to_int(a+1) == llmemory.cast_adr_to_int(a1) + 1
Example 7: _get_memory
def _get_memory(self, totalsize):
    # also counts the space that will be needed during the following
    # collection to store the TID
    requested_size = raw_malloc_usage(totalsize) + BYTES_PER_TID
    self.next_collect_after -= requested_size
    if self.next_collect_after < 0:
        result = self.obtain_free_space(requested_size)
    else:
        result = self.free
    self.free += totalsize
    llarena.arena_reserve(result, totalsize)
    return result
Example 8: test_shrink_obj
def test_shrink_obj():
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('h', lltype.Signed))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                        ('a', lltype.Array(lltype.Signed)))
    myarenasize = 200
    a = arena_malloc(myarenasize, False)
    arena_reserve(a, size_gc_header + llmemory.sizeof(S, 10))
    arena_shrink_obj(a, size_gc_header + llmemory.sizeof(S, 5))
    arena_reset(a, size_gc_header + llmemory.sizeof(S, 5), False)
Example 9: markcompactcollect
def markcompactcollect(self, requested_size=0):
    self.debug_collect_start(requested_size)
    self.debug_check_consistency()
    #
    # Mark alive objects
    #
    self.to_see = self.AddressDeque()
    self.trace_from_roots()
    self.to_see.delete()
    #
    # Prepare new views on the same memory
    #
    toaddr = llarena.arena_new_view(self.space)
    maxnum = self.space_size - (self.free - self.space)
    maxnum /= BYTES_PER_TID
    llarena.arena_reserve(self.free, llmemory.sizeof(TID_BACKUP, maxnum))
    self.tid_backup = llmemory.cast_adr_to_ptr(self.free,
                                               lltype.Ptr(TID_BACKUP))
    #
    # Walk all objects and assign forward pointers in the same order,
    # also updating all references
    #
    self.update_forward_pointers(toaddr, maxnum)
    if (self.run_finalizers.non_empty() or
        self.objects_with_finalizers.non_empty()):
        self.update_run_finalizers()
    self.update_objects_with_id()
    self.compact()
    #
    self.tid_backup = lltype.nullptr(TID_BACKUP)
    self.free = self.finaladdr
    self.next_collect_after = self.next_collection(self.finaladdr - toaddr,
                                                   self.num_alive_objs,
                                                   requested_size)
    #
    if not translated_to_c():
        remaining_size = (toaddr + self.space_size) - self.finaladdr
        llarena.arena_reset(self.finaladdr, remaining_size, False)
        llarena.arena_free(self.space)
    self.space = toaddr
    #
    self.debug_check_consistency()
    self.debug_collect_finish()
    if self.next_collect_after < 0:
        raise MemoryError
    #
    if self.run_finalizers.non_empty():
        self.execute_finalizers()
        return True      # executed some finalizers
    else:
        return False     # no finalizer executed
Example 10: allocate_new_page
def allocate_new_page(self, size_class):
    """Allocate and return a new page for the given size_class."""
    #
    # Allocate a new arena if needed.
    if self.current_arena == ARENA_NULL:
        self.allocate_new_arena()
    #
    # The result is simply 'current_arena.freepages'.
    arena = self.current_arena
    result = arena.freepages
    if arena.nfreepages > 0:
        #
        # The 'result' was part of the chained list; read the next.
        arena.nfreepages -= 1
        freepages = result.address[0]
        llarena.arena_reset(result,
                            llmemory.sizeof(llmemory.Address),
                            0)
        #
    else:
        # The 'result' is part of the uninitialized pages.
        ll_assert(self.num_uninitialized_pages > 0,
                  "fully allocated arena found in self.current_arena")
        self.num_uninitialized_pages -= 1
        if self.num_uninitialized_pages > 0:
            freepages = result + self.page_size
        else:
            freepages = NULL
    #
    arena.freepages = freepages
    if freepages == NULL:
        # This was the last page, so put the arena away into
        # arenas_lists[0].
        ll_assert(arena.nfreepages == 0,
                  "freepages == NULL but nfreepages > 0")
        arena.nextarena = self.arenas_lists[0]
        self.arenas_lists[0] = arena
        self.current_arena = ARENA_NULL
    #
    # Initialize the fields of the resulting page
    llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
    page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
    page.arena = arena
    page.nfree = 0
    page.freeblock = result + self.hdrsize
    page.nextpage = PAGE_NULL
    ll_assert(self.page_for_size[size_class] == PAGE_NULL,
              "allocate_new_page() called but a page is already waiting")
    self.page_for_size[size_class] = page
    return page
Example 11: free_page
def free_page(self, page):
    """Free a whole page."""
    #
    # Insert the freed page in the arena's 'freepages' list.
    # If nfreepages == totalpages, then it will be freed at the
    # end of mass_free().
    arena = page.arena
    arena.nfreepages += 1
    pageaddr = llmemory.cast_ptr_to_adr(page)
    pageaddr = llarena.getfakearenaaddress(pageaddr)
    llarena.arena_reset(pageaddr, self.page_size, 0)
    llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
    pageaddr.address[0] = arena.freepages
    arena.freepages = pageaddr
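The last three lines of free_page are the interesting part: after wiping the page with arena_reset, an Address-sized slot is re-reserved at the start of the freed page and used to store the previous head of arena.freepages, so the free pages form a singly linked list threaded through the freed memory itself. allocate_new_page (Example 10) pops from that list by reading result.address[0]. A toy version of the idea, with a Python dict standing in for raw memory, might look like this; all names here are illustrative, not llarena API:

NULL = 0
memory = {}          # fake raw memory: address -> stored address
freepages = NULL     # head of the intrusive free list

def free_page_sketch(pageaddr):
    """Mimic free_page(): store the old list head inside the page itself."""
    global freepages
    memory[pageaddr] = freepages    # pageaddr.address[0] = arena.freepages
    freepages = pageaddr            # arena.freepages = pageaddr

def allocate_page_sketch():
    """Mimic allocate_new_page()'s 'read the next' branch."""
    global freepages
    result = freepages
    freepages = memory.pop(result)  # freepages = result.address[0]
    return result

free_page_sketch(0x1000)
free_page_sketch(0x2000)
assert allocate_page_sketch() == 0x2000
assert allocate_page_sketch() == 0x1000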
Example 12: test_look_inside_object
def test_look_inside_object():
    # this code is also used in translation tests below
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
Example 13: malloc_varsize_clear
def malloc_varsize_clear(self, typeid, length, size, itemsize,
                         offset_to_length, can_collect,
                         has_finalizer=False):
    # Only use the nursery if there are not too many items.
    if not raw_malloc_usage(itemsize):
        too_many_items = False
    else:
        # The following line is usually constant-folded because both
        # min_nursery_size and itemsize are constants (the latter
        # due to inlining).
        maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
                                         raw_malloc_usage(itemsize))
        # The actual maximum length for our nursery depends on how
        # many times our nursery is bigger than the minimal size.
        # The computation is done in this roundabout way so that
        # the only remaining computation is the following shift.
        maxlength = maxlength_for_minimal_nursery << self.nursery_scale
        too_many_items = length > maxlength

    if (has_finalizer or not can_collect or
        too_many_items or
        (raw_malloc_usage(size) > self.lb_young_var_basesize and
         raw_malloc_usage(size) > self.largest_young_var_basesize)):
        # ^^^ we do two size comparisons; the first one appears redundant,
        #     but it can be constant-folded if 'size' is a constant; then
        #     it almost always folds down to False, which kills the
        #     second comparison as well.
        return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                itemsize, offset_to_length,
                                                can_collect, has_finalizer)
    # with the above checks we know now that totalsize cannot be more
    # than about half of the nursery size; in particular, the + and *
    # cannot overflow
    size_gc_header = self.gcheaderbuilder.size_gc_header
    totalsize = size_gc_header + size + itemsize * length
    result = self.nursery_free
    if raw_malloc_usage(totalsize) > self.nursery_top - result:
        result = self.collect_nursery()
    llarena.arena_reserve(result, totalsize)
    # GCFLAG_NO_YOUNG_PTRS is never set on young objs
    self.init_gc_object(result, typeid, flags=0)
    (result + size_gc_header + offset_to_length).signed[0] = length
    self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
    return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example 14: copy
def copy(self, obj):
    if self.is_forwarded(obj):
        #llop.debug_print(lltype.Void, obj, "already copied to", self.get_forwarding_address(obj))
        return self.get_forwarding_address(obj)
    else:
        newaddr = self.free
        objsize = self.get_size(obj)
        totalsize = self.size_gc_header() + objsize
        llarena.arena_reserve(newaddr, totalsize)
        raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        self.free += totalsize
        newobj = newaddr + self.size_gc_header()
        #llop.debug_print(lltype.Void, obj, "copied to", newobj,
        #                 "tid", self.header(obj).tid,
        #                 "size", totalsize)
        self.set_forwarding_address(obj, newobj, objsize)
        return newobj
Example 15: malloc_varsize_clear
def malloc_varsize_clear(self, typeid, length, size, itemsize,
                         offset_to_length, can_collect,
                         has_finalizer=False):
    if has_finalizer or not can_collect:
        return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                itemsize, offset_to_length,
                                                can_collect, has_finalizer)
    size_gc_header = self.gcheaderbuilder.size_gc_header
    nonvarsize = size_gc_header + size
    # Compute the maximal length that makes the object still
    # below 'nonlarge_max'.  All the following logic is usually
    # constant-folded because self.nonlarge_max, size and itemsize
    # are all constants (the arguments are constant due to
    # inlining) and self.has_gcptr_in_varsize() is constant-folded.
    if self.has_gcptr_in_varsize(typeid):
        nonlarge_max = self.nonlarge_gcptrs_max
    else:
        nonlarge_max = self.nonlarge_max
    if not raw_malloc_usage(itemsize):
        too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
    else:
        maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
        maxlength = maxlength // raw_malloc_usage(itemsize)
        too_many_items = length > maxlength
    if not too_many_items:
        # With the above checks we know now that totalsize cannot be more
        # than 'nonlarge_max'; in particular, the + and * cannot overflow.
        # Let's try to fit the object in the nursery.
        totalsize = nonvarsize + itemsize * length
        result = self.nursery_free
        if raw_malloc_usage(totalsize) <= self.nursery_top - result:
            llarena.arena_reserve(result, totalsize)
            # GCFLAG_NO_YOUNG_PTRS is never set on young objs
            self.init_gc_object(result, typeid, flags=0)
            (result + size_gc_header + offset_to_length).signed[0] = length
            self.nursery_free = result + llarena.round_up_for_allocation(
                totalsize)
            return llmemory.cast_adr_to_ptr(result+size_gc_header,
                                            llmemory.GCREF)
    return self.malloc_varsize_slowpath(typeid, length)