This article collects typical usage examples of the Python function rpython.rlib.debug.debug_start. If you are wondering what debug_start does, how to call it, or what real-world uses of debug_start look like, the hand-picked examples below should help.
The sections that follow show 15 code examples of the debug_start function, drawn from real projects and sorted by popularity.
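All of the examples follow the same basic pattern: debug_start(category) opens a named log section, debug_print emits lines inside it, and debug_stop(category) closes it, with have_debug_prints() available to skip expensive formatting when logging is disabled. The short sketch below is my own illustration of that pattern, not taken from any of the projects quoted here; the category name "my-section" and the function report_sizes are made up. As far as I know, in a translated RPython binary this output is enabled through the PYPYLOG environment variable (for example PYPYLOG=my-section:logfile), while untranslated code simply prints to stderr.

from rpython.rlib.debug import (debug_start, debug_stop, debug_print,
                                have_debug_prints)

def report_sizes(sizes):
    # Hypothetical helper: "my-section" is a made-up category name.
    debug_start("my-section")            # open a named log section
    try:
        if have_debug_prints():          # skip the work entirely if logging is off
            for i in range(len(sizes)):
                debug_print("size", i, "=", sizes[i])
    finally:
        debug_stop("my-section")         # always close the section, even on error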
Example 1: get_L2cache_linux2_sparc
def get_L2cache_linux2_sparc():
    debug_start("gc-hardware")
    cpu = 0
    L2cache = sys.maxint
    while True:
        try:
            fd = os.open('/sys/devices/system/cpu/cpu' + assert_str0(str(cpu))
                         + '/l2_cache_size', os.O_RDONLY, 0644)
            try:
                line = os.read(fd, 4096)
            finally:
                os.close(fd)
            end = len(line) - 1
            assert end > 0
            number = int(line[:end])
        except OSError:
            break
        if number < L2cache:
            L2cache = number
        cpu += 1
    debug_print("L2cache =", L2cache)
    debug_stop("gc-hardware")
    if L2cache < sys.maxint:
        return L2cache
    else:
        # Print a top-level warning even in non-debug builds
        llop.debug_print(lltype.Void,
            "Warning: cannot find your CPU L2 cache size in "
            "/sys/devices/system/cpu/cpuX/l2_cache_size")
        return -1
Example 2: detect_arch_version
def detect_arch_version(filename="/proc/cpuinfo"):
    fd = os.open(filename, os.O_RDONLY, 0644)
    n = 0
    debug_start("jit-backend-arch")
    try:
        buf = os.read(fd, 2048)
        if not buf:
            n = 6  # we assume ARMv6 as base case
            debug_print("Could not detect ARM architecture "
                        "version, assuming", "ARMv%d" % n)
    finally:
        os.close(fd)
    # "Processor : ARMv%d-compatible processor rev 7 (v6l)"
    i = buf.find('ARMv')
    if i == -1:
        n = 6
        debug_print("Could not detect architecture version, "
                    "falling back to", "ARMv%d" % n)
    else:
        n = int(buf[i + 4])
    if n < 6:
        raise ValueError("Unsupported ARM architecture version")
    debug_print("Detected", "ARMv%d" % n)
    if n > 7:
        n = 7
        debug_print("Architecture version not explicitly supported, "
                    "falling back to", "ARMv%d" % n)
    debug_stop("jit-backend-arch")
    return n
Example 3: disable_noninlinable_function
def disable_noninlinable_function(self, greenkey):
    cell = self.jit_cell_at_key(greenkey)
    cell.dont_trace_here = True
    debug_start("jit-disableinlining")
    loc = self.get_location_str(greenkey)
    debug_print("disabled inlining", loc)
    debug_stop("jit-disableinlining")
Example 4: _do
def _do(self, goal, func, *args, **kwds):
    title = func.task_title
    if goal in self.done:
        self.log.info("already done: %s" % title)
        return
    else:
        self.log.info("%s..." % title)
    debug_start('translation-task')
    debug_print('starting', goal)
    self.timer.start_event(goal)
    try:
        instrument = False
        try:
            if goal in PROFILE:
                res = self._profile(goal, func)
            else:
                res = func()
        except Instrument:
            instrument = True
        if not func.task_idempotent:
            self.done[goal] = True
        if instrument:
            self.proceed('compile')
            assert False, 'we should not get here'
    finally:
        try:
            debug_stop('translation-task')
            self.timer.end_event(goal)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            pass
    #import gc; gc.dump_rpy_heap('rpyheap-after-%s.dump' % goal)
    return res
Example 5: f
def f(x):
    debug_start("mycat")
    debug_print("foo", 2, "bar", x)
    debug_stop("mycat")
    debug_flush()   # does nothing
    debug_offset()  # should not explode at least
    return have_debug_prints()
Example 6: set_nursery_size
def set_nursery_size(self, newsize):
    debug_start("gc-set-nursery-size")
    if newsize < self.min_nursery_size:
        newsize = self.min_nursery_size
    if newsize > self.space_size // 2:
        newsize = self.space_size // 2
    # Compute the new bounds for how large young objects can be
    # (larger objects are allocated directly old).  XXX adjust
    self.nursery_size = newsize
    self.largest_young_fixedsize = self.get_young_fixedsize(newsize)
    self.largest_young_var_basesize = self.get_young_var_basesize(newsize)
    scale = 0
    while (self.min_nursery_size << (scale+1)) <= newsize:
        scale += 1
    self.nursery_scale = scale
    debug_print("nursery_size =", newsize)
    debug_print("largest_young_fixedsize =",
                self.largest_young_fixedsize)
    debug_print("largest_young_var_basesize =",
                self.largest_young_var_basesize)
    debug_print("nursery_scale =", scale)
    # we get the following invariant:
    assert self.nursery_size >= (self.min_nursery_size << scale)
    # Force a full collect to remove the current nursery whose size
    # no longer matches the bounds that we just computed.  This must
    # be done after changing the bounds, because it might re-create
    # a new nursery (e.g. if it invokes finalizers).
    self.semispace_collect()
    debug_stop("gc-set-nursery-size")
Example 7: get_total_memory_linux
def get_total_memory_linux(filename):
    debug_start("gc-hardware")
    result = -1.0
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            buf = os.read(fd, 4096)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        if buf.startswith('MemTotal:'):
            start = _skipspace(buf, len('MemTotal:'))
            stop = start
            while stop < len(buf) and buf[stop].isdigit():
                stop += 1
            if start < stop:
                result = float(buf[start:stop]) * 1024.0   # assume kB
    if result < 0.0:
        debug_print("get_total_memory() failed")
        result = addressable_size
    else:
        debug_print("memtotal =", result)
        if result > addressable_size:
            result = addressable_size
    debug_stop("gc-hardware")
    return result
Example 8: disable_noninlinable_function
def disable_noninlinable_function(self, greenkey):
    cell = self.JitCell.ensure_jit_cell_at_key(greenkey)
    cell.flags |= JC_DONT_TRACE_HERE
    debug_start("jit-disableinlining")
    loc = self.get_location_str(greenkey)
    debug_print("disabled inlining", loc)
    debug_stop("jit-disableinlining")
Example 9: dump
def dump(self, memo):
    if have_debug_prints():
        debug_start("jit-log-exported-state")
        debug_print("[" + ", ".join([x.repr_short(memo) for x in self.next_iteration_args]) + "]")
        for box in self.short_boxes:
            debug_print(" " + box.repr(memo))
        debug_stop("jit-log-exported-state")
Example 10: setup_once
def setup_once(self):
    # the address of the function called by 'new'
    gc_ll_descr = self.cpu.gc_ll_descr
    gc_ll_descr.initialize()
    if hasattr(gc_ll_descr, 'minimal_size_in_nursery'):
        self.gc_minimal_size_in_nursery = gc_ll_descr.minimal_size_in_nursery
    else:
        self.gc_minimal_size_in_nursery = 0
    if hasattr(gc_ll_descr, 'gcheaderbuilder'):
        self.gc_size_of_header = gc_ll_descr.gcheaderbuilder.size_gc_header
    else:
        self.gc_size_of_header = WORD  # for tests
    self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn)
    self.memset_addr = self.cpu.cast_ptr_to_int(memset_fn)
    self._build_failure_recovery(False, withfloats=False)
    self._build_failure_recovery(True, withfloats=False)
    self._build_wb_slowpath(False)
    self._build_wb_slowpath(True)
    self._build_wb_slowpath(False, for_frame=True)
    # only one of those
    self.build_frame_realloc_slowpath()
    if self.cpu.supports_floats:
        self._build_failure_recovery(False, withfloats=True)
        self._build_failure_recovery(True, withfloats=True)
        self._build_wb_slowpath(False, withfloats=True)
        self._build_wb_slowpath(True, withfloats=True)
    self._build_propagate_exception_path()
    if gc_ll_descr.get_malloc_slowpath_addr is not None:
        # generate few slowpaths for various cases
        self.malloc_slowpath = self._build_malloc_slowpath(kind='fixed')
        self.malloc_slowpath_varsize = self._build_malloc_slowpath(
            kind='var')
    if hasattr(gc_ll_descr, 'malloc_str'):
        self.malloc_slowpath_str = self._build_malloc_slowpath(kind='str')
    else:
        self.malloc_slowpath_str = None
    if hasattr(gc_ll_descr, 'malloc_unicode'):
        self.malloc_slowpath_unicode = self._build_malloc_slowpath(
            kind='unicode')
    else:
        self.malloc_slowpath_unicode = None
    self.cond_call_slowpath = [self._build_cond_call_slowpath(False, False),
                               self._build_cond_call_slowpath(False, True),
                               self._build_cond_call_slowpath(True, False),
                               self._build_cond_call_slowpath(True, True)]
    self._build_stack_check_slowpath()
    self._build_release_gil(gc_ll_descr.gcrootmap)
    if not self._debug:
        # if self._debug is already set it means that someone called
        # set_debug by hand before initializing the assembler. Leave it
        # as it is
        debug_start('jit-backend-counts')
        self.set_debug(have_debug_prints())
        debug_stop('jit-backend-counts')
    # when finishing, we only have one value at [0], the rest dies
    self.gcmap_for_finish = lltype.malloc(jitframe.GCMAP, 1,
                                          flavor='raw',
                                          track_allocation=False)
    self.gcmap_for_finish[0] = r_uint(1)
Example 11: load_linklet_from_fasl
def load_linklet_from_fasl(file_name, set_version=False):
    from pycket.fasl import Fasl
    from pycket.env import w_version
    from pycket.util import console_log
    from pycket.ast_vs_sexp import deserialize_loop
    debug_start("loading-linklet")
    debug_print("Loading linklet from fasl -- %s" % file_name)
    sexp = Fasl().to_sexp_from_file(file_name)
    version_sexp, linklet_sexp = W_String.make(""), None
    if set_version:
        version_sexp = sexp.car()
        linklet_sexp = sexp.cdr()
    else:
        linklet_sexp = sexp
    linklet = None
    if "zo" in file_name:
        linklet = deserialize_loop(linklet_sexp)
    else:
        console_log("Run pycket with --make-linklet-zos to make the compiled zo files for bootstrap linklets", 1)
        compile_linklet = get_primitive("compile-linklet")
        linklet = compile_linklet.call_interpret([linklet_sexp, W_Symbol.make("linkl"), w_false, w_false, w_false])
    if set_version:
        ver = version_sexp.as_str_ascii()
        console_log("Setting the version to %s" % ver)
        w_version.set_version(ver)
    debug_stop("loading-linklet")
    return linklet, version_sexp
Example 12: send_loop_to_backend
def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type,
                         orig_inpargs, memo):
    forget_optimization_info(loop.operations)
    forget_optimization_info(loop.inputargs)
    vinfo = jitdriver_sd.virtualizable_info
    if vinfo is not None:
        vable = orig_inpargs[jitdriver_sd.index_of_virtualizable].getref_base()
        patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd, vable)
    original_jitcell_token = loop.original_jitcell_token
    globaldata = metainterp_sd.globaldata
    original_jitcell_token.number = n = globaldata.loopnumbering
    globaldata.loopnumbering += 1
    if not we_are_translated():
        show_procedures(metainterp_sd, loop)
        loop.check_consistency()
    if metainterp_sd.warmrunnerdesc is not None:
        hooks = metainterp_sd.warmrunnerdesc.hooks
        debug_info = JitDebugInfo(jitdriver_sd, metainterp_sd.logger_ops,
                                  original_jitcell_token, loop.operations,
                                  type, greenkey)
        hooks.before_compile(debug_info)
    else:
        debug_info = None
        hooks = None
    operations = get_deep_immutable_oplist(loop.operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        loopname = jitdriver_sd.warmstate.get_location_str(greenkey)
        unique_id = jitdriver_sd.warmstate.get_unique_id(greenkey)
        asminfo = do_compile_loop(jitdriver_sd.index, unique_id, metainterp_sd,
                                  loop.inputargs,
                                  operations, original_jitcell_token,
                                  name=loopname,
                                  log=have_debug_prints(),
                                  memo=memo)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    if hooks is not None:
        debug_info.asminfo = asminfo
        hooks.after_compile(debug_info)
    metainterp_sd.stats.add_new_loop(loop)
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new " + type)
    #
    if asminfo is not None:
        ops_offset = asminfo.ops_offset
    else:
        ops_offset = None
    metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n,
                                      type, ops_offset,
                                      name=loopname)
    #
    if metainterp_sd.warmrunnerdesc is not None:    # for tests
        metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token)
Example 13: send_bridge_to_backend
def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs, operations, original_loop_token):
    if not we_are_translated():
        show_procedures(metainterp_sd)
        seen = dict.fromkeys(inputargs)
        TreeLoop.check_consistency_of_branch(operations, seen)
    if metainterp_sd.warmrunnerdesc is not None:
        hooks = metainterp_sd.warmrunnerdesc.hooks
        debug_info = JitDebugInfo(
            jitdriver_sd, metainterp_sd.logger_ops, original_loop_token, operations, "bridge", fail_descr=faildescr
        )
        hooks.before_compile_bridge(debug_info)
    else:
        hooks = None
        debug_info = None
    operations = get_deep_immutable_oplist(operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, original_loop_token)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    if hooks is not None:
        debug_info.asminfo = asminfo
        hooks.after_compile_bridge(debug_info)
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new bridge")
    #
    if asminfo is not None:
        ops_offset = asminfo.ops_offset
    else:
        ops_offset = None
    metainterp_sd.logger_ops.log_bridge(inputargs, operations, None, faildescr, ops_offset)
Example 14: get_L2cache_linux2_cpuinfo_s390x
def get_L2cache_linux2_cpuinfo_s390x(filename="/proc/cpuinfo", label='cache3'):
    debug_start("gc-hardware")
    L2cache = sys.maxint
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            data = []
            while True:
                buf = os.read(fd, 4096)
                if not buf:
                    break
                data.append(buf)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        data = ''.join(data)
        linepos = 0
        while True:
            start = _findend(data, '\n' + label, linepos)
            if start < 0:
                break    # done
            linepos = _findend(data, '\n', start)
            if linepos < 0:
                break    # no end-of-line??
            # *** data[start:linepos] == " : level=2 type=Instruction scope=Private size=2048K ..."
            start = _skipspace(data, start)
            if data[start] != ':':
                continue
            # *** data[start:linepos] == ": level=2 type=Instruction scope=Private size=2048K ..."
            start = _skipspace(data, start + 1)
            # *** data[start:linepos] == "level=2 type=Instruction scope=Private size=2048K ..."
            start += 44
            end = start
            while '0' <= data[end] <= '9':
                end += 1
            # *** data[start:end] == "2048"
            if start == end:
                continue
            number = int(data[start:end])
            # *** data[end:linepos] == " KB\n"
            end = _skipspace(data, end)
            if data[end] not in ('K', 'k'):    # assume kilobytes for now
                continue
            number = number * 1024
            # for now we look for the smallest of the L2 caches of the CPUs
            if number < L2cache:
                L2cache = number
    debug_print("L2cache =", L2cache)
    debug_stop("gc-hardware")
    if L2cache < sys.maxint:
        return L2cache
    else:
        # Print a top-level warning even in non-debug builds
        llop.debug_print(lltype.Void,
            "Warning: cannot find your CPU L2 cache size in /proc/cpuinfo")
        return -1
Example 15: _dump
def _dump(self, addr, logname, backend=None):
    debug_start(logname)
    if have_debug_prints():
        #
        if backend is not None:
            debug_print('BACKEND', backend)
        #
        from rpython.jit.backend.hlinfo import highleveljitinfo
        if highleveljitinfo.sys_executable:
            debug_print('SYS_EXECUTABLE', highleveljitinfo.sys_executable)
        else:
            debug_print('SYS_EXECUTABLE', '??')
        #
        HEX = '0123456789ABCDEF'
        dump = []
        src = rffi.cast(rffi.CCHARP, addr)
        for p in range(self.get_relative_pos()):
            o = ord(src[p])
            dump.append(HEX[o >> 4])
            dump.append(HEX[o & 15])
        debug_print('CODE_DUMP',
                    '@%x' % addr,
                    '+0 ',  # backwards compatibility
                    ''.join(dump))
        #
    debug_stop(logname)