本文整理汇总了Python中trace.Trace类的典型用法代码示例。如果您正苦于以下问题:Python Trace类的具体用法?Python Trace怎么用?Python Trace使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Trace类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: TestCallers
class TestCallers(unittest.TestCase):
    """White-box tests for trace.Trace's caller-tracking mode (countcallers=1)."""

    def setUp(self):
        # Restore whatever trace function was installed before this test ran.
        self.addCleanup(sys.settrace, sys.gettrace())
        self.tracer = Trace(count=0, trace=0, countcallers=1)
        self.filemod = my_file_and_modname()

    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'pre-existing trace function throws off measurements')
    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)

        # Build the (caller, callee) -> count mapping from named triples.
        runfunc = (os.path.splitext(trace.__file__)[0] + '.py',
                   'trace', 'Trace.runfunc')
        caller = self.filemod + ('traced_func_importing_caller',)
        simple = self.filemod + ('traced_func_simple_caller',)
        linear = self.filemod + ('traced_func_linear',)
        importing = self.filemod + ('traced_func_importing',)
        testmod_func = (fix_ext_py(testmod.__file__), 'testmod', 'func')

        want = {
            (runfunc, caller): 1,
            (simple, linear): 1,
            (caller, simple): 1,
            (caller, importing): 1,
            (importing, testmod_func): 1,
        }
        self.assertEqual(self.tracer.results().callers, want)
示例2: __init__
class PerformanceMetric:
    """Aggregates playback metrics: bitrate switches, buffer levels, throughput.

    All contained traces are seeded with a (0, 0) sample so plots start at t=0.
    """

    def __init__(self):
        # One bitrate-switch trace and one buffer-level metric per media type.
        self.switches = {"VIDEO": Trace("seconds", "bps"),
                         "AUDIO": Trace("seconds", "bps")}
        self.buffer_levels = {"VIDEO": BufferLevelMetric(),
                              "AUDIO": BufferLevelMetric()}
        self.bps_history = Trace("seconds", "bps")

        # Seed every trace with a zero sample at time 0.
        self.bps_history.append(0, 0)
        for media_type in ("VIDEO", "AUDIO"):
            self.buffer_levels[media_type].append(0, 0)
            self.switches[media_type].append(0, 0)

    @property
    def underrun_count(self):
        """Total underruns across both media types."""
        return (self.buffer_levels["VIDEO"].underrun_count
                + self.buffer_levels["AUDIO"].underrun_count)

    def min_buffer_level(self):
        """Return the lower of the two current buffer levels."""
        return min(self.buffer_levels["VIDEO"].current_value,
                   self.buffer_levels["AUDIO"].current_value)

    def score(self):
        """Reciprocal of the underrun count; 1.0 is a perfect score."""
        # NOTE: was a stray string literal floating between methods in the
        # original; moved here as the proper docstring of score().
        return 1.0 / (self.underrun_count + 1)

    def print_stats(self):
        """Print score, underrun count and the video switch trace."""
        print("Score: %.4f" % self.score())
        print("Underruns: %d" % self.underrun_count)
        # Fixed: `print x` is Python 2 statement syntax (SyntaxError in py3).
        print(self.switches["VIDEO"])
示例3: TestRunExecCounts
class TestRunExecCounts(unittest.TestCase):
    """A simple sanity test of line-counting, via runctx (exec)."""

    def setUp(self):
        self.my_py_filename = fix_ext_py(__file__)
        # Restore any pre-existing trace function after the test.
        self.addCleanup(sys.settrace, sys.gettrace())

    def test_exec_counts(self):
        self.tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
        compiled = compile(r'''traced_func_loop(2, 5)''', __file__, 'exec')
        self.tracer.runctx(compiled, globals(), vars())

        start = get_firstlineno(traced_func_loop)
        expected = {
            (self.my_py_filename, start + 1): 1,
            (self.my_py_filename, start + 2): 6,
            (self.my_py_filename, start + 3): 5,
            (self.my_py_filename, start + 4): 1,
        }
        # When used through 'run', some spurious extra counts appear (e.g. the
        # settrace call in threading); only verify the lines of
        # traced_func_loop, ignoring everything else.
        counts = self.tracer.results().counts
        for key, value in expected.items():
            self.assertEqual(counts[key], value)
示例4: test_linear_methods
def test_linear_methods(self):
    # XXX todo: later add 'static_method_linear' and 'class_method_linear'
    # here, once issue1764286 is resolved
    for methname in ["inst_method_linear"]:
        tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
        bound_method = getattr(TracedClass(25), methname)
        tracer.runfunc(bound_method, 20)

        # The method body is a single line right after its def line.
        lineno = get_firstlineno(bound_method)
        self.assertEqual(tracer.results().counts,
                         {(self.my_py_filename, lineno + 1): 1})
示例5: get_next_trace
def get_next_trace(self):
    """Build the next Trace, resampling each peak position near the current one."""
    new_trace = Trace([400], self.screen, noise=True)

    # Valid band for a peak position (exclusive on both ends).
    lower = self.bottom_pad
    upper = self.dim - self.top_pad

    for idx, (cur_peak, _) in enumerate(zip(self.current_trace.peaks,
                                            new_trace.peaks)):
        # Rejection-sample until the candidate lands strictly inside the band.
        candidate = self.get_new_peak_position(cur_peak)
        while not (lower < candidate < upper):
            candidate = self.get_new_peak_position(cur_peak)
        new_trace.peaks[idx] = candidate

    if not self.artificial:
        # Real survey: take the next recorded trace's samples verbatim.
        new_trace.data = self.survey.data[self.current_real_trace, :]
        self.current_real_trace += 1
    return new_trace
示例6: init_traces
def init_traces(self):
    """Load traces from '1.tra': a trace count, then per-trace point lists.

    File layout: first line is the number of traces; each trace starts with
    its point count, followed by one line of space-separated ints per point.
    """
    with open('1.tra', 'r') as f:
        num_traces = int(f.readline())
        for _ in range(num_traces):
            trace_obj = Trace()
            num_points = int(f.readline())
            # [:-1] strips the trailing newline before splitting coordinates.
            trace_obj.points = [
                Point([int(coord) for coord in f.readline()[:-1].split(' ')])
                for _ in range(num_points)
            ]
            self.traces.append(trace_obj)
            self.current_temp_places.append(None)
示例7: mh_query2
def mh_query2(model, pred, answer, samples_count, lag=1):
    """
    Metropolis-Hastings algorithm for sampling
    :param model: model to execute
    :param pred: predicate a sample must satisfy before collection starts
    :param answer: maps a raw model sample to the value actually collected
    :param samples_count: how much samples we want
    :type samples_count: int
    :param lag: keep only every `lag`-th accepted transition (thinning)
    :type lag: int
    :return: samples
    :rtype: list
    """
    MCMC_shared.mh_flag = True
    MCMC_shared.iteration = 0
    samples = []
    # `miss` is True until the first sample satisfying `pred` is seen;
    # before that, proposals are accepted without evaluating `pred`.
    miss = True
    model()  # initial run to populate MCMC_shared.trace
    transitions = 0
    rejection = 0  # consecutive-rejection counter (debug/bookkeeping only)
    while len(samples) < samples_count:
        MCMC_shared.iteration += 1
        # Each traced variable maps to a (value, drift) pair.
        variables = MCMC_shared.trace.get_vector()
        vector_vals_drift = variables.values()
        vector = [val[0] for val in vector_vals_drift]
        drifts = [val[1] for val in vector_vals_drift]
        # Gaussian proposal: jointly perturb all variables, variance = drift.
        shifted_vector = numpy.random.multivariate_normal(vector, numpy.diag(drifts))
        new_trace = Trace(MCMC_shared.trace)
        new_trace.set_vector(dict(zip(variables.keys(), shifted_vector.tolist())), MCMC_shared.iteration)
        old_trace = MCMC_shared.trace
        MCMC_shared.trace = new_trace
        sample = model()
        # If the proposal has zero likelihood (after warm-up), locally
        # re-propose only the offending chunks until the trace is feasible.
        while not miss and new_trace._likelihood == -float("inf"):
            new_trace.clean(MCMC_shared.iteration)
            new_trace._likelihood = 0
            for name, (chunk, iteration) in new_trace.mem.items():
                if chunk.erp.log_likelihood(chunk.x, *chunk.erp_parameters) == -float("inf"):
                    # Re-draw this variable near its old value with halved drift.
                    new_chunk = Chunk(chunk.erp,
                                      numpy.random.normal(old_trace.get(name).x, chunk.drift / 2),
                                      chunk.erp_parameters,
                                      drift=chunk.drift)
                    new_trace.store(name, new_chunk, iteration)
            sample = model()
        MCMC_shared.trace = old_trace
        # MH acceptance test in log space: log(u) < log L(new) - log L(old).
        probability = log(uniform())
        # r = erp.log_proposal_prob()
        if probability < new_trace._likelihood - old_trace._likelihood and (miss or pred(sample)):
            if miss and pred(sample):
                # First predicate-satisfying sample: start collecting and
                # shrink the global drift for finer exploration.
                miss = False
                MCMC_shared.drift = 0.05
            transitions += 1
            # Thinning: only record every `lag`-th accepted transition.
            if (transitions % lag) == 0:
                if not miss:
                    # print sample, rejection
                    samples.append(answer(sample))
                    rejection = 0
            # Accepted: adopt the proposed trace and drop stale entries.
            # NOTE(review): indentation reconstructed from a mangled source —
            # confirm against upstream that these two lines sit at the
            # acceptance level (not inside the `% lag` block).
            MCMC_shared.trace = new_trace
            MCMC_shared.trace.clean(MCMC_shared.iteration)
        else:
            rejection += 1
    return samples
示例8: trim_lat_df
def trim_lat_df(self, start, lat_df):
    """Squash a latency dataframe to the [start, end] window.

    :param start: window start time
    :param lat_df: latency dataframe with 't_delta' and 't_start' columns
    :returns: the squashed dataframe without the 't_start' column
    """
    if lat_df.empty:
        return lat_df

    lat_df = Trace.squash_df(lat_df, start, lat_df.index[-1], "t_delta")
    # squash_df only updates t_delta; drop t_start so stale values can't be
    # used. Fixed: `drop('t_start', 1)` relied on the positional `axis`
    # argument, which was deprecated and removed in pandas 2.0.
    return lat_df.drop(columns='t_start')
示例9: upsert_bulk
def upsert_bulk(self, _index, type_key, id_key, bulk):
    """Updates a bulk of documents in the same index.

    The type and id of each document are encoded in the document itself;
    `type_key` and `id_key` name the fields to retrieve them from. Those
    fields are removed from the document and passed as _type and _id instead.
    (Fixed: the original had three separate string literals here — only the
    first was a docstring; the others were no-op expression statements.)

    :param _index: target index name
    :param type_key: document field holding the document type
    :param id_key: document field holding the document id
    :param bulk: iterable of documents (dicts) to upsert
    :returns: the number of documents successfully upserted
    """
    docs_upserted = 0
    for document in bulk:
        # pop() both reads and removes the routing fields in one step.
        _type = document.pop(type_key)
        _id = document.pop(id_key)
        upserted = self.upsert_document(_index, _type, _id, document)
        Trace.info("upserted: " + json.dumps(upserted))
        if upserted["_id"] == _id:
            docs_upserted += 1
    return docs_upserted
示例10: _run_with_trace
def _run_with_trace(self):
from trace import Trace
trace = Trace(ignoredirs=[sys.prefix, sys.exec_prefix], trace=False,
count=True)
try:
trace.runfunc(self._run_tests)
finally:
results = trace.results()
real_stdout = sys.stdout
sys.stdout = open(self.coverage_summary, 'w')
try:
results.write_results(show_missing=True, summary=True,
coverdir=self.coverage_dir)
finally:
sys.stdout.close()
sys.stdout = real_stdout
示例11: BufferLevelMetric
class BufferLevelMetric(Trace):
    """A Trace of buffer level (seconds) over time that records underruns.

    An underrun is recorded whenever a decrease would push the level to or
    below zero; the level itself is clamped at 0.
    """

    def __init__(self):
        Trace.__init__(self, "seconds", "seconds")
        self._level = 0.0  # level in seconds
        # Each underrun sample is (time, how far below zero the level went).
        self._underruns = Trace("seconds", "underrun duration (seconds)")

    @property
    def underrun_count(self):
        """Number of underrun events recorded so far."""
        return self._underruns.count

    def increase_by(self, absolute_time, level_increase):
        """Append a sample with the level raised by `level_increase` seconds."""
        base = self.current_value
        self.append(absolute_time, (base or 0.0) + level_increase)

    def decrease_by(self, absolute_time, level_decrease):
        """Append a sample lowered by `level_decrease`, clamped at 0.

        If the decrease would take the level to or below zero, an underrun
        of the overshoot magnitude is recorded first.
        """
        remaining = (self.current_value or 0.0) - level_decrease
        if remaining <= 0.0:
            # Record an underrun [time, duration_of_underrun], then clamp.
            self._underruns.append(absolute_time, abs(remaining))
            remaining = 0.0
        self.append(absolute_time, remaining)

    @property
    def level(self):
        """
        Alias for current_value or current_y_value
        :return: current_y_value
        """
        return self.current_value

    def __unicode__(self):
        if self.current_value is None:
            return "BufferLevel(t=0): 0"
        return "BufferLevel(t=%.2fs): %.2fs" % (self.current_x_value,
                                                self.current_value)

    def __str__(self):
        return self.__unicode__()
示例12: __init__
def __init__(self, *args, **kwargs):
    super(TestTrace, self).__init__(*args, **kwargs)
    # Resolve fixture file paths first, then build the Trace under test.
    self.test_trace = os.path.join(self.traces_dir, 'test_trace.txt')
    self.trace_path = os.path.join(self.traces_dir, 'trace.txt')
    self.platform = self._get_platform()
    self.trace = Trace(self.platform, self.trace_path, self.events)
示例13: plotdir
def plotdir(run_dir, platform):
    """Generate the plots selected in `args.plots` for one run directory."""
    global args
    tasks = None

    # Load RTApp performance data, if this run produced any.
    try:
        pa = PerfAnalysis(run_dir)
        tasks = pa.tasks()
        logging.info('Tasks: %s', tasks)
    except ValueError:
        pa = None
        logging.info('No performance data found')

    # Load trace analysis modules and set the window for temporal plots.
    trace = Trace(platform, run_dir)
    trace.setXTimeRange(args.tmin, args.tmax)

    wanted = args.plots

    # Task plots (plus per-task performance plots when data is available).
    if 'tasks' in wanted:
        trace.analysis.tasks.plotTasks(tasks)
        if pa:
            for task in tasks:
                pa.plotPerf(task)

    # Cluster and CPU plots.
    if 'clusters' in wanted:
        trace.analysis.frequency.plotClusterFrequencies()
    if 'cpus' in wanted:
        trace.analysis.cpus.plotCPU()

    # SchedTune / EAS plots.
    if 'stune' in wanted:
        trace.analysis.eas.plotSchedTuneConf()
    if 'ediff' in wanted:
        trace.analysis.eas.plotEDiffTime()
    if 'edspace' in wanted:
        trace.analysis.eas.plotEDiffSpace()
示例14: __init__
def __init__(self, bitrates):
    Adaptation.__init__(self, bitrates)

    # Buffer targets, in seconds of media.
    self.max_seconds = 50.0
    # Critical level: below this, fill as fast as possible until
    # level_high_seconds is reached.
    self.level_low_seconds = 10.0
    # Stable level: try to maintain the current bitrate or improve it.
    self.level_high_seconds = 30.0

    self.bps_history = Trace("time", "bps")
    self.bitrate_selections = {
        "AUDIO": Trace("time", "Audio-Bitrate selections"),
        "VIDEO": Trace("time", "Video-Bitrate selections"),
    }
    self.sim_state = None
    self.my_bps = Trace("seconds", "bps")

    # Moving-average filters over measured throughput at three horizons.
    # NOTE(review): ma50_filter is created with window 80, not 50 — the name
    # and the value disagree; confirm which is intended.
    self.ma4_filter = alg.IterativeMovingAverage(4)
    self.ma10_filter = alg.IterativeMovingAverage(10)
    self.ma50_filter = alg.IterativeMovingAverage(80)
    self.last_index = 0
示例15: TestFuncs
class TestFuncs(unittest.TestCase):
    """White-box tests for trace.Trace's called-functions mode (countfuncs=1)."""

    def setUp(self):
        # Restore any pre-existing trace function when the test finishes.
        self.addCleanup(sys.settrace, sys.gettrace())
        self.tracer = Trace(count=0, trace=0, countfuncs=1)
        self.filemod = my_file_and_modname()
        self._saved_tracefunc = sys.gettrace()

    def tearDown(self):
        # Re-install the trace function captured in setUp, if there was one.
        if self._saved_tracefunc is not None:
            sys.settrace(self._saved_tracefunc)

    def test_simple_caller(self):
        self.tracer.runfunc(traced_func_simple_caller, 1)
        want = {
            self.filemod + ('traced_func_simple_caller',): 1,
            self.filemod + ('traced_func_linear',): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, want)

    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)
        local_funcs = ('traced_func_simple_caller',
                       'traced_func_linear',
                       'traced_func_importing_caller',
                       'traced_func_importing')
        want = {self.filemod + (name,): 1 for name in local_funcs}
        want[(fix_ext_py(testmod.__file__), 'testmod', 'func')] = 1
        self.assertEqual(self.tracer.results().calledfuncs, want)

    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'pre-existing trace function throws off measurements')
    def test_inst_method_calling(self):
        obj = TracedClass(20)
        self.tracer.runfunc(obj.inst_method_calling, 1)
        want = {
            self.filemod + ('TracedClass.inst_method_calling',): 1,
            self.filemod + ('TracedClass.inst_method_linear',): 1,
            self.filemod + ('traced_func_linear',): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, want)