本文整理汇总了Python中tracemalloc.get_traced_memory函数的典型用法代码示例。如果您正苦于以下问题:Python get_traced_memory函数的具体用法?Python get_traced_memory怎么用?Python get_traced_memory使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_traced_memory函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_get_traced_memory
def test_get_traced_memory(self):
    """Check that tracemalloc.get_traced_memory() returns a (current, peak)
    pair that tracks allocations/frees, and that both clear_traces() and
    stop() reset the counters to (0, 0).
    """
    # Python allocates some internals objects, so the test must tolerate
    # a small difference between the expected size and the real usage
    max_error = 2048

    # allocate one object
    obj_size = 1024 * 1024
    tracemalloc.clear_traces()
    # allocate_bytes (helper defined elsewhere in this test module)
    # returns the allocated object and its allocation traceback.
    obj, obj_traceback = allocate_bytes(obj_size)
    size, peak_size = tracemalloc.get_traced_memory()
    self.assertGreaterEqual(size, obj_size)
    self.assertGreaterEqual(peak_size, size)

    self.assertLessEqual(size - obj_size, max_error)
    self.assertLessEqual(peak_size - size, max_error)

    # destroy the object: rebinding drops the only reference, so the
    # 1 MiB block is freed and current usage must drop while the peak
    # stays at (or above) its previous value.
    obj = None
    size2, peak_size2 = tracemalloc.get_traced_memory()
    self.assertLess(size2, size)
    self.assertGreaterEqual(size - size2, obj_size - max_error)
    self.assertGreaterEqual(peak_size2, peak_size)

    # clear_traces() must reset traced memory counters
    tracemalloc.clear_traces()
    self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))

    # allocate another object
    obj, obj_traceback = allocate_bytes(obj_size)
    size, peak_size = tracemalloc.get_traced_memory()
    self.assertGreaterEqual(size, obj_size)

    # stop() also resets traced memory counters
    tracemalloc.stop()
    self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
示例2: test_task_memory_threshold
def test_task_memory_threshold(self):
    """A Task with a memory threshold must invoke its callback once traced
    memory has moved by at least the threshold — both on allocation and
    on release.
    """
    diff = None

    def log_func():
        # Callback run by the scheduled task: record how far traced
        # memory has moved since `old_size` was last sampled.
        # `old_size` is read through the enclosing scope; only `diff`
        # is rebound here.
        nonlocal diff
        size, max_size = tracemalloc.get_traced_memory()
        diff = (size - old_size)

    obj_size = 1024 * 1024
    # Fire when usage changes by at least 75% of the object size.
    threshold = int(obj_size * 0.75)

    old_size, max_size = tracemalloc.get_traced_memory()

    task = tracemalloctext.Task(log_func)
    task.set_memory_threshold(threshold)
    task.schedule()

    # allocate: the 1 MiB allocation should push usage past the
    # threshold within MEMORY_CHECK_DELAY seconds.
    obj = allocate_bytes(obj_size)
    time.sleep(MEMORY_CHECK_DELAY)
    self.assertIsNotNone(diff)
    self.assertGreaterEqual(diff, threshold)

    # release: re-baseline old_size, then drop the object; the callback
    # should now observe a (negative) change below the threshold.
    diff = None
    old_size, max_size = tracemalloc.get_traced_memory()
    obj = None
    time.sleep(MEMORY_CHECK_DELAY)
    # NOTE(review): `size` below is never used afterwards — possibly a
    # leftover sample kept only for symmetry with the allocate phase.
    size, max_size = tracemalloc.get_traced_memory()
    self.assertIsNotNone(diff)
    self.assertLessEqual(diff, threshold)
示例3: check_track
def check_track(self, release_gil):
    """Helper: start tracing with a 5-frame limit, run self.track()
    (an allocation helper defined elsewhere, presumably exercising the
    C API with/without the GIL — TODO confirm), and verify the recorded
    traceback and traced size match the expected values.
    """
    nframe = 5
    tracemalloc.start(nframe)

    # NOTE(review): `size` is assigned but never used below — the final
    # comparison reads self.get_traced_memory()/self.size instead;
    # looks like a leftover baseline sample.
    size = tracemalloc.get_traced_memory()[0]

    frames = self.track(release_gil, nframe)
    self.assertEqual(self.get_traceback(),
                     tracemalloc.Traceback(frames))
    self.assertEqual(self.get_traced_memory(), self.size)
示例4: on_epoch_end
def on_epoch_end(self, last_metrics, **kwargs):
    """Append CPU/GPU memory deltas for the finished epoch to the metrics.

    All four numbers are deltas relative to the start of the epoch,
    reported in MiB (整数, floor division by 2**20 via int()).
    """
    # Current and peak traced CPU memory, converted from bytes to MiB.
    traced_now, traced_peak = tracemalloc.get_traced_memory()
    cpu_used = int(traced_now / 2**20)
    cpu_peak = int(traced_peak / 2**20)

    self.peak_monitor_stop()

    gpu_used = gpu_mem_get_used_no_cache() - self.gpu_before
    gpu_peak = self.gpu_mem_used_peak - self.gpu_before
    # can be negative, due to unreliable peak monitor thread
    if gpu_peak < 0:
        gpu_peak = 0
    # since we want the overhead only, subtract delta used if it's positive
    elif gpu_used > 0:
        gpu_peak -= gpu_used

    # The numbers are deltas in MBs (beginning of the epoch and the end)
    return add_metrics(last_metrics, [cpu_used, cpu_peak, gpu_used, gpu_peak])
示例5: tracemalloc_dump
def tracemalloc_dump() -> None:
    """Dump a tracemalloc snapshot of this process to
    settings.TRACEMALLOC_DUMP_DIR and log a one-line memory summary.

    No-op (with a warning) when tracing is not enabled.  A gc.collect()
    runs first so the snapshot reflects only live objects.
    """
    if not tracemalloc.is_tracing():
        logger.warning("pid {}: tracemalloc off, nothing to dump"
                       .format(os.getpid()))
        return
    # Despite our name for it, `timezone_now` always deals in UTC.
    basename = "snap.{}.{}".format(os.getpid(),
                                   timezone_now().strftime("%F-%T"))
    path = os.path.join(settings.TRACEMALLOC_DUMP_DIR, basename)
    os.makedirs(settings.TRACEMALLOC_DUMP_DIR, exist_ok=True)

    gc.collect()
    tracemalloc.take_snapshot().dump(path)

    # Fix: use a context manager so the /proc handle is closed promptly
    # (previously the file object was leaked until GC collected it).
    with open('/proc/{}/stat'.format(os.getpid()), 'rb') as stat_file:
        procstat = stat_file.read().split()
    # Field index 23 (0-based) of /proc/<pid>/stat is the RSS in pages;
    # // 256 converts 4 KiB pages to MiB.
    rss_pages = int(procstat[23])

    # Sample traced memory once instead of twice: index 0 is the current
    # size, index 1 the peak, and sampling twice could race.
    traced_current, traced_peak = tracemalloc.get_traced_memory()
    logger.info("tracemalloc dump: tracing {} MiB ({} MiB peak), using {} MiB; rss {} MiB; dumped {}"
                .format(traced_current // 1048576,
                        traced_peak // 1048576,
                        tracemalloc.get_tracemalloc_memory() // 1048576,
                        rss_pages // 256,
                        basename))
示例6: add_tracemalloc_metrics
def add_tracemalloc_metrics(snapshot):
    """Attach tracemalloc bookkeeping metrics to *snapshot*.

    Records traced current/peak sizes, the trace count (when any traces
    exist), the tracemalloc module's own memory usage, and its
    fragmentation ratio (free/size) when size is non-zero.

    NOTE: written against the pytracemalloc API, where
    get_tracemalloc_memory() returns a (size, free) pair.
    """
    traced_size, traced_max = tracemalloc.get_traced_memory()
    snapshot.add_metric('tracemalloc.traced.size', traced_size, 'size')
    snapshot.add_metric('tracemalloc.traced.max_size', traced_max, 'size')

    if snapshot.traces:
        snapshot.add_metric('tracemalloc.traces', len(snapshot.traces), 'int')

    module_size, module_free = tracemalloc.get_tracemalloc_memory()
    snapshot.add_metric('tracemalloc.module.size', module_size, 'size')
    snapshot.add_metric('tracemalloc.module.free', module_free, 'size')
    if module_size:
        fragmentation = module_free / module_size
        snapshot.add_metric('tracemalloc.module.fragmentation',
                            fragmentation, 'percent')
示例7: schedule
def schedule(self):
    """Recompute this scheduler's wake-up conditions from its task.

    Sets min_memory/max_memory to a band of +/- the task's memory
    threshold around the current traced size (or None when the task has
    no threshold), and sets timeout to now + delay (or None when the
    task has no delay).
    """
    task = self._task_ref()
    memory_threshold = task.get_memory_threshold()
    delay = task.get_delay()

    if memory_threshold is None:
        self.min_memory = None
        self.max_memory = None
    else:
        # Band centred on the current traced size.
        current = tracemalloc.get_traced_memory()[0]
        self.min_memory = current - memory_threshold
        self.max_memory = current + memory_threshold

    self.timeout = (_time_monotonic() + delay) if delay is not None else None
示例8: once
def once(self):
    """Return the delay (seconds) to sleep before the next check, or
    None when the task should fire now.

    Fires (returns None) when traced memory has left the
    [min_memory, max_memory] band, or when the timeout has expired.
    Otherwise returns the smaller of the memory poll delay and the time
    remaining until the timeout, whichever applies.
    """
    wait = None

    if self.min_memory is not None:
        traced = tracemalloc.get_traced_memory()[0]
        # Outside the band means the threshold was crossed in either
        # direction -> fire immediately.
        if not (self.min_memory < traced < self.max_memory):
            return None
        wait = self.memory_delay

    if self.timeout is not None:
        remaining = self.timeout - _time_monotonic()
        if remaining <= 0:
            return None
        wait = remaining if wait is None else min(wait, remaining)

    return wait
示例9: compute
def compute(self):
    """Run the worker task, optionally replacing its timings with a
    memory measurement.

    With args.track_memory: sample the process memory peak (pagefile
    usage on Windows, a polling thread elsewhere).  With
    args.tracemalloc: use the peak traced Python allocation instead.
    Either way the timing values are overwritten via
    _set_memory_value().

    Raises RuntimeError when the chosen mechanism reports no peak.
    """
    args = self.args

    if args.track_memory:
        if MS_WINDOWS:
            from perf._win_memory import get_peak_pagefile_usage
        else:
            # Start the sampling thread before the workload runs so the
            # peak is actually observed.
            from perf._memory import PeakMemoryUsageThread
            mem_thread = PeakMemoryUsageThread()
            mem_thread.start()

    if args.tracemalloc:
        import tracemalloc
        tracemalloc.start()

    # Run the actual benchmark workload.
    WorkerTask.compute(self)

    if args.tracemalloc:
        # Index 1 of get_traced_memory() is the peak traced size.
        traced_peak = tracemalloc.get_traced_memory()[1]
        tracemalloc.stop()

        if not traced_peak:
            raise RuntimeError("tracemalloc didn't trace any Python "
                               "memory allocation")

        # drop timings, replace them with the memory peak
        self._set_memory_value(traced_peak)

    if args.track_memory:
        if MS_WINDOWS:
            mem_peak = get_peak_pagefile_usage()
        else:
            mem_thread.stop()
            mem_peak = mem_thread.peak_usage
        if not mem_peak:
            raise RuntimeError("failed to get the memory peak usage")

        # drop timings, replace them with the memory peak
        self._set_memory_value(mem_peak)
示例10: fastahack_fetch
def fastahack_fetch(n):
    """Benchmark fastahack.FastaHack: run n rounds timing index
    construction and sequence reads separately, then do one extra run
    under tracemalloc to report allocation totals."""
    print('timings for fastahack.FastaHack')
    build_times = []
    fetch_times = []
    for _ in range(n):
        start = time.time()
        fasta = fastahack.FastaHack(fa_file.name)
        build_times.append(time.time() - start)
        start = time.time()
        read_fastahack(fasta, headers)
        fetch_times.append(time.time() - start)
        # Remove the index so the next round rebuilds it from scratch.
        os.remove(index)
    # profile memory usage and report timings
    tracemalloc.start()
    fasta = fastahack.FastaHack(fa_file.name)
    read_fastahack(fasta, headers)
    os.remove(index)
    print(tracemalloc.get_traced_memory())
    print(mean(build_times))
    print(mean(fetch_times) / nreads / 10 * 1000 * 1000)
    tracemalloc.stop()
示例11: pyfaidx_bgzf_faidx
def pyfaidx_bgzf_faidx(n):
    """Benchmark pyfaidx.Faidx on the bgzf-compressed FASTA: run n rounds
    timing index construction and reads separately, then one extra run
    under tracemalloc to report allocation totals."""
    print('timings for pyfaidx.Faidx with bgzf compression')
    build_times = []
    fetch_times = []
    for _ in range(n):
        start = time.time()
        faidx = pyfaidx.Faidx(fa_file.name + '.gz')
        build_times.append(time.time() - start)
        start = time.time()
        read_faidx(faidx, headers)
        fetch_times.append(time.time() - start)
        # Remove the index so the next round rebuilds it from scratch.
        os.remove(index)
    # profile memory usage and report timings
    tracemalloc.start()
    faidx = pyfaidx.Faidx(fa_file.name + '.gz')
    read_faidx(faidx, headers)
    os.remove(index)
    print(tracemalloc.get_traced_memory())
    print(mean(build_times))
    print(mean(fetch_times) / nreads / 10 * 1000 * 1000)
    tracemalloc.stop()
示例12: pyfaidx_fasta
def pyfaidx_fasta(n):
    """Benchmark pyfaidx.Fasta: run n rounds timing index construction
    and dict-style reads separately, then one extra run under
    tracemalloc to report allocation totals."""
    print('timings for pyfaidx.Fasta')
    build_times = []
    fetch_times = []
    for _ in range(n):
        start = time.time()
        fasta = pyfaidx.Fasta(fa_file.name)
        build_times.append(time.time() - start)
        start = time.time()
        read_dict(fasta, headers)
        fetch_times.append(time.time() - start)
        # Remove the index so the next round rebuilds it from scratch.
        os.remove(index)
    # profile memory usage and report timings
    tracemalloc.start()
    fasta = pyfaidx.Fasta(fa_file.name)
    read_dict(fasta, headers)
    os.remove(index)
    print(tracemalloc.get_traced_memory())
    print(mean(build_times))
    print(mean(fetch_times) / nreads / 10 * 1000 * 1000)
    tracemalloc.stop()
示例13: seqio_read
def seqio_read(n):
    """Benchmark Bio.SeqIO: run n rounds timing full-file parsing and
    dict reads separately, then one extra run under tracemalloc to
    report allocation totals."""
    print('timings for Bio.SeqIO')
    build_times = []
    fetch_times = []
    for _ in range(n):
        start = time.time()
        # The context manager closes the handle at the end of each
        # iteration, matching the original explicit close().
        with open(fa_file.name) as handle:
            records = SeqIO.to_dict(SeqIO.parse(handle, "fasta"))
            build_times.append(time.time() - start)
            start = time.time()
            read_dict(records, headers)
            fetch_times.append(time.time() - start)
    # profile memory usage and report timings
    tracemalloc.start()
    with open(fa_file.name) as handle:
        records = SeqIO.to_dict(SeqIO.parse(handle, "fasta"))
        read_dict(records, headers)
    print(tracemalloc.get_traced_memory())
    print(mean(build_times))
    print(mean(fetch_times) / nreads / 100 * 1000 * 1000)
    tracemalloc.stop()
示例14: pyfasta_fseek
def pyfasta_fseek(n):
    """Benchmark pyfasta.Fasta with the fseek-based FastaRecord: run n
    rounds timing index construction and reads separately, then one
    extra run under tracemalloc to report allocation totals."""
    print('timings for pyfasta.Fasta (fseek)')
    build_times = []
    fetch_times = []
    for _ in range(n):
        start = time.time()
        fasta = pyfasta.Fasta(fa_file.name, record_class=pyfasta.FastaRecord)
        build_times.append(time.time() - start)
        start = time.time()
        read_dict(fasta, headers)
        fetch_times.append(time.time() - start)
        # Drop both pyfasta index files so the next round rebuilds them.
        os.remove(fa_file.name + '.flat')
        os.remove(fa_file.name + '.gdx')
    # profile memory usage and report timings
    tracemalloc.start()
    fasta = pyfasta.Fasta(fa_file.name, record_class=pyfasta.FastaRecord)
    read_dict(fasta, headers)
    os.remove(fa_file.name + '.flat')
    os.remove(fa_file.name + '.gdx')
    print(tracemalloc.get_traced_memory())
    print(mean(build_times))
    print(mean(fetch_times) / nreads / 10 * 1000 * 1000)
    tracemalloc.stop()
示例15: loop
def loop(*, size, times):
    """Repeatedly call g(size) and print tracemalloc statistics.

    Each round prints len(L), the formatted (current, peak) traced
    sizes, and the top-3 allocation sites by line number, with frames
    from importlib/tracemalloc/regex/tokenize machinery filtered out.
    `t` is the tracemalloc module (aliased at import — presumably
    `import tracemalloc as t`; confirm at file top).
    """
    # Exclusion filters are constant, so build the tuple once up front.
    exclude = (
        t.Filter(False, "<frozen importlib._bootstrap>"),
        t.Filter(False, "*tracemalloc*"),
        t.Filter(False, "*linecache*"),
        t.Filter(False, "*sre_*"),
        t.Filter(False, "*re.py"),
        t.Filter(False, "*fnmatch*"),
        t.Filter(False, "*tokenize*"),
        t.Filter(False, "<unknown>"),
    )
    for _ in range(times):
        print(len(L))
        g(size)
        formatted = [t._format_size(v, False) for v in t.get_traced_memory()]
        print(formatted)
        snap = t.take_snapshot().filter_traces(exclude)
        top_stats = snap.statistics("lineno", cumulative=False)[:3]
        for stat in top_stats:
            print("----------------------------------------")
            print(t._format_size(stat.size, False))
            for frame_line in stat.traceback.format():
                print(frame_line)
        print("========================================")