This article collects typical usage examples of the memory_profiler.memory_usage method in Python. If you have been wondering how memory_profiler.memory_usage works, how to call it, or what real code using it looks like, the curated examples below should help. You can also explore further usage examples of the memory_profiler module, in which this method is defined.
The following shows 15 code examples of memory_profiler.memory_usage, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
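Before diving into the examples, here is a minimal sketch of the two calling conventions that most of the snippets below rely on: sampling the current process (proc=-1) and profiling a specific callable. The helper allocate and the numbers are made up for illustration, and the return type of the max_usage form varies between memory_profiler versions, as noted in the comments.

import memory_profiler

# Sample the current process (-1): returns a list of RSS readings in MiB,
# taken every `interval` seconds for roughly `timeout` seconds.
samples = memory_profiler.memory_usage(-1, interval=0.1, timeout=1)
print("current RSS: %.1f MiB" % samples[0])

# Profile a specific callable: pass a (func, args, kwargs) tuple.
def allocate(n):          # hypothetical helper, used only for illustration
    return [0.0] * n

peak, result = memory_profiler.memory_usage(
    (allocate, (5_000_000,), {}), max_usage=True, retval=True)
if isinstance(peak, list):  # older memory_profiler versions return a 1-element list here
    peak = peak[0]
print("peak during call: %.1f MiB (allocated %d floats)" % (peak, len(result)))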
Example 1: during_execution_memory_sampler
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
def during_execution_memory_sampler():
    """Thread to sample memory usage"""
    import time
    import memory_profiler
    global keep_watching, peak_memory_usage
    peak_memory_usage = -1
    keep_watching = True

    n = 0
    WAIT_BETWEEN_SAMPLES_SECS = 0.001
    MAX_ITERATIONS = 60.0 / WAIT_BETWEEN_SAMPLES_SECS
    while True:
        mem_usage = memory_profiler.memory_usage()[0]
        peak_memory_usage = max(mem_usage, peak_memory_usage)
        time.sleep(WAIT_BETWEEN_SAMPLES_SECS)
        if not keep_watching or n > MAX_ITERATIONS:
            # exit if we've been told our command has finished or if it has run
            # for more than a sane amount of time (e.g. maybe something crashed
            # and we don't want this to carry on running)
            if n > MAX_ITERATIONS:
                print("{} SOMETHING WEIRD HAPPENED AND THIS RAN FOR TOO LONG, THIS THREAD IS KILLING ITSELF".format(__file__))
            break
        n += 1
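The function above is meant to run as a background thread while some other command executes. As a rough sketch (not part of the original project), it might be driven like this with the standard threading module; run_with_sampler is a hypothetical helper:

import threading
import time

def run_with_sampler(work):
    """Run work() while during_execution_memory_sampler records peak RSS."""
    global keep_watching, peak_memory_usage
    sampler = threading.Thread(target=during_execution_memory_sampler, daemon=True)
    sampler.start()
    time.sleep(0.01)  # give the sampler a moment to initialise its globals
    try:
        return work()
    finally:
        keep_watching = False      # tell the sampling loop to exit
        sampler.join(timeout=1)
        print("peak memory during call: %.1f MiB" % peak_memory_usage)

# e.g. run_with_sampler(lambda: sum(range(10_000_000)))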
Example 2: during_execution_memory_sampler
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
def during_execution_memory_sampler():
    import time
    import memory_profiler
    global keep_watching, peak_memory_usage
    peak_memory_usage = -1
    keep_watching = True

    n = 0
    WAIT_BETWEEN_SAMPLES_SECS = 0.001
    MAX_ITERATIONS = 60.0 / WAIT_BETWEEN_SAMPLES_SECS
    while True:
        mem_usage = memory_profiler.memory_usage()[0]
        peak_memory_usage = max(mem_usage, peak_memory_usage)
        time.sleep(WAIT_BETWEEN_SAMPLES_SECS)
        if not keep_watching or n > MAX_ITERATIONS:
            # exit if we've been told our command has finished or if it has run
            # for more than a sane amount of time (e.g. maybe something crashed
            # and we don't want this to carry on running)
            if n > MAX_ITERATIONS:
                print("{} SOMETHING WEIRD HAPPENED AND THIS RAN FOR TOO LONG, THIS THREAD IS KILLING ITSELF".format(__file__))
            break
        n += 1
Example 3: resource_used
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
import functools
from time import time

import memory_profiler

def resource_used(func):
    """Decorator that returns a function that prints its usage."""
    @functools.wraps(func)
    def wrapped_func(*args, **kwargs):
        t0 = time()
        mem, out = memory_profiler.memory_usage((func, args, kwargs),
                                                max_usage=True,
                                                retval=True)
        print("Run time: %.1fs  Memory used: %iMb"
              % (time() - t0, mem))
        return out
    return wrapped_func
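As a usage sketch (not from the original project), the decorator can be applied to any function whose run time and memory footprint you want printed. build_matrix is a hypothetical function, and the exact figures depend on your machine and memory_profiler version:

import numpy as np

@resource_used
def build_matrix(n):
    # allocate an n x n array so the decorator has something to measure
    return np.random.rand(n, n).sum()

build_matrix(2000)   # prints e.g. "Run time: 0.1s  Memory used: 76Mb"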
Example 4: _get_memory_base
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
import subprocess
import sys

def _get_memory_base(gallery_conf):
    """Get the base amount of memory used by running a Python process."""
    if not gallery_conf['plot_gallery']:
        return 0.
    # There might be a cleaner way to do this at some point
    from memory_profiler import memory_usage
    if sys.platform in ('win32', 'darwin'):
        sleep, timeout = (1, 2)
    else:
        sleep, timeout = (0.5, 1)
    proc = subprocess.Popen(
        [sys.executable, '-c',
         'import time, sys; time.sleep(%s); sys.exit(0)' % sleep],
        close_fds=True)
    memories = memory_usage(proc, interval=1e-3, timeout=timeout)
    kwargs = dict(timeout=timeout) if sys.version_info >= (3, 5) else {}
    proc.communicate(**kwargs)
    # On OSX sometimes the last entry can be None
    memories = [mem for mem in memories if mem is not None] + [0.]
    memory_base = max(memories)
    return memory_base
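Example 4 points memory_usage at a subprocess rather than a callable: memory_usage also accepts a PID or a subprocess.Popen object as its first argument. Below is a stripped-down sketch of that pattern, with an arbitrary child command that just sleeps long enough to be sampled:

import subprocess
import sys
from memory_profiler import memory_usage

child = subprocess.Popen(
    [sys.executable, '-c', 'import time; time.sleep(1)'])
samples = memory_usage(child, interval=0.05, timeout=1)  # samples the child's RSS, not ours
child.wait()
samples = [s for s in samples if s is not None]  # the last sample can be None on some platforms
print("child process peak RSS: %.1f MiB" % max(samples))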
Example 5: test_memory_usage_ok
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
import time

import numpy as np

def test_memory_usage_ok():
    import memory_profiler
    # DataSet is the replay-memory buffer class defined elsewhere in this project
    dataset = DataSet(width=80, height=80,
                      rng=np.random.RandomState(42),
                      max_steps=100000, phi_length=4)
    last = time.time()
    for i in range(1000000000):
        if (i % 100000) == 0:
            print(i)
        dataset.add_sample(np.random.random((80, 80)), 1, 1, False)
        if i > 200000:
            imgs, actions, rewards, terminals = \
                dataset.random_batch(32)
        if (i % 10007) == 0:
            print(time.time() - last)
            mem_usage = memory_profiler.memory_usage(-1)
            print(len(dataset), mem_usage)
            last = time.time()
Example 6: checkpoint_memory
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
def checkpoint_memory():
    '''This test is meant to be run manually, since it depends on
    memory_profiler and its behavior may vary.'''
    try:
        from memory_profiler import memory_usage
    except ImportError:
        return

    # np/npr are numpy and numpy.random; grad and checkpoint come from autograd
    def f(a):
        for _ in range(10):
            a = np.sin(a**2 + 1)
        return a
    checkpointed_f = checkpoint(f)

    def testfun(f, x):
        for _ in range(5):
            x = f(x)
        return np.sum(x)
    gradfun = grad(testfun, 1)

    A = npr.RandomState(0).randn(100000)
    max_usage = max(memory_usage((gradfun, (f, A))))
    max_checkpointed_usage = max(memory_usage((gradfun, (checkpointed_f, A))))

    assert max_checkpointed_usage < max_usage / 2.
Example 7: __init__
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
def __init__(self):
    # keep a global accounting for the last known memory usage
    # which is the reference point for the memory delta calculation
    self.previous_call_memory_usage = memory_profiler.memory_usage()[0]
    self.t1 = time.time()  # will be set to current time later
    self.keep_watching = True
    self.peak_memory_usage = -1
    self.peaked_memory_usage = -1
    self.memory_delta = 0
    self.time_delta = 0
    self.watching_memory = True
    self.ip = get_ipython()
    self.input_cells = self.ip.user_ns['In']
    self._measurements = namedtuple(
        'Measurements',
        ['memory_delta', 'time_delta', 'memory_peak', 'memory_usage'],
    )
Example 8: watch_memory
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
def watch_memory(self):
    if not self.watching_memory:
        return
    # calculate time delta using global t1 (from the pre-run
    # event) and current time
    self.time_delta = time.time() - self.t1
    new_memory_usage = memory_profiler.memory_usage()[0]
    self.memory_delta = new_memory_usage - self.previous_call_memory_usage
    self.keep_watching = False
    self.peaked_memory_usage = max(0, self.peak_memory_usage - new_memory_usage)
    num_commands = len(self.input_cells) - 1
    cmd = "In [{}]".format(num_commands)
    # convert the results into a pretty string
    output_template = ("{cmd} used {memory_delta:0.3f} MiB RAM in "
                       "{time_delta:0.3f}s, peaked {peaked_memory_usage:0.3f} "
                       "MiB above current, total RAM usage "
                       "{memory_usage:0.3f} MiB")
    output = output_template.format(
        time_delta=self.time_delta,
        cmd=cmd,
        memory_delta=self.memory_delta,
        peaked_memory_usage=self.peaked_memory_usage,
        memory_usage=new_memory_usage)
    print(str(output))
    self.previous_call_memory_usage = new_memory_usage
Example 9: during_execution_memory_sampler
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
def during_execution_memory_sampler(self):
    import time
    import memory_profiler
    self.peak_memory_usage = -1
    self.keep_watching = True

    n = 0
    WAIT_BETWEEN_SAMPLES_SECS = 0.001
    MAX_ITERATIONS = 60.0 / WAIT_BETWEEN_SAMPLES_SECS
    while True:
        mem_usage = memory_profiler.memory_usage()[0]
        self.peak_memory_usage = max(mem_usage, self.peak_memory_usage)
        time.sleep(WAIT_BETWEEN_SAMPLES_SECS)
        if not self.keep_watching or n > MAX_ITERATIONS:
            # exit if we've been told our command has finished or
            # if it has run for more than a sane amount of time
            # (e.g. maybe something crashed and we don't want this
            # to carry on running)
            if n > MAX_ITERATIONS:
                print("{} SOMETHING WEIRD HAPPENED AND THIS RAN FOR TOO LONG, THIS THREAD IS KILLING ITSELF".format(__file__))
            break
        n += 1
Example 10: watch_memory
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
def watch_memory():
    # bring in the global memory usage value from the previous iteration
    global previous_call_memory_usage, peak_memory_usage, keep_watching, \
        watching_memory, input_cells
    new_memory_usage = memory_profiler.memory_usage()[0]
    memory_delta = new_memory_usage - previous_call_memory_usage
    keep_watching = False
    # calculate time delta using global t1 (from the pre-run event) and current
    # time
    time_delta_secs = time.time() - t1
    num_commands = len(input_cells) - 1
    cmd = "In [{}]".format(num_commands)
    # convert the results into a pretty string
    output_template = ("{cmd} used {memory_delta:0.4f} MiB RAM in "
                       "{time_delta:0.2f}s, total RAM usage "
                       "{memory_usage:0.2f} MiB")
    output = output_template.format(time_delta=time_delta_secs,
                                    cmd=cmd,
                                    memory_delta=memory_delta,
                                    memory_usage=new_memory_usage)
    if watching_memory:
        print(str(output))
    previous_call_memory_usage = new_memory_usage
Example 11: watch_memory
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
def watch_memory():
    """Prints the memory usage if watching the memory"""
    # bring in the global memory usage value from the previous iteration
    global previous_call_memory_usage, peak_memory_usage, keep_watching, \
        watching_memory, input_cells
    new_memory_usage = memory_profiler.memory_usage()[0]
    memory_delta = new_memory_usage - previous_call_memory_usage
    keep_watching = False
    peaked_memory_usage = max(0, peak_memory_usage - new_memory_usage)
    # calculate time delta using global t1 (from the pre-run event) and current
    # time
    time_delta_secs = time.time() - t1
    num_commands = len(input_cells) - 1
    cmd = "In [{}]".format(num_commands)
    # convert the results into a pretty string
    output_template = ("{cmd} used {memory_delta:0.4f} MiB RAM in "
                       "{time_delta:0.2f}s, peaked {peaked_memory_usage:0.2f} "
                       "MiB above current, total RAM usage "
                       "{memory_usage:0.2f} MiB")
    output = output_template.format(time_delta=time_delta_secs,
                                    cmd=cmd,
                                    memory_delta=memory_delta,
                                    peaked_memory_usage=peaked_memory_usage,
                                    memory_usage=new_memory_usage)
    if watching_memory:
        print(str(output))
    previous_call_memory_usage = new_memory_usage
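Examples 7 to 11 come from IPython memory-watching extensions: the watch_memory functions assume that globals such as t1, previous_call_memory_usage and input_cells have been set up by IPython event hooks around each cell. A rough sketch of that wiring, assuming the module-level watch_memory from example 10 or 11 is in scope (callback signatures are simplified and the function name start_watching_memory is hypothetical):

import time
import memory_profiler
from IPython import get_ipython

def start_watching_memory():
    """Hypothetical wiring: snapshot state before each cell, report after it."""
    global t1, previous_call_memory_usage, input_cells, watching_memory
    ip = get_ipython()
    previous_call_memory_usage = memory_profiler.memory_usage()[0]
    input_cells = ip.user_ns['In']
    watching_memory = True

    def pre_run_cell(*args, **kwargs):
        global t1, keep_watching
        t1 = time.time()
        keep_watching = True

    ip.events.register('pre_run_cell', pre_run_cell)
    ip.events.register('post_run_cell', lambda *args, **kwargs: watch_memory())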
Example 12: encrypt_file
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
def encrypt_file(data: BytesIO, fingerprint: str) -> str:
    LOG.d("encrypt for %s", fingerprint)
    mem_usage = memory_usage(-1, interval=1, timeout=1)[0]
    LOG.d("mem_usage %s", mem_usage)

    # todo
    if mem_usage > 300:
        LOG.error("Force exit")
        hard_exit()

    r = gpg.encrypt_file(data, fingerprint, always_trust=True)
    if not r.ok:
        # maybe the fingerprint is not loaded on this host, try to load it
        mailbox = Mailbox.get_by(pgp_finger_print=fingerprint)
        if mailbox:
            LOG.d("(re-)load public key for %s", mailbox)
            load_public_key(mailbox.pgp_public_key)

            LOG.d("retry to encrypt")
            data.seek(0)
            r = gpg.encrypt_file(data, fingerprint, always_trust=True)

        if not r.ok:
            raise PGPException(f"Cannot encrypt, status: {r.status}")

    return str(r)
Example 13: cur_python_mem
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
from memory_profiler import memory_usage

def cur_python_mem():
    # sample the current process for about 1 second at 0.2s intervals
    mem_usage = memory_usage(-1, interval=0.2, timeout=1)
    return mem_usage
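Examples 12 and 13 use the proc=-1 form with interval and timeout. That call returns a list of RSS samples for the current process, taken every interval seconds for roughly timeout seconds, so index [0] is simply the first sample rather than a peak. A small sketch of what to expect:

from memory_profiler import memory_usage

samples = memory_usage(-1, interval=0.2, timeout=1)   # current process, roughly 5 samples
print(len(samples), "samples:", samples)
print("first sample: %.1f MiB, peak over the window: %.1f MiB"
      % (samples[0], max(samples)))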
Example 14: watch_memory
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
def watch_memory():
    # bring in the global memory usage value from the previous iteration
    global previous_call_memory_usage, keep_watching, watching_memory, input_cells
    new_memory_usage = memory_profiler.memory_usage()[0]
    memory_delta = new_memory_usage - previous_call_memory_usage
    keep_watching = False
    total_memory = psutil.virtual_memory()[0] / 1024 / 1024  # in Mb
    # calculate time delta using global t1 (from the pre-run event) and current time
    time_delta_secs = time.time() - t1
    num_commands = len(input_cells) - 1
    cmd = "In [{}]".format(num_commands)
    # convert the results into a pretty string
    output_template = (
        "{cmd} used {memory_delta:0.4f} Mb RAM in "
        "{time_delta:0.2f}s, total RAM usage "
        "{memory_usage:0.2f} Mb, total RAM "
        "memory {total_memory:0.2f} Mb"
    )
    output = output_template.format(
        time_delta=time_delta_secs,
        cmd=cmd,
        memory_delta=memory_delta,
        memory_usage=new_memory_usage,
        total_memory=total_memory,
    )
    if watching_memory:
        print(str(output))
    previous_call_memory_usage = new_memory_usage
Example 15: test_visual_benchmark
# Required module: import memory_profiler [as alias]
# Or: from memory_profiler import memory_usage [as alias]
def test_visual_benchmark(qtbot, vertex_shader_nohook, fragment_shader):
    try:
        from memory_profiler import memory_usage
    except ImportError:  # pragma: no cover
        logger.warning("Skip test depending on unavailable memory_profiler module.")
        return

    class TestCanvas(QOpenGLWindow):
        def paintGL(self):
            gloo.clear()
            program.draw('points')

    program = gloo.Program(vertex_shader_nohook, fragment_shader)

    canvas = TestCanvas()
    canvas.show()
    qtbot.waitForWindowShown(canvas)

    def f():
        for _ in range(100):
            program['a_position'] = (-1 + 2 * np.random.rand(100_000, 2)).astype(np.float32)
            canvas.update()
            qtbot.wait(1)

    mem = memory_usage(f)
    usage = max(mem) - min(mem)
    print(usage)
    # NOTE: this test is currently failing because of a memory leak in the gloo module.
    # Recreating a buffer at every cluster selection causes a memory leak; one should
    # ideally use a single large buffer and reuse it, even if the buffer's content is
    # actually smaller.
    # assert usage < 10
    canvas.close()