本文整理汇总了Python中resource.getrlimit方法的典型用法代码示例。如果您正苦于以下问题:Python resource.getrlimit方法的具体用法?Python resource.getrlimit怎么用?Python resource.getrlimit使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类resource
的用法示例。
在下文中一共展示了resource.getrlimit方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __enter__
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def __enter__(self):
    """Try to save the previous core-dump ulimit, then set it to (0, 0).

    On macOS, additionally warn when the 'Crash Reporter' is configured
    in 'Developer' mode, since suppressing core dumps does not stop its
    dialog from being triggered.
    """
    if resource is not None:
        try:
            self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
            resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
        except (ValueError, resource.error):
            # RLIMIT_CORE may be unsupported here; this is best effort.
            pass

    if sys.platform == 'darwin':
        # Check if the 'Crash Reporter' on OSX was configured
        # in 'Developer' mode and warn that it will get triggered
        # when it is.
        #
        # This assumes that this context manager is used in tests
        # that might trigger the next manager.
        value = subprocess.Popen(['/usr/bin/defaults', 'read',
                                  'com.apple.CrashReporter', 'DialogType'],
                                 stdout=subprocess.PIPE).communicate()[0]
        if value.strip() == b'developer':
            # Fixed: the original used a Python 2 print statement,
            # which is a SyntaxError on Python 3.
            print("this tests triggers the Crash Reporter, that is intentional")
            sys.stdout.flush()
示例2: test_urandom_failure
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def test_urandom_failure(self):
    """Verify that os.urandom() raises OSError with errno EMFILE when the
    process has exhausted its file descriptors (run in a subprocess)."""
    # Check urandom() failing when it is not able to open /dev/random.
    # We spawn a new process to make the test more robust (if getrlimit()
    # failed to restore the file descriptor limit after this, the whole
    # test suite would crash; this actually happened on the OS X Tiger
    # buildbot).
    code = """if 1:
        import errno
        import os
        import resource
        soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
        resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
        try:
            os.urandom(16)
        except OSError as e:
            assert e.errno == errno.EMFILE, e.errno
        else:
            raise AssertionError("OSError not raised")
        """
    assert_python_ok('-c', code)
示例3: _set
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def _set(self):
    """Apply the ulimit values requested via ``vt_ulimit_*`` test params.

    For every supported ulimit key, the previous (soft, hard) pair is
    saved in ``self.ulimit`` so it can be restored later, then both the
    soft and hard limits are set to the requested value.  Unsupported
    values abort the test via ``self.test.error``.
    """
    self.ulimit = {}
    for key in self.ulimit_options:
        set_value = self.params.get("vt_ulimit_%s" % key)
        if not set_value:
            continue
        # Save default ulimit values in tuple (soft, hard) for restore.
        self.ulimit[key] = resource.getrlimit(self.ulimit_options[key])
        # Lazy %-args avoid formatting when INFO logging is disabled.
        logging.info("Setting ulimit %s to %s.", key, set_value)
        if set_value in ("unlimited", "ulimited"):
            # Fixed: accept the documented spelling "unlimited";
            # "ulimited" is kept for backward compatibility with the
            # historical typo previously required by this code.
            set_value = resource.RLIM_INFINITY
        elif set_value.isdigit():
            set_value = int(set_value)
        else:
            self.test.error("%s is not supported for "
                            "setting ulimit %s" % (set_value, key))
        try:
            resource.setrlimit(self.ulimit_options[key],
                               (set_value, set_value))
        except ValueError as error:
            self.test.error(str(error))
示例4: test_rlimit_get
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def test_rlimit_get(self):
    """Sanity-check psutil's RLIMIT_* constants against the stdlib
    ``resource`` module and the values returned by Process.rlimit()."""
    import resource

    proc = psutil.Process(os.getpid())
    rlimit_names = [attr for attr in dir(psutil) if attr.startswith('RLIMIT')]
    assert rlimit_names, rlimit_names
    for rlimit_name in rlimit_names:
        rlimit_const = getattr(psutil, rlimit_name)
        self.assertGreaterEqual(rlimit_const, 0)
        if rlimit_name not in dir(resource):
            # Constant exists only in psutil: just check the shape of
            # the (soft, hard) pair returned for it.
            soft_hard = proc.rlimit(rlimit_const)
            self.assertEqual(len(soft_hard), 2)
            self.assertGreaterEqual(soft_hard[0], -1)
            self.assertGreaterEqual(soft_hard[1], -1)
            continue
        self.assertEqual(rlimit_const, getattr(resource, rlimit_name))
        # XXX - On PyPy RLIMIT_INFINITY returned by
        # resource.getrlimit() is reported as a very big long
        # number instead of -1. It looks like a bug with PyPy.
        if PYPY:
            continue
        self.assertEqual(proc.rlimit(rlimit_const),
                         resource.getrlimit(rlimit_const))
示例5: batch_samples
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def batch_samples(samples, threads):
    """Split up *samples* into batches for *threads* workers.

    The batch size is capped so that ``batch_size * threads`` stays below
    80% of the soft open-file limit, while remaining large enough to use
    all threads.

    Args:
        samples: sequence of samples to split.
        threads: number of worker threads the batches will feed.

    Returns:
        A list of lists; every batch holds at most ``size`` samples.
    """
    import math
    import resource

    # Max open files on system (soft limit); keep 20% headroom.
    max_open = int(0.8 * resource.getrlimit(resource.RLIMIT_NOFILE)[0])
    max_size = math.floor(max_open / threads)  # avoid exceeding max_open
    min_size = math.ceil(len(samples) / float(threads))  # use all threads
    # Fixed: clamp to >= 1.  With a very low RLIMIT_NOFILE, max_size could
    # be 0, making the flush check below always true and producing one
    # batch per sample.
    size = max(1, min(min_size, max_size))
    batches = []
    batch = []
    for sample in samples:
        batch.append(sample)
        if len(batch) >= size:
            batches.append(batch)
            batch = []
    if batch:
        batches.append(batch)
    return batches
示例6: _fallbackFDImplementation
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def _fallbackFDImplementation(self):
    """
    Fallback implementation where either the resource module can inform us
    about the upper bound of how many FDs to expect, or where we just guess
    a constant maximum if there is no resource module.

    All possible file descriptors from 0 to that upper bound are returned
    with no attempt to exclude invalid file descriptor values.
    """
    try:
        import resource
    except ImportError:
        resource = None

    if resource is None:
        upperBound = 1024
    else:
        # OS-X reports 9223372036854775808. That's a lot of fds to close.
        # OS-X should get the /dev/fd implementation instead, so mostly
        # this check probably isn't necessary.
        upperBound = min(1024, resource.getrlimit(resource.RLIMIT_NOFILE)[1])
    return range(upperBound)
示例7: increase_limit_nofiles
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def increase_limit_nofiles():
    """Raise the soft "nofile" limit as close to 6000 as the hard limit
    allows, warning on stderr when the hard limit itself is too low."""
    current_soft, current_hard = getrlimit(RLIMIT_NOFILE)
    # This should be comfortably larger than the product of services and regions
    wanted = 6000
    if current_hard < wanted:
        separator = "-" * 80
        print(separator, file=stderr)
        print(
            "WARNING!\n"
            "Your system limits the number of open files and network connections to {}.\n"
            "This may lead to failures during querying.\n"
            "Please increase the hard limit of open files to at least {}.\n"
            "The configuration for hard limits is often found in /etc/security/limits.conf".format(
                current_hard, wanted
            ),
            file=stderr
        )
        print(separator, file=stderr)
        print(file=stderr)
    # Soft limit can never exceed the hard limit.
    new_soft = min(wanted, current_hard)
    if new_soft > current_soft:
        print("Increasing the open connection limit \"nofile\" from {} to {}.".format(current_soft, new_soft))
        setrlimit(RLIMIT_NOFILE, (new_soft, current_hard))
    print("")
示例8: prevent_core_dump
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def prevent_core_dump():
    """ Prevent this process from generating a core dump.

    Sets the soft and hard limits for core dump size to zero. On
    Unix, this prevents the process from creating core dump
    altogether.

    Raises:
        DaemonOSEnvironmentError: if this platform does not support the
            RLIMIT_CORE resource limit.
    """
    core_resource = resource.RLIMIT_CORE
    try:
        # Ensure the resource limit exists on this platform, by requesting
        # its current value
        resource.getrlimit(core_resource)
    except ValueError as exc:
        # Fixed: the original used the Python 2 ``except ValueError, exc``
        # form, which is a SyntaxError on Python 3.
        error = DaemonOSEnvironmentError(
            "System does not support RLIMIT_CORE resource limit (%(exc)s)"
            % vars())
        raise error
    # Set hard and soft limits to zero, i.e. no core dump at all
    # (fixed: the visible original documented this but never did it).
    core_limit = (0, 0)
    resource.setrlimit(core_resource, core_limit)
示例9: prevent_core_dump
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def prevent_core_dump():
    """ Prevent this process from generating a core dump.

    Sets the soft and hard limits for core dump size to zero. On
    Unix, this prevents the process from creating core dump
    altogether.
    """
    try:
        # Probing the current value is how we detect whether this
        # platform knows about RLIMIT_CORE at all.
        resource.getrlimit(resource.RLIMIT_CORE)
    except ValueError as exc:
        raise DaemonOSEnvironmentError(
            "System does not support RLIMIT_CORE resource limit (%s)" % exc)
    # Set hard and soft limits to zero, i.e. no core dump at all
    resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
示例10: close_fds
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def close_fds(keep_fds):  # pragma: no cover
    """Close all the file descriptors except those in keep_fds."""
    # Make sure to keep stdout and stderr open for logging purpose
    protected = set(keep_fds) | {1, 2}
    try:
        # Linux exposes the exact set of open fds under /proc/self/fd.
        candidates = {int(entry) for entry in os.listdir('/proc/self/fd')}
    except FileNotFoundError:
        # No /proc: fall back to every fd up to the soft limit.
        import resource
        soft_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        candidates = set(range(3, soft_limit))
        candidates.add(0)
    for fd in candidates - protected:
        try:
            os.close(fd)
        except OSError:
            # The descriptor was not actually open.
            pass
示例11: _reset_file_descriptors
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def _reset_file_descriptors(self):
    """Close open file descriptors and redirect standard streams."""
    if not self.close_open_files:
        # Only STDIN, STDOUT, and STDERR need to be reset.
        fd_limit = 3
    else:
        # Attempt to determine the max number of open files
        fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
        if fd_limit == resource.RLIM_INFINITY:
            # An unlimited hard limit: fall back to a sane upper bound.
            fd_limit = 2048
    for fd in range(fd_limit):
        try:
            os.close(fd)
        except OSError:
            # The file descriptor probably wasn't open
            pass
    # Redirect STDIN, STDOUT, and STDERR to /dev/null
    devnull_fd = os.open(os.devnull, os.O_RDWR)
    os.dup2(devnull_fd, 0)
    os.dup2(devnull_fd, 1)
    os.dup2(devnull_fd, 2)
示例12: get_maxfd
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def get_maxfd():
    """Return the hard limit on open file descriptors, falling back to
    the MAXFD constant when the limit is unlimited."""
    hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    return MAXFD if hard_limit == resource.RLIM_INFINITY else hard_limit
示例13: collect_resource
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def collect_resource(info_add):
    """Record every RLIMIT_* value plus the page size via *info_add*."""
    try:
        import resource
    except ImportError:
        # The resource module is Unix-only; nothing to report elsewhere.
        return

    for name in dir(resource):
        if not name.startswith('RLIMIT_'):
            continue
        info_add('resource.%s' % name,
                 resource.getrlimit(getattr(resource, name)))

    call_func(info_add, 'resource.pagesize', resource, 'getpagesize')
示例14: limitedTime
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def limitedTime(second, func, *args, **kw):
    """Run ``func(*args, **kw)`` under a CPU-time rlimit of *second*
    seconds, restoring the previous limit and SIGXCPU handler afterwards."""
    second = fixTimeout(second)
    previous_handler = signal(SIGXCPU, signalHandler)
    previous_limit = getrlimit(RLIMIT_CPU)
    try:
        # Only the soft limit is lowered; the hard limit stays as-is.
        setrlimit(RLIMIT_CPU, (second, previous_limit[1]))
        return func(*args, **kw)
    finally:
        setrlimit(RLIMIT_CPU, previous_limit)
        signal(SIGXCPU, previous_handler)
示例15: getMemoryLimit
# 需要导入模块: import resource [as 别名]
# 或者: from resource import getrlimit [as 别名]
def getMemoryLimit():
    """Return the address-space soft limit scaled by PAGE_SIZE, or None
    when RLIMIT_AS is unsupported on this platform."""
    try:
        soft = getrlimit(RLIMIT_AS)[0]
    except ValueError:
        return None
    # Positive limits are scaled by PAGE_SIZE before being returned.
    # NOTE(review): POSIX defines RLIMIT_AS in bytes, not pages --
    # confirm upstream why the scaling is applied here.
    return soft * PAGE_SIZE if 0 < soft else soft