This article collects typical usage examples of Python's sys.setrecursionlimit function. If you have been wondering what setrecursionlimit does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.
The sections that follow show 15 code examples of setrecursionlimit, sorted by popularity by default.
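For orientation, most of the examples below follow the same basic pattern: read the current limit with sys.getrecursionlimit, raise it, run the deeply recursive code, then restore the old value. A minimal sketch of that pattern (not taken from any of the projects below):

import sys

old_limit = sys.getrecursionlimit()   # CPython's default is 1000
sys.setrecursionlimit(5000)           # raise the ceiling before deep recursion
try:
    ...  # run the deeply recursive code here
finally:
    sys.setrecursionlimit(old_limit)  # always restore the previous limit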
Example 1: recursion_limit
def recursion_limit(n):
    """Context manager that temporarily sets Python's recursion limit to 'n', and restores the
    previous recursion limit when the context is exited."""
    m = sys.getrecursionlimit()
    sys.setrecursionlimit(n)
    yield
    sys.setrecursionlimit(m)
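The body above relies on a @contextlib.contextmanager decorator that is not shown in this excerpt. A usage sketch under that assumption, with a try/finally added so the previous limit is restored even if the body raises:

import contextlib
import sys

@contextlib.contextmanager
def recursion_limit(n):
    """Temporarily set the recursion limit to n, restoring the old value on exit."""
    m = sys.getrecursionlimit()
    sys.setrecursionlimit(n)
    try:
        yield
    finally:
        sys.setrecursionlimit(m)

with recursion_limit(5000):
    ...  # deeply recursive work goes here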
Example 2: increase_python_stack
def increase_python_stack():
    # Workaround sphinx-build recursion limit overflow:
    #   pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
    #   RuntimeError: maximum recursion depth exceeded while pickling an object
    #
    # Default python allows recursion depth of 1000 calls.
    sys.setrecursionlimit(10000)
Example 3: _nestingTest
def _nestingTest(self, nestedObject, expected):
    limit = sys.getrecursionlimit()
    sys.setrecursionlimit(100)
    try:
        self.assertStringEqual(self.flatten(nestedObject), expected)
    finally:
        sys.setrecursionlimit(limit)
Example 4: mergeSort
def mergeSort(alist1):
    sys.setrecursionlimit(100000)
    if len(alist1) > 1:
        mid1 = len(alist1) // 2
        lefthalf1 = alist1[:mid1]
        righthalf1 = alist1[mid1:]

        mergeSort(lefthalf1)
        mergeSort(righthalf1)

        i = 0
        j = 0
        k = 0
        while i < len(lefthalf1) and j < len(righthalf1):
            if lefthalf1[i] < righthalf1[j]:
                alist1[k] = lefthalf1[i]
                i = i + 1
            else:
                alist1[k] = righthalf1[j]
                j = j + 1
            k = k + 1

        while i < len(lefthalf1):
            alist1[k] = lefthalf1[i]
            i = i + 1
            k = k + 1

        while j < len(righthalf1):
            alist1[k] = righthalf1[j]
            j = j + 1
            k = k + 1
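Note that this version calls sys.setrecursionlimit on every recursive invocation, which is harmless but redundant; setting the limit once before the first call is enough. A small usage sketch (the sample list is illustrative):

import sys

data = [38, 27, 43, 3, 9, 82, 10]
sys.setrecursionlimit(100000)  # one call before sorting is sufficient
mergeSort(data)
print(data)  # -> [3, 9, 10, 27, 38, 43, 82]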
Example 5: test_setrecursionlimit_recursion_depth
def test_setrecursionlimit_recursion_depth(self):
    # Issue #25274: Setting a low recursion limit must be blocked if the
    # current recursion depth is already higher than the "lower-water
    # mark". Otherwise, it may not be possible anymore to
    # reset the overflowed flag to 0.
    from _testcapi import get_recursion_depth

    def set_recursion_limit_at_depth(depth, limit):
        recursion_depth = get_recursion_depth()
        if recursion_depth >= depth:
            with self.assertRaises(RecursionError) as cm:
                sys.setrecursionlimit(limit)
            self.assertRegex(str(cm.exception),
                             "cannot set the recursion limit to [0-9]+ "
                             "at the recursion depth [0-9]+: "
                             "the limit is too low")
        else:
            set_recursion_limit_at_depth(depth, limit)

    oldlimit = sys.getrecursionlimit()
    try:
        sys.setrecursionlimit(1000)
        for limit in (10, 25, 50, 75, 100, 150, 200):
            # formula extracted from _Py_RecursionLimitLowerWaterMark()
            if limit > 200:
                depth = limit - 50
            else:
                depth = limit * 3 // 4
            set_recursion_limit_at_depth(depth, limit)
    finally:
        sys.setrecursionlimit(oldlimit)
Example 6: save
def save(filepath, obj):
    try:
        _save(filepath, obj)
    except RuntimeError as e:
        """ Sometimes for large theano graphs, pickle/cPickle exceed the
        maximum recursion depth. This seems to me like a fundamental
        design flaw in pickle/cPickle. The workaround I employ here
        is the one recommended to someone who had a similar problem
        on stackexchange:
        http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
        The workaround is just to raise the max recursion depth.
        Obviously this does not scale and could cause a crash
        but I don't see another solution short of writing our
        own implementation of pickle.
        """
        if str(e).find('recursion') != -1:
            warnings.warn('pylearn2.utils.save encountered the following error: '
                          + str(e) +
                          '\nAttempting to resolve this error by calling '
                          + 'sys.setrecursionlimit and retrying')
            sys.setrecursionlimit(50000)
            _save(filepath, obj)
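For comparison, the same raise-and-retry idea written against the standard pickle module on Python 3 could look like the following sketch (the function name and fallback value are illustrative, not pylearn2 API):

import pickle
import sys

def save_deep_object(path, obj, fallback_limit=50000):
    """Pickle obj to path, retrying once with a higher recursion limit."""
    try:
        with open(path, 'wb') as f:
            pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
    except RecursionError:
        old_limit = sys.getrecursionlimit()
        sys.setrecursionlimit(max(old_limit, fallback_limit))
        try:
            with open(path, 'wb') as f:
                pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
        finally:
            sys.setrecursionlimit(old_limit)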
Example 7: sbo
def sbo(self, name):
    """
    Build all dependencies of a package
    """
    if self.meta.rsl_deps in ["on", "ON"] and self.flag != "--resolve-off":
        try:
            sys.setrecursionlimit(10000)
            dependencies = []
            requires = SBoGrep(name).requires()
            if requires:
                for req in requires:
                    status(0.03)
                    # toolbar_width = status(index, toolbar_width, 1)
                    # skip the %README% pseudo-dependency and any
                    # requirement that is in the blacklist
                    if "%README%" not in req and req not in self.blacklist:
                        dependencies.append(req)
            if dependencies:
                self.dep_results.append(dependencies)
                for dep in dependencies:
                    self.sbo(dep)
            return self.dep_results
        except KeyboardInterrupt:
            print("")  # new line at exit
            sys.exit(0)
    else:
        return []
Example 8: main
def main(command, filename):
    sys.setrecursionlimit(2000)
    try:
        lexer = OrgLexer(filename)
        tokens = lexer.tokenize()
        parser = OrgParser(tokens)
        todo = parser.parse()
    except LexerException as e:
        print(e.message)
        exit(1)
    except ParserException as e:
        print(e.message)
        exit(1)

    if "week" == command:
        schedule = Schedule(todo)
        print(schedule)
    elif 'today' == command:
        schedule = Schedule(todo)
        schedule.days = schedule.days[0:1]
        print("{}:".format(schedule.days[0].name))
        print(schedule)
    elif "tomorrow" == command:
        schedule = Schedule(todo)
        schedule.days = schedule.days[1:2]
        print("{}:".format(schedule.days[0].name))
        print(schedule)
    elif "active" == command:
        active_todo = todo.get_active()
        print(active_todo)
    else:
        usage()
        exit(1)
Example 9: __init__
def __init__(self, G1, G2):
    """Initialize GraphMatcher.

    Suppose G1 and G2 are undirected graphs.

    >>> G1 = nx.path_graph(4)
    >>> G2 = nx.path_graph(4)
    >>> GM = nx.GraphMatcher(G1, G2)

    creates a GraphMatcher which only checks for syntactic feasibility.
    """
    self.G1 = G1
    self.G2 = G2

    # Set recursion limit.
    self.old_recursion_limit = sys.getrecursionlimit()
    expected_max_recursion_level = len(self.G2)
    if self.old_recursion_limit < 1.5 * expected_max_recursion_level:
        # Give some breathing room.
        sys.setrecursionlimit(int(1.5 * expected_max_recursion_level))

    # Declare that we will be searching for a graph-graph isomorphism.
    self.test = 'graph'

    # Initialize the isomorphism mapping.
    self.state = GMState(self)
Example 10: expected
def expected(hic_data, bads=None, signal_to_noise=0.05, inter_chrom=False, **kwargs):
    """
    Computes the expected values by averaging observed interactions at a given
    distance in a given HiC matrix.

    :param hic_data: dictionary containing the interaction data
    :param None bads: dictionary with columns not to be considered
    :param 0.05 signal_to_noise: to calculate expected interaction counts,
       if not enough reads are observed at a given distance, the observations
       at distance+1 are summed. A signal-to-noise ratio of < 0.05
       corresponds to > 400 reads.

    :returns: a vector of biases (length equal to the size of the matrix)
    """
    min_n = signal_to_noise ** -2.  # equals 400 with the default value
    size = len(hic_data)
    try:
        if not inter_chrom:
            size = max(hic_data.chromosomes.values())
    except AttributeError:
        pass
    if size > 1200:
        import sys
        sys.setrecursionlimit(size + 100)
    expc = {}
    dist = 0
    while dist < size:
        diag = []
        new_dist, val = _meandiag(hic_data, dist, diag, min_n, size, bads)
        for dist in range(dist, new_dist + 1):
            expc[dist] = val
    return expc
Example 11: __init__
def __init__(self, G1, G2):
    """Initialize GraphMatcher.

    Parameters
    ----------
    G1, G2: NetworkX Graph or MultiGraph instances.
        The two graphs to check for isomorphism.

    Examples
    --------
    To create a GraphMatcher which checks for syntactic feasibility:

    >>> G1 = nx.path_graph(4)
    >>> G2 = nx.path_graph(4)
    >>> GM = nx.GraphMatcher(G1, G2)
    """
    self.G1 = G1
    self.G2 = G2
    self.G1_nodes = set(G1.nodes())
    self.G2_nodes = set(G2.nodes())

    # Set recursion limit.
    self.old_recursion_limit = sys.getrecursionlimit()
    expected_max_recursion_level = len(self.G2)
    if self.old_recursion_limit < 1.5 * expected_max_recursion_level:
        # Give some breathing room.
        sys.setrecursionlimit(int(1.5 * expected_max_recursion_level))

    # Declare that we will be searching for a graph-graph isomorphism.
    self.test = 'graph'

    # Initialize state
    self.initialize()
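Neither __init__ above restores the raised limit; the saved old_recursion_limit is kept so a companion method can undo the change once matching is finished. A sketch of that restore step (mirroring the code above rather than quoting the NetworkX source):

def reset_recursion_limit(self):
    """Restore the recursion limit saved in __init__."""
    sys.setrecursionlimit(self.old_recursion_limit)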
Example 12: build_model_by_method
def build_model_by_method(filename):
    sys.setrecursionlimit(10000)
    f = open(filename, 'r')
    tree = ET.parse(f)
    root = tree.getroot()
    schedule = {}
    next = {}
    for child in root.findall('Activity'):
        id = child.find('id').text
        start_date = get_child(child, 'start_date')
        finish_date = get_child(child, 'finish_date')
        duration = get_child(child, 'duration')
        not_early_date = get_child(child, 'not_early_date')
        a = Activity(id, start_date, finish_date, duration, not_early_date)
        schedule[id] = a
        next_activity = '' if child.find('next_activity').text is None else child.find('next_activity').text
        next[id] = next_activity
    for key in schedule:
        if next[key] != '':
            for next_id in next[key].split(';'):
                schedule[key].append_next(schedule[next_id])
    sys.setrecursionlimit(1000)
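Resetting to the literal 1000 at the end assumes the interpreter started with the default limit; a safer variant (a sketch, not this project's code) records and restores whatever limit was in effect:

import sys

old_limit = sys.getrecursionlimit()
sys.setrecursionlimit(10000)
try:
    ...  # parse the XML and build the schedule here
finally:
    sys.setrecursionlimit(old_limit)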
Example 13: main
def main():
    import argparse
    parser = argparse.ArgumentParser(
        prog="lipy", description='lipy - a pythonic lisp'
    )
    parser.add_argument("--test", "-t", action="store_true")
    parser.add_argument("--verbose", "-v", action="store_true")
    parser.add_argument("--eval", "-e")
    parser.add_argument("--tree", action="store_true")
    parser.add_argument("file", nargs="?")
    args = parser.parse_args()

    if args.test:
        test()
    elif args.eval:
        evals(args.eval, args.tree)
    elif args.file:
        sys.setrecursionlimit(40000)
        if args.file == "-":
            evals(sys.stdin.read(), args.tree)
        else:
            evals(open(args.file).read(), args.tree)  # open() instead of the Python 2-only file()
    else:
        parser.print_help()

    if args.verbose:
        print("Done, max_stack =", max_stack)
Example 14: exe
def exe(self, mainloop):
    """
    Pickle the mainloop
    """
    if np.mod(mainloop.trainlog._batch_seen, self.freq) == 0:
        pkl_path = mainloop.name + '.pkl'
        path = os.path.join(self.path, pkl_path)
        logger.info("\tSaving model to: %s" % path)
        try:
            import sys
            sys.setrecursionlimit(50000)
            f = open(path, 'wb')
            cPickle.dump(mainloop, f, -1)
            f.close()
            # secure_pickle_dump(mainloop, path)
        except Exception:
            raise
    if np.mod(mainloop.trainlog._batch_seen, self.force_save_freq) == 0:
        force_pkl_path = mainloop.name + '_' + \
                         str(mainloop.trainlog._batch_seen) + \
                         'updates.pkl'
        force_path = os.path.join(self.path, force_pkl_path)
        logger.info("\tSaving model to: %s" % force_path)
        try:
            import sys
            sys.setrecursionlimit(50000)
            f = open(force_path, 'wb')
            cPickle.dump(mainloop, f, -1)
            f.close()
            # secure_pickle_dump(mainloop, path)
        except Exception:
            raise
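The two branches above duplicate the pickling logic; a hedged refactoring sketch (the helper name is illustrative, not this project's API) that keeps the raised recursion limit in one place and restores it afterwards:

def _dump_mainloop(mainloop, path):
    """Pickle mainloop to path with a temporarily raised recursion limit."""
    import sys
    old_limit = sys.getrecursionlimit()
    sys.setrecursionlimit(50000)
    try:
        with open(path, 'wb') as f:
            cPickle.dump(mainloop, f, -1)
    finally:
        sys.setrecursionlimit(old_limit)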
Example 15: doPost
def doPost(self, request, response):
    self.valid_row_count = 0
    self.log_dic = {}
    sys.setrecursionlimit(sys.maxint)
    sb = []
    out = response.getWriter()
    self.log_para = request.getParameter("log")
    if self.log_para == None:
        self.log_para = self.default_log_para
    if self.log_para == None:
        self.log_para = '[request]\n' + warHome + '/logs/request.log*\n\n[sql]\n' + warHome + '/logs/sql.log*\n\n[method]\n' + warHome + '/logs/method.log*'
    ui_public.print_query_head(request, response)
    result = ui_public.parse_log_para(request, response, self.log_para)
    fs = result[0]['method']
    date_from = result[1]
    date_to = result[2]
    date_from_ms = date_from * 1000
    date_to_ms = date_to * 1000
    start_time = datetime.datetime.now()
    if request.getParameter("cmd") == "executeTime":
        self.statisticMethodExecuteTime(fs, sb, date_from_ms, date_to_ms)
    if request.getParameter('cmd') != None and len(request.getParameter('cmd')) > 0:
        cost = datetime.datetime.now() - start_time
        print request.getParameter('cmd'), ' cost, ', cost
        ui_public.print_result_head(request, response, fs, date_from, date_to, self.valid_row_count, cost)
    else:
        # the label '统计method耗时' means "report method execution time"
        ui_public.print_query_form(request, response, self.log_para, [['executeTime', '统计method耗时']])
    # output the results
    ui_public.print_result_content_html(request, response, sb)