This article collects typical usage examples of the Python thread.stack_size function. If you have been wondering what stack_size does, how to call it, or what it looks like in real code, the hand-picked examples below may help.
The following shows 15 code examples of the stack_size function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
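Before the examples, here is a minimal sketch of the basic API for orientation (Python 2's thread module, renamed _thread in Python 3; the 256 KiB figure is purely illustrative):

import thread          # Python 2; the module is renamed _thread in Python 3
import threading

# Query the current setting; 0 means "use the platform default".
print thread.stack_size()

# Request a 256 KiB stack for threads created *after* this call.
# The value must be 0 or at least 32768 (32 KiB), and some platforms
# additionally require a multiple of the page size; otherwise ValueError.
thread.stack_size(256 * 1024)

def worker():
    print "worker running with the reduced stack"

t = threading.Thread(target=worker)
t.start()
t.join()

Note that the new size only applies to threads created after the call; the stack of the already-running main thread cannot be changed.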
Example 1: load_environment
def load_environment(global_conf, app_conf):
    """Configure the Pylons environment via the ``pylons.config``
    object
    """
    # Pylons paths
    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    paths = dict(root=root,
                 controllers=os.path.join(root, 'controllers'),
                 static_files=os.path.join(root, 'public'),
                 templates=[os.path.join(root, 'templates')])

    # Initialize config with the basic options
    config.init_app(global_conf, app_conf, package='hewwolord', paths=paths)

    config['routes.map'] = make_map()
    config['pylons.app_globals'] = app_globals.Globals()
    config['pylons.h'] = hewwolord.lib.helpers

    # Create the Genshi TemplateLoader
    config['pylons.app_globals'].genshi_loader = TemplateLoader(
        paths['templates'], auto_reload=True)

    # Setup the SQLAlchemy database engine
    if 'sqlalchemy.module' in config:
        config['sqlalchemy.module'] = __import__(config['sqlalchemy.module'])
    engine = engine_from_config(config, 'sqlalchemy.')

    try:
        import coev
        flagdict = {
            'coev.debug.lib.coev': coev.CDF_COEV,
            'coev.debug.lib.coev.dump': coev.CDF_COEV_DUMP,
            'coev.debug.lib.colock': coev.CDF_COLOCK,
            'coev.debug.lib.colock.dump': coev.CDF_COLOCK_DUMP,
            'coev.debug.lib.nbuf': coev.CDF_NBUF,
            'coev.debug.lib.nbuf.dump': coev.CDF_NBUF_DUMP,
            'coev.debug.lib.runq.dump': coev.CDF_RUNQ_DUMP,
            'coev.debug.lib.stack': coev.CDF_STACK,
            'coev.debug.lib.stack.dump': coev.CDF_STACK_DUMP}
        lib_debug_flags = 0
        for f in flagdict:
            if asbool(config.get(f, False)):
                lib_debug_flags |= flagdict[f]
        coev.setdebug(asbool(config.get('coev.debug.module', False)),
                      lib_debug_flags)
        import thread
        thread.stack_size(int(config.get('coev.stack.size', 2 * 1024 * 1024)))
    except ImportError:
        pass

    mcservers = aslist(config['memcache.servers'])
    mcdebug = asbool(config['memcache.debug'])
    init_model(engine, mcservers, mcdebug)
Example 2: setup_app
def setup_app(command, conf, vars):
    """Place any commands to setup quanthistling here"""
    thread.stack_size(512 * 1024)

    # Don't reload the app if it was loaded under the testing environment
    if not pylons.test.pylonsapp:
        load_environment(conf.global_conf, conf.local_conf)

    # Create the tables if they don't already exist
    metadata.create_all(bind=Session.bind)
Example 3: test_stack_size
def test_stack_size():
    import sys
    if is_cli or (sys.version_info[0] == 2 and sys.version_info[1] > 4) or sys.version_info[0] > 2:
        import thread

        size = thread.stack_size()
        Assert(size == 0 or size >= 32768)

        bad_size_list = [1, -1, -32768, -32769, -32767, -40000, 32767, 32766]
        for bad_size in bad_size_list:
            AssertError(ValueError, thread.stack_size, bad_size)

        good_size_list = [4096 * 10, 4096 * 100, 4096 * 1000, 4096 * 10000]
        for good_size in good_size_list:
            # CodePlex Work Item 7827
            if (is_cli or is_silverlight) and good_size <= 50000:
                print "Ignoring", good_size, "for CLI"
                continue
            temp = thread.stack_size(good_size)
            Assert(temp >= 32768 or temp == 0)

        def temp():
            pass

        thread.start_new_thread(temp, ())
        temp = thread.stack_size(1024 * 1024)
        Assert(temp >= 32768 or temp == 0)
Example 4: test_stack_size
def test_stack_size(self):
    import thread

    thread.stack_size(0)
    res = thread.stack_size(0)
    assert res == 0
    res = thread.stack_size(1024 * 1024)
    assert res == 0
    res = thread.stack_size(2 * 1024 * 1024)
    assert res == 1024 * 1024
    res = thread.stack_size(0)
    assert res == 2 * 1024 * 1024
Example 5: hashtag_stream
def hashtag_stream(hashtags, auth):
    print "listening for %s" % ",".join(hashtags)
    twitter_stream = TwitterStream(auth=auth)
    try:
        iterator = twitter_stream.statuses.filter(track=",".join(hashtags))
    except:
        time.sleep(3)
        iterator = twitter_stream.statuses.filter(track=",".join(hashtags))

    while True:
        print "*" * 40
        print "stack size: %d" % thread.stack_size()
        print "started thread worker for %s" % ",".join(hashtags)
        for tweet in iterator:
            try:
                hashtag = "#"
                try:
                    for htag in hashtags:
                        if htag in str(tweet["text"]):
                            hashtag = str(htag)
                    err_count = bayespell.errors(tweet["text"])
                    if hashtag.strip() != "#":
                        print hashtag, err_count
                        hashtag_object = Hashtag.objects.get(tag=hashtag)
                        hashtag_object.typos = hashtag_object.typos + err_count
                        hashtag_object.save()
                except:
                    pass
                continue
            except:
                time.sleep(3)
                twitter_stream = TwitterStream(auth=auth)
                try:
                    iterator = twitter_stream.statuses.filter(track=",".join(hashtags))
                except:
                    time.sleep(3)
                    iterator = twitter_stream.statuses.filter(track=",".join(hashtags))
                continue
Example 6: print
done.release()

print('\n*** Barrier Test ***')
if done.acquire(0):
    raise ValueError("'done' should have remained acquired")
bar = barrier(numtasks)
running = numtasks
for i in range(numtasks):
    thread.start_new_thread(task2, (i,))
done.acquire()
print('all tasks done')

if hasattr(thread, 'stack_size'):
    # not all platforms support changing thread stack size
    print('\n*** Changing thread stack size ***')

    if thread.stack_size() != 0:
        raise ValueError("initial stack_size not 0")

    thread.stack_size(0)
    if thread.stack_size() != 0:
        raise ValueError("stack_size not reset to default")

    from os import name as os_name
    if os_name in ("nt", "os2", "posix"):
        tss_supported = 1
        try:
            thread.stack_size(4096)
        except ValueError:
            print('caught expected ValueError setting stack_size(4096)')
        except thread.error:
Example 7: WorkloadGenerator
if __name__ == "__main__":
    gc.set_debug(gc.DEBUG_LEAK)

    parser = optparse.OptionParser(usage="%prog -h [host] -p [port] -l [load (sessions/s)] -d [duration (seconds)] -t [total (sessions)] -n [seed]", version="%prog 1.0")
    parser.add_option("-s", "--server", dest="host", type="str", default="localhost",
                      help="server to connect to")
    parser.add_option("-p", "--port", dest="port", type="int", default=80,
                      help="port number to connect to")
    parser.add_option("-l", "--load", dest="load", type="int", default=1,
                      help="number of sessions to create per second")
    parser.add_option("-t", "--total", dest="total", type="int", default=None,
                      help="total number of sessions to generate")
    parser.add_option("-d", "--duration", dest="duration", type="int", default=1,
                      help="total number of seconds to generate load")
    parser.add_option("-n", "--seed", dest="seed", type="int", default=100,
                      help="random number generator seed")
    (options, args) = parser.parse_args()

    # seed random number generator
    random.seed(options.seed)

    thread.stack_size(50000)

    # launch generator
    wg = WorkloadGenerator(options.host, options.port, options.load)
    if options.total:
        wg.startTotal(options.total)
    else:
        wg.startDuration(options.duration)
Example 8: setup_server
def setup_server(self):
    """Configure CherryPy server from application configuration.

    Traverses the server configuration portion and applies parameters
    known to be for CherryPy to the CherryPy server configuration.
    These are: engine, hooks, log, request, response, server, tools,
    wsgi, checker.

    Also applies pseudo-parameters ``thread_stack_size`` (default: 128 kB)
    and ``sys_check_interval`` (default: 10000). The former sets the
    default stack size to the desired value, to avoid excessively large
    thread stacks -- the typical operating system default is 8 MB, which
    adds up to rather a lot for a large number of server threads. The
    latter sets python's ``sys.setcheckinterval``; the default is to
    increase this to avoid unnecessarily frequent checks for python's
    GIL, the global interpreter lock. In general we want each thread to
    complete as quickly as possible without making unnecessary checks."""
    cpconfig = cherrypy.config

    # Determine server local base.
    port = getattr(self.srvconfig, 'port', 8080)
    local_base = getattr(self.srvconfig, 'local_base', socket.gethostname())
    if local_base.find(':') == -1:
        local_base = '%s:%d' % (local_base, port)

    # Set default server configuration.
    cherrypy.log = Logger()
    cpconfig.update({'server.max_request_body_size': 0})
    cpconfig.update({'server.environment': 'production'})
    cpconfig.update({'server.socket_host': '0.0.0.0'})
    cpconfig.update({'server.socket_port': port})
    cpconfig.update({'server.socket_queue_size': 100})
    cpconfig.update({'server.thread_pool': 100})
    cpconfig.update({'tools.proxy.on': True})
    cpconfig.update({'tools.proxy.base': local_base})
    cpconfig.update({'tools.time.on': True})
    cpconfig.update({'engine.autoreload_on': False})
    cpconfig.update({'request.show_tracebacks': False})
    cpconfig.update({'request.methods_with_bodies': ("POST", "PUT", "DELETE")})
    thread.stack_size(getattr(self.srvconfig, 'thread_stack_size', 128 * 1024))
    sys.setcheckinterval(getattr(self.srvconfig, 'sys_check_interval', 10000))
    self.silent = getattr(self.srvconfig, 'silent', False)

    # Apply any override options from app config file.
    for section in ('engine', 'hooks', 'log', 'request', 'response',
                    'server', 'tools', 'wsgi', 'checker'):
        if not hasattr(self.srvconfig, section):
            continue
        for opt, value in getattr(self.srvconfig, section).dictionary_().iteritems():
            if isinstance(value, ConfigSection):
                for xopt, xvalue in value.dictionary_().iteritems():
                    cpconfig.update({"%s.%s.%s" % (section, opt, xopt): xvalue})
            elif isinstance(value, str) or isinstance(value, int):
                cpconfig.update({"%s.%s" % (section, opt): value})
            else:
                raise RuntimeError("%s.%s should be string or int, got %s"
                                   % (section, opt, type(value)))

    # Apply security customisation.
    if hasattr(self.srvconfig, 'authz_defaults'):
        defsec = self.srvconfig.authz_defaults
        cpconfig.update({'tools.cms_auth.on': True})
        cpconfig.update({'tools.cms_auth.role': defsec['role']})
        cpconfig.update({'tools.cms_auth.group': defsec['group']})
        cpconfig.update({'tools.cms_auth.site': defsec['site']})

    if hasattr(self.srvconfig, 'authz_policy'):
        cpconfig.update({'tools.cms_auth.policy': self.srvconfig.authz_policy})
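The docstring in Example 8 explains why a server might lower the per-thread stack: with a typical OS default of 8 MB per thread, a pool of 100 threads can reserve on the order of 800 MB of address space. A minimal standalone sketch of that idea (the 128 KiB figure mirrors the example's default; handle_requests is a hypothetical placeholder):

import thread          # Python 2; _thread in Python 3
import threading

# Ask for 128 KiB stacks before creating the worker pool, instead of the
# multi-megabyte platform default. Only threads started after this call
# are affected; the main thread keeps its original stack.
thread.stack_size(128 * 1024)

def handle_requests():
    pass  # hypothetical placeholder for per-thread server work

pool = [threading.Thread(target=handle_requests) for _ in range(100)]
for t in pool:
    t.start()
for t in pool:
    t.join()

(sys.setcheckinterval, which Example 8 also tunes, is deprecated as of Python 3.2 in favour of sys.setswitchinterval.)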
Example 9: Input
import re
import thread
import traceback

thread.stack_size(1024 * 512)  # reduce vm size


class Input(dict):
    def __init__(self, conn, raw, prefix, command, params,
                 nick, user, host, paraml, msg):
        server = conn.server_host
        chan = paraml[0].lower()
        if chan == conn.nick.lower():  # is a PM
            chan = nick

        def say(msg):
            conn.msg(chan, msg)

        def reply(msg):
            if chan == nick:  # PMs don't need prefixes
                self.say(msg)
            else:
                self.say(nick + ': ' + msg)

        def pm(msg, nick=nick):
            conn.msg(nick, msg)

        def set_nick(nick):
Example 10: open
#with open('SCCTest.txt', 'r') as f:
#with open('SCCTest2.txt', 'r') as f:
    lines = f.readlines()
    x = 0
    for line in lines:
        if 0 == (x % 10000):
            print 'Read %d' % x
        edge = line.split()
        edge[0] = int(edge[0]) - 1
        edge[1] = int(edge[1]) - 1
        edges[edge[0]].append(edge[1])
        revEdges[edge[1]].append(edge[0])
        x += 1

print 'Loaded'

sys.setrecursionlimit(100000)
thread.stack_size(2**27)

t1 = threading.Thread(target=scc)
begin = time.clock()
t1.start()   # start the scc thread
t1.join()    # and wait for it to finish
print time.clock() - begin
#scc()
#print 'Done'

l = []
for scc in sccs:
    l.append(len(scc))
l.sort()
l.reverse()

s = ''
for i in xrange(5):
    if i >= len(l):
        s += '0,'
    else:
        s += str(l[i]) + ','
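Example 10 relies on a pattern worth calling out: thread.stack_size only affects threads created after the call, never the already-running main thread, so the deeply recursive SCC computation is pushed into a fresh thread that gets the enlarged stack. A minimal sketch of the same pattern (the function deep, the depth, and the 64 MB size are illustrative):

import sys
import thread          # Python 2; _thread in Python 3
import threading

def deep(n):
    # hypothetical stand-in for a deeply recursive algorithm (e.g. DFS)
    return 0 if n == 0 else deep(n - 1)

sys.setrecursionlimit(100000)          # lift the interpreter's recursion cap
thread.stack_size(64 * 1024 * 1024)    # 64 MB stacks for *new* threads only

worker = threading.Thread(target=deep, args=(90000,))
worker.start()
worker.join()                          # the main thread's stack is unchanged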
Example 11: Timer
import sys
import random
import time
from Tkinter import *
import ImageTk
from threading import Thread
import thread

thread.stack_size(2**17)  # allow some more threads

RADIUS = 2
WIDTH, HEIGHT = 400, 400
FPS = 40
COLOR_GREEN = "#00FF00"
COLOR_GRAY = "#DDDDDD"
COLOR_YELLOW = "#FAFAC8"


class Timer(Thread):
    "executes a task repeatedly"

    def __init__(self, update=None, wait_time=1.0):
        Thread.__init__(self)
        self.wait_time = wait_time  # in secs as float
        self.update = update
        self.running = True

    def run(self):
        while self.running:
            time.sleep(self.wait_time)
            if self.running:
                self.update()
Example 12: run
def run(self):
    # If we can, replace lgTaskProcessor with lgTaskSlave in our title
    try:
        import setproctitle
        title = setproctitle.getproctitle()
        if 'lgTaskProcessor' in title:
            title = title.replace('lgTaskProcessor', 'lgTaskSlave')
        else:
            title += ' --slave'
        setproctitle.setproctitle(title)
    except ImportError:
        pass

    # We're in our own process now, so disconnect the processor's
    # pymongo connection to make sure we don't hold those sockets open
    self._processor.taskConnection.close()

    # Also, ensure that the global talk variables weren't copied over.
    # This only affects testing situations - that is, the normal processor
    # process won't use talk.
    import lgTask.talk
    lgTask.talk.talkConnection.resetFork()

    canQsize = True
    try:
        self._queue.qsize()
    except NotImplementedError:
        # Oh Mac OS X, how silly you are sometimes
        canQsize = False

    self._fixSigTerm()

    # rconsole?
    if self._processor._useRConsole:
        import lgTask.lib.rfooUtil as rfooUtil
        rfooUtil.spawnServer()

    # Any tasks that we start only really need a teeny bit of stack
    thread.stack_size(1024 * 1024)

    try:
        while True:
            try:
                # See if we should be marked as accepting new tasks from
                # the Processor
                if self._isAccepting.value:
                    self._checkAccepting()

                # Check tasks are running
                self._checkRunning()

                # Get new task
                taskData = self._queue.get(
                    timeout=self._processor.KILL_INTERVAL
                )
                taskThread = InterruptableThread(
                    target=self._runTaskThreadMain,
                    args=(taskData,)
                )
                # Remember the ID so that we can check for "kill" states
                taskThread.taskId = taskData['_id']
                taskThread.start()
                self._running.append(taskThread)

                # Update running count
                newCount = len(self._running)
                if canQsize:
                    newCount += self._queue.qsize()
                self._runningCount.value = newCount
            except Empty:
                pass
            except Exception:
                self._processor.log("Slave error {0}: {1}".format(
                    self.pid, traceback.format_exc()
                ))

            # After each iteration, see if we're alive
            if not self._shouldContinue():
                break
    except:
        self._processor.log("Slave error {0}: {1}".format(
            self.pid, traceback.format_exc()
        ))
    finally:
        pass
Example 13: _gc_collect
import gc
import sys
from threading import Timer
import thread

thread.stack_size(1024 * 1024)  # 1Mb, default is 8Mb


# Setup hourly garbage collection (and stats printing)
def _gc_collect():
    # shows all the memleak info and stores
    # uncollectable objects to gc.garbage
    # sys.stderr.write("\n-- DUMP BEGIN --\n")
    # gc.set_debug(gc.DEBUG_LEAK)
    gc.collect()
    # sys.stderr.write("Uncollectable unreachable objects (sorry):\n")
    # for obj in gc.garbage:
    #     try:
    #         sys.stderr.write("%s\n" % (obj,))
    #     except:
    #         pass
    # sys.stderr.write("Quantity: %s\n" % (len(gc.garbage),))
    # sys.stderr.write("\n-- DUMP END --\n")
    # gc.set_debug(False)
    # now empty the list
    # del gc.garbage[:]
    # gc.collect()
    _setup_timer()
Example 14:
import thread
thread.stack_size(512 * 1024)
Example 15: thread_stack_size
def thread_stack_size(size=None):
    if size is not None:
        raise thread.ThreadError()
    # doesn't really apply, but whatever
    return thread.stack_size()