This article collects typical usage examples of Python's twisted.internet.reactor.getThreadPool function. If you have been wondering what getThreadPool actually does, how to call it, or where to find concrete examples, the curated code samples below should help.
The article presents 15 code examples of the getThreadPool function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
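Before the collected examples, here is a quick illustrative sketch (not taken from any of the projects below) of what reactor.getThreadPool() returns: the reactor's shared twisted.python.threadpool.ThreadPool, which the reactor starts at startup and stops again at shutdown.

from twisted.internet import reactor

def show_pool_stats():
    # The reactor owns a single shared ThreadPool; deferToThread, WSGIResource
    # and friends all borrow worker threads from it.
    pool = reactor.getThreadPool()
    print("min=%d max=%d started=%s" % (pool.min, pool.max, pool.started))
    reactor.stop()

reactor.callWhenRunning(show_pool_stats)
reactor.run()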
Example 1: _sighup_handler

def _sighup_handler(self, signum, frame):
    self.ufc.configure()
    # If the database configuration has changed, we need to reopen
    # all of the connections.
    log.info("Restarting threadpool...")
    reactor.getThreadPool().stop()
    reactor.getThreadPool().start()
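Example 1 assumes the method is installed as a SIGHUP handler elsewhere; a minimal hypothetical sketch of that wiring (the ConfigReloader class and its configure() body are placeholders for the original project's objects, and SIGHUP is POSIX-only):

import logging
import signal

from twisted.internet import reactor

log = logging.getLogger(__name__)

class ConfigReloader(object):
    def configure(self):
        pass  # re-read configuration here

    def _sighup_handler(self, signum, frame):
        self.configure()
        # Recycle the reactor's thread pool so worker threads pick up
        # fresh database connections.
        log.info("Restarting threadpool...")
        reactor.getThreadPool().stop()
        reactor.getThreadPool().start()

    def install(self):
        signal.signal(signal.SIGHUP, self._sighup_handler)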
Example 2: update_status_in_thread

def update_status_in_thread(self):
    # TODO: make sure performance is not a problem, as the current approach
    # queries the database many times.
    """
    tasks = get_tasks_by_service(service_id)
    clusters = []
    for task in tasks:
      if task.job.cluster not in clusters:
        clusters.append(task.job.cluster)
    """
    logger.info("updating clusters status, "
                "%d tasks in queue, %d workers, %d total threads",
                reactor.getThreadPool().q.qsize(),
                len(reactor.getThreadPool().working),
                len(reactor.getThreadPool().threads))
    try:
        self.start_time = time.time()
        for cluster in Cluster.objects.filter(active=True).all():
            self.update_cluster_status(cluster)
        logger.info("spent %f seconds for updating clusters status",
                    time.time() - self.start_time)
        logger.info("gc: %r", gc.get_count())
        logger.info("usage: %r", resource.getrusage(resource.RUSAGE_SELF))
    except Exception as e:
        logger.warning("Failed to update status: %r", e)
    finally:
        # reactor.callLater is NOT thread-safe but reactor.callFromThread is,
        # so we hand the callLater over to the main loop.
        reactor.callFromThread(
            reactor.callLater, self.collector_config.period, self.update_status)
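Example 2 runs on one of the reactor's pool threads, so it cannot call reactor.callLater directly; a small self-contained sketch (with hypothetical names, the original Cluster/collector objects are omitted) of the dispatch-and-reschedule pattern it relies on:

from twisted.internet import reactor

PERIOD = 10  # seconds between runs (illustrative value)

def update_status():
    # Runs in the reactor thread: push the blocking work onto the thread pool.
    reactor.callInThread(update_status_in_thread)

def update_status_in_thread():
    try:
        pass  # ... blocking database / HTTP work goes here ...
    finally:
        # We are in a pool thread now, so hand the rescheduling back to the
        # reactor; callLater itself is not thread-safe.
        reactor.callFromThread(reactor.callLater, PERIOD, update_status)

reactor.callWhenRunning(update_status)
reactor.run()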
Example 3: test_make_worker_with_threadpool_size
def test_make_worker_with_threadpool_size(self):
    """
    The reactor threadpool can be resized with a command line option.
    """
    from twisted.internet import reactor
    old_maxthreads = reactor.getThreadPool().max
    self.add_cleanup(reactor.suggestThreadPoolSize, old_maxthreads)
    # Explicitly set the threadpool size to something different from the
    # value we're testing with.
    reactor.suggestThreadPoolSize(5)
    self.mk_config_file('worker', ["transport_name: sphex"])
    maker = VumiWorkerServiceMaker()
    # By default, we don't touch the threadpool.
    options = StartWorkerOptions()
    options.parseOptions([
        '--worker-class', 'vumi.demos.words.EchoWorker',
        '--config', self.config_file['worker'],
    ])
    worker = maker.makeService(options)
    self.assertEqual({'transport_name': 'sphex'}, worker.config)
    self.assertEqual(reactor.getThreadPool().max, 5)
    # If asked, we set the threadpool's maximum size.
    options_mt = StartWorkerOptions()
    options_mt.parseOptions([
        '--worker-class', 'vumi.demos.words.EchoWorker',
        '--config', self.config_file['worker'],
        '--maxthreads', '2',
    ])
    worker = maker.makeService(options_mt)
    self.assertEqual({'transport_name': 'sphex'}, worker.config)
    self.assertEqual(reactor.getThreadPool().max, 2)
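For background on the assertions in Example 3, a standalone sketch (not from vumi) of how reactor.suggestThreadPoolSize() changes the maximum reported by the reactor's pool:

from twisted.internet import reactor

pool = reactor.getThreadPool()
print("default max: %d" % pool.max)   # typically 10 on a stock reactor

reactor.suggestThreadPoolSize(2)      # shrink the pool's maximum
print("new max: %d" % reactor.getThreadPool().max)   # 2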
Example 4: _start_in_multi_user_mode
def _start_in_multi_user_mode(args, root_resource, services_factory):
    try:
        protected_resources = _setup_multi_user(args, root_resource, services_factory)
        start_site(args, protected_resources)
        reactor.getThreadPool().adjustPoolsize(5, 15)
        return defer.succeed(None)
    except Exception as e:
        return defer.fail(e)
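The adjustPoolsize(5, 15) call in Example 4 sets the minimum and maximum number of worker threads on the reactor's shared pool; the same call in isolation (the values are illustrative):

from twisted.internet import reactor

pool = reactor.getThreadPool()
pool.adjustPoolsize(minthreads=5, maxthreads=15)
print("min=%d max=%d" % (pool.min, pool.max))  # min=5 max=15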
Example 5: dataReceived
def dataReceived(self, data):
    """
    Overridden to stop trying to read data while outputting a response.
    This stops netcat from quitting before it gets the output!
    """
    reactor.removeReader(self.reader)
    retval = LineOnlyReceiver.dataReceived(self, data)
    reactor.getThreadPool().callInThreadWithCallback(
        self.processLinesDone, self.processLines)
    return retval
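callInThreadWithCallback, as used in Example 5, runs its second argument in a pool thread and then invokes the first argument with (success, result); a minimal sketch of that callback contract (the function names here are hypothetical):

from twisted.internet import reactor

def blocking_work():
    return 6 * 7   # runs in one of the pool's worker threads

def on_done(success, result):
    # Also called in the worker thread: success is False if blocking_work
    # raised, and result is then a twisted.python.failure.Failure.
    if success:
        print("got %r" % result)
    else:
        result.printTraceback()
    reactor.callFromThread(reactor.stop)

reactor.callWhenRunning(
    reactor.getThreadPool().callInThreadWithCallback, on_done, blocking_work)
reactor.run()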
Example 6: _start_in_multi_user_mode
def _start_in_multi_user_mode(args, root_resource, services_factory):
    if args.provider is None:
        raise ValueError('provider name is required')
    init_monkeypatches()
    events_server.ensure_server()
    config, provider = initialize_leap_provider(args.provider, args.leap_provider_cert,
                                                args.leap_provider_cert_fingerprint, args.leap_home)
    protected_resource = set_up_protected_resources(root_resource, provider, services_factory)
    start_site(args, protected_resource)
    reactor.getThreadPool().adjustPoolsize(5, 15)
    return defer.succeed(None)
Example 7: update_metrics_in_thread

def update_metrics_in_thread(self, metricsRawData):
    try:
        logger.info("%r updating metrics, "
                    "%d tasks in queue, %d workers, %d total threads",
                    self.task,
                    reactor.getThreadPool().q.qsize(),
                    len(reactor.getThreadPool().working),
                    len(reactor.getThreadPool().threads))
        start_time = time.time()
        # analyze the metric if needed
        if self.need_analyze:
            if metricsRawData:
                metrics = json.loads(metricsRawData)
                metrics_saved = {}
                for bean_output in metrics["beans"]:
                    bean_name = bean_output["name"]
                    for metric_name, metric_value in bean_output.iteritems():
                        if metric_name in ["name", "modelerType"]: continue
                        metric_type = type(metric_value)
                        # Do some hadoop/hbase specific work :)
                        if metric_name in BOOL_METRIC_MAP:
                            metric_value = int(metric_value == BOOL_METRIC_MAP[metric_name])
                        elif metric_type is list or metric_type is dict:
                            # Just store the length.
                            metric_value = len(metric_value)
                        elif metric_type is bool:
                            metric_value = int(metric_value)
                        elif metric_value is None:
                            metric_value = 0
                        elif not (metric_type is int or metric_type is float
                                  or metric_type is unicode or metric_type is str):
                            logger.warning("Unexpected metric type %s/%s: %r/%r",
                                           bean_name, metric_name, metric_type, metric_value)
                            continue
                        # TODO: commented out temporarily; remove it for good if we
                        # decide not to use it.
                        #metric = MetricObjectCache.get(bean_name, metric_name)
                        group = metrics_saved.setdefault(bean_name, {})
                        group[metric_name] = metric_value
                self.task.last_metrics = json.dumps(metrics_saved)
                self.analyze_metrics(metrics)
        self.task.save()
        logger.info("%r spent %f seconds for saving task status",
                    self.task, time.time() - start_time)
    except Exception as e:
        logger.warning("%r failed to update metric: %r", self.task, e)
        traceback.print_exc()
def run(runSearch=True):
#module.descServer = reactor.listenTCP(0, Site(DescriptionServerPage())) #@UndefinedVariable
module.descServer.listen()
module.discovery.listen()
if runSearch:
module.discovery.search()
module.localDeviceManager._sendAlive()
reactor.addSystemEventTrigger("before", "shutdown", module.localDeviceManager.byeBye)
reactor.run() #@UndefinedVariable
reactor.getThreadPool().stop() #@UndefinedVariable
Example 9: attach_app

def attach_app(self, subOptions):
    app = None
    fromAppOpts = subOptions.parent.get('appOpts', {}).get('app')
    if fromAppOpts is not None:
        app = fromAppOpts
    elif subOptions['app'] is not None:
        app = import_string(subOptions['app'])
    else:
        # Neither an app nor an app import path was given, so let's guess!
        files_in_cwd = os.listdir(os.getcwd())
        if 'manage.py' in files_in_cwd:
            sys.path.insert(0, os.getcwd())
            from txdevserver.django_helpers import get_django_app
            django_app = get_django_app('manage.py')
            if django_app is not None:
                app = django_app
    if app is None:
        app = NoResource("Couldn't find the app!")
    rv = LoggedWSGIResource(reactor, reactor.getThreadPool(), app,
                            subOptions.get('log_data_factory'))
    self.app = rv
def _multi_threaded_wsgi_resource(self,wsgi_handler):
"""runs twisted in a thread-pool for production mode"""
pool = threadpool.ThreadPool()
pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', pool.stop)
wsgi_resource = wsgi.WSGIResource(reactor, reactor.getThreadPool(), wsgi_handler)
return wsgi_resource
Example 11: maybeDeferToThread

def maybeDeferToThread(f, *args, **kwargs):
    """Call the function C{f} using a thread from the reactor's threadpool
    and return the result as a Deferred.

    @param f: The function to call. May return a deferred.
    @param *args: positional arguments to pass to f.
    @param **kwargs: keyword arguments to pass to f.

    @return: A Deferred which fires a callback with the result of f, or an
        errback with a L{twisted.python.failure.Failure} if f throws an
        exception.
    """
    threadpool = reactor.getThreadPool()
    d = Deferred()

    def realOnResult(result):
        if not isinstance(result, Failure):
            reactor.callFromThread(d.callback, result)
        else:
            reactor.callFromThread(d.errback, result)

    def onResult(success, result):
        assert success
        assert isinstance(result, Deferred)
        result.addBoth(realOnResult)

    threadpool.callInThreadWithCallback(onResult, maybeDeferred,
                                        f, *args, **kwargs)
    return d
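A hypothetical usage sketch for the maybeDeferToThread helper above (slow_lookup is a stand-in for any blocking call):

import time
from twisted.internet import reactor

def slow_lookup(key):
    time.sleep(1)          # blocking work, kept off the reactor thread
    return key.upper()

def report(value):
    print("result: %s" % value)
    reactor.stop()

def main():
    d = maybeDeferToThread(slow_lookup, "hello")
    d.addCallback(report)

reactor.callWhenRunning(main)
reactor.run()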
Example 12: _init_search
def _init_search(cls):
    """
    Initializes everything needed for search.
    """
    config_path = cls.search_config_path
    if not os.path.exists(config_path):
        raise OSError(errno.ENOENT, "Config %r does not exist." % config_path, config_path)
    config_dir = os.path.dirname(config_path)
    # Read config.
    with open(config_path, 'rb') as fh:
        config = json.load(fh)
    cls.search_config = config
    # Connect to mongo.
    host = config['mongo']['host']
    port = config['mongo'].get('port', None) or 27017
    thread_pool = reactor.getThreadPool()
    pool_size = int(math.ceil((thread_pool.min + thread_pool.max) / 2))
    cls.search_mongo = txmongo.lazyMongoConnectionPool(host=host, port=port, pool_size=pool_size)
    cls.search_order_db = cls.search_mongo[config['mongo']['order_dbname']]
    cls.search_order_tb = cls.search_order_db[config['mongo']['order_tbname']]
    # Initialize PyLucene.
    lucene.initVM()
    # Open index.
    index_path = os.path.abspath(os.path.join(config_dir, config['lucene']['index_path']))
    if not os.path.exists(index_path):
        raise OSError(errno.ENOENT, "Index %r does not exist." % index_path, index_path)
    elif not os.path.isdir(index_path):
        raise OSError(errno.ENOTDIR, "Index %r is not a directory." % index_path, index_path)
    index_dir = lucene.NIOFSDirectory(lucene.File(index_path))
    #index_dir = lucene.SimpleFSDirectory(lucene.File(index_path)) # windows
    cls.search_searcher = lucene.IndexSearcher(index_dir)
Example 13: run
def run(self, app):
    from twisted.web import server, wsgi
    from twisted.internet import reactor
    resource = wsgi.WSGIResource(reactor, reactor.getThreadPool(), app)
    site = server.Site(resource)
    reactor.listenTCP(port=self.port, factory=site, interface=self.host)
    reactor.run()
Example 14: run

def run():
    argv = sys.argv[1:]
    if argv:
        config_file_path = argv[0]
    else:
        caller_file = inspect.getouterframes(inspect.currentframe())[1][1]
        caller_file = os.path.realpath(caller_file)
        buildout_dir = os.path.dirname(os.path.dirname(caller_file))
        config_file_path = os.path.join(buildout_dir, 'parts', 'etc', 'config.ini')
    if not os.path.isfile(config_file_path):
        print u'Path to config file must be given as a single parameter, for example "bin/run parts/etc/config.ini"'
        return
    paster.setup_logging(config_file_path)
    settings = paster.get_appsettings(config_file_path)
    app = main(None, **settings)
    from intranet3 import cron
    if not config.get('CRON_DISABLE'):
        cron.run_cron_tasks()
    full_config_path = os.path.abspath(config_file_path)
    server_config = ConfigParser.ConfigParser()
    server_config.readfp(open(full_config_path))
    port = server_config.getint('server:main', 'port')
    host = server_config.get('server:main', 'host')
    resource = WSGIResource(reactor, reactor.getThreadPool(), app)
    site = server.Site(resource)
    reactor.listenTCP(port, site, interface=host)
    reactor.run()
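For reference, a hypothetical sketch of the [server:main] section that the ConfigParser lookups in Example 14 expect (the host and port values are placeholders), parsed the same way:

import ConfigParser
from StringIO import StringIO

SAMPLE = """\
[server:main]
host = 0.0.0.0
port = 8080
"""

server_config = ConfigParser.ConfigParser()
server_config.readfp(StringIO(SAMPLE))
print(server_config.get('server:main', 'host'))
print(server_config.getint('server:main', 'port'))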
Example 15: run_server

def run_server(config):
    debug = bool(config['debug'])
    observer = log.PythonLoggingObserver()
    observer.start()
    if debug:
        log.startLogging(sys.stdout)
    ServerFactory = BroadcastServerFactory
    factory = ServerFactory(
        "ws://%s:%s" % (config['host'], config['port']),
        debug=debug,
        debugCodePaths=debug,
        externalPort=config['external_port'])
    factory.protocol = BroadcastServerProtocol
    wsResource = WebSocketResource(factory)
    ## create a Twisted Web WSGI resource for our Pyramid server
    app = make_app(config)
    wsgiResource = WSGIResource(reactor, reactor.getThreadPool(), app)
    ## create a root resource serving everything via WSGI, but with
    ## the path "/ws" served by our WebSocket stuff
    rootResource = WSGIRootResource(wsgiResource, {'ws': wsResource})
    ## create a Twisted Web Site and run everything
    site = Site(rootResource)
    reactor.listenTCP(config['port'], site, interface=config['host'])
    reactor.run()