This article collects typical usage examples of the Python method multiprocessing.log_to_stderr. If you are unsure what multiprocessing.log_to_stderr does or how to call it, the curated examples below may help. You can also explore further usage examples from the multiprocessing
module itself.
Shown below are 8 code examples of multiprocessing.log_to_stderr, sorted by popularity by default.
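Before the individual examples, here is a minimal, self-contained sketch of the basic pattern (the worker function and the messages are illustrative, not taken from the examples below): multiprocessing.log_to_stderr attaches a stderr handler to the multiprocessing logger and returns it, optionally setting a level, so both the parent and the child can log to the console.

import logging
import multiprocessing


def worker():
    # The child configures its own stderr handler, so this works under both fork and spawn.
    logger = multiprocessing.log_to_stderr(logging.INFO)
    logger.info("hello from the worker process")


if __name__ == "__main__":
    # Use spawn so the child starts fresh and does not inherit the parent's handler.
    multiprocessing.set_start_method("spawn")
    logger = multiprocessing.log_to_stderr(logging.INFO)
    logger.info("starting child process")
    proc = multiprocessing.Process(target=worker)
    proc.start()
    proc.join()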
Example 1: start_service
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import log_to_stderr [as alias]
def start_service(self):
"""
start speaker training service.
"""
# prevent signal from propagating to child process
handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, signal.SIG_IGN)
if self.debug:
self.sprecog.debug = True
mp.log_to_stderr(logging.DEBUG)
self.sprecog.speaker_name = self.speaker_name
self.proc = mp.Process(name="watchdog", target=self.__run,
args=(self.event,))
    self.proc.daemon = False
self.proc.start()
# restore signal
signal.signal(signal.SIGINT, handler)
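Example 1 pairs log_to_stderr with a common guard: the parent temporarily ignores SIGINT so the newly started child inherits SIG_IGN (and is not killed by Ctrl-C), then restores the original handler. A stripped-down sketch of just that pattern, with a hypothetical watchdog target, might look like this:

import logging
import multiprocessing as mp
import signal


def watchdog():
    mp.log_to_stderr(logging.DEBUG)
    # ... long-running work that should survive Ctrl-C in the parent ...


def start_child():
    # Save the current SIGINT handler and ignore the signal while the child starts,
    # so the child begins life with SIGINT ignored.
    handler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        proc = mp.Process(name="watchdog", target=watchdog, daemon=False)
        proc.start()
    finally:
        # Restore the handler so the parent still reacts to Ctrl-C.
        signal.signal(signal.SIGINT, handler)
    return proc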
Example 2: main
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import log_to_stderr [as alias]
def main():
"""Starts several processes
This must be kept to the bare minimum
"""
multiprocessing.log_to_stderr()
logger = multiprocessing.get_logger()
logger.setLevel(logging.INFO)
jobs = []
try:
bfs = BrundleFuzzServer()
jobs.append(bfs)
bfs.start()
for j in jobs:
j.join()
except KeyboardInterrupt:
pass
Example 3: __apply_func_with_worker_stream
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import log_to_stderr [as alias]
def __apply_func_with_worker_stream(args):
"""
Call func, using ``queue`` to redirect stdout and stderr, with a tuple of args because multiprocessing.Pool.map
only accepts one argument for the function.
This function is called _inside_ a separate process.
"""
# set up logging
logger = multiprocessing.log_to_stderr()
logger.setLevel(logging.WARNING)
from cea import suppres_3rd_party_debug_loggers
suppres_3rd_party_debug_loggers()
# unpack the arguments
func, queue, on_complete, i_queue, n, args = args[0], args[1], args[2], args[3], args[4], args[5:]
# set up printing to stderr and stdout to go through the queue
sys.stdout = QueueWorkerStream('stdout', queue)
sys.stderr = QueueWorkerStream('stderr', queue)
# CALL
result = func(*args)
if on_complete:
on_complete(i_queue.get(), n, args, result)
return result
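Example 3 swaps sys.stdout and sys.stderr for QueueWorkerStream objects so that anything the worker prints is shipped back to the parent over a multiprocessing queue. That class is defined elsewhere in the cea codebase; as a rough idea of what such a queue-backed stream can look like (this sketch is an assumption, not the cea implementation):

class QueueWorkerStream(object):
    """File-like object that forwards every write to a multiprocessing queue."""

    def __init__(self, name, queue):
        self.name = name    # 'stdout' or 'stderr', so the parent can tell the streams apart
        self.queue = queue

    def write(self, text):
        if text:
            self.queue.put((self.name, text))

    def flush(self):
        # Nothing is buffered locally, so flush is a no-op.
        pass

The parent process would then drain the queue and copy each received chunk to its own stdout or stderr.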
Example 4: init_filler
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import log_to_stderr [as alias]
def init_filler(dummynet, filler_cbs, in_train_mode): # pragma: no cover
"""Initialize a filler thread."""
# pylint: disable=global-variable-undefined, global-variable-not-assigned
global net, cbs, train_mode, initialized, logger
logger = _log_to_stderr(_logging.WARN)
logger.debug("Initializing filler. Train mode: %s.", in_train_mode)
net = dummynet
cbs = filler_cbs
train_mode = in_train_mode
initialized = False
Example 5: run
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import log_to_stderr [as alias]
def run(self):
'''
Starts running the Gaussian process learner. When the new parameters event is triggered, reads the cost information provided and updates the Gaussian process with the information. Then searches the Gaussian process for new optimal parameters to test based on the biased cost. Parameters to test next are put on the output parameters queue.
'''
    #logging to the main log file from a process (as opposed to a thread) in CPython is currently buggy on Windows and/or Python 2.7
#current solution is to only log to the console for warning and above from a process
self.log = mp.log_to_stderr(logging.WARNING)
try:
while not self.end_event.is_set():
#self.log.debug('Learner waiting for new params event')
self.save_archive()
self.wait_for_new_params_event()
#self.log.debug('Gaussian process learner reading costs')
self.get_params_and_costs()
self.fit_gaussian_process()
for _ in range(self.generation_num):
self.log.debug('Gaussian process learner generating parameter:'+ str(self.params_count+1))
next_params = self.find_next_parameters()
self.params_out_queue.put(next_params)
if self.end_event.is_set():
raise LearnerInterrupt()
except LearnerInterrupt:
pass
end_dict = {}
if self.predict_global_minima_at_end:
self.get_params_and_costs()
self.fit_gaussian_process()
self.find_global_minima()
end_dict.update({'predicted_best_parameters':self.predicted_best_parameters,
'predicted_best_cost':self.predicted_best_cost,
'predicted_best_uncertainty':self.predicted_best_uncertainty})
self.params_out_queue.put(end_dict)
self._shut_down()
self.log.debug('Ended Gaussian Process Learner')
Example 6: __init__
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import log_to_stderr [as alias]
def __init__(self, blockchain, mempool):
mp.log_to_stderr()
mp_logger = mp.get_logger()
mp_logger.setLevel(logging.DEBUG)
self.blockchain = blockchain
self.mempool = mempool
Example 7: __init__
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import log_to_stderr [as alias]
def __init__(self, peers, api_client, blockchain, mempool, validator):
super(FullNode, self).__init__(peers, api_client)
mp.log_to_stderr()
mp_logger = mp.get_logger()
mp_logger.setLevel(logging.DEBUG)
self.app = Bottle()
self.app.merge(public_app)
self.app.merge(permissioned_app)
self.blockchain = blockchain
self.mempool = mempool
self.validator = validator
Example 8: _consume_and_write
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import log_to_stderr [as alias]
def _consume_and_write(queue, path, store, sharr):
"""Insert :var:`row` received from the queue into the shared memory array
at the current index and increment. Empty rows are always written to disk
(keeps stores 'call-index-aligned').
"""
proc = mp.current_process()
slog = utils.get_logger(proc.name)
log = mp.log_to_stderr(slog.getEffectiveLevel())
log.debug("starting storage writer '{}'".format(proc.name))
log.info("storage path is '{}'".format(path))
log.debug("sharr is '{}'".format(sharr))
# set up a new store instance for writing
with store.writer(path, dtypes=store.dtypes) as store:
# notify parent that file has been created
queue.put(path)
# handle no pandas/np case
buff = store if sharr is None else sharr
bufftype = type(buff)
log.debug('buffer type is {}'.format(bufftype))
for row in iter(queue.get, Terminate): # consume and process
now = time.time()
# write frame to disk on buffer fill
if sharr and sharr.is_full():
log.debug('writing to {} storage...'.format(store.ext))
try:
# push a data frame
store.put(pd.DataFrame.from_records(buff.read()))
except ValueError:
log.error(traceback.format_exc())
log.debug("storage put took '{}'".format(time.time() - now))
try: # push to ring buffer (or store if no pd)
buff.put(row)
log.debug("{} insert took '{}'".format(
bufftype, time.time() - now))
except ValueError:
log.error(traceback.format_exc())
log.debug("terminating frame writer '{}'".format(proc.name))