This article collects typical usage examples of the Python logging.handlers.QueueHandler class, answering the common questions: what is handlers.QueueHandler for, how is it used, and what does it look like in real code? You can also look further into the usage of its containing module, logging.handlers.
The following presents 15 code examples of handlers.QueueHandler, ordered by popularity by default.
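All of the examples below are variations on one core pattern: a QueueHandler puts log records onto a queue from the producing thread or process, and a QueueListener drains that queue and dispatches the records to ordinary handlers. A minimal self-contained sketch of that pattern (written for this article, not taken from any project below):

import logging
import queue
from logging.handlers import QueueHandler, QueueListener

log_queue = queue.Queue(-1)  # unbounded; shared by handler and listener

# The listener owns the "real" handlers and runs them in its own thread
listener = QueueListener(log_queue, logging.StreamHandler(),
                         respect_handler_level=True)
listener.start()

# Producers only ever touch the queue, so logging stays non-blocking
logger = logging.getLogger("demo")
logger.addHandler(QueueHandler(log_queue))
logger.setLevel(logging.INFO)
logger.info("routed through the queue")

listener.stop()  # flush pending records at shutdown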
Example 1: __call__
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def __call__(self, *args, **kwargs):
    queue_in = self.queue_in
    queue_out = self.queue_out

    logger = logging.getLogger()
    logger.addHandler(QueueHandler(queue_out))
    logger.setLevel(logging.DEBUG if self._debug else logging.INFO)

    db.init(self._settings['db_path'], False)

    self._ready()
    heartbeat_sequence = 1
    while True:
        try:
            task = queue_in.get(timeout=HEARTBEAT_INTERVAL)
            if isinstance(task, tasks.Task):
                self._work(str(task))
                self._done(task(**self._settings))
        except queues.Empty:
            self._heartbeat(heartbeat_sequence)
            heartbeat_sequence += 1
        except Exception as e:
            self._error(e, traceback.format_exc())
        except KeyboardInterrupt:
            break
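This excerpt only shows the worker side; the parent process is expected to drain queue_out. A hypothetical parent-side counterpart (the listener wiring below is an assumption, not part of the project):

import logging
from logging.handlers import QueueListener

# queue_out is the same multiprocessing queue that was handed to the worker
listener = QueueListener(queue_out, logging.StreamHandler(),
                         respect_handler_level=True)
listener.start()  # worker records now reach the parent's console handler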
Example 2: __init__
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def __init__(self,
             log_dir: str,
             verbose: bool = False,
             root_log_level: Optional[int] = None,
             capture_warnings: bool = True,
             console_prefix: Optional[str] = None,
             hyper_params: Optional[Dict] = None) -> None:
    self.log_dir = log_dir
    self.verbose = verbose
    self.log_level = logging.NOTSET
    self.capture_warnings = capture_warnings
    self.listener: handlers.QueueListener
    self.console_prefix = console_prefix
    self.handlers: List[logging.Handler] = []
    self.queue_handler: handlers.QueueHandler
    self.old_root_log_level: int = logging.NOTSET
    self.hyper_params: Dict = hyper_params or {}
Example 3: get_process_safe_logger
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def get_process_safe_logger(self) -> logging.Logger:
    """
    Returns a process-safe logger.
    This logger sends all records to the main process.
    :return: the root logger, reconfigured to forward records through the queue
    """
    queue_handler = QueueHandler(self.__queue)
    root_logger = logging.getLogger()
    # The fork may have happened after the root logger was set up by the main
    # process, so remove all inherited handlers from this process's root logger.
    handlers = root_logger.handlers[:]
    for handler in handlers:
        handler.close()
        root_logger.removeHandler(handler)
    root_logger.addHandler(queue_handler)
    root_logger.setLevel(self.__logger_level)
    return root_logger
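The handler copy (root_logger.handlers[:]) matters: removing handlers while iterating over the live list would skip entries. Replacing everything inherited across the fork with a single QueueHandler keeps the child from writing duplicate or interleaved output alongside the parent.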
Example 4: __setstate__
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def __setstate__(self, state):
    self.__dict__.update(state)
    # Recreate the queue handler after unpickling; attach it only once,
    # otherwise every record would be enqueued twice.
    self.handler = logging_handlers.QueueHandler(self.logging_queue)
    self.logger = logging.getLogger(self.name)
    self.logger.addHandler(self.handler)
    self.logger.setLevel(self.json_conf["log_settings"]["log_level"])
    self.logger.propagate = False
    self.engine = dbutils.connect(self.json_conf, self.logger)
    self.analyse_locus = functools.partial(analyse_locus,
                                           json_conf=self.json_conf,
                                           engine=self.engine,
                                           logging_queue=self.logging_queue)
    # self.dump_db, self.dump_conn, self.dump_cursor = self._create_temporary_store(self._tempdir, self.identifier)
Example 5: _setup_logging_queue
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def _setup_logging_queue(*handlers: Handler) -> QueueHandler:
    """Create a new LocalQueueHandler and start an associated QueueListener."""
    queue: Queue = Queue()
    queue_handler = LocalQueueHandler(queue)

    serving_listener = QueueListener(queue, *handlers,
                                     respect_handler_level=True)
    serving_listener.start()
    return queue_handler
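LocalQueueHandler is not defined in the excerpt. In the common asyncio logging recipe this name denotes a QueueHandler subclass that enqueues records as-is, skipping prepare() because no pickling is needed when the queue never leaves the process; a sketch under that assumption:

import logging
from logging.handlers import QueueHandler

class LocalQueueHandler(QueueHandler):
    def emit(self, record: logging.LogRecord) -> None:
        # Skip self.prepare(): the listener runs in the same process,
        # so the record can cross the queue without being pickled.
        try:
            self.enqueue(record)
        except Exception:
            self.handleError(record)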
Example 6: configure_asynchronous_logging
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def configure_asynchronous_logging(logging_queue: multiprocessing.Queue):
    """Helper for asynchronous logging: writes all log records to a queue."""
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    queue_handler = QueueHandler(logging_queue)
    queue_handler.setLevel(logging.DEBUG)
    root_logger.addHandler(queue_handler)
    logger_all.debug("Asynchronous logging has been set up.")
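The queue filled here still has to be consumed somewhere. A hypothetical main-process counterpart (the function and handler choice are assumptions, not part of the excerpt):

import logging
import logging.handlers
import multiprocessing

def start_log_listener(logging_queue: multiprocessing.Queue) -> logging.handlers.QueueListener:
    # Dispatch queued records from all worker processes to the console
    listener = logging.handlers.QueueListener(logging_queue, logging.StreamHandler())
    listener.start()
    return listener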
Example 7: _extract_output
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def _extract_output(q):
    '''Extract log output from a QueueHandler queue'''
    out = []
    while not q.empty():
        record = q.get()
        # Use a list instead of a tuple to have the same data before and after mongo persist
        out.append([record.levelname.lower(), record.getMessage()])
    return out
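A quick, hypothetical usage sketch showing the round trip (the logger name and message are made up):

import logging
import queue
from logging.handlers import QueueHandler

q = queue.Queue(-1)
logger = logging.getLogger('migration-demo')
logger.addHandler(QueueHandler(q))
logger.warning('disk almost full')
assert _extract_output(q) == [['warning', 'disk almost full']]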
Example 8: setup_logging_queues
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def setup_logging_queues():
    if sys.version_info.major < 3:
        raise RuntimeError("This feature requires Python 3.")

    queue_listeners = []

    # Q: What about loggers created after this is called?
    # A: if they don't attach their own handlers they should be fine
    for logger in get_all_logger_names(include_root=True):
        logger = logging.getLogger(logger)
        if logger.handlers:
            log_queue = queue.Queue(-1)  # No limit on size

            queue_handler = QueueHandler(log_queue)
            queue_listener = QueueListener(
                log_queue, respect_handler_level=True)

            queuify_logger(logger, queue_handler, queue_listener)
            # print("Replaced logger %s with queue listener: %s" % (
            #     logger, queue_listener
            # ))
            queue_listeners.append(queue_listener)

    for listener in queue_listeners:
        listener.start()

    atexit.register(stop_queue_listeners, *queue_listeners)
    return
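stop_queue_listeners is registered with atexit but not shown in the excerpt; a plausible definition consistent with that call (an assumption, not the project's actual code):

def stop_queue_listeners(*listeners):
    for listener in listeners:
        try:
            listener.stop()  # flush pending records and join the listener thread
        except Exception:
            pass  # best effort during interpreter shutdown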
Example 9: queuify_logger
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def queuify_logger(logger, queue_handler, queue_listener):
    """Replace logger's handlers with a queue handler while adding existing
    handlers to a queue listener.

    This is useful when you want to use a default logging config but then
    optionally add a logger's handlers to a queue during runtime.

    Args:
        logger (mixed): Logger instance or string name of logger to queue-ify
            handlers.
        queue_handler (QueueHandler): Instance of a ``QueueHandler``.
        queue_listener (QueueListener): Instance of a ``QueueListener``.
    """
    if isinstance(logger, str):
        logger = logging.getLogger(logger)

    # Get handlers that aren't being listened for.
    handlers = [handler for handler in logger.handlers
                if handler not in queue_listener.handlers]

    if handlers:
        # The default QueueListener stores handlers as a tuple.
        queue_listener.handlers = \
            tuple(list(queue_listener.handlers) + handlers)

    # Remove logger's handlers and replace with single queue handler.
    del logger.handlers[:]
    logger.addHandler(queue_handler)
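A hedged usage sketch, assuming a logger that already has handlers attached (for instance via logging.basicConfig):

import logging
import queue
from logging.handlers import QueueHandler, QueueListener

logging.basicConfig()  # gives the root logger a console handler
log_queue = queue.Queue(-1)
listener = QueueListener(log_queue, respect_handler_level=True)

# Moves the root logger's handlers onto the listener, leaving only the QueueHandler
queuify_logger(logging.getLogger(), QueueHandler(log_queue), listener)
listener.start()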
Example 10: _setup_task_process
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def _setup_task_process(mp_log_q):
    # Setting up logging and cfg, needed since this is a new process
    cfg.CONF(sys.argv[1:], project='coriolis', version="1.0.0")
    utils.setup_logging()

    # Log events need to be handled in the parent process, so swap the local
    # handlers for a queue handler. (This appears to be oslo_log: getLogger
    # returns an adapter whose .logger is the underlying stdlib logger.)
    log_root = logging.getLogger(None).logger
    # Iterate over a copy: removing from the live list would skip handlers.
    for handler in list(log_root.handlers):
        log_root.removeHandler(handler)
    log_root.addHandler(handlers.QueueHandler(mp_log_q))
Example 11: init_fail
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def init_fail(log_queue=None):
    if log_queue is not None:
        logger = logging.getLogger('concurrent.futures')
        logger.addHandler(QueueHandler(log_queue))
        logger.setLevel('CRITICAL')
        logger.propagate = False
    time.sleep(0.1)  # let some futures be scheduled
    raise ValueError('error in initializer')
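This helper comes from executor tests: it is meant to be passed as a pool initializer so the failure surfaces in the submitting process. A usage sketch (the executor setup is an assumption, not the surrounding test code):

import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor

log_queue = mp.Manager().Queue()
executor = ProcessPoolExecutor(max_workers=2,
                               initializer=init_fail,
                               initargs=(log_queue,))
# Once the initializer raises, submitted futures fail with BrokenProcessPool,
# and the CRITICAL records land on log_queue for the test to inspect.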
Example 12: __setup_logger
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def __setup_logger(self):
    """
    Private method to set up the logger using indications in the
    args namespace.
    """
    if hasattr(self.args, "log_queue"):
        # noinspection PyUnresolvedReferences
        self.queue_handler = log_handlers.QueueHandler(self.args.log_queue)
    else:
        # NullHandler must be instantiated; passing the class itself to
        # addHandler below would raise.
        self.queue_handler = logging.NullHandler()
    if self._counter is None:
        self.logger = logging.getLogger("stat_logger")
    else:
        self.logger = logging.getLogger("stat_logger-{}".format(self._counter))
    self.logger.addHandler(self.queue_handler)
    # noinspection PyUnresolvedReferences
    if self.args.verbose:
        self.logger.setLevel(logging.DEBUG)
    else:
        self.logger.setLevel(logging.INFO)
    self.logger.propagate = False
    return

# pylint: disable=too-many-locals
Example 13: run
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def run(self):
    global log
    log = logging.getLogger()
    log.setLevel(self.log_level)
    # log.handlers = []  # Remove all other handlers
    # log.addHandler(TopicQueueHandler(self.monitor_queue))
    # log.addHandler(QueueHandler(self.log_queue))
    log.info("Worker thread started")

    self.procedure = self.results.procedure

    self.recorder = Recorder(self.results, self.recorder_queue)
    self.recorder.start()

    # locals()[self.procedures_file] = __import__(self.procedures_file)

    # route Procedure methods & log
    self.procedure.should_stop = self.should_stop
    self.procedure.emit = self.emit

    if self.port is not None and zmq is not None:
        try:
            self.context = zmq.Context()
            log.debug("Worker ZMQ Context: %r" % self.context)
            self.publisher = self.context.socket(zmq.PUB)
            self.publisher.bind('tcp://*:%d' % self.port)
            log.info("Worker connected to tcp://*:%d" % self.port)
            time.sleep(0.01)
        except Exception:
            log.exception("couldn't connect to ZMQ context")

    log.info("Worker started running an instance of %r",
             self.procedure.__class__.__name__)
    self.update_status(Procedure.RUNNING)
    self.emit('progress', 0.)

    try:
        self.procedure.startup()
        self.procedure.execute()
    except (KeyboardInterrupt, SystemExit):
        self.handle_abort()
    except Exception:
        self.handle_error()
    finally:
        self.shutdown()
        self.stop()
Example 14: execute
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def execute(self, recordonly=False, dryrun=False):
    '''
    Execute a migration.
    If recordonly is True, the migration is only recorded.
    If dryrun is True, the migration is neither executed nor recorded.
    '''
    q = queue.Queue(-1)  # no limit on size
    handler = QueueHandler(q)
    handler.setFormatter(MigrationFormatter())
    logger = getattr(self.module, 'log', logging.getLogger(self.module.__name__))
    logger.propagate = False
    for h in logger.handlers:
        logger.removeHandler(h)
    logger.addHandler(handler)

    if not hasattr(self.module, 'migrate'):
        error = SyntaxError('A migration should at least have a migrate(db) function')
        raise MigrationError('Error while executing migration', exc=error)

    out = [['info', 'Recorded only']] if recordonly else []
    state = {}
    if not recordonly and not dryrun:
        db = get_db()
        db._state = state
        try:
            self.module.migrate(db)
            out = _extract_output(q)
        except Exception as e:
            out = _extract_output(q)
            self.add_record('migrate', out, db._state, False)
            fe = MigrationError('Error while executing migration',
                                output=out, exc=e)
            if hasattr(self.module, 'rollback'):
                try:
                    self.module.rollback(db)
                    out = _extract_output(q)
                    self.add_record('rollback', out, db._state, True)
                    msg = 'Error while executing migration, rollback has been applied'
                    fe = RollbackError(msg, output=out, migrate_exc=fe)
                except Exception as re:
                    out = _extract_output(q)
                    self.add_record('rollback', out, db._state, False)
                    msg = 'Error while executing migration rollback'
                    fe = RollbackError(msg, output=out, exc=re, migrate_exc=fe)
            raise fe

    if not dryrun:
        self.add_record('migrate', out, state, True)
    return out
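Note how this pairs with _extract_output from Example 7: the QueueHandler buffers every record the migration emits, and the queue is drained into the stored migration record on success, on failure, and again after a rollback attempt.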
Example 15: __init__
# Required module import: from logging import handlers [as alias]
# Alternatively: from logging.handlers import QueueHandler [as alias]
def __init__(self,
             json_conf,
             locus_queue,
             logging_queue,
             status_queue,
             identifier,
             tempdir="mikado_pick_tmp"):

    # current_counter, gene_counter, current_chrom = shared_values
    super(LociProcesser, self).__init__()
    json_conf = msgpack.loads(json_conf, raw=False)
    self.logging_queue = logging_queue
    self.status_queue = status_queue
    self.__identifier = identifier  # Property directly unsettable
    self.name = "LociProcesser-{0}".format(self.identifier)
    self.json_conf = json_conf
    self.engine = None
    self.handler = logging_handlers.QueueHandler(self.logging_queue)
    self.logger = logging.getLogger(self.name)
    self.logger.addHandler(self.handler)
    self.logger.setLevel(self.json_conf["log_settings"]["log_level"])
    self.logger.propagate = False
    self._tempdir = tempdir
    self.locus_queue = locus_queue
    self.regressor = None
    # self.dump_db, self.dump_conn, self.dump_cursor = self._create_temporary_store(self._tempdir, self.identifier)
    if self.json_conf["pick"]["scoring_file"].endswith((".pickle", ".model")):
        with open(self.json_conf["pick"]["scoring_file"], "rb") as forest:
            self.regressor = pickle.load(forest)
        from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
        if not isinstance(self.regressor["scoring"],
                          (RandomForestRegressor, RandomForestClassifier)):
            # Use %-interpolation so the offending type appears in the message
            exc = TypeError("Invalid regressor provided, type: %s" % type(self.regressor))
            self.logger.critical(exc)
            self.exitcode = 9
            self.join()

    self.logger.debug("Starting Process %s", self.name)
    self.logger.debug("Starting the pool for {0}".format(self.name))
    try:
        self.engine = dbutils.connect(self.json_conf, self.logger)
    except KeyboardInterrupt:
        raise
    except EOFError:
        raise
    except Exception as exc:
        self.logger.exception(exc)
        return

    self.analyse_locus = functools.partial(analyse_locus,
                                           json_conf=self.json_conf,
                                           engine=self.engine,
                                           logging_queue=self.logging_queue)