

Python util.get_logger Function Code Examples

This article collects typical usage examples of Python's multiprocessing.util.get_logger function. If you are wondering what get_logger does, how to call it, or what real-world uses look like, the hand-picked examples below should help.


The following shows 15 code examples of the get_logger function, sorted by popularity by default.
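Before the examples, here is a minimal, self-contained sketch (not taken from any project below; the worker function is illustrative) of how multiprocessing.util.get_logger is typically used: it returns the library's own 'multiprocessing' logger, which has no handlers and emits nothing until log_to_stderr() (or a manually attached handler) and an explicit level are configured. Note that with the spawn start method (the default on Windows and macOS) child processes do not inherit this configuration automatically.

import logging
import multiprocessing as mp
from multiprocessing import util


def worker():
    # Messages go to the 'multiprocessing' logger configured in the parent
    # (inherited by the child under the fork start method).
    util.get_logger().info('worker %s started', mp.current_process().name)


if __name__ == '__main__':
    # By default the multiprocessing logger has no handlers, so attach a
    # stderr handler and lower the level before INFO messages become visible.
    util.log_to_stderr()
    util.get_logger().setLevel(logging.INFO)

    p = mp.Process(target=worker)
    p.start()
    p.join()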

Example 1: __init__

    def __init__(self, socket_filename, max_connections=64,
                 connection_timeout=10, poll_timeout=1, num_workers=1,
                 num_worker_processes=2, max_processed_jobs=1,
                 max_queued_jobs=8, logger=None):
        self.socket_address = socket_filename
        self.socket_family = 'AF_UNIX'
        self.socket_kwargs = {}
        self.socket_listener = None
        self.connection_semaphore = Semaphore(max_connections)
        self.connection_handlers = ThreadSet()
        self.connection_timeout = connection_timeout
        self.connection_poll_timeout = poll_timeout
        self.job_queue = Queue(max_queued_jobs)

        self.logger = logger or getLogger(LOGGER_NAME)

        # enable verbose multiprocessing debugging
        set_stream_handler(mp_util.get_logger())
        mp_util.get_logger().setLevel(mp_util.SUBWARNING)
        worker_semaphore = ProcessSemaphore(num_workers)

        self.worker_pool = ProcessPool(
            max(num_workers, num_worker_processes), init_worker,
            maxtasksperchild=max_processed_jobs,
        )
        self.worker_pool_manager = WorkerPoolManager(
            self.worker_pool, worker_semaphore, self.job_queue
        )
Developer: DAMATS, Project: WPS-Backend, Lines: 28, Source: daemon.py

Example 2: teardown

def teardown():
    # Don't want SUBDEBUG log messages at finalization.
    try:
        from multiprocessing.util import get_logger
    except ImportError:
        pass
    else:
        get_logger().setLevel(logging.WARNING)

    # Make sure test database is removed.
    import os
    if os.path.exists('test.db'):
        try:
            os.remove('test.db')
        except WindowsError:
            pass

    # Make sure there are no remaining threads at shutdown.
    import threading
    remaining_threads = [thread for thread in threading.enumerate()
                         if thread.getName() != 'MainThread']
    if remaining_threads:
        sys.stderr.write(
            '\n\n**WARNING**: Remaining threads at teardown: %r...\n' % (
                remaining_threads))
Developer: Gandi, Project: celery, Lines: 25, Source: __init__.py

Example 3: start_command

def start_command(config, args):
    initialize_logger(config)
    print('starting command: %s' % ' '.join(args))
    get_logger().info("Starting command: %s" % " ".join(args))
    try:
        subprocess.call(args, shell=True)
    except:
        pass
Developer: andrirad, Project: irma-probe, Lines: 8, Source: service.py

Example 4: prepare

def prepare(data):
    """
    Try to get current process ready to unpickle process object
    """
    old_main_modules.append(sys.modules['__main__'])
    if 'name' in data:
        process.current_process().name = data['name']
    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']
    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()
    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])
    if 'sys_path' in data:
        sys.path = data['sys_path']
    if 'sys_argv' in data:
        sys.argv = data['sys_argv']
    if 'dir' in data:
        os.chdir(data['dir'])
    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']
    if 'main_path' in data:
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))
        if main_name != 'ipython':
            import imp
            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]
            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                main_module = imp.load_module('__parents_main__', file, path_name, etc)
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'
            # Re-label objects from the freshly imported module so that
            # pickled references resolve against '__main__'.
            for obj in main_module.__dict__.values():
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass

    return
Developer: webiumsk, Project: WOT-0.9.12, Lines: 52, Source: forking.py

Example 5: teardown

def teardown():
    # Don't want SUBDEBUG log messages at finalization.
    from multiprocessing.util import get_logger
    get_logger().setLevel(logging.WARNING)
    import threading
    import os
    if os.path.exists("test.db"):
        os.remove("test.db")
    remaining_threads = [thread for thread in threading.enumerate()
                            if thread.name != "MainThread"]
    if remaining_threads:
        sys.stderr.write(
            "\n\n**WARNING**: Remaning threads at teardown: %r...\n" % (
                remaining_threads))
Developer: dpwhite2, Project: celery, Lines: 14, Source: __init__.py

Example 6: setup_logging_subsystem

    def setup_logging_subsystem(self, loglevel=None, logfile=None,
            format=None, colorize=None, **kwargs):
        loglevel = loglevel or self.loglevel
        format = format or self.format
        colorize = self.app.either("CELERYD_LOG_COLOR", colorize)

        if self.__class__._setup:
            return

        try:
            mputil._logger = None
        except AttributeError:
            pass
        ensure_process_aware_logger()
        logging.Logger.manager.loggerDict.clear()
        receivers = signals.setup_logging.send(sender=None,
                                               loglevel=loglevel,
                                               logfile=logfile,
                                               format=format,
                                               colorize=colorize)
        if not receivers:
            root = logging.getLogger()
            mp = mputil.get_logger()
            for logger in (root, mp):
                self._setup_logger(logger, logfile,
                                   format, colorize, **kwargs)
                logger.setLevel(loglevel)
        self.__class__._setup = True
        return receivers
Developer: berg, Project: celery, Lines: 29, Source: log.py

Example 7: setup_logging_subsystem

    def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None, colorize=None, **kwargs):
        if Logging._setup:
            return
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        if colorize is None:
            colorize = self.supports_color(logfile)

        if mputil and hasattr(mputil, "_logger"):
            mputil._logger = None
        if not is_py3k:
            ensure_process_aware_logger()
        receivers = signals.setup_logging.send(
            sender=None, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize
        )
        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            mp = mputil.get_logger() if mputil else None
            for logger in filter(None, (root, mp)):
                self._setup_logger(logger, logfile, format, colorize, **kwargs)
                logger.setLevel(mlevel(loglevel))
                signals.after_setup_logger.send(
                    sender=None, logger=logger, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize
                )

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel), _MP_FORK_LOGFILE_=logfile or "", _MP_FORK_LOGFORMAT_=format)
        Logging._setup = True

        return receivers
Developer: GVRGowtham, Project: mozillians, Lines: 35, Source: log.py

Example 8: get_logger

def get_logger():
    """
    Return package logger -- if it does not already exist then it is created
    """
    from multiprocessing.util import get_logger

    return get_logger()
Developer: vladistan, Project: py3k-__format__-sprint, Lines: 7, Source: __init__.py

Example 9: setup_logging_subsystem

    def setup_logging_subsystem(self, loglevel=None, logfile=None,
            format=None, colorize=None, **kwargs):
        if Logging._setup:
            return
        loglevel = loglevel or self.loglevel
        format = format or self.format
        if colorize is None:
            colorize = self.supports_color(logfile)

        if mputil and hasattr(mputil, "_logger"):
            mputil._logger = None
        ensure_process_aware_logger()
        receivers = signals.setup_logging.send(sender=None,
                        loglevel=loglevel, logfile=logfile,
                        format=format, colorize=colorize)
        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            mp = mputil.get_logger() if mputil else None
            for logger in filter(None, (root, mp)):
                self._setup_logger(logger, logfile, format, colorize, **kwargs)
                logger.setLevel(loglevel)
                signals.after_setup_logger.send(sender=None, logger=logger,
                                        loglevel=loglevel, logfile=logfile,
                                        format=format, colorize=colorize)
        Logging._setup = True

        return receivers
Developer: ackdesha, Project: celery, Lines: 31, Source: log.py

Example 10: main

    def main():
        assert is_forking(sys.argv)
        fd = int(sys.argv[-1])
        from_parent = os.fdopen(fd, 'rb')
        current_process()._inheriting = True
        preparation_data = load(from_parent)
        _forking.prepare(preparation_data)

        # Huge hack to make logging before Process.run work.
        loglevel = os.environ.get("_MP_FORK_LOGLEVEL_")
        logfile = os.environ.get("_MP_FORK_LOGFILE_") or None
        format = os.environ.get("_MP_FORK_LOGFORMAT_")
        if loglevel:
            from multiprocessing import util
            import logging
            logger = util.get_logger()
            logger.setLevel(int(loglevel))
            if not logger.handlers:
                logger._rudimentary_setup = True
                logfile = logfile or sys.__stderr__
                if hasattr(logfile, "write"):
                    handler = logging.StreamHandler(logfile)
                else:
                    handler = logging.FileHandler(logfile)
                formatter = logging.Formatter(
                        format or util.DEFAULT_LOGGING_FORMAT)
                handler.setFormatter(formatter)
                logger.addHandler(handler)

        self = load(from_parent)
        current_process()._inheriting = False

        exitcode = self._bootstrap()
        exit(exitcode)
Developer: alessandrod, Project: celery, Lines: 34, Source: forking.py

Example 11: setup_logging_subsystem

def setup_logging_subsystem(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
        format=conf.CELERYD_LOG_FORMAT, colorize=conf.CELERYD_LOG_COLOR,
        **kwargs):
    global _setup
    if not _setup:
        try:
            mputil._logger = None
        except AttributeError:
            pass
        ensure_process_aware_logger()
        logging.Logger.manager.loggerDict.clear()
        receivers = signals.setup_logging.send(sender=None,
                                               loglevel=loglevel,
                                               logfile=logfile,
                                               format=format,
                                               colorize=colorize)
        if not receivers:
            root = logging.getLogger()

            if conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            mp = mputil.get_logger()
            for logger in (root, mp):
                _setup_logger(logger, logfile, format, colorize, **kwargs)
                logger.setLevel(loglevel)
        _setup = True
        return receivers
Developer: AlexArgus, Project: affiliates-lib, Lines: 28, Source: log.py

Example 12: setup_logging_subsystem

    def setup_logging_subsystem(self, loglevel=None, logfile=None,
            format=None, colorize=None, **kwargs):
        if Logging._setup:
            return
        loglevel = loglevel or self.loglevel
        format = format or self.format
        if colorize is None:
            colorize = self.supports_color(logfile)

        if mputil:
            try:
                mputil._logger = None
            except AttributeError:
                pass
        ensure_process_aware_logger()
        receivers = signals.setup_logging.send(sender=None,
                                               loglevel=loglevel,
                                               logfile=logfile,
                                               format=format,
                                               colorize=colorize)
        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            mp = mputil and mputil.get_logger() or None
            for logger in (root, mp):
                if logger:
                    self._setup_logger(logger, logfile, format,
                                       colorize, **kwargs)
                    logger.setLevel(loglevel)
        Logging._setup = True
        return receivers
Developer: eldondev, Project: celery, Lines: 34, Source: log.py

Example 13: initialize_logger

def initialize_logger(config):
    class StdErrWrapper:
        """
            Call wrapper for stderr
        """
        def write(self, s):
            get_logger().info(s)
    import logging

    logger = get_logger()
    values = dict(
        format='[%(levelname)s/%(processName)s] %(message)s',
        filename=None,
        level='INFO',
    )
    if config and config.has_section('log'):
        for (name, value) in config.items('log'):
            values[name] = value

    if values['filename']:
        formatter = logging.Formatter(values['format'])
        handler = logging.FileHandler(values['filename'])
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(
            getattr(logging, values['level'].upper(), logging.INFO)
        )
        sys.stderr = StdErrWrapper()
Developer: andrirad, Project: irma-probe, Lines: 28, Source: service.py

Example 14: start_django_command

def start_django_command(config, args):
    '''
    Start a Django management command.
    
    This command is supposed to start in a spawned child process.
    It tries to import the settings of the project before handling the command.
    '''    
    initialize_logger(config)

    log('Starting command : %s' % ' '.join(args))
    get_logger().info('Starting command : %s' % ' '.join(args))
    from django.core.management import execute_from_command_line
    
    try:
        execute_from_command_line(args)
    except:
        error('Exception occurred : %s' % traceback.format_exc())
Developer: jezhang, Project: django-cms-python, Lines: 17, Source: service.py

Example 15: prepare

def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process().authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'log_fmt' in data:
        import logging
        util.get_logger().handlers[0].setFormatter(
            logging.Formatter(data['log_fmt'])
        )

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if hasattr(mp, 'set_start_method'):
        mp.set_start_method('loky', force=True)

    if 'tracker_pid' in data:
        from . import semaphore_tracker
        semaphore_tracker._semaphore_tracker._pid = data["tracker_pid"]

    if 'init_main_from_name' in data:
        _fixup_main_from_name(data['init_main_from_name'])
    elif 'init_main_from_path' in data:
        _fixup_main_from_path(data['init_main_from_path'])
Developer: as133, Project: scikit-learn, Lines: 45, Source: spawn.py


Note: The multiprocessing.util.get_logger examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors; refer to each project's License before distributing or reusing the code, and do not republish without permission.