

Python utils.setup_logger Function Code Examples

This article collects typical usage examples of the Python utils.setup_logger function. If you are wondering what setup_logger is for, how to call it, or what real-world usage looks like, the curated examples below may help.


The following presents 15 code examples of the setup_logger function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.

Example 1: main

def main():
    args = _get_args()
    mkdirp(conf.get_opt("session_dir"), exit_on_err=True)

    if args.mode == "list":
        session_dir = conf.get_opt("session_dir")
    else:
        session_dir = os.path.join(conf.get_opt("session_dir"),
                                   args.session)

    if not os.path.exists(session_dir) and args.mode not in ["create", "list"]:
        fail("Invalid session %s" % args.session)

    vol_dir = os.path.join(session_dir, args.volume)
    if not os.path.exists(vol_dir) and args.mode not in ["create", "list"]:
        fail("Session %s not created with volume %s" %
            (args.session, args.volume))

    mkdirp(os.path.join(conf.get_opt("log_dir"), args.session, args.volume),
           exit_on_err=True)
    log_file = os.path.join(conf.get_opt("log_dir"),
                            args.session,
                            args.volume,
                            "cli.log")
    setup_logger(logger, log_file, args.debug)

    # globals() will have all the functions already defined.
    # mode_<args.mode> will be the function name to be called
    globals()["mode_" + args.mode](session_dir, args)
Developer: bcicen, Project: glusterfs, Lines: 29, Source: main.py
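
Examples 1, 3, and 6 pass three things to the helper: an existing logger object, a log file path, and a debug flag. The helper itself lives in each project's own utils module and is not reproduced on this page; purely as an illustrative sketch (the handler type, format string, and level fallback below are assumptions, not the glusterfs implementation), a compatible setup_logger might look like this:

import logging

def setup_logger(logger, log_file, debug=False):
    # Illustrative sketch only -- not the glusterfs helper.
    # Attach a file handler to the logger passed in by the caller.
    logger.setLevel(logging.DEBUG if debug else logging.INFO)
    handler = logging.FileHandler(log_file)
    handler.setFormatter(logging.Formatter(
        "[%(asctime)s] %(levelname)s %(name)s: %(message)s"))
    logger.addHandler(handler)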

Example 2: main

def main(argv):
    """
    Entry point for etl module.
    """
    option_parser = optparse.OptionParser(usage=DEFAULT_USAGE_TEXT)
    option_parser.add_option("-c", "--config", dest="config",
                             default="config.cfg", help="Configuration file")
    option_parser.add_option("-v", "--verbose", dest="verbose",
                             action="store_true", default=False,
                             help="Show verbose output")
    options, _ = option_parser.parse_args(argv)

    if not os.path.exists(options.config):
        sys.stderr.write("ERROR: {} does not exist\n".format(options.config))
        option_parser.print_help()
        return 1
    config = read_config(options.config)

    log_dir = config['general']['log_dir']
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    filename = os.path.join(log_dir, __file__.replace(".py", ".log"))
    setup_logger(filename, options.verbose)
    logging.debug("config={}".format(json.dumps(config, indent=2)))

    retcode = run_etl(config)
    return retcode
Developer: ydupont, Project: 101repo, Lines: 27, Source: run_etl.py

Example 3: main

def main():
    global gtmpfilename

    args = None

    try:
        args = _get_args()
        mkdirp(conf.get_opt("session_dir"), exit_on_err=True)

        # force the default session name if mode is "query"
        if args.mode == "query":
            args.session = "default"

        if args.mode == "list":
            session_dir = conf.get_opt("session_dir")
        else:
            session_dir = os.path.join(conf.get_opt("session_dir"),
                                       args.session)

        if not os.path.exists(session_dir) and \
                args.mode not in ["create", "list", "query"]:
            fail("Invalid session %s" % args.session)

        # "default" is a system defined session name
        if args.mode in ["create", "post", "pre", "delete"] and \
                args.session == "default":
            fail("Invalid session %s" % args.session)

        vol_dir = os.path.join(session_dir, args.volume)
        if not os.path.exists(vol_dir) and args.mode not in \
                ["create", "list", "query"]:
            fail("Session %s not created with volume %s" %
                 (args.session, args.volume))

        mkdirp(os.path.join(conf.get_opt("log_dir"),
                            args.session,
                            args.volume),
               exit_on_err=True)
        log_file = os.path.join(conf.get_opt("log_dir"),
                                args.session,
                                args.volume,
                                "cli.log")
        setup_logger(logger, log_file, args.debug)

        # globals() will have all the functions already defined.
        # mode_<args.mode> will be the function name to be called
        globals()["mode_" + args.mode](session_dir, args)
    except KeyboardInterrupt:
        if args is not None:
            if args.mode == "pre" or args.mode == "query":
                # cleanup session
                if gtmpfilename is not None:
                    # no more interrupts until we clean up
                    signal.signal(signal.SIGINT, signal.SIG_IGN)
                    run_cmd_nodes("cleanup", args, tmpfilename=gtmpfilename)

        # Interrupted, exit with non zero error code
        sys.exit(2)
Developer: raghavendrabhat, Project: glusterfs, Lines: 58, Source: main.py

Example 4: init_event_server

def init_event_server():
    utils.setup_logger()

    # Delete Socket file if Exists
    try:
        os.unlink(SERVER_ADDRESS)
    except OSError:
        if os.path.exists(SERVER_ADDRESS):
            print ("Failed to cleanup socket file {0}".format(SERVER_ADDRESS),
                   file=sys.stderr)
            sys.exit(1)

    utils.load_all()

    # Start the Eventing Server, UNIX DOMAIN SOCKET Server
    GlusterEventsServer()
    asyncore.loop()
Developer: Junsu, Project: glusterfs, Lines: 17, Source: glustereventsd.py

Example 5: init_event_server

def init_event_server():
    utils.setup_logger()
    utils.load_all()

    port = utils.get_config("port")
    if port is None:
        sys.stderr.write("Unable to get Port details from Config\n")
        sys.exit(1)

    # Start the Eventing Server, UDP Server
    try:
        server = SocketServer.ThreadingUDPServer(
            (SERVER_ADDRESS, port),
            GlusterEventsRequestHandler)
    except socket.error as e:
        sys.stderr.write("Failed to start Eventsd: {0}\n".format(e))
        sys.exit(1)
    server.serve_forever()
Developer: fmpnate, Project: glusterfs, Lines: 18, Source: glustereventsd.py

Example 6: mode_cleanup

def mode_cleanup(args):
    working_dir = os.path.join(conf.get_opt("working_dir"),
                               args.session,
                               args.volume)

    mkdirp(os.path.join(conf.get_opt("log_dir"), args.session, args.volume),
           exit_on_err=True)
    log_file = os.path.join(conf.get_opt("log_dir"),
                            args.session,
                            args.volume,
                            "changelog.log")

    setup_logger(logger, log_file)

    try:
        shutil.rmtree(working_dir, onerror=handle_rm_error)
    except (OSError, IOError) as e:
        logger.error("Failed to delete working directory: %s" % e)
        sys.exit(1)
Developer: LlsDimple, Project: glusterfs, Lines: 19, Source: nodeagent.py

Example 7: __init__

    def __init__(self):
        print "starting server"
        self.key = '' # generate public/private key
        self.peers = {}
        self.p2pfiles = []  # list of P2PFile
        self.server_id = os.urandom(8).encode('hex')
        self.max_peer_sem = threading.Semaphore(MAX_PEERS)   # This is to control shard serving requests
        self._load_files()  # load metadata and create dirs
        self.logger = utils.setup_logger(LOG_FILE)  # setup logger
        self._load_keys()   # load publickey

        self.heartbeat_thrd = threading.Timer(HEARTBEAT_TIMEOUT, self.check_clients) #Thread to monitor alive peers
        self.heartbeat_thrd.setDaemon(True)
        self.heartbeat_thrd.start()
Developer: naveednu, Project: p2p_python, Lines: 14, Source: server.py
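
Examples 7 and 10 use a different convention: the helper receives only a log file path and returns the configured logger, which the caller stores as self.logger. A minimal sketch compatible with that call (the logger name and format below are assumptions, not the p2p_python implementation) could be:

import logging

def setup_logger(log_file):
    # Illustrative sketch only -- not the p2p_python helper.
    # Build a file-backed logger and hand it back to the caller.
    logger = logging.getLogger("p2p")
    logger.setLevel(logging.INFO)
    handler = logging.FileHandler(log_file)
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
    return logger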

Example 8: __init__

    def __init__(self, ec2, ec2_client, tag_base_name, **kwargs):
        """Constructor

        Args:
            ec2 (object): Aws Ec2 session
            ec2_client (object): Aws ec2 session
            tag_base_name (string): Tag base name
            **kwargs: Multiple arguments

        Raises:
            TypeError: Description
        """
        BaseResources.__init__(self, ec2, ec2_client, tag_base_name)
        log_level = kwargs.pop("log_level", logging.WARNING)
        boto_log_level = kwargs.pop("boto_log_level", logging.WARNING)

        if kwargs:
            raise TypeError("Unexpected **kwargs: %r" % kwargs)
        self.logger = setup_logger(__name__, log_level, boto_log_level)
Developer: davidlonjon, Project: aws-proxies, Lines: 19, Source: network_interfaces.py

Example 9: main

def main():
    parser = argparse.ArgumentParser(
        description='pyrasite - inject code into a running python process',
        epilog="For updates, visit https://github.com/lmacken/pyrasite"
        )
    parser.add_argument('pid',
                        help="The ID of the process to inject code into")
    parser.add_argument('filename',
                        help="The second argument must be a filename")
    parser.add_argument('--gdb-prefix', dest='gdb_prefix',
                        help='GDB prefix (if specified during installation)',
                        default="")
    parser.add_argument('--verbose', dest='verbose', help='Verbose mode',
                        default=False, action='store_const', const=True)

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    log = setup_logger()

    try:
        pid = int(args.pid)
    except ValueError:
        log.error("Error: The first argument must be a pid")
        sys.exit(2)

    filename = args.filename
    if filename:
        if not os.path.exists(filename):
            log.error("Error: Invalid path or file doesn't exist")
            sys.exit(3)
    else:
        log.error("Error: The second argument must be a filename")
        sys.exit(4)

    injector = CodeInjector(pid, verbose=args.verbose,
                            gdb_prefix=args.gdb_prefix)
    injector.inject(filename)
Developer: nsmgr8, Project: pyrasite, Lines: 41, Source: main.py
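
Examples 4, 5, 9, and 11 call setup_logger() with no arguments at all; Example 9 additionally uses the return value as its logger, while Example 11 relies on the side effect and fetches a logger later via logging.getLogger. A zero-argument sketch consistent with both styles (the stream handler and format are assumptions; the real helpers differ per project) could be:

import logging

def setup_logger():
    # Illustrative sketch only -- the real helpers differ per project.
    # Configure a default logger and return it for callers that want it.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
    return logger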

Example 10: __init__

    def __init__(self):
        print "Client running at: %s" % str(LOCAL_ADDRESS)
        self.server_key = '' # server's public key
        self.current_peers = {} # current active peers
        self.peer_times = {}
        self.p2pfiles = []  # list of p2p files
        self.shards = []    # list of shards 
        self.peer_id = None
        self.max_peer_sem = threading.Semaphore(MAX_PEERS)   # This is to control shard serving requests
        self._load_files()
        self.logger = utils.setup_logger(LOG_FILE)
        self._reg_with_server()

        self.ping_thrd = threading.Timer(1.0, self._ping_server_thread)
        self.ping_thrd.setDaemon(True)
        self.ping_thrd.start()
        
        self.peer_thrd = threading.Timer(1.0, self._peer_contact_thread)
        self.peer_thrd.setDaemon(True)
        self.peer_thrd.start()

        self.timeout_thrd = threading.Timer(1.0, self._peer_timeout_thread)
        self.timeout_thrd.setDaemon(True)
        self.timeout_thrd.start()
Developer: naveednu, Project: p2p_python, Lines: 24, Source: client.py

Example 11: setup_logger

__date__ = "30/07/2012"
__copyright__ = "Copyright 2012, Australia Indonesia Facility for " "Disaster Reduction"

import os
import sys
import logging
from urllib2 import URLError
from zipfile import BadZipfile

from ftp_client import FtpClient
from sftp_client import SFtpClient
from utils import setup_logger, data_dir, is_event_id
from shake_event import ShakeEvent

# Loading from package __init__ not working in this context so manually doing
setup_logger()
LOGGER = logging.getLogger("InaSAFE")


def process_event(event_id=None, locale="en"):
    """Launcher that actually runs the event processing.

    :param event_id: The event id to process. If None the latest event will
       be downloaded and processed.
    :type event_id: str

    :param locale: The locale that will be used. Default to en.
    :type locale: str
    """
    population_path = os.path.join(data_dir(), "exposure", "IDN_mosaic", "popmap10_all.tif")
Developer: rendyhermawan, Project: inasafe, Lines: 30, Source: make_map.py

Example 12: main

def main(argv):
    """
    Entry point for chart generation.
    """
    option_parser = optparse.OptionParser(usage=DEFAULT_USAGE_TEXT)
    option_parser.add_option("-c", "--config", dest="config",
                             default="config.cfg", help="Configuration file")
    option_parser.add_option("-v", "--verbose", dest="verbose",
                             action="store_true", default=False,
                             help="Show verbose output")

    option_parser.add_option("-x", "--x_axis", type="choice",
                             choices=[TIME],
                             help="Label for x-axis: time")

    option_parser.add_option("-X", "--x_output", dest="x_output",
                             help="Unit for x-axis. "
                             "time: daily, month, yearly or range 2000,2003. "
                             "Range for time is separated by comma to allow "
                             "daily range such as -X 2000-01-01,2003-08-20")

    option_parser.add_option("-y", "--y_axis", type="choice",
                             choices=[SALARY],
                             help="Label for y-axis: salary")

    option_parser.add_option("-Y", "--y_output", type="choice",
                             choices=[MEDIAN, TOTAL],
                             help="Unit for y-axis. salary: median or total")

    option_parser.add_option("-d", "--data_type", type="choice",
                             choices=[AGE, COMPANY, GENDER, JOBROLE, MANAGER],
                             help="Data type for plotted lines: "
                             "age, company, gender, jobrole or manager")

    option_parser.add_option("-D", "--data_output", dest="data_output",
                             help="Unit for plotted lines. "
                             "age: comma-separated list of ages or ranges, "
                             "company: comma-separated list of IDs, "
                             "gender: comma-separated list (male,female, "
                             "undefined or empty for all), "
                             "jobrole: comma-separated list of job roles, "
                             "manager: comma-separated list (true,false)")

    options, _ = option_parser.parse_args(argv)
    error = ""
    if not options.x_axis:
        error = "--x_axis is required"
    if not options.x_output:
        error = "--x_output is required"
    if not options.y_axis:
        error = "--y_axis is required"
    if not options.y_output:
        error = "--y_output is required"
    if not options.data_type:
        error = "--data_type is required"
    if not options.data_output:
        error = "--data_output is required"
    if error:
        sys.stderr.write("ERROR: {}\n".format(error))
        option_parser.print_help()
        return 1

    if not os.path.exists(options.config):
        sys.stderr.write("ERROR: {} does not exist\n".format(options.config))
        option_parser.print_help()
        return 1
    config = read_config(options.config)

    log_dir = config['general']['log_dir']
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    filename = os.path.join(log_dir, __file__.replace(".py", ".log"))
    setup_logger(filename, options.verbose)
    logging.debug("config={}".format(json.dumps(config, indent=2)))

    retcode = generate_chart(config, options)
    return retcode
Developer: ydupont, Project: 101repo, Lines: 77, Source: generate_chart.py

Example 13: main

def main(sim_dir, proc_time, num_jobs, jobs_per, in_gb, out_gb, out_gb_dl,
         up_rate, down_rate, bid_ratio, instance_type, av_zone, product,
         csv_file=None):
    '''
    Function to calculate spot instance run statistics based on job
    submission parameters; this function will save the statistics and
    specific spot history in csv dataframes to execution directory

    Parameters
    ----------
    sim_dir : string
        base directory where to create the availability zone folders
        for storing the simulation results
    proc_time : float
        the number of minutes a single job of interest takes to run
    num_jobs : integer
        total number of jobs to run to complete job submission
    jobs_per : integer
        the number of jobs to run per node
    in_gb : float
        the total amount of input data for a particular job (in GB)
    out_gb : float
        the total amount of output data from a particular job (in GB)
    out_gb_dl : float
        the total amount of output data to download from EC2 (in GB)
    up_rate : float
        the average upload rate to transfer data to EC2 (in Mb/s)
    down_rate : float
        the average download rate to transfer data from EC2 (in Mb/s)
    bid_ratio : float
        the ratio to average spot history price to set the bid price to
    instance_type : string
        type of instance to run the jobs on and to get spot history for
    av_zone : string
        the AWS EC2 availability zone (sub-region) to get spot history
        from
    product : string
        the type of operating system product to get spot history for
    csv_file : string (optional), default is None
        the filepath to a csv dataframe to get spot history from;
        if not specified, the function will just get the most recent 90
        days worth of spot price history

    Returns
    -------
    spot_history : pd.DataFrame object
        in addition to saving this as './spot_history.csv' the
        dataframe can also be returned as an object in memory
    stat_df : pd.DataFrame object
        in addition to saving this as './<info>_stats.csv' the
        dataframe can also be returned as an object in memory
    '''

    # Import packages
    import dateutil
    import logging
    import numpy as np
    import os
    import pandas as pd
    import yaml

    # Import local packages
    import utils
    from record_spot_price import return_spot_history

    # Init variables
    proc_time *= 60.0
    num_nodes = min(np.ceil(float(num_jobs)/jobs_per), 20)

    # Init simulation market results dataframe
    sim_df_cols = ['start_time', 'spot_hist_csv', 'proc_time', 'num_datasets',
                   'jobs_per_node', 'num_jobs_iter', 'bid_ratio', 'bid_price',
                   'median_history', 'mean_history', 'stdev_history',
                   'compute_time', 'wait_time', 'per_node_cost',
                   'num_interrupts', 'first_iter_time']
    sim_df = pd.DataFrame(columns=sim_df_cols)

    # Init full run stats data frame
    stat_df_cols = ['Total cost', 'Instance cost', 'Storage cost', 'Tranfer cost',
                    'Total time', 'Run time', 'Wait time',
                    'Upload time', 'Download time']
    stat_df = pd.DataFrame(columns=stat_df_cols)

    # Set up logger
    base_dir = os.path.join(sim_dir, av_zone)
    if not os.path.exists(base_dir):
        try:
            os.makedirs(base_dir)
        except OSError as exc:
            print 'Found av zone directory %s, continuing...' % av_zone
    log_path = os.path.join(base_dir, '%s_%d-jobs_%.3f-bid.log' % \
                            (instance_type, num_jobs, bid_ratio))
    stat_log = utils.setup_logger('stat_log', log_path, logging.INFO, to_screen=True)

    # Check to see if simulation was already run (sim csv file exists)
    sim_csv = os.path.join(base_dir, '%s_%d-jobs_%.3f-bid_sim.csv' % \
                           (instance_type, num_jobs, bid_ratio))
    if os.path.exists(sim_csv):
        stat_log.info('Simulation file %s already exists, skipping...' % sim_csv)
        return
#......... remaining code omitted .........
Developer: computational-neuroimaging-lab, Project: Clark2015_AWS, Lines: 101, Source: spot_price_model.py
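
Example 13 asks for a named logger ('stat_log'), an explicit level, and console mirroring via to_screen=True. A sketch matching that signature (illustrative only; the actual Clark2015_AWS utils module may differ) might be:

import logging

def setup_logger(logger_name, log_path, level=logging.INFO, to_screen=False):
    # Illustrative sketch only.
    # Named logger writing to a file and, optionally, to the console.
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    formatter = logging.Formatter("%(asctime)s : %(message)s")
    file_handler = logging.FileHandler(log_path)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    if to_screen:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
    return logger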

Example 14: IP

    opt.add_option('--dhcp-subnetmask', dest='dhcp_subnetmask', default=DHCP_DEFAULT_SUBNETMASK, action='store',
            help='DHCP lease subnet mask')
    opt.add_option('--dhcp-gateway', dest='dhcp_gateway', default=DHCP_DEFAULT_GW, action='store',
            help='DHCP lease gateway')
    opt.add_option('--dhcp-dns', dest='dhcp_dns', default=DHCP_DEFAULT_DNS, action='store',
            help='DHCP lease DNS')
    opt.add_option('--dhcp-bcast', dest='dhcp_bcast', default=DHCP_DEFAULT_BCAST, action='store',
            help='DHCP lease broadcast')
    opt.add_option('--dhcp-fileserver', dest='dhcp_fileserver', default='', action='store',
            help='DHCP lease fileserver IP (option 66)')
    opt.add_option('--dhcp-filename', dest='dhcp_filename', default='', action='store',
            help='DHCP lease filename (option 67)')

    options, args = opt.parse_args(sys.argv[1:])

    main_logger = utils.setup_logger('main_logger', options.logfile, options.debug)
    sip_logger = utils.setup_logger('sip_logger', options.sip_logfile, options.debug, str_format='%(asctime)s %(message)s')    
    
    main_logger.info("Starting application")
    
    main_logger.debug("SIP: Writing SIP messages in %s log file" % options.sip_logfile)
    main_logger.debug("SIP: Authentication password: %s" % options.sip_password)
    main_logger.debug("Logfile: %s" % options.logfile)
    
    if not options.terminal:
        import gui
        import Tkinter as tk

        root = tk.Tk()
        app = gui.MainApplication(root, options, main_logger)
        root.title(sys.argv[0])
Developer: svenno, Project: SPLiT, Lines: 31, Source: SPLiT.py

Example 15: load_cate_data

    options.update({'test.save': os.path.join(sprefix, sfolder, 'test.save')})
    options.update({'params.save': os.path.join(sprefix, sfolder, 'params.save')})
    if not os.path.exists(os.path.join(sprefix, sfolder)):
        os.makedirs(os.path.join(sprefix, sfolder))

    # current word included
    train_set, test_set = load_cate_data(options)
    mlp = init_cate_params(options)
    run_cate(mlp, train_set, test_set, options)


if __name__ == "__main__":
    log_folder = '../log'
    main_file = 'main_log'
    correct_file = 'correct_log'
    error_file = 'error_log'
    main_name = 'main'
    corr_name = 'correct'
    error_name = 'error'

    stript_path = os.path.dirname(os.path.abspath(__file__))
    log_folder_path = os.path.join(stript_path, log_folder)
    if not os.path.exists(log_folder_path):
        os.makedirs(log_folder_path)
    log_file_names = (main_file, correct_file, error_file)
    logger_names = (main_name, corr_name, error_name)
    setup_logger(log_folder_path, log_file_names, logger_names)
    log_main = logging.getLogger(main_name)
    log_main.info('\n*****date: %s*****' % datetime.datetime.now())
    main()
Developer: fishiwhj, Project: Category_Tagging_Reranking, Lines: 30, Source: tagging_cate_part.py
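
Example 15 passes a log directory together with parallel tuples of file names and logger names, then retrieves each logger afterwards with logging.getLogger(name). A sketch supporting this multi-logger pattern (the level and format are assumptions) could be:

import logging
import os

def setup_logger(log_folder, log_file_names, logger_names):
    # Illustrative sketch only.
    # Create one file-backed logger per (file name, logger name) pair
    # so callers can later fetch them with logging.getLogger(name).
    for file_name, logger_name in zip(log_file_names, logger_names):
        logger = logging.getLogger(logger_name)
        logger.setLevel(logging.INFO)
        handler = logging.FileHandler(os.path.join(log_folder, file_name))
        handler.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
        logger.addHandler(handler)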


Note: The utils.setup_logger examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.