

Python logbook.FileHandler Class Code Examples

This article collects typical usage examples of the Python class logbook.FileHandler. If you are wondering what FileHandler does, how to use it, or what real code that uses it looks like, the curated class examples here should help.


The following presents 15 code examples of the FileHandler class, sorted by popularity by default.
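
Before the examples, here is a minimal sketch of the two FileHandler patterns that recur throughout them: pushing a handler for the whole process, and binding one to a block. This sketch is not taken from any of the projects below; only logbook itself is assumed, and the log file names are placeholders.

from logbook import FileHandler, Logger

log = Logger('demo')

# Pattern 1: push the handler application-wide; it stays active until popped.
handler = FileHandler('app.log')
handler.push_application()
log.info('this record goes to app.log')

# Pattern 2: bind the handler to a block; it is removed again on exit.
with FileHandler('scoped.log').applicationbound():
    log.info('this record goes to scoped.log')

Most of the examples below use one of these two patterns together with a Logger.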

Example 1: main

import warnings

import pylab  # matplotlib's pylab interface; test4() below produces the plots

from logbook import FileHandler, StreamHandler


def main():
    # escalate warnings to errors so the demo fails fast
    warnings.simplefilter('error', UserWarning)
    warnings.simplefilter('error', Warning)

    level = 'WARNING'  # note: unused in the original snippet

    # route all log records to application.log for the rest of the process
    log_handler1 = FileHandler('application.log')
    log_handler1.push_application()

    test4()  # demo routine defined elsewhere in the NeuroUnits project
    pylab.show()
Author: NeuroArchive, Project: NeuroUnits, Lines: 12, Source: demo4.py

Example 2: setup_logging

import os

from logbook import FileHandler, Logger, RotatingFileHandler


def setup_logging(config):
    log_file = os.path.join(config['daemon']['app_path'],
                            config['daemon']['log']['file'])
    # if running in debug mode, disable log rotation because it makes
    # things confusing
    if config['daemon']['debug']:
        log_handler = FileHandler(log_file)
    else:
        max_size = config['daemon']['log']['rotate_size']
        backup_count = config['daemon']['log']['rotate_count']
        log_handler = RotatingFileHandler(log_file, max_size=max_size,
                                          backup_count=backup_count)
    log_handler.push_application()
    log = Logger('edgy_crits')
    return log
Author: Lambdanaut, Project: crits-adapter, Lines: 15, Source: log_.py

Example 3: start

    # excerpt from a pipeline class; the source module imports os, fs,
    # localinterfaces, randint, and logbook's FileHandler, Logger and
    # ZeroMQPullSubscriber (from logbook.queues)
    def start(self):
        """Initialize workdir, logging, etc. in preparation for running jobs.
        """

        # make a working directory for each job
        for job in self.jobs:
            job["workdir"] = os.path.join(self.workdir, job["description"])
            fs.maybe_mkdir(job["workdir"])
        # temporary ipython profile directory
        self.ipythondir = os.path.join(self.workdir, ".ipython")
        fs.maybe_mkdir(self.ipythondir)
        # log dir
        self.logdir = os.path.join(self.workdir, "log")
        fs.maybe_mkdir(self.logdir)

        # determine which IP we are going to listen on for logging
        try:
            self.listen_ip = localinterfaces.public_ips()[0]
        except IndexError:  # narrowed from the original bare except
            raise ValueError("This machine appears not to have"
                             " any publicly visible IP addresses")

        # setup ZMQ logging
        self.handler = FileHandler(os.path.join(self.logdir, "dish.log"))
        self.listen_port = str(randint(5000, 10000))
        self.subscriber = ZeroMQPullSubscriber("tcp://" + self.listen_ip +
                                               ":" + self.listen_port)
        self.controller = self.subscriber.dispatch_in_background(self.handler)
        self.logger = Logger("dish_master")
Author: LabAdvComp, Project: dish, Lines: 29, Source: pipeline.py

Example 4: __init__

    def __init__(self, settings={}, prefix=""):
        """initialize the Application with a settings dictionary and an optional
        ``prefix`` if this is a sub application"""
        self.settings = settings
        self.mapper = routes.Mapper()
        self.setup_handlers(self.mapper)
        self.loghandler = FileHandler(self.logfilename)
Author: mrtopf, Project: We-Rate-It, Lines: 7, Source: app.py

Example 5: __init__

    def __init__(self, filament):

        self.logger = Logger(Fibratus.__name__)
        self.file_handler = FileHandler(os.path.join(os.path.abspath(__file__), '..', '..', '..', 'fibratus.log'),
                                        mode='w+')
        self.kevt_streamc = KEventStreamCollector(etw.KERNEL_LOGGER_NAME.encode())
        self.kcontroller = KTraceController()
        self.ktrace_props = KTraceProps()
        self.ktrace_props.enable_kflags()
        self.ktrace_props.logger_name = etw.KERNEL_LOGGER_NAME

        self.handle_repository = HandleRepository()
        self._handles = []
        # query for handles on the
        # start of kernel trace
        with self.file_handler.applicationbound():
            self.logger.info('Starting fibratus...')
            self.logger.info('Enumerating system handles...')
            self._handles = self.handle_repository.query_handles()
            self.logger.info('%s handles found' % len(self._handles))
            self.handle_repository.free_buffers()
        self.thread_registry = ThreadRegistry(self.handle_repository, self._handles)

        self.kevent = KEvent(self.thread_registry)

        self._filament = filament

        self.fsio = FsIO(self.kevent, self._handles)
        self.hive_parser = HiveParser(self.kevent, self.thread_registry)
        self.tcpip_parser = TcpIpParser(self.kevent)
        self.dll_repository = DllRepository(self.kevent)

        self.requires_render = {}
        self.filters_count = 0
Author: chubbymaggie, Project: fibratus, Lines: 34, Source: fibratus_entrypoint.py

Example 6: main

import argparse
import datetime
import os
import sys
import time

from logbook import FileHandler, Logger, StreamHandler

# assumed: a module-level logger; CSVFileValidator is defined elsewhere in
# the source project
log = Logger(__name__)


def main():
    """
    The main routine which kicks everything off
    :return:
    """

    # Setup the command line arguments
    flags = argparse.ArgumentParser(description="Tool to validate and fix errors in CSV files for TADC imports")
    flags.add_argument('csv_file', type=str, help="Path to a CSV file to validate")
    flags.add_argument('header_rows', type=str, help="Number of header rows")
    flags.add_argument('--fix-missing', '-f', action='store_true', help="Fix missing fields by inserting the value 'unknown'")
    flags.add_argument('--output-dir', '-o', type=str, help='Where to put output files', default=os.getcwd())
    flags.add_argument('--log-dir', '-l', type=str, help='Where to put log files', default='/tmp')
    flags.add_argument('--log-level', type=str, help='Choose a log level', default='INFO')
    flags.add_argument('--old-date-format', type=str, help="the format of dates that will be fixed", default='%d/%m/%Y')
    args = flags.parse_args()

    log_filename = os.path.join(
            args.log_dir,
            'tadc_import_validator_{}.log'.format(os.path.basename(time.strftime('%Y%m%d-%H%M%S')))
        )

    # register some logging handlers
    log_handler = FileHandler(
        log_filename,
        mode='w',
        level=args.log_level,
        bubble=True
    )
    stdout_handler = StreamHandler(sys.stdout, level=args.log_level, bubble=True)

    with stdout_handler.applicationbound():
        with log_handler.applicationbound():
            log.info("Arguments: {}".format(args))
            start = time.time()
            log.info("starting at {}".format(time.strftime('%l:%M%p %Z on %b %d, %Y')))

            with CSVFileValidator(
                    csv_file=args.csv_file,
                    header_rows=args.header_rows,
                    output_dir=args.output_dir,
                    old_date_format=args.old_date_format,
                    fix_missing=args.fix_missing) as validator:
                validator.validate_file()
                log.info("Running time: {}".format(str(datetime.timedelta(seconds=(round(time.time() - start, 3))))))
                log.info("Log written to {}:".format(log_filename))
                log.info("Fixed data is in: {}".format(validator.get_fixed_filename()))
Author: talis, Project: tadc-import-validator-tool, Lines: 47, Source: command_line.py

Example 7: __init__

    def __init__(self, config=None, merge=False, **kwargs):
        """
        See qdb.config for more information about the configuration of
        qdb.
        merge denotes how config and kwargs should be merged.
        QdbConfig.kwargs_first says config will trample kwargs,
        QdbConfig.config_first says kwargs will trample config.
        Otherwise, kwargs and config cannot both be passed.
        """
        super(Qdb, self).__init__()
        if config and kwargs:
            if merge == QdbConfig.kwargs_first:
                first = kwargs
                second = config
            elif merge == QdbConfig.config_first:
                first = config
                second = kwargs
            else:
                raise TypeError('Cannot pass config and kwargs')
            config = first.merge(second)
        else:
            config = QdbConfig.get_config(config or kwargs)

        self.address = config.host, config.port
        self.set_default_file(config.default_file)
        self.default_namespace = config.default_namespace or {}
        self.exception_serializer = config.exception_serializer or \
            default_exception_serializer
        self.eval_fn = config.eval_fn or default_eval_fn
        self.green = config.green
        self._file_cache = {}
        self.redirect_output = config.redirect_output
        self.retry_attepts = config.retry_attepts
        self.repr_fn = config.repr_fn
        self.skip_fn = config.skip_fn or (lambda _: False)
        self.pause_signal = config.pause_signal \
            if config.pause_signal else signal.SIGUSR2
        self.uuid = str(config.uuid or uuid4())
        self.watchlist = {}
        self.execution_timeout = config.execution_timeout
        # We need to be able to send stdout back to the user debugging the
        # program. We hold a handle to this in case the program resets stdout.
        if self.redirect_output:
            self._old_stdout = sys.stdout
            self._old_stderr = sys.stderr
            self.stdout = StringIO()
            self.stderr = StringIO()
            sys.stdout = self.stdout
            sys.stderr = self.stderr
        self.forget()
        self.log_handler = None
        if config.log_file:
            self.log_handler = FileHandler(config.log_file)
            self.log_handler.push_application()
        self.cmd_manager = (config.cmd_manager or RemoteCommandManager)(self)
        self.cmd_manager.start(config.auth_msg)
Author: ThisGuyCodes, Project: qdb, Lines: 56, Source: tracer.py

Example 8: main

from os import environ
from pathlib import Path
from time import sleep

from logbook import FileHandler
from watchdog.observers.inotify import InotifyObserver  # Linux-only observer

# assumed: MyEventHandler is defined elsewhere in the source project


def main():
    """watch a specific directory, logging changes and
    running python scripts when they are written to disk"""
    home_dir = Path(environ.get('HOME'))
    run_logfile = home_dir / 'pyrun.log'
    watchdog_logfile = home_dir / 'pydir.log'
    run_log = FileHandler(str(run_logfile), level='NOTICE', bubble=True, mode='w', delay=True)
    file_log = FileHandler(str(watchdog_logfile), level='INFO', bubble=True)
    with run_log.applicationbound():
        with file_log.applicationbound():
            watched_dir = home_dir / 'code' / 'pyrep' / 'coderunner' / 'snippets'
            handler = MyEventHandler(run_logfile, run_log)
            obs = InotifyObserver()
            obs.schedule(handler, str(watched_dir), False)
            obs.start()
            try:
                while True:
                    sleep(1)
            except: #  pylint: disable=bare-except
                obs.stop()
            obs.join()
Author: tulanthoar, Project: pygit, Lines: 21, Source: runner.py

Example 9: Application

class Application(object):
    """a base class for dispatching WSGI requests"""
    
    def __init__(self, settings={}, prefix=""):
        """initialize the Application with a settings dictionary and an optional
        ``prefix`` if this is a sub application"""
        self.settings = settings
        self.mapper = routes.Mapper()
        self.setup_handlers(self.mapper)
        self.loghandler = FileHandler(self.logfilename)

    def __call__(self, environ, start_response):
        with self.loghandler.threadbound():
            request = werkzeug.Request(environ)
            m = self.mapper.match(environ = environ)
            if m is not None:
                handler = m['handler'](app=self, request=request, settings=self.settings)
                try:
                    return handler.handle(**m)(environ, start_response)
                except werkzeug.exceptions.HTTPException as e:  # 'as e' replaces the Python 2-only comma syntax
                    return e(environ, start_response)
            # no view found => 404
            return werkzeug.exceptions.NotFound()(environ, start_response)
Author: mrtopf, Project: We-Rate-It, Lines: 23, Source: app.py

Example 10: Qdb

class Qdb(Bdb, object):
    """
    The Quantopian Remote Debugger.
    """
    _instance = None

    def __new__(cls, *args, **kwargs):
        """
        Qdb objects are singletons that persist until their disable method is
        called.
        """
        if not cls._instance:
            cls._instance = super(Qdb, cls).__new__(cls)
            cls._instance._init(*args, **kwargs)
        return cls._instance

    def __init__(self, *args, **kwargs):
        pass

    def _init(self, config=None, merge=False, **kwargs):
        """
        See qdb.config for more information about the configuration of
        qdb.
        merge denotes how config and kwargs should be merged.
        QdbConfig.kwargs_first says config will trample kwargs,
        QdbConfig.config_first says kwargs will trample config.
        Otherwise, kwargs and config cannot both be passed.
        """
        self.super_ = super(Qdb, self)
        self.super_.__init__()
        self.reset()
        if config and kwargs:
            if merge == QdbConfig.kwargs_first:
                first = kwargs
                second = config
            elif merge == QdbConfig.config_first:
                first = config
                second = kwargs
            else:
                raise TypeError('Cannot pass config and kwargs')
            config = first.merge(second)
        else:
            config = QdbConfig.get_config(config or kwargs)

        self.address = config.host, config.port
        self.set_default_file(config.default_file)
        self.default_namespace = config.default_namespace or {}
        self.exception_serializer = config.exception_serializer or \
            default_exception_serializer
        self.eval_fn = config.eval_fn or default_eval_fn
        self.green = config.green
        self._file_cache = {}
        self.redirect_output = config.redirect_output
        self.retry_attepts = config.retry_attepts
        self.repr_fn = config.repr_fn
        self._skip_fn = config.skip_fn or (lambda _: False)
        self.pause_signal = config.pause_signal \
            if config.pause_signal else signal.SIGUSR2
        self.uuid = str(config.uuid or uuid4())
        self.watchlist = {}
        self.execution_timeout = config.execution_timeout
        self.reset()
        self.log_handler = None
        if config.log_file:
            self.log_handler = FileHandler(config.log_file)
            self.log_handler.push_application()

        # The timing between these lines might matter depending on the
        # cmd_manager. Don't separate them.
        self.cmd_manager = (config.cmd_manager or RemoteCommandManager)(self)
        self.cmd_manager.start(config.auth_msg)

        # We need to be able to send stdout back to the user debugging the
        # program. We hold a handle to this in case the program resets stdout.
        if self.redirect_output:
            self._old_stdout = sys.stdout
            self._old_stderr = sys.stderr
            sys.stdout = OutputTee(
                sys.stdout,
                RemoteOutput(self.cmd_manager, '<stdout>'),
            )
            sys.stderr = OutputTee(
                sys.stderr,
                RemoteOutput(self.cmd_manager, '<stderr>'),
            )

    def skip_fn(self, path):
        return self._skip_fn(self.canonic(path))

    def restore_output_streams(self):
        """
        Restores the original output streams.
        """
        if self.redirect_output:
            sys.stdout = self._old_stdout
            sys.stderr = self._old_stderr

    def _new_execution_timeout(self, src):
        """
        Return a new execution timeout context manager.
#......... (rest of the code omitted) .........
Author: fuyuanwu, Project: qdb, Lines: 101, Source: tracer.py

Example 11: ArgumentParser

if __name__ == '__main__':
    '''Usage: python tk_maintain.py --log hehe.log
    '''

    from logbook import FileHandler
    from logbook import Logger
    from argparse import ArgumentParser
    import sys
    parser = ArgumentParser()
    logpath = './log/'
    parser.add_argument('--log', nargs=1, help='log path')
    parser.add_argument('--version', nargs=1, help='maintain version')
    args = parser.parse_args(sys.argv[1:])
    logfilepath = logpath + args.log[0]
    maintain_version = args.version[0]
    log_handler = FileHandler(logfilepath)
    logbk = Logger('Token Maintain')

    with log_handler.applicationbound():
        logbk.info('maintain prepare')

        at_least = AT_LEAST_TOKEN_COUNT
        max_tokens_redis_limit = MAX_TOKENS_IN_REDIS

        logbk.info('maintain begin')

        # authenticate the new user, write its access_token to mongodb, and have
        # redis import new tokens from mongodb without resetting the req_count
        # of existing tokens
        if maintain_version == 'addatoken':
            print('generate new token, write to mongo, push to redis without reset request count')
            generate_api_access_token(logbk)
            add_without_reset_req_count(max_tokens_redis_limit, logbk)
Author: lijiahong, Project: utils4scrapy, Lines: 31, Source: tk_maintain.py

Example 12: main

# excerpt: the source module defines `args` (parsed argparse flags), `log`
# (a logbook Logger) and get_credentials() at module level
def main():
    """Shows basic usage of the Google Drive API.

    Creates a Google Drive API service object and outputs the names and IDs
    for up to 10 files.
    """

    log_filename = os.path.join(
        args.log_dir,
        'google-drive-to-s3-{}.log'.format(os.path.basename(time.strftime('%Y%m%d-%H%M%S')))
    )

    # register some logging handlers
    log_handler = FileHandler(
        log_filename,
        mode='w',
        level=args.log_level,
        bubble=True
    )
    stdout_handler = StreamHandler(sys.stdout, level=args.log_level, bubble=True)

    with stdout_handler.applicationbound():
        with log_handler.applicationbound():
            log.info("Arguments: {}".format(args))
            start = time.time()
            log.info("starting at {}".format(time.strftime('%l:%M%p %Z on %b %d, %Y')))

            credentials = get_credentials()
            http = credentials.authorize(httplib2.Http())
            drive_service = discovery.build('drive', 'v3', http=http)

            s3 = boto3.resource('s3')

            # load up a match file if we have one.
            if args.match_file:
                with open(args.match_file, 'r') as f:
                    match_filenames = f.read().splitlines()
            else:
                match_filenames = None

            # get the files in the specified folder.
            files = drive_service.files()
            request = files.list(
                pageSize=args.page_size,
                q="'{}' in parents".format(args.folder_id),
                fields="nextPageToken, files(id, name)"
            )

            # make sure our S3 Key prefix has a trailing slash
            key_prefix = ensure_trailing_slash(args.key_prefix)

            page_counter = 0
            file_counter = 0
            while request is not None:
                file_page = request.execute(http=http)
                page_counter += 1
                page_file_counter = 0  # reset the paging file counter

                # determine the page at which to start processing.
                if page_counter >= args.start_page:
                    log.info(u"######## Page {} ########".format(page_counter))

                    for this_file in file_page['files']:
                        file_counter += 1
                        page_file_counter += 1
                        if we_should_process_this_file(this_file['name'], match_filenames):
                            log.info(u"#== Processing {} file number {} on page {}. {} files processed.".format(
                                this_file['name'],
                                page_file_counter,
                                page_counter,
                                file_counter
                            ))

                            # download the file
                            download_request = drive_service.files().get_media(fileId=this_file['id'])
                            fh = io.BytesIO()  # Using an in memory stream location
                            downloader = MediaIoBaseDownload(fh, download_request)
                            done = False
                            pbar = InitBar(this_file['name'])
                            while done is False:
                                status, done = downloader.next_chunk()
                                pbar(int(status.progress()*100))
                                # print("\rDownload {}%".format(int(status.progress() * 100)))
                            del pbar

                            # upload to bucket
                            log.info(u"Uploading to S3")
                            s3.Bucket(args.bucket).put_object(
                                Key="{}{}".format(key_prefix, this_file['name']),
                                Body=fh.getvalue(),
                                ACL='public-read'
                            )
                            log.info(u"Uploaded to S3")
                            fh.close()  # close the file handle to release memory
                        else:
                            log.info(u"Do not need to process {}".format(this_file['name']))

                # stop if we have come to the last user specified page
                if args.end_page and page_counter == args.end_page:
                    log.info(u"Finished paging at page {}".format(page_counter))
#......... (rest of the code omitted) .........
Author: timhodson, Project: google-drive-file-download, Lines: 101, Source: google-drive-to-s3.py

Example 13: FileHandler

import requests
import time
import json
import zmq
from logbook import FileHandler, catch_exceptions
from sachintweets.models import connect, MongoException

####################################### Log book setup #########################
log_handler = FileHandler('fetch_user_details.log')
log_handler.push_application()

###################################### Constants ###############################
LOCK_FILE = 'recv.lock'

###################################### Mongodb connection ######################
try:
    db = connect()
    if db:
        tweet = db.tweet
except Exception as e:
    log_handler.write(e.message)


def fetch_user_details():
    tweets = tweet.find({})
    for t in tweets:
        if not 'screen_name' in t:
            r = requests.get("http://api.twitter.com/1/statuses/show/%d.json"\
            %(t['tid']))
            d = json.loads(r.content)
            if 'user' in d:
Author: kracekumar, Project: sachintweets, Lines: 31, Source: fetch_user_details.py

Example 14: FileHandler

import requests
import time
import json
import zmq
from multiprocessing import Process
from logbook import FileHandler, catch_exceptions
from sachintweets.models import connect, MongoException
from twitter import username, password
from os import remove, getpid

####################################### Log book setup #########################
log_handler = FileHandler('recv.log')
log_handler.push_application()

###################################### Constants ###############################
LOCK_FILE = 'recv.lock'

###################################### Mongodb connection ######################
try:
    db = connect()
    if db:
        tweet = db.tweet
except Exception as e:
    log_handler.write(e.message)

#################################### ZERO MQ PULLER ############################
context = zmq.Context()
socket = context.socket(zmq.SUB)
# note: zmq connect() expects a concrete endpoint (e.g. tcp://127.0.0.1:6789);
# the '*' wildcard is only valid for bind(). Kept as in the original source.
socket.connect("tcp://*:6789")
socket.setsockopt(zmq.SUBSCRIBE, "")
Author: kracekumar, Project: sachintweets, Lines: 30, Source: recv.py

Example 15: main

def main(user_email,
         url_api_collection,
         log_handler=None,
         mail_handler=None,
         dir_profile='profiles',
         profile_path=None,
         config_file=None,
         **kwargs):
    '''Executes a harvest with given parameters.
    Returns the ingest_doc_id, directory harvest saved to and number of
    records.
    '''
    if not config_file:
        config_file = os.environ.get('DPLA_CONFIG_FILE', 'akara.ini')
    num_recs = -1
    my_mail_handler = None
    if not mail_handler:
        my_mail_handler = logbook.MailHandler(
            EMAIL_RETURN_ADDRESS, user_email, level='ERROR', bubble=True)
        my_mail_handler.push_application()
        mail_handler = my_mail_handler
    try:
        collection = Collection(url_api_collection)
    except Exception as e:
        msg = 'Exception in Collection {}, init {}'.format(url_api_collection,
                                                           str(e))
        logbook.error(msg)
        raise e
    if not (collection['harvest_type'] in HARVEST_TYPES):
        msg = 'Collection {} wrong type {} for harvesting. Harvest type {} \
                is not in {}'.format(url_api_collection,
                                     collection['harvest_type'],
                                     collection['harvest_type'],
                                     HARVEST_TYPES.keys())
        logbook.error(msg)
        raise ValueError(msg)
    mail_handler.subject = "Error during harvest of " + collection.url
    my_log_handler = None
    if not log_handler:  # can't init until have collection
        my_log_handler = FileHandler(get_log_file_path(collection.slug))
        my_log_handler.push_application()
    logger = logbook.Logger('HarvestMain')
    msg = 'Init harvester next. Collection:{}'.format(collection.url)
    logger.info(msg)
    # email directly
    mimetext = create_mimetext_msg(EMAIL_RETURN_ADDRESS, user_email, ' '.join(
        ('Starting harvest for ', collection.slug)), msg)
    try:  # TODO: request more emails from AWS
        mail_handler.deliver(mimetext, '[email protected]')
    except:
        pass
    logger.info('Create DPLA profile document')
    if not profile_path:
        profile_path = os.path.abspath(
            os.path.join(dir_profile, collection.id + '.pjs'))
    with codecs.open(profile_path, 'w', 'utf8') as pfoo:
        pfoo.write(collection.dpla_profile)
    logger.info('DPLA profile document : ' + profile_path)
    harvester = None
    try:
        harvester = HarvestController(
            user_email,
            collection,
            profile_path=profile_path,
            config_file=config_file,
            **kwargs)
    except Exception as e:
        import traceback
        msg = 'Exception in harvester init: type: {} TRACE:\n{}'.format(
            type(e), traceback.format_exc())
        logger.error(msg)
        raise e
    logger.info('Create ingest doc in couch')
    ingest_doc_id = harvester.create_ingest_doc()
    logger.info('Ingest DOC ID: ' + ingest_doc_id)
    logger.info('Start harvesting next')
    try:
        num_recs = harvester.harvest()
        msg = ''.join(('Finished harvest of ', collection.slug, '. ',
                       str(num_recs), ' records harvested.'))
        harvester.update_ingest_doc('complete', items=num_recs, num_coll=1)
        logger.info(msg)
        # email directly
        mimetext = create_mimetext_msg(
            EMAIL_RETURN_ADDRESS, user_email, ' '.join(
                ('Finished harvest of raw records '
                 'for ', collection.slug, ' enriching next')), msg)
        try:
            mail_handler.deliver(mimetext, '[email protected]')
        except:
            pass
    except Exception as e:
        import traceback
        error_msg = ''.join(("Error while harvesting: type-> ", str(type(e)),
                             " TRACE:\n" + str(traceback.format_exc())))
        logger.error(error_msg)
        harvester.update_ingest_doc(
            'error', error_msg=error_msg, items=num_recs)
        raise e
    if my_log_handler:
#......... (rest of the code omitted) .........
Author: mredar, Project: harvester, Lines: 101, Source: controller.py


Note: the logbook.FileHandler class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; refer to each project's license before redistributing or reusing the code. Do not reproduce without permission.