

Python FileHandler.applicationbound Method Code Examples

This article collects typical usage examples of the Python method logbook.FileHandler.applicationbound. If you are wondering what FileHandler.applicationbound does or how to use it, the curated examples below should help. You can also browse further usage examples of the containing class, logbook.FileHandler.


Below are 8 code examples of the FileHandler.applicationbound method, sorted by popularity.
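
Before the examples, here is a minimal self-contained sketch of the pattern they all share (the 'demo.log' file name and logger name are made up for illustration). applicationbound() returns a context manager: the handler is bound for the whole application while the with block runs and is unbound again on exit.

from logbook import FileHandler, Logger

log = Logger('Demo')
handler = FileHandler('demo.log', mode='w', level='INFO')

with handler.applicationbound():
    # inside the block every record, from any logger, reaches the handler
    log.info('written to demo.log')

# outside the block the handler is popped; records fall back to
# logbook's default stderr handler
log.info('written to stderr instead')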

Example 1: main

# Required import: from logbook import FileHandler [as alias]
# Or: from logbook.FileHandler import applicationbound [as alias]
def main():
    """watch a specific directory, logging changes and
    running python scripts when they are written to disk"""
    home_dir = Path(environ.get('HOME'))
    run_logfile = home_dir / 'pyrun.log'
    watchdog_logfile = home_dir / 'pydir.log'
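    # delay=True (an optional FileHandler argument) defers opening pyrun.log until the first record is written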
    run_log = FileHandler(str(run_logfile), level='NOTICE', bubble=True, mode='w', delay=True)
    file_log = FileHandler(str(watchdog_logfile), level='INFO', bubble=True)
    with run_log.applicationbound():
        with file_log.applicationbound():
            watched_dir = home_dir / 'code' / 'pyrep' / 'coderunner' / 'snippets'
            handler = MyEventHandler(run_logfile, run_log)
            obs = InotifyObserver()
            obs.schedule(handler, str(watched_dir), False)
            obs.start()
            try:
                while True:
                    sleep(1)
            except: #  pylint: disable=bare-except
                obs.stop()
            obs.join()
Developer ID: tulanthoar, Project: pygit, Lines of code: 23, Source file: runner.py

Example 2: main

# Required import: from logbook import FileHandler [as alias]
# Or: from logbook.FileHandler import applicationbound [as alias]
def main():
    """
    The main routine which kicks everything off
    :return:
    """

    # Setup the command line arguments
    flags = argparse.ArgumentParser(description="Tool to validate and fix errors in CSV files for TADC imports")
    flags.add_argument('csv_file', type=str, help="Path to a CSV file to validate")
    flags.add_argument('header_rows', type=str, help="Number of header rows")
    flags.add_argument('--fix-missing', '-f', action='store_true', help="Fix missing fields by inserting the value 'unknown'")
    flags.add_argument('--output-dir', '-o', type=str, help='Where to put output files', default=os.getcwd())
    flags.add_argument('--log-dir', '-l', type=str, help='Where to put log files', default='/tmp')
    flags.add_argument('--log-level', type=str, help='Choose a log level', default='INFO')
    flags.add_argument('--old-date-format', type=str, help="the format of dates that will be fixed", default='%d/%m/%Y')
    args = flags.parse_args()

    log_filename = os.path.join(
            args.log_dir,
            'tadc_import_validator_{}.log'.format(os.path.basename(time.strftime('%Y%m%d-%H%M%S')))
        )

    # register some logging handlers
    log_handler = FileHandler(
        log_filename,
        mode='w',
        level=args.log_level,
        bubble=True
    )
    stdout_handler = StreamHandler(sys.stdout, level=args.log_level, bubble=True)

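    # nesting the two applicationbound() contexts stacks both handlers;
    # bubble=True lets each record pass the file handler and reach stdout too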
    with stdout_handler.applicationbound():
        with log_handler.applicationbound():
            log.info("Arguments: {}".format(args))
            start = time.time()
            log.info("starting at {}".format(time.strftime('%l:%M%p %Z on %b %d, %Y')))

            with CSVFileValidator(
                    csv_file=args.csv_file,
                    header_rows=args.header_rows,
                    output_dir=args.output_dir,
                    old_date_format=args.old_date_format,
                    fix_missing=args.fix_missing) as validator:
                validator.validate_file()
                log.info("Running time: {}".format(str(datetime.timedelta(seconds=(round(time.time() - start, 3))))))
                log.info("Log written to {}:".format(log_filename))
                log.info("Fixed data is in: {}".format(validator.get_fixed_filename()))
Developer ID: talis, Project: tadc-import-validator-tool, Lines of code: 49, Source file: command_line.py

Example 3: Fibratus

# Required import: from logbook import FileHandler [as alias]
# Or: from logbook.FileHandler import applicationbound [as alias]
class Fibratus():

    """Fibratus entrypoint.

    Setup the core components including the kernel
    event stream collector and the tracing controller.
    At this point the system handles are also being
    enumerated.

    """
    def __init__(self, filament):

        self.logger = Logger(Fibratus.__name__)
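        # the '..' components resolve past the file name, so fibratus.log
        # lands two directories above this module; mode='w+' truncates any previous log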
        self.file_handler = FileHandler(os.path.join(os.path.abspath(__file__), '..', '..', '..', 'fibratus.log'),
                                        mode='w+')
        self.kevt_streamc = KEventStreamCollector(etw.KERNEL_LOGGER_NAME.encode())
        self.kcontroller = KTraceController()
        self.ktrace_props = KTraceProps()
        self.ktrace_props.enable_kflags()
        self.ktrace_props.logger_name = etw.KERNEL_LOGGER_NAME

        self.handle_repository = HandleRepository()
        self._handles = []
        # query for handles on the
        # start of kernel trace
        with self.file_handler.applicationbound():
            self.logger.info('Starting fibratus...')
            self.logger.info('Enumerating system handles...')
            self._handles = self.handle_repository.query_handles()
            self.logger.info('%s handles found' % len(self._handles))
            self.handle_repository.free_buffers()
        self.thread_registry = ThreadRegistry(self.handle_repository, self._handles)

        self.kevent = KEvent(self.thread_registry)

        self._filament = filament

        self.fsio = FsIO(self.kevent, self._handles)
        self.hive_parser = HiveParser(self.kevent, self.thread_registry)
        self.tcpip_parser = TcpIpParser(self.kevent)
        self.dll_repository = DllRepository(self.kevent)

        self.requires_render = {}
        self.filters_count = 0

    def run(self):

        @atexit.register
        def _exit():
            self.stop_ktrace()

        self.kcontroller.start_ktrace(etw.KERNEL_LOGGER_NAME, self.ktrace_props)

        def on_kstream_open():
            if self._filament is None:
                IO.write_console('Done!                               ')
        self.kevt_streamc.set_kstream_open_callback(on_kstream_open)
        self._open_kstream()

    def _open_kstream(self):
        try:
            self.kevt_streamc.open_kstream(self._on_next_kevent)
        except Exception as e:
            with self.file_handler.applicationbound():
                self.logger.error(e)
        except KeyboardInterrupt:
            self.stop_ktrace()

    def stop_ktrace(self):
        IO.write_console('Stopping fibratus...')
        if self._filament:
            self._filament.close()
        self.kcontroller.stop_ktrace(self.ktrace_props)
        self.kevt_streamc.close_kstream()

    def add_filters(self, kevent_filters):
        if len(kevent_filters) > 0:
            self.filters_count = len(kevent_filters)
            # include the basic filters
            # that are essential to the
            # rest of kernel events
            self.kevt_streamc.add_kevent_filter(ENUM_PROCESS)
            self.kevt_streamc.add_kevent_filter(ENUM_THREAD)
            self.kevt_streamc.add_kevent_filter(ENUM_IMAGE)
            self.kevt_streamc.add_kevent_filter(REG_CREATE_KCB)
            self.kevt_streamc.add_kevent_filter(REG_DELETE_KCB)

            # these kevents are necessary for consistent state
            # of the trace. If the user doesn't include them
            # in a filter list, then we do the job but set the
            # kernel event type as not eligible for rendering
            if KEvents.CREATE_PROCESS not in kevent_filters:
                self.kevt_streamc.add_kevent_filter(CREATE_PROCESS)
                self.requires_render[CREATE_PROCESS] = False
            else:
                self.requires_render[CREATE_PROCESS] = True

            if KEvents.CREATE_THREAD not in kevent_filters:
                self.kevt_streamc.add_kevent_filter(CREATE_THREAD)
                self.requires_render[CREATE_THREAD] = False
#.........some code omitted.........
Developer ID: chubbymaggie, Project: fibratus, Lines of code: 103, Source file: fibratus_entrypoint.py

Example 4: ArgumentParser

# Required import: from logbook import FileHandler [as alias]
# Or: from logbook.FileHandler import applicationbound [as alias]
    from logbook import FileHandler
    from logbook import Logger
    from argparse import ArgumentParser
    import sys
    parser = ArgumentParser()
    logpath = './log/'
    parser.add_argument('--log', nargs=1, help='log path')
    parser.add_argument('--version', nargs=1, help='maintain version')
    args = parser.parse_args(sys.argv[1:])
    logfilepath = logpath + args.log[0]
    maintain_version = args.version[0]
    log_handler = FileHandler(logfilepath)
    logbk = Logger('Token Maintain')

    with log_handler.applicationbound():
        logbk.info('maintain prepare')

        at_least = AT_LEAST_TOKEN_COUNT
        max_tokens_redis_limit = MAX_TOKENS_IN_REDIS

        logbk.info('maintain begin')

        # authenticate new users and add their access_tokens to mongodb; redis imports the new tokens from mongodb without resetting existing tokens' req_count
        if maintain_version == 'addatoken':
            print 'generate new token, write to mongo, push to redis without reset request count'
            generate_api_access_token(logbk)
            add_without_reset_req_count(max_tokens_redis_limit, logbk)

        # push every access_token in mongodb into redis and reset existing tokens' req_count
        if maintain_version == 'addalltoken':
Developer ID: lijiahong, Project: utils4scrapy, Lines of code: 32, Source file: tk_maintain.py

Example 5: main

# Required import: from logbook import FileHandler [as alias]
# Or: from logbook.FileHandler import applicationbound [as alias]
def main():
    """Shows basic usage of the Google Drive API.

    Creates a Google Drive API service object and outputs the names and IDs
    for up to 10 files.
    """

    log_filename = os.path.join(
        args.log_dir,
        'google-drive-to-s3-{}.log'.format(os.path.basename(time.strftime('%Y%m%d-%H%M%S')))
    )

    # register some logging handlers
    log_handler = FileHandler(
        log_filename,
        mode='w',
        level=args.log_level,
        bubble=True
    )
    stdout_handler = StreamHandler(sys.stdout, level=args.log_level, bubble=True)

    with stdout_handler.applicationbound():
        with log_handler.applicationbound():
            log.info("Arguments: {}".format(args))
            start = time.time()
            log.info("starting at {}".format(time.strftime('%l:%M%p %Z on %b %d, %Y')))

            credentials = get_credentials()
            http = credentials.authorize(httplib2.Http())
            drive_service = discovery.build('drive', 'v3', http=http)

            s3 = boto3.resource('s3')

            # load up a match file if we have one.
            if args.match_file:
                with open(args.match_file, 'r') as f:
                    match_filenames = f.read().splitlines()
            else:
                match_filenames = None

            # get the files in the specified folder.
            files = drive_service.files()
            request = files.list(
                pageSize=args.page_size,
                q="'{}' in parents".format(args.folder_id),
                fields="nextPageToken, files(id, name)"
            )

            # make sure our S3 Key prefix has a trailing slash
            key_prefix = ensure_trailing_slash(args.key_prefix)

            page_counter = 0
            file_counter = 0
            while request is not None:
                file_page = request.execute(http=http)
                page_counter += 1
                page_file_counter = 0  # reset the paging file counter

                # determine the page at which to start processing.
                if page_counter >= args.start_page:
                    log.info(u"######## Page {} ########".format(page_counter))

                    for this_file in file_page['files']:
                        file_counter += 1
                        page_file_counter += 1
                        if we_should_process_this_file(this_file['name'], match_filenames):
                            log.info(u"#== Processing {} file number {} on page {}. {} files processed.".format(
                                this_file['name'],
                                page_file_counter,
                                page_counter,
                                file_counter
                            ))

                            # download the file
                            download_request = drive_service.files().get_media(fileId=this_file['id'])
                            fh = io.BytesIO()  # Using an in memory stream location
                            downloader = MediaIoBaseDownload(fh, download_request)
                            done = False
                            pbar = InitBar(this_file['name'])
                            while done is False:
                                status, done = downloader.next_chunk()
                                pbar(int(status.progress()*100))
                                # print("\rDownload {}%".format(int(status.progress() * 100)))
                            del pbar

                            # upload to bucket
                            log.info(u"Uploading to S3")
                            s3.Bucket(args.bucket).put_object(
                                Key="{}{}".format(key_prefix, this_file['name']),
                                Body=fh.getvalue(),
                                ACL='public-read'
                            )
                            log.info(u"Uploaded to S3")
                            fh.close()  # close the file handle to release memory
                        else:
                            log.info(u"Do not need to process {}".format(this_file['name']))

                # stop if we have come to the last user specified page
                if args.end_page and page_counter == args.end_page:
                    log.info(u"Finished paging at page {}".format(page_counter))
#.........some code omitted.........
Developer ID: timhodson, Project: google-drive-file-download, Lines of code: 103, Source file: google-drive-to-s3.py

Example 6: main

# Required import: from logbook import FileHandler [as alias]
# Or: from logbook.FileHandler import applicationbound [as alias]
def main():
    """
    Copy a folder from Source to Target

    """

    log_filename = os.path.join(
        args.log_dir,
        'copy-google-drive-folder-{}.log'.format(os.path.basename(time.strftime('%Y%m%d-%H%M%S')))
    )

    # register some logging handlers
    log_handler = FileHandler(
        log_filename,
        mode='w',
        level=args.log_level,
        bubble=True
    )
    stdout_handler = StreamHandler(sys.stdout, level=args.log_level, bubble=True)

    with stdout_handler.applicationbound():
        with log_handler.applicationbound():
            log.info("Arguments: {}".format(args))
            start = time.time()
            log.info("starting at {}".format(time.strftime('%l:%M%p %Z on %b %d, %Y')))

            credentials = get_credentials()
            http = credentials.authorize(httplib2.Http())
            drive_service = discovery.build('drive', 'v3', http=http)

            # get the files in the specified folder.
            files = drive_service.files()
            request = files.list(
                pageSize=args.page_size,
                q="'{}' in parents".format(args.source_folder_id),
                fields="nextPageToken, files(id, name, mimeType)"
            )

            page_counter = 0
            file_counter = 0
            while request is not None:
                file_page = request.execute(http=http)
                page_counter += 1
                page_file_counter = 0  # reset the paging file counter

                # determine the page at which to start processing.
                if page_counter >= args.start_page:
                    log.info(u"######## Page {} ########".format(page_counter))

                    for this_file in file_page['files']:
                        file_counter += 1
                        page_file_counter += 1
                        log.info(u"#== Processing {} {} file number {} on page {}. {} files processed.".format(
                            this_file['mimeType'],
                            this_file['name'],
                            page_file_counter,
                            page_counter,
                            file_counter
                        ))

                        # if not a folder
                        if this_file['mimeType'] != 'application/vnd.google-apps.folder':
                            # Copy the file
                            # Drive API v3 expects 'name' here ('title' was the v2 field)
                            new_file = {'name': this_file['name']}
                            copied_file = drive_service.files().copy(fileId=this_file['id'], body=new_file).execute()
                            # move it to its new location
                            drive_service.files().update(
                                fileId=copied_file['id'],
                                addParents=args.target_folder_id,
                                removeParents=args.source_folder_id
                            ).execute()
                        else:
                            log.info(u"Skipped Folder")

                else:
                    log.info(u"Skipping Page {}".format(page_counter))

                # stop if we have come to the last user specified page
                if args.end_page and page_counter == args.end_page:
                    log.info(u"Finished paging at page {}".format(page_counter))
                    break

                # request the next page of files
                request = files.list_next(request, file_page)

            log.info("Running time: {}".format(str(datetime.timedelta(seconds=(round(time.time() - start, 3))))))
            log.info("Log written to {}:".format(log_filename))
Developer ID: timhodson, Project: google-drive-file-download, Lines of code: 89, Source file: copy-google-drive-folder.py

Example 7: set_req

# Required import: from logbook import FileHandler [as alias]
# Or: from logbook.FileHandler import applicationbound [as alias]
        for k,v in headers.iteritems():
            self.req.add_header(k,v)

    def set_req(self):
        self.req = urllib2.Request(self.url, urllib.urlencode(self.data))
        #self.req = urllib2.Request(self.url)

    def send(self):
        self.set_req()
        return urllib2.urlopen(self.req)



if __name__ == "__main__":

    logger = Logger("TicketchangeToInfluxdb")
    logfile = "ticketchangetoinfluxdb.log"
    fh = FileHandler(logfile,"a")
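    # applicationbound() returns a context manager and has no effect unless
    # entered with a with statement; the push_application() call below is
    # what actually activates the handler here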
    fh.applicationbound()
    fh.push_application()

    client = Client()
    client.test()
    adapter = Adapter()
    client.set_adapter(adapter)
    a = client.get_adapter()
    a.test()

    print("This is just a test.")
    logger.info("Testing logging.")
Developer ID: folpindo, Project: myticketlistener, Lines of code: 32, Source file: ticketchangetoinfluxdb.py

Example 8: Pipeline

# Required import: from logbook import FileHandler [as alias]
# Or: from logbook.FileHandler import applicationbound [as alias]

#.........some code omitted.........
            yield
        finally:
            # restore the normal cluster_view context manager on exit
            self._cluster_view = old_view_factory
            try:
                cm.gen.next()  # clean up the view we've been using
            except StopIteration:
                pass

    def _transaction_filter(self, targets):
        """Filter the `jobs` appropriately based on whether `targets` is a
        function, str, or list of str"""
        # TODO there has got to be a better way to do this -____-
        to_run = []
        dont_run = []
        if callable(targets):
            f = targets
            for job in self.jobs:
                if f(job):
                    dont_run.append(job)
                else:
                    to_run.append(job)
            return to_run, dont_run
        elif isinstance(targets, str):
            targets = [targets]
        elif not isinstance(targets, list):
            raise TypeError("transaction targets must be list, str, or callable")
        for job in self.jobs:
            canonical_targets = fs.canonicalize(job, targets)
            if all((os.path.exists(target)
                    for target in canonical_targets)):
                info = ("Skipping transaction for job {} targets {} "
                        "already present")
                with self.handler.applicationbound():
                    self.logger.info(info.format(job["description"],
                                                 canonical_targets))
                dont_run.append(job)
            else:
                # targets not present for this job
                to_run.append(job)
        return to_run, dont_run

    @contextmanager
    def transaction(self, targets):
        """Do some work "transacationally", in the sense that nothing done
        inside a ``transaction`` block will be "commited" to the
        workdir unless it all succeeds without error. The work done
        inside a transaction is also idempotent in that you must
        specify a ``target`` file or files for the tranasaction and it
        will not be run if the target exists already. This is perhaps
        best illustrated by a simple example::

            with p.transaction("{workdir}/example.txt"):
                p.run("{tmpdir}/touch example.txt")

        This will result in a file ``example.txt`` in each job's
        ``workdir``. The creation of this file will be skipped if the
        code is run again and the file already exists. This is
        obviously a silly example, but the code inside the ``with``
        block can be any arbitrarily complex series of operations
        which produces a set of target output files at the end. This
        is a powerful feature in that it allows pipelines to be
        restartable: if a pipeline crashes for some reason but you
        have its major sections wrapped in ``transaction`` blocks,
        you can simply run it again and pick up where you left off
        without redoing any work. The transaction blocks guarantee
Developer ID: LabAdvComp, Project: dish, Lines of code: 70, Source file: pipeline.py


Note: The logbook.FileHandler.applicationbound method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many programmers; copyright of the source code remains with the original authors. Consult each project's license before redistributing or using the code, and do not reproduce this compilation without permission.