

Python logger.debug Function Code Examples

This article collects typical usage examples of the debug function from the Python module weaver.logger. If you are wondering what the debug function does, how to call it, or what real-world usage looks like, the curated code examples below should help.


A total of 15 code examples of the debug function are shown below, sorted by popularity by default.
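
Before the examples, here is a minimal standalone sketch of the call pattern they all share: debug takes a category flag plus a message string, so output can be filtered by subsystem. This is a hedged illustration only; it assumes that the category flags (D_USER, D_NEST, and so on, as used in the examples below) are importable from weaver.logger, which may differ in your installed weaver version.

# Minimal sketch of the debug call pattern seen in the examples below.
# Assumption: the category flags live in weaver.logger alongside debug.
from weaver.logger import debug, D_USER

def greet(name):
    # Tag the message with a category so it can be filtered or silenced.
    debug(D_USER, 'Greeting {0}'.format(name))
    return 'Hello, {0}!'.format(name)

greet('workbench')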

Example 1: compile

    def compile(self):
        """ Compile Abstractions to generate tasks and output file lists. """
        # Compile Abstractions and SubNests to ensure they have generated
        # tasks.
        debug(D_NEST, 'Compiling Abstractions and SubNests for {0}'.format(self))
        for future, is_nest in self.futures:
            if is_nest:
                with future:
                    future.compile()
                future()
            else:
                future.compile()

        # Perform optimizations.
        debug(D_NEST, 'Optimizing tasks for {0}'.format(self))
        self._optimize_nested_abstractions()
        self._optimize_inline_tasks()

        # Emit stored tasks to workflow DAG using engine.
        debug(D_NEST, 'Emitting tasks for {0}'.format(self))
        for task in self.tasks:
            self.emit_task(*task)

        # Emit variables and exports
        debug(D_NEST, 'Emitting variables for {0}'.format(self))
        self.emit_variables()

        debug(D_NEST, 'Emitting exports for {0}'.format(self))
        self.emit_exports()
Author: Baguage, Project: cctools, Lines: 29, Source: nest.py

Example 2: __init__

    def __init__(self, dataset, *filters, **parameters):
        Dataset.__init__(self, cursor=dataset.c)
        self._dataset    = dataset
        self._filters    = filters
        self._parameters = parameters

        debug(D_DATASET, 'Created Query: {0}'.format(self.cache_path))
Author: Baguage, Project: cctools, Lines: 7, Source: dataset.py

Example 3: compile

    def compile(self):
        """ Compile script in the specified working directory. """
        # Save active script instance and set this one as active
        work_dir = self.output_directory

        # Add nest path and path to script to Python module path to allow
        # for importing modules outside of $PYTHONPATH
        sys.path.insert(0, os.path.abspath(os.path.dirname(work_dir)))

        # Load built-ins if specified on command line.  If built-ins are
        # not automatically loaded by the Script object, then the user must
        # load them manually in their Weaver scripts using the standard
        # Python import facilities.
        if self.import_builtins:
            self._import('abstraction', ABSTRACTIONS)
            self._import('dataset', DATASETS)
            self._import('function', FUNCTIONS)
            self._import('nest', NESTS)
            self._import('options', OPTIONS)
            self._import('stack', STACKS)

        # Execute nest
        with Nest(work_dir, wrapper=self.engine_wrapper) as nest:
            with self.options:
                try:
                    self.function(*self.arguments)
                    nest.compile()
                except Exception as e:
                    fatal(D_SCRIPT, 'Error compiling script: {0}'.format(e), print_traceback=True)

                if self.execute_dag:
                    debug(D_SCRIPT, 'Executing generated DAG {0} with {1}'.format(
                        nest.dag_path, nest.path))
                    nest.execute(self.engine_arguments, exit_on_failure=True)
Author: FAANG, Project: faang-methylation, Lines: 34, Source: function.py

Example 4: __init__

    def __init__(self, work_dir=None, dag_path=None, stash=None, barrier=None,
        wrapper=None, track_imports=True, track_exports=True):
        self.work_dir = work_dir or '.'
        self.tasks    = []
        self.parent   = CurrentNest()
        if self.parent:
            self.work_dir = os.path.join(self.parent.work_dir, self.work_dir)
        self.stash    = stash or Stash(root=os.path.join(self.work_dir, '_Stash'))

        if not os.path.exists(self.work_dir):
            make_directory(self.work_dir)

        Makeflow.__init__(self, wrapper=wrapper,
            track_imports=track_imports, track_exports=track_exports)

        self.dag_path = dag_path or os.path.join(self.work_dir, 'Makeflow')
        self.dag_file = open(self.dag_path, 'w')
        self.includes.add(self.dag_path)
        # TODO: fix work_dir so it can be translated by makeflow_link

        if barrier:
            self.includes.update(parse_input_list(barrier))

        # Since Abstractions and SubNests are not compiled immediately, these
        # objects must register with their parent Nest, which will compile them in
        # the order that they are registered to ensure proper semantics.
        self.futures  = []

        if self.parent:
            debug(D_NEST, 'Register child {0} with parent {1}'.format(
                self, self.parent))
            self.parent.futures.append((self, True))

        debug(D_NEST, 'Created {0}'.format(self))
Author: Baguage, Project: cctools, Lines: 34, Source: nest.py

Example 5: execute

    def execute(self, arguments=None, exit_on_failure=False):
        """ Execute DAG using Makeflow. """
        if self.dag_file is None:
            raise WeaverError(D_ENGINE, 'Cannot execute an empty DAG')

        # Ensure that DAG is written to disk.
        self.dag_file.flush()

        # Execute emitted DAG from the current Nest path.
        try:
            command_list = [self.path, os.path.relpath(self.dag_path, self.work_dir)]
            if self.wrapper:
                command_list.insert(0, self.wrapper)
            if arguments:
                command_list.extend(arguments.split())
            debug(D_ENGINE, 'Executing DAG {0} using {1} in {2}'.format(
                self.dag_path, self.path, self.work_dir))
            subprocess.check_call(command_list, cwd=self.work_dir)
        except subprocess.CalledProcessError as e:
            if exit_on_failure:
                log_func = fatal
            else:
                log_func = warn

            log_func(D_ENGINE, 'Failed to execute DAG {0} using {1}:\n{2}'.format(
                self.dag_path, self.path, e))
Author: Baguage, Project: cctools, Lines: 26, Source: engine.py

Example 6: _query

    def _query(self, filters, **parameters):
        debug(D_DATASET, 'Querying Dataset: {0}'.format(self.cache_path))
        try:
            limit = parameters['limit']
        except KeyError:
            limit = None

        # For each item in the Dataset, apply each filter; if all filters
        # succeed, then yield item.
        count = 0
        for o in iter(self):
            do_yield = True

            for f in filters:
                if not f(o):
                    do_yield = False
                    break

            if do_yield:
                count += 1
                yield o

            # Break out if we reach limit.
            if limit is not None and count == limit:
                break
Author: Baguage, Project: cctools, Lines: 25, Source: dataset.py

Example 7: _optimize_inline_tasks

    def _optimize_inline_tasks(self):
        """ Group tasks by abstraction and function and then break them into
        sub-groups and schedule the sub-groups as sub-DAGs.
        """
        if CurrentScript().inline_tasks <= 1:
            return

        debug(D_NEST, 'Inlining tasks for {0}'.format(self))

        # Group tasks into bins based on Function.
        task_dict = collections.defaultdict(list)
        for task in self.tasks:
            abstraction = task[0]
            function    = task[1]
            task_dict[(abstraction, function)].append(task)

        # For each set of tasks, split the set into small sub-groups; for each
        # sub-group, create a new InlineNest and schedule the tasks there.
        self.tasks = []
        for (abstraction, function), tasks in task_dict.items():
            inline_tasks = max(CurrentScript().inline_tasks, abstraction.group)
            if inline_tasks < len(tasks):
                for group in groups(tasks, inline_tasks):
                    with InlineNest() as inline_nest:
                        for task in group:
                            inline_nest.schedule(*task)
                        inline_nest.compile()
                    with abstraction.options:
                        inline_nest()
            else:
                for task in tasks:
                    self.tasks.append(task)
Author: Baguage, Project: cctools, Lines: 32, Source: nest.py
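
The docstring in Example 7 relies on a groups helper to split each task list into fixed-size chunks before scheduling them as sub-DAGs. The helper itself is not shown above; the following is a hypothetical sketch of such a chunker, and the real weaver.util.groups may differ in name, signature, or behavior.

import itertools

def groups(items, size):
    """ Yield consecutive chunks of at most `size` items from `items`. """
    it = iter(items)
    while True:
        chunk = list(itertools.islice(it, size))
        if not chunk:
            break
        yield chunk

# Example: list(groups(range(7), 3)) -> [[0, 1, 2], [3, 4, 5], [6]]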

Example 8: run_concurrent

def run_concurrent(func_name, tasks, *func_args):
    debug(D_USER, 'Generating Concurrent Pattern with Function {0}'.format(func_name))

    tasks     = int(tasks)
    arguments = map(int, func_args)
    function  = make_function(func_name, *arguments)

    Iterate(function, tasks, '{NUMBER}.output')
Author: bbockelm, Project: cctools, Lines: 8, Source: workbench.py

Example 9: connect

    def connect(self):
        debug(D_DATASET, 'Connecting to {0}'.format(self))

        self.db_conn = MySQLConnect(
            host        = self.db_host,
            db          = self.db_name,
            user        = self.db_user,
            passwd      = self.db_pass,
            cursorclass = MySQLSSDictCursor)
Author: Baguage, Project: cctools, Lines: 9, Source: dataset.py

Example 10: __init__

    def __init__(self, executable, cmd_format=None, find_dirs=None,
        environment=None):
        self.cmd_format  = cmd_format or Function.CMD_FORMAT
        self.path        = find_executable(executable, find_dirs)
        self.environment = environment or dict()
        self.includes    = set([self.path])

        debug(D_FUNCTION, 'Created Function {0}({1}, {2})'.format(
            type_str(self), self.path, self.cmd_format))
Author: FAANG, Project: faang-methylation, Lines: 9, Source: function.py

Example 11: _generate

    def _generate(self):
        with self:
            debug(D_ABSTRACTION, 'Generating Abstraction {0}'.format(self))

            function = parse_function(self.function)
            includes = parse_input_list(self.includes)
            
            # First format the inputs and figure out the number of iterations to perform
            group_size = 0
            inputs = []
            if isinstance(self.inputs, list):
                # If inputs is a matrix 
                if isinstance(self.inputs[0], list):
                    for i, ingroup in enumerate(self.inputs):
                        inputs.append(parse_input_list(ingroup))
                        if group_size == 0: group_size = len(ingroup)
                        if len(ingroup) != group_size:
                            raise IOError("Iteration group size are different between inputs!")
                # If inputs is a simple list
                else:
                    group_size = len(self.inputs)
                    inputs = parse_input_list(self.inputs)
            # If inputs is a string
            else:
                group_size = 1
                inputs = parse_input_list(self.inputs)            
            
            for iter in range(group_size):
                
                iteration_inputs = []
                if isinstance(inputs[0], list):
                    for i, input in enumerate(inputs):
                        iteration_inputs.append(input[iter])
                else:
                    iteration_inputs.append(inputs[iter])
                    
                input_pattern = self._longestCommonSubstr(list(map(os.path.basename, list(map(str, iteration_inputs)))))
                
                iteration_outputs = []
                if isinstance(self.outputs, list):
                    # If outputs is a matrix
                    if isinstance(self.outputs[0], list):
                        for i, outgroup in enumerate(self.outputs):
                            iteration_outputs.append(outgroup[iter])
                    # If outputs is a list of pattern (template) strings
                    elif isinstance(self.outputs[0], str) and '{' in self.outputs[0]:
                        for motif in self.outputs:
                            iteration_outputs.extend(parse_output_list(motif, input_pattern))
                    # If outputs is a plain list of strings (one per iteration)
                    elif isinstance(self.outputs[0], str):
                        iteration_outputs = parse_output_list(self.outputs[iter], input_pattern)
                # If outputs is a single string
                else:
                    iteration_outputs = parse_output_list(self.outputs, input_pattern)
                
                with Options(local=self.options.local):
                    yield function(iteration_inputs, iteration_outputs, None, includes)
Author: FAANG, Project: faang-methylation, Lines: 57, Source: abstraction.py

Example 12: parse_output_list

def parse_output_list(output_list=None, input_list=None):
    """ Return an :func:`~weaver.util.iterable` object of output files.

    If `output_list` is ``None``, then return ``[]``.  If `output_list` is a
    string template, then use it to generate a list of :class:`File`
    objects.  If `output_list` is already an :func:`~weaver.util.iterable`,
    then map :class:`File` to `output_list` and return it.

    This means that `output_list` must be one of the following:

    1. ``None`` to leave it to the caller to generate an output file object.
    2. A string object to be used as a template.
    3. An :func:`~weaver.util.iterable` object (ex. list, iterator, etc.).

    If `output_list` is a string template, then it may have the following
    fields:

    - `{fullpath}`, `{FULL}`         -- Full input file path.
    - `{basename}`, `{BASE}`         -- Base input file name.
    - `{fullpath_woext}`, `{FULL_WOEXT}` -- Full input file path without extension.
    - `{basename_woext}`, `{BASE_WOEXT}` -- Base input file name without extension.

    """
    debug(D_DATA, 'Parsing output list')
    if output_list is None:
        return []

    if isinstance(output_list, str) or isinstance(output_list, File):
        # If input list is empty or output list is not a format string, then
        # return list of single output file.
        # TODO: support single {stash}
        if not input_list or not '{' in str(output_list):
            return [MakeFile(output_list)]

        nest = CurrentNest()
        return [MakeFile(str(output_list).format(
                    fullpath       = input,
                    FULL           = input,
                    i              = '{0:05X}'.format(i),
                    NUMBER         = '{0:05X}'.format(i),
                    stash          = next(nest.stash) if '{stash}' in output_list else '',
                    fullpath_woext = os.path.splitext(input)[0],
                    FULL_WOEXT     = os.path.splitext(input)[0],
                    basename       = os.path.basename(input),
                    BASE           = os.path.basename(input),
                    basename_woext = os.path.splitext(os.path.basename(input))[0] if os.path.splitext(os.path.basename(input))[1] != ".gz" else os.path.splitext(os.path.splitext(os.path.basename(input))[0])[0],
                    BASE_WOEXT     = os.path.splitext(os.path.basename(input))[0] if os.path.splitext(os.path.basename(input))[1] != ".gz" else os.path.splitext(os.path.splitext(os.path.basename(input))[0])[0]))
                for i, input in enumerate(parse_string_list(input_list))]

    if iterable(output_list):
        return [MakeFile(o) for o in parse_object_list(output_list)]

    raise WeaverError(D_DATA,
        'Could not parse output argument: {0}'.format(output_list))

# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
Author: FAANG, Project: faang-methylation, Lines: 56, Source: data.py
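
The docstring in Example 12 lists the template fields an output pattern may use. The following standalone sketch (plain str.format, no Weaver dependency) mirrors the keyword arguments built in the code above and shows how a pattern such as '{BASE_WOEXT}.out' expands per input file; the stash field and the .gz special case are deliberately omitted.

import os

def expand_pattern(pattern, inputs):
    # Simplified mirror of the format arguments used by parse_output_list above.
    results = []
    for i, path in enumerate(inputs):
        results.append(pattern.format(
            fullpath       = path,
            FULL           = path,
            i              = '{0:05X}'.format(i),
            NUMBER         = '{0:05X}'.format(i),
            fullpath_woext = os.path.splitext(path)[0],
            FULL_WOEXT     = os.path.splitext(path)[0],
            basename       = os.path.basename(path),
            BASE           = os.path.basename(path),
            basename_woext = os.path.splitext(os.path.basename(path))[0],
            BASE_WOEXT     = os.path.splitext(os.path.basename(path))[0]))
    return results

print(expand_pattern('{BASE_WOEXT}.out', ['raw/a.txt', 'raw/b.txt']))
# -> ['a.out', 'b.out']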

Example 13: run_chained

def run_chained(func_name, tasks, *func_args):
    debug(D_USER, 'Generating Chained Pattern with Function {0}'.format(func_name))

    tasks     = int(tasks)
    arguments = map(int, func_args)
    function  = make_function(func_name, *arguments)

    output = None
    for task in range(tasks):
        output = function(output, '{0:04d}.output'.format(task))
Author: bbockelm, Project: cctools, Lines: 10, Source: workbench.py

Example 14: run_fanout

def run_fanout(func_name, tasks, bytes, *func_args):
    debug(D_USER, 'Generating FanOut Pattern with Function {0}'.format(func_name))

    tasks     = int(tasks)
    bytes     = int(bytes)
    input     = generate_input_file(bytes, 'fanout.input')
    arguments = map(int, func_args)
    function  = make_function(func_name, *arguments)

    Iterate(function, tasks, '{NUMBER}.output', includes=input)
Author: bbockelm, Project: cctools, Lines: 10, Source: workbench.py

Example 15: _generate

    def _generate(self):
        with self:
            debug(D_ABSTRACTION, 'Generating Abstraction {0}'.format(self))

            function = parse_function(self.function)
            inputs_a = parse_input_list(self.inputs_a)
            inputs_b = parse_input_list(self.inputs_b)
            includes = parse_input_list(self.includes)

            # If native is enabled, then use allpairs_master, otherwise
            # generate tasks as part of the DAG.
            #
            # Note: parse_output_list flattens inputs, so we need to manually
            # translate pairs into a single string.
            if self.native:
                # Store inputs A and B lists as required by allpairs_master
                inputs_a_file = next(self.nest.stash)
                with open(inputs_a_file, 'w') as fs:
                    for input_file in map(str, inputs_a):
                        fs.write(input_file + '\n')

                inputs_b_file = next(self.nest.stash)
                with open(inputs_b_file, 'w') as fs:
                    for input_file in map(str, inputs_b):
                        fs.write(input_file + '\n')

                inputs  = [inputs_a_file, inputs_b_file]
                outputs = parse_output_list(self.outputs,
                            map(lambda p: '_'.join(
                                map(lambda s: os.path.basename(str(s)), p)),inputs))

                # Schedule allpairs_master
                with Options(local=True, collect=[i] if self.collect else None):
                    allpairs_master = parse_function(
                        'allpairs_master -p {0} {{IN}} {{ARG}} > {{OUT}}'.format(self.port))
                    yield allpairs_master(inputs, outputs, function.path, includes + [function.path])
            else:
                inputs  = list(itertools.product(inputs_a, inputs_b))
                outputs = parse_output_list(self.outputs,
                            map(lambda p: '_'.join(
                                map(lambda s: os.path.basename(str(s)), p)),inputs))

                # We use a wrapper script to collect the output of the
                # comparison and put in {INPUT_A} {INPUT_B} {OUTPUT} format, as
                # used by allpairs_master.
                for i, o in zip(inputs, outputs):
                    tmp_output = next(self.nest.stash)

                    with Options(local=self.options.local, collect=[i] if self.collect else None):
                        output = function(i, tmp_output, None, includes)

                    # Wrapper script should run locally and we should always
                    # try to collect the temporary intermediate output file.
                    with Options(local=True, collect=[tmp_output]):
                        yield AllPairsCompareWrapper(output, o, map(lambda p: os.path.basename(str(p)), i), None)
Author: Baguage, Project: cctools, Lines: 55, Source: abstraction.py


Note: The weaver.logger.debug examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.