

Python overrides.overrides Method Code Examples

This article collects typical usage examples of the overrides.overrides method in Python. If you are wondering what overrides.overrides does, how to use it, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the overrides module, where this method is defined.


The sections below present 15 code examples of the overrides.overrides method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
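
Before diving into the examples, here is a minimal orientation sketch of what the decorator does: applied to a subclass method, it asserts at class-definition time that a method of the same name exists in some base class. The class names below are illustrative only, not taken from the projects that follow.

from overrides import overrides


class Animal:
    def make_sound(self) -> str:
        """Return the sound this animal makes."""
        return "..."


class Dog(Animal):
    @overrides
    def make_sound(self) -> str:
        # Checked when the class body is executed: Animal must define
        # make_sound. A misspelled name (e.g. make_sounds) would raise an
        # error at import time instead of failing silently at runtime.
        return "woof"


print(Dog().make_sound())  # -> woof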

Example 1: load_overrides

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def load_overrides(path, custom=False):
    """ Overrides loader for Python files

    Note:
        Overrides must be in an ``overrides`` dictionary.

    Args:
        path    (str): Path to Python file to be loaded
        custom (bool): Boolean flag to specify if this is a custom overrides file
    """
    try:
        if custom:
            sys.path.append(path)
            from overrides import overrides
            log.debug("Imported overrides: %s", repr(overrides))
            for provider in overrides:
                update_definitions(provider, overrides[provider])
            log.info("Successfully loaded overrides from %s", os.path.join(path, "overrides.py"))
    except Exception as e:
        import traceback
        log.error("Failed importing %soverrides: %s", "custom " if custom else "", repr(e))
        # map() is lazy in Python 3, so iterate explicitly to actually log the traceback.
        for line in traceback.format_exc().split("\n"):
            log.error(line)
Author: elgatito, Project: script.elementum.burst, Lines: 24, Source: definitions.py
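
The loader above only documents that a custom overrides.py must expose a top-level ``overrides`` dictionary keyed by provider name. A hypothetical sketch of such a file follows; the provider key and fields are made-up placeholders, not the actual script.elementum.burst definition schema.

# Hypothetical custom overrides.py placed in the directory passed to load_overrides().
# Only the top-level ``overrides`` dict is required by the loader; the key and
# fields below are placeholders, not the real definition schema.
overrides = {
    "example_provider": {
        "name": "Example Provider",
        "enabled": False,
    },
}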

Example 2: overrides

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def overrides(method: _WrappedMethod) -> _WrappedMethod:
    """Decorator to indicate that the decorated method overrides a method in
    superclass.
    The decorator code is executed while loading class. Using this method
    should have minimal runtime performance implications.

    This is based on my idea about how to do this and fwc:s highly improved
    algorithm for the implementation.
    fwc:s algorithm : http://stackoverflow.com/a/14631397/308189
    my answer : http://stackoverflow.com/a/8313042/308189

    How to use:
    from overrides import overrides

    class SuperClass(object):
        def method(self):
            return 2

    class SubClass(SuperClass):

        @overrides
        def method(self):
            return 1

    :raises  AssertionError if no match in super classes for the method name
    :return  method with possibly added (if the method doesn't have one)
        docstring from super class
    """
    setattr(method, "__override__", True)
    for super_class in _get_base_classes(sys._getframe(2), method.__globals__):
        if hasattr(super_class, method.__name__):
            super_method = getattr(super_class, method.__name__)
            if hasattr(super_method, "__finalized__"):
                finalized = getattr(super_method, "__finalized__")
                if finalized:
                    raise AssertionError('Method "%s" is finalized' % method.__name__)
            if not method.__doc__:
                method.__doc__ = super_method.__doc__
            return method
    raise AssertionError('No super class method found for "%s"' % method.__name__) 
Author: mkorpela, Project: overrides, Lines: 42, Source: overrides.py
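
A small usage sketch of the implementation shown above: the decorator marks the method with __override__ and copies the base-class docstring when the override has none. Class names are illustrative, and newer releases of the overrides package may differ in such details.

from overrides import overrides


class Base:
    def run(self):
        """Run one unit of work."""
        return 0


class Worker(Base):
    @overrides
    def run(self):
        # No docstring here: the decorator copies Base.run.__doc__ onto this
        # method and sets __override__ = True on it.
        return 1


print(Worker.run.__doc__)       # -> Run one unit of work.
print(Worker.run.__override__)  # -> True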

Example 3: find_learning_rate_from_args

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def find_learning_rate_from_args(args: argparse.Namespace) -> None:
    """
    Start the learning rate finder for the given args.
    """
    params = Params.from_file(args.param_path, args.overrides)
    find_learning_rate_model(
        params,
        args.serialization_dir,
        start_lr=args.start_lr,
        end_lr=args.end_lr,
        num_batches=args.num_batches,
        linear_steps=args.linear,
        stopping_factor=args.stopping_factor,
        force=args.force,
    ) 
Author: allenai, Project: allennlp, Lines: 17, Source: find_learning_rate.py

Example 4: _get_predictor

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def _get_predictor(args: argparse.Namespace) -> Predictor:
    check_for_gpu(args.cuda_device)
    archive = load_archive(
        args.archive_file,
        weights_file=args.weights_file,
        cuda_device=args.cuda_device,
        overrides=args.overrides,
    )

    return Predictor.from_archive(
        archive, args.predictor, dataset_reader_to_load=args.dataset_reader_choice
    ) 
Author: allenai, Project: allennlp, Lines: 14, Source: predict.py

Example 5: from_file

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def from_file(
        cls, params_file: str, params_overrides: str = "", ext_vars: dict = None
    ) -> "Params":
        """
        Load a `Params` object from a configuration file.

        # Parameters

        params_file: `str`

            The path to the configuration file to load.

        params_overrides: `str`, optional

            A JSON/Jsonnet string of overrides that is applied on top of the values from the file.
            e.g. {"model.embedding_dim": 10}

        ext_vars: `dict`, optional

            Our config files are Jsonnet, which allows specifying external variables
            for later substitution. Typically we substitute these using environment
            variables; however, you can also specify them here, in which case they
            take priority over environment variables.
            e.g. {"HOME_DIR": "/Users/allennlp/home"}
        """
        if ext_vars is None:
            ext_vars = {}

        # redirect to cache, if necessary
        params_file = cached_path(params_file)
        ext_vars = {**_environment_variables(), **ext_vars}

        file_dict = json.loads(evaluate_file(params_file, ext_vars=ext_vars))

        overrides_dict = parse_overrides(params_overrides)
        param_dict = with_fallback(preferred=overrides_dict, fallback=file_dict)

        return cls(param_dict) 
Author: allenai, Project: allennlp, Lines: 40, Source: params.py
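
A hedged usage sketch of from_file: the config path is a placeholder, and the overrides string follows the format shown in the docstring above, taking precedence over values from the file.

from allennlp.common.params import Params

# "experiment.jsonnet" is a placeholder path to an AllenNLP config file.
# The params_overrides string wins over values from the file, and ext_vars
# supplies external Jsonnet variables.
params = Params.from_file(
    "experiment.jsonnet",
    params_overrides='{"model.embedding_dim": 10}',
    ext_vars={"HOME_DIR": "/tmp"},
)
print(params.as_dict())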

Example 6: overrides

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def overrides(method):
    """Decorator to indicate that the decorated method overrides a method in
    superclass.

    The decorator code is executed while loading class. Using this method should
    have minimal runtime performance implications.

    This is based on my idea about how to do this and fwc:s highly improved
    algorithm for the implementation
    fwc:s algorithm : http://stackoverflow.com/a/14631397/308189
    my answer : http://stackoverflow.com/a/8313042/308189

    How to use:
    from overrides import overrides

    class SuperClass:

        def method(self):
            return 2

    class SubClass(SuperClass):

        @overrides
        def method(self):
            return 1

    :raises  AssertionError if no match in super classes for the method name
    :return  method with possibly added (if the method doesn't have one)
     docstring from super class
    """
    # nop for now due to py3 compatibility
    return method
    # for super_class in _get_base_classes(sys._getframe(2), method.__globals__):
    #     if hasattr(super_class, method.__name__):
    #         if not method.__doc__:
    #             method.__doc__ = getattr(super_class, method.__name__).__doc__
    #         return method
    # raise AssertionError(
    #     'No super class method found for "%s"' % method.__name__) 
Author: rlworkgroup, Project: gym-sawyer, Lines: 41, Source: overrides.py

Example 7: make_output_human_readable

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def make_output_human_readable(
        self, output_dict: Dict[str, torch.Tensor]
    ) -> Dict[str, torch.Tensor]:
        """
        This method overrides ``Model.make_output_human_readable`` (formerly ``Model.decode``), which gets called after ``Model.forward``, at test
        time, to finalize predictions.  This is (confusingly) a separate notion from the "decoder"
        in "encoder/decoder", where that decoder logic lives in ``TransitionFunction``.

        This method trims the output predictions to the first end symbol, replaces indices with
        corresponding tokens, and adds a field called ``predicted_actions`` to the ``output_dict``.
        """
        action_mapping = output_dict["action_mapping"]
        best_actions = output_dict["best_action_sequence"]
        debug_infos = output_dict["debug_info"]
        batch_action_info = []
        for batch_index, (predicted_actions, debug_info) in enumerate(
            zip(best_actions, debug_infos)
        ):
            instance_action_info = []
            for predicted_action, action_debug_info in zip(predicted_actions, debug_info):
                action_info = {}
                action_info["predicted_action"] = predicted_action
                considered_actions = action_debug_info["considered_actions"]
                probabilities = action_debug_info["probabilities"]
                actions = []
                for action, probability in zip(considered_actions, probabilities):
                    if action != -1:
                        actions.append((action_mapping[(batch_index, action)], probability))
                actions.sort()
                considered_actions, probabilities = zip(*actions)
                action_info["considered_actions"] = considered_actions
                action_info["action_probabilities"] = probabilities
                action_info["utterance_attention"] = action_debug_info.get("question_attention", [])
                instance_action_info.append(action_info)
            batch_action_info.append(instance_action_info)
        output_dict["predicted_actions"] = batch_action_info
        return output_dict 
Author: allenai, Project: allennlp-semparse, Lines: 39, Source: atis_semantic_parser.py

Example 8: make_output_human_readable

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def make_output_human_readable(
        self, output_dict: Dict[str, torch.Tensor]
    ) -> Dict[str, torch.Tensor]:
        """
        This method overrides ``Model.make_output_human_readable`` (formerly ``Model.decode``), which gets called after ``Model.forward``, at test
        time, to finalize predictions.  This is (confusingly) a separate notion from the "decoder"
        in "encoder/decoder", where that decoder logic lives in the ``TransitionFunction``.

        This method trims the output predictions to the first end symbol, replaces indices with
        corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.
        """
        action_mapping = output_dict["action_mapping"]
        best_actions = output_dict["best_action_sequence"]
        debug_infos = output_dict["debug_info"]
        batch_action_info = []
        for batch_index, (predicted_actions, debug_info) in enumerate(
            zip(best_actions, debug_infos)
        ):
            instance_action_info = []
            for predicted_action, action_debug_info in zip(predicted_actions, debug_info):
                action_info = {}
                action_info["predicted_action"] = predicted_action
                considered_actions = action_debug_info["considered_actions"]
                probabilities = action_debug_info["probabilities"]
                actions = []
                for action, probability in zip(considered_actions, probabilities):
                    if action != -1:
                        actions.append((action_mapping[(batch_index, action)], probability))
                actions.sort()
                considered_actions, probabilities = zip(*actions)
                action_info["considered_actions"] = considered_actions
                action_info["action_probabilities"] = probabilities
                action_info["question_attention"] = action_debug_info.get("question_attention", [])
                instance_action_info.append(action_info)
            batch_action_info.append(instance_action_info)
        output_dict["predicted_actions"] = batch_action_info
        return output_dict 
Author: allenai, Project: allennlp-semparse, Lines: 39, Source: wikitables_semantic_parser.py

Example 9: make_output_human_readable

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def make_output_human_readable(
        self, output_dict: Dict[str, torch.Tensor]
    ) -> Dict[str, torch.Tensor]:
        """
        This method overrides ``Model.make_output_human_readable`` (formerly ``Model.decode``), which gets called after ``Model.forward``, at test
        time, to finalize predictions.  This is (confusingly) a separate notion from the "decoder"
        in "encoder/decoder", where that decoder logic lives in ``TransitionFunction``.

        This method trims the output predictions to the first end symbol, replaces indices with
        corresponding tokens, and adds a field called ``predicted_actions`` to the ``output_dict``.
        """
        action_mapping = output_dict["action_mapping"]
        best_actions = output_dict["best_action_sequence"]
        debug_infos = output_dict["debug_info"]
        batch_action_info = []
        for batch_index, (predicted_actions, debug_info) in enumerate(
            zip(best_actions, debug_infos)
        ):
            instance_action_info = []
            for predicted_action, action_debug_info in zip(predicted_actions, debug_info):
                action_info = {}
                action_info["predicted_action"] = predicted_action
                considered_actions = action_debug_info["considered_actions"]
                probabilities = action_debug_info["probabilities"]
                actions = []
                for action, probability in zip(considered_actions, probabilities):
                    if action != -1:
                        actions.append((action_mapping[batch_index][action], probability))
                actions.sort()
                considered_actions, probabilities = zip(*actions)
                action_info["considered_actions"] = considered_actions
                action_info["action_probabilities"] = probabilities
                action_info["utterance_attention"] = action_debug_info.get("question_attention", [])
                instance_action_info.append(action_info)
            batch_action_info.append(instance_action_info)
        output_dict["predicted_actions"] = batch_action_info
        return output_dict 
Author: allenai, Project: allennlp-semparse, Lines: 39, Source: text2sql_parser.py

Example 10: overrides

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def overrides(method):
    """Decorator to indicate that the decorated method overrides a method in superclass.
    The decorator code is executed while loading class. Using this method should have minimal runtime performance
    implications.
    This is based on my idea about how to do this and fwc:s highly improved algorithm for the implementation
    fwc:s algorithm : http://stackoverflow.com/a/14631397/308189
    my answer : http://stackoverflow.com/a/8313042/308189
    How to use:
    from overrides import overrides
    class SuperClass(object):
        def method(self):
            return 2
    class SubClass(SuperClass):
        @overrides
        def method(self):
            return 1
    :raises  AssertionError if no match in super classes for the method name
    :return  method with possibly added (if the method doesn't have one) docstring from super class
    """
    # nop for now due to py3 compatibility
    return method
    # for super_class in _get_base_classes(sys._getframe(2), method.__globals__):
    #     if hasattr(super_class, method.__name__):
    #         if not method.__doc__:
    #             method.__doc__ = getattr(super_class, method.__name__).__doc__
    #         return method
    # raise AssertionError('No super class method found for "%s"' % method.__name__) 
Author: ml3705454, Project: mapr2, Lines: 29, Source: overrides.py

Example 11: add_subparser

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:

        description = """Find a learning rate range where loss decreases quickly
                         for the specified model and dataset."""
        subparser = parser.add_parser(
            self.name, description=description, help="Find a learning rate range."
        )

        subparser.add_argument(
            "param_path", type=str, help="path to parameter file describing the model to be trained"
        )
        subparser.add_argument(
            "-s",
            "--serialization-dir",
            required=True,
            type=str,
            help="The directory in which to save results.",
        )
        subparser.add_argument(
            "-o",
            "--overrides",
            type=str,
            default="",
            help="a JSON structure used to override the experiment configuration",
        )
        subparser.add_argument(
            "--start-lr", type=float, default=1e-5, help="learning rate to start the search"
        )
        subparser.add_argument(
            "--end-lr", type=float, default=10, help="learning rate up to which search is done"
        )
        subparser.add_argument(
            "--num-batches",
            type=int,
            default=100,
            help="number of mini-batches to run learning rate finder",
        )
        subparser.add_argument(
            "--stopping-factor",
            type=float,
            default=None,
            help="stop the search when the current loss exceeds the best loss recorded by "
            "multiple of stopping factor",
        )
        subparser.add_argument(
            "--linear",
            action="store_true",
            help="increase learning rate linearly instead of exponential increase",
        )
        subparser.add_argument(
            "-f",
            "--force",
            action="store_true",
            required=False,
            help="overwrite the output directory if it exists",
        )

        subparser.set_defaults(func=find_learning_rate_from_args)

        return subparser 
Author: allenai, Project: allennlp, Lines: 62, Source: find_learning_rate.py

Example 12: evaluate_from_args

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def evaluate_from_args(args: argparse.Namespace) -> Dict[str, Any]:
    # Disable some of the more verbose logging statements
    logging.getLogger("allennlp.common.params").disabled = True
    logging.getLogger("allennlp.nn.initializers").disabled = True
    logging.getLogger("allennlp.modules.token_embedders.embedding").setLevel(logging.INFO)

    # Load from archive
    archive = load_archive(
        args.archive_file,
        weights_file=args.weights_file,
        cuda_device=args.cuda_device,
        overrides=args.overrides,
    )
    config = archive.config
    prepare_environment(config)
    model = archive.model
    model.eval()

    # Load the evaluation data

    # Try to use the validation dataset reader if there is one - otherwise fall back
    # to the default dataset_reader used for both training and validation.
    validation_dataset_reader_params = config.pop("validation_dataset_reader", None)
    if validation_dataset_reader_params is not None:
        dataset_reader = DatasetReader.from_params(validation_dataset_reader_params)
    else:
        dataset_reader = DatasetReader.from_params(config.pop("dataset_reader"))
    evaluation_data_path = args.input_file
    logger.info("Reading evaluation data from %s", evaluation_data_path)
    instances = dataset_reader.read(evaluation_data_path)

    embedding_sources = (
        json.loads(args.embedding_sources_mapping) if args.embedding_sources_mapping else {}
    )

    if args.extend_vocab:
        logger.info("Vocabulary is being extended with test instances.")
        model.vocab.extend_from_instances(instances=instances)
        model.extend_embedder_vocab(embedding_sources)

    instances.index_with(model.vocab)
    data_loader_params = config.pop("validation_data_loader", None)
    if data_loader_params is None:
        data_loader_params = config.pop("data_loader")
    if args.batch_size:
        data_loader_params["batch_size"] = args.batch_size
    data_loader = DataLoader.from_params(dataset=instances, params=data_loader_params)

    metrics = evaluate(model, data_loader, args.cuda_device, args.batch_weight_key)

    logger.info("Finished evaluating.")

    dump_metrics(args.output_file, metrics, log=True)

    return metrics 
Author: allenai, Project: allennlp, Lines: 57, Source: evaluate.py

Example 13: add_subparser

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
        description = """Train the specified model on the specified dataset."""
        subparser = parser.add_parser(self.name, description=description, help="Train a model.")

        subparser.add_argument(
            "param_path", type=str, help="path to parameter file describing the model to be trained"
        )

        subparser.add_argument(
            "-s",
            "--serialization-dir",
            required=True,
            type=str,
            help="directory in which to save the model and its logs",
        )

        subparser.add_argument(
            "-r",
            "--recover",
            action="store_true",
            default=False,
            help="recover training from the state in serialization_dir",
        )

        subparser.add_argument(
            "-f",
            "--force",
            action="store_true",
            required=False,
            help="overwrite the output directory if it exists",
        )

        subparser.add_argument(
            "-o",
            "--overrides",
            type=str,
            default="",
            help="a JSON structure used to override the experiment configuration",
        )

        subparser.add_argument(
            "--node-rank", type=int, default=0, help="rank of this node in the distributed setup"
        )

        subparser.add_argument(
            "--dry-run",
            action="store_true",
            help="do not train a model, but create a vocabulary, show dataset statistics and "
            "other training information",
        )

        subparser.set_defaults(func=train_model_from_args)

        return subparser 
Author: allenai, Project: allennlp, Lines: 56, Source: train.py

Example 14: train_model_from_file

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def train_model_from_file(
    parameter_filename: str,
    serialization_dir: str,
    overrides: str = "",
    recover: bool = False,
    force: bool = False,
    node_rank: int = 0,
    include_package: List[str] = None,
    dry_run: bool = False,
) -> Optional[Model]:
    """
    A wrapper around [`train_model`](#train_model) which loads the params from a file.

    # Parameters

    parameter_filename : `str`
        A json parameter file specifying an AllenNLP experiment.
    serialization_dir : `str`
        The directory in which to save results and logs. We just pass this along to
        [`train_model`](#train_model).
    overrides : `str`
        A JSON string that we will use to override values in the input parameter file.
    recover : `bool`, optional (default=`False`)
        If `True`, we will try to recover a training run from an existing serialization
        directory.  This is only intended for use when something actually crashed during the middle
        of a run.  For continuing training a model on new data, see `Model.from_archive`.
    force : `bool`, optional (default=`False`)
        If `True`, we will overwrite the serialization directory if it already exists.
    node_rank : `int`, optional
        Rank of the current node in distributed training
    include_package : `str`, optional
        In distributed mode, extra packages mentioned will be imported in trainer workers.
    dry_run : `bool`, optional (default=`False`)
        Do not train a model, but create a vocabulary, show dataset statistics and other training
        information.

    # Returns

    best_model : `Optional[Model]`
        The model with the best epoch weights or `None` if in dry run.
    """
    # Load the experiment config from a file and pass it to `train_model`.
    params = Params.from_file(parameter_filename, overrides)
    return train_model(
        params=params,
        serialization_dir=serialization_dir,
        recover=recover,
        force=force,
        node_rank=node_rank,
        include_package=include_package,
        dry_run=dry_run,
    ) 
Author: allenai, Project: allennlp, Lines: 54, Source: train.py
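
A hedged usage sketch of this wrapper: the paths are placeholders, and the overrides string uses the same JSON format as the -o/--overrides flag in Example 13, here shortening training for a quick smoke test.

from allennlp.commands.train import train_model_from_file

# Placeholder paths; the overrides string trims training to a single epoch.
model = train_model_from_file(
    parameter_filename="experiment.jsonnet",
    serialization_dir="/tmp/allennlp_run",
    overrides='{"trainer.num_epochs": 1}',
    force=True,      # overwrite /tmp/allennlp_run if it already exists
    dry_run=False,   # set True to only build the vocabulary and report statistics
)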

Example 15: make_output_human_readable

# Required import: import overrides [as alias]
# Or: from overrides import overrides [as alias]
def make_output_human_readable(
        self, output_dict: Dict[str, torch.Tensor]
    ) -> Dict[str, torch.Tensor]:
        """
        This method overrides ``Model.make_output_human_readable`` (formerly ``Model.decode``), which gets called after ``Model.forward``, at test
        time, to finalize predictions. We only transform the action string sequences into logical
        forms here.
        """
        best_action_strings = output_dict["best_action_strings"]
        # Instantiating an empty world for getting logical forms.
        world = NlvrLanguage(set())
        logical_forms = []
        for instance_action_sequences in best_action_strings:
            instance_logical_forms = []
            for action_strings in instance_action_sequences:
                if action_strings:
                    instance_logical_forms.append(
                        world.action_sequence_to_logical_form(action_strings)
                    )
                else:
                    instance_logical_forms.append("")
            logical_forms.append(instance_logical_forms)

        action_mapping = output_dict["action_mapping"]
        best_actions = output_dict["best_action_strings"]
        debug_infos = output_dict["debug_info"]
        batch_action_info = []
        for batch_index, (predicted_actions, debug_info) in enumerate(
            zip(best_actions, debug_infos)
        ):
            instance_action_info = []
            for predicted_action, action_debug_info in zip(predicted_actions[0], debug_info):
                action_info = {}
                action_info["predicted_action"] = predicted_action
                considered_actions = action_debug_info["considered_actions"]
                probabilities = action_debug_info["probabilities"]
                actions = []
                for action, probability in zip(considered_actions, probabilities):
                    if action != -1:
                        actions.append((action_mapping[(batch_index, action)], probability))
                actions.sort()
                considered_actions, probabilities = zip(*actions)
                action_info["considered_actions"] = considered_actions
                action_info["action_probabilities"] = probabilities
                action_info["question_attention"] = action_debug_info.get("question_attention", [])
                instance_action_info.append(action_info)
            batch_action_info.append(instance_action_info)
        output_dict["predicted_actions"] = batch_action_info
        output_dict["logical_form"] = logical_forms
        return output_dict 
Author: allenai, Project: allennlp-semparse, Lines: 52, Source: nlvr_semantic_parser.py


Note: The overrides.overrides examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.