当前位置: 首页>>代码示例>>Python>>正文


Python models.Model方法代码示例

本文整理汇总了Python中allennlp.models.Model方法的典型用法代码示例。如果您正苦于以下问题:Python models.Model方法的具体用法?Python models.Model怎么用?Python models.Model使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在allennlp.models的用法示例。


在下文中一共展示了models.Model方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: __init__

# 需要导入模块: from allennlp import models [as 别名]
# 或者: from allennlp.models import Model [as 别名]
def __init__(self, serialization_dir: str, model: Model) -> None:
        """
        A no-op trainer that exists only so model archives can be produced for
        models which require no actual training (for instance, a majority-class
        baseline).

        In a typical AllenNLP configuration file neither the `serialization_dir`
        nor the `model` argument would need an entry.
        """
        # Nothing is trained, so there is no reason to request a GPU.
        super().__init__(serialization_dir, cuda_device=-1)
        self.model = model
开发者ID:allenai,项目名称:allennlp,代码行数:13,代码来源:no_op_trainer.py

示例2: __init__

# 需要导入模块: from allennlp import models [as 别名]
# 或者: from allennlp.models import Model [as 别名]
def __init__(self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm") -> None:
        """
        Predictor constructor that sets up a spaCy tokenizer.

        `language` selects the spaCy model used for tokenization; POS tagging
        is enabled on the tokenizer.
        """
        super().__init__(model, dataset_reader)
        # Keep a spaCy-backed tokenizer with POS tags turned on.
        self._tokenizer = SpacyTokenizer(language=language, pos_tags=True)
开发者ID:allenai,项目名称:allennlp,代码行数:7,代码来源:sentence_tagger.py

示例3: __init__

# 需要导入模块: from allennlp import models [as 别名]
# 或者: from allennlp.models import Model [as 别名]
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
        """Construct the predictor; all setup is delegated to the base Predictor."""
        super().__init__(model, dataset_reader)
开发者ID:Hyperparticle,项目名称:udify,代码行数:4,代码来源:predictor.py

示例4: __init__

# 需要导入模块: from allennlp import models [as 别名]
# 或者: from allennlp.models import Model [as 别名]
def __init__(self,
                 model: Model,
                 dataset_reader: DatasetReader,
                 output_conllu: bool = False) -> None:
        """Text-level predictor that wraps a UdifyPredictor.

        output_conllu: when True, presumably output is rendered in CoNLL-U
        format — confirm against the dump/predict methods.
        """
        super().__init__(model, dataset_reader)
        # Wrap the reader set by super().__init__ so raw text can be read.
        # NOTE(review): assumes the base class assigned self._dataset_reader
        # before this line — confirm against the base Predictor.
        self._dataset_reader = UniversalDependenciesRawDatasetReader(self._dataset_reader)
        # A separate UdifyPredictor does the actual per-sentence prediction.
        self.predictor = UdifyPredictor(model, dataset_reader)
        self.output_conllu = output_conllu
开发者ID:Hyperparticle,项目名称:udify,代码行数:10,代码来源:text_predictor.py

示例5: __init__

# 需要导入模块: from allennlp import models [as 别名]
# 或者: from allennlp.models import Model [as 别名]
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
        """Deprecated predictor kept for backward compatibility.

        Emits a DeprecationWarning directing callers to the 'seq2seq' predictor.
        """
        super().__init__(model, dataset_reader)
        # stacklevel=2 attributes the warning to the code constructing this
        # predictor rather than to this __init__ line itself.
        warnings.warn(
            "The 'copynet' predictor has been deprecated in favor of "
            "the 'seq2seq' predictor.",
            DeprecationWarning,
            stacklevel=2,
        )
开发者ID:epwalsh,项目名称:nlp-models,代码行数:9,代码来源:copynet.py

示例6: __init__

# 需要导入模块: from allennlp import models [as 别名]
# 或者: from allennlp.models import Model [as 别名]
def __init__(self,
                 model: Model,
                 dataset_reader: DatasetReader) -> None:
        """Predictor that tokenizes input with a spaCy word splitter."""
        super().__init__(model, dataset_reader)
        # POS tags are requested at tokenization time; presumably downstream
        # processing relies on them — confirm against the prediction code.
        self.tokenizer = WordTokenizer(word_splitter=SpacyWordSplitter(pos_tags=True))
开发者ID:allenai,项目名称:propara,代码行数:7,代码来源:prostruct_prediction.py

示例7: __init__

# 需要导入模块: from allennlp import models [as 别名]
# 或者: from allennlp.models import Model [as 别名]
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
        """Entailment-pair predictor.

        Caches the vocabulary indices of the three NLI labels once, so they do
        not have to be looked up on every prediction.
        """
        super().__init__(model, dataset_reader)
        # Resolve each label string to its index in the "labels" namespace.
        # NOTE(review): assumes super().__init__ assigned self._model — confirm.
        self._entailment_idx = self._model.vocab.get_token_index("entailment", "labels")
        self._contradiction_idx = self._model.vocab.get_token_index("contradiction", "labels")
        self._neutral_idx = self._model.vocab.get_token_index("neutral", "labels")
开发者ID:StonyBrookNLP,项目名称:multee,代码行数:7,代码来源:entailment_pair.py

示例8: __init__

# 需要导入模块: from allennlp import models [as 别名]
# 或者: from allennlp.models import Model [as 别名]
def __init__(self,
                 model: Model,
                 dataset_reader: DatasetReader,
                 fix_subwords: bool = True) -> None:
        """Summary predictor.

        fix_subwords: presumably controls merging of subword pieces back into
        words during decoding — confirm against the predict method.
        """
        super().__init__(model, dataset_reader)
        self._fix_subwords = fix_subwords
开发者ID:IlyaGusev,项目名称:summarus,代码行数:8,代码来源:summary_predictor.py

示例9: __init__

# 需要导入模块: from allennlp import models [as 别名]
# 或者: from allennlp.models import Model [as 别名]
def __init__(self,
                 model: Model,
                 dataset_reader: DatasetReader,
                 top_n: int = 3,
                 border=None,
                 fix_subwords: bool = True) -> None:
        """Sentence-level summary predictor.

        top_n: presumably the number of top-scored sentences to select — confirm
            against the predict method.
        border: optional threshold; semantics not visible here — confirm against
            the predict method before documenting further.
        fix_subwords: presumably controls merging of subword pieces back into
            words — confirm.
        """
        super().__init__(model, dataset_reader)
        self._top_n = top_n
        self._border = border
        self._fix_subwords = fix_subwords
开发者ID:IlyaGusev,项目名称:summarus,代码行数:12,代码来源:summary_sentences_predictor.py

示例10: train_model_from_file

# 需要导入模块: from allennlp import models [as 别名]
# 或者: from allennlp.models import Model [as 别名]
def train_model_from_file(parameter_filename: str,
                          serialization_dir: str,
                          overrides: str = "",
                          file_friendly_logging: bool = False,
                          recover: bool = False,
                          force: bool = False,
                          ext_vars=None) -> Model:
    """
    A wrapper around :func:`train_model` which loads the params from a file.

    Parameters
    ----------
    parameter_filename : ``str``
        A json parameter file specifying an AllenNLP experiment.
    serialization_dir : ``str``
        The directory in which to save results and logs. We just pass this along to
        :func:`train_model`.
    overrides : ``str``
        A JSON string that we will use to override values in the input parameter file.
    file_friendly_logging : ``bool``, optional (default=False)
        If ``True``, we make our output more friendly to saved model files.  We just pass this
        along to :func:`train_model`.
    recover : ``bool``, optional (default=False)
        If ``True``, we will try to recover a training run from an existing serialization
        directory.  This is only intended for use when something actually crashed during the middle
        of a run.  For continuing training a model on new data, see the ``fine-tune`` command.
    force : ``bool``, optional (default=False)
        Passed through to :func:`train_model`; presumably forces overwriting an
        existing serialization directory — confirm against ``train_model``.
    ext_vars : optional (default=None)
        External variables forwarded to ``Params.from_file``, presumably for
        Jsonnet variable substitution — confirm against ``Params.from_file``.

    Returns
    -------
    The ``Model`` produced by :func:`train_model`.
    """
    # Load the experiment config from a file and pass it to ``train_model``.
    params = Params.from_file(parameter_filename, overrides, ext_vars=ext_vars)
    return train_model(params, serialization_dir, file_friendly_logging, recover, force)
开发者ID:matthew-z,项目名称:R-net,代码行数:32,代码来源:main.py

示例11: eval_model

# 需要导入模块: from allennlp import models [as 别名]
# 或者: from allennlp.models import Model [as 别名]
def eval_model(db: FeverDocDB, args) -> Model:
    """Evaluate an archived FEVER model on the instances in ``args.in_file``.

    Loads the archive, rebuilds the dataset reader from the archived config,
    predicts a label for every instance, optionally writes JSON-lines results
    to ``args.log``, prints sklearn metrics when gold labels exist, and
    returns the loaded model.
    """
    archive = load_archive(args.archive_file, cuda_device=args.cuda_device)

    config = archive.config
    ds_params = config["dataset_reader"]

    model = archive.model
    model.eval()

    # Rebuild the reader from the archived dataset_reader params so evaluation
    # uses the same tokenizers/indexers as training.
    reader = FEVERReader(db,
                                 sentence_level=ds_params.pop("sentence_level",False),
                                 wiki_tokenizer=Tokenizer.from_params(ds_params.pop('wiki_tokenizer', {})),
                                 claim_tokenizer=Tokenizer.from_params(ds_params.pop('claim_tokenizer', {})),
                                 token_indexers=TokenIndexer.dict_from_params(ds_params.pop('token_indexers', {})))

    logger.info("Reading training data from %s", args.in_file)
    data = reader.read(args.in_file).instances

    actual = []
    predicted = []

    # NOTE(review): the log file is opened here and closed after the loop only
    # when args.log is set; a ``with`` block would be safer — kept as-is here.
    if args.log is not None:
        f = open(args.log,"w+")

    for item in tqdm(data):
        # Instances without a usable premise cannot be classified by the model.
        if item.fields["premise"] is None or item.fields["premise"].sequence_length() == 0:
            cls = "NOT ENOUGH INFO"
        else:
            prediction = model.forward_on_instance(item, args.cuda_device)
            # Map the argmax over label probabilities back to its label string.
            cls = model.vocab._index_to_token["labels"][np.argmax(prediction["label_probs"])]

        if "label" in item.fields:
            actual.append(item.fields["label"].label)
        predicted.append(cls)

        # One JSON object per instance; include the gold label when present.
        if args.log is not None:
            if "label" in item.fields:
                f.write(json.dumps({"actual":item.fields["label"].label,"predicted":cls})+"\n")
            else:
                f.write(json.dumps({"predicted":cls})+"\n")

    if args.log is not None:
        f.close()


    # Metrics only make sense when at least one gold label was seen.
    if len(actual) > 0:
        print(accuracy_score(actual, predicted))
        print(classification_report(actual, predicted))
        print(confusion_matrix(actual, predicted))

    return model
开发者ID:sheffieldnlp,项目名称:fever-naacl-2018,代码行数:53,代码来源:eval_da.py

示例12: eval_model

# 需要导入模块: from allennlp import models [as 别名]
# 或者: from allennlp.models import Model [as 别名]
def eval_model(db: FeverDocDB, args) -> Model:
    """Interactive loop: classify user-entered claims against retrieved evidence.

    Loads an archived model, then repeatedly prompts for a claim, retrieves
    candidate pages with a TF-IDF ranker, ranks page sentences by TF-IDF
    similarity to the claim, concatenates the top 5 as evidence, and prints
    the model's predicted label. Enter "q" to quit.

    Fixes over the original:
    - the TF-IDF ranker is built once (it was rebuilt — reloading the index —
      on every entered claim);
    - the function now returns the loaded model, honouring its declared
      ``-> Model`` annotation (it previously fell off the end returning None,
      which is backward-compatible for callers that ignored the result).
    """
    archive = load_archive(args.archive_file, cuda_device=args.cuda_device, overrides=args.overrides)

    config = archive.config
    ds_params = config["dataset_reader"]

    model = archive.model
    model.eval()

    # Rebuild the reader from the archived config so prediction matches training.
    reader = FEVERReader(db,
                         sentence_level=ds_params.pop("sentence_level", False),
                         wiki_tokenizer=Tokenizer.from_params(ds_params.pop('wiki_tokenizer', {})),
                         claim_tokenizer=Tokenizer.from_params(ds_params.pop('claim_tokenizer', {})),
                         token_indexers=TokenIndexer.dict_from_params(ds_params.pop('token_indexers', {})))

    # Loop-invariant: construct the ranker once instead of per claim.
    ranker = retriever.get_class('tfidf')(tfidf_path=args.model)

    while True:

        claim = input("enter claim (or q to quit) >>")
        if claim.lower() == "q":
            break

        p_lines = []
        pages,_ = ranker.closest_docs(claim,5)

        for page in pages:
            lines = db.get_doc_lines(page)
            # Each stored line is tab-separated; keep the sentence text field,
            # replacing degenerate entries with "".
            lines = [line.split("\t")[1] if len(line.split("\t")[1]) > 1 else "" for line in lines.split("\n")]

            p_lines.extend(zip(lines, [page] * len(lines), range(len(lines))))

        # Score every candidate sentence against the claim, drop empty ones,
        # and sort best-first by similarity.
        scores = tf_idf_sim(claim, [pl[0] for pl in p_lines])
        scores = list(zip(scores, [pl[1] for pl in p_lines], [pl[2] for pl in p_lines], [pl[0] for pl in p_lines]))
        scores = list(filter(lambda score: len(score[3].strip()), scores))
        sentences_l = list(sorted(scores, reverse=True, key=lambda elem: elem[0]))

        # The evidence passage is the top-5 sentences joined together.
        sentences = [s[3] for s in sentences_l[:5]]
        evidence = " ".join(sentences)

        print("Best pages: {0}".format(repr(pages)))

        print("Evidence:")
        for idx,sentence in enumerate(sentences_l[:5]):
            print("{0}\t{1}\t\t{2}\t{3}".format(idx+1, sentence[0], sentence[1],sentence[3]) )

        item = reader.text_to_instance(evidence, claim)

        prediction = model.forward_on_instance(item, args.cuda_device)
        # Map the argmax over label probabilities back to its label string.
        cls = model.vocab._index_to_token["labels"][np.argmax(prediction["label_probs"])]
        print("PREDICTED: {0}".format(cls))
        print()

    return model
开发者ID:sheffieldnlp,项目名称:fever-naacl-2018,代码行数:56,代码来源:interactive.py


注:本文中的allennlp.models.Model方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。