本文整理汇总了Python中allennlp.models.Model方法的典型用法代码示例。如果您正苦于以下问题:Python models.Model方法的具体用法?Python models.Model怎么用?Python models.Model使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类allennlp.models
的用法示例。
在下文中一共展示了models.Model方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# Required import: from allennlp import models
# Alternatively: from allennlp.models import Model
def __init__(self, serialization_dir: str, model: Model) -> None:
    """
    A trivial trainer to assist in making model archives for models that do not actually
    require training. For instance, a majority class baseline.

    In a typical AllenNLP configuration file, neither the `serialization_dir` nor the `model`
    arguments would need an entry.
    """
    # This trainer never trains anything, so it is pinned to CPU (cuda_device=-1).
    super().__init__(serialization_dir, cuda_device=-1)
    self.model = model
示例2: __init__
# Required import: from allennlp import models
# Alternatively: from allennlp.models import Model
def __init__(
    self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm"
) -> None:
    """
    Wire up the model/reader pair via the base predictor, then attach a spaCy
    tokenizer (with POS tagging enabled) for the given language model.
    """
    super().__init__(model, dataset_reader)
    tokenizer = SpacyTokenizer(language=language, pos_tags=True)
    self._tokenizer = tokenizer
示例3: __init__
# Required import: from allennlp import models
# Alternatively: from allennlp.models import Model
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
    """Plain pass-through constructor: delegates everything to the base predictor."""
    super().__init__(model, dataset_reader)
示例4: __init__
# Required import: from allennlp import models
# Alternatively: from allennlp.models import Model
def __init__(self,
             model: Model,
             dataset_reader: DatasetReader,
             output_conllu: bool = False) -> None:
    """
    Predictor for raw-text Universal Dependencies input.

    After the base constructor runs, the reader it installed is wrapped so that
    raw (untokenized) text can be parsed directly. An inner `UdifyPredictor`
    handles the actual prediction; `output_conllu` toggles CoNLL-U output.
    """
    super().__init__(model, dataset_reader)
    # Wrap the reader set by the base class so it accepts raw text.
    raw_reader = UniversalDependenciesRawDatasetReader(self._dataset_reader)
    self._dataset_reader = raw_reader
    # NOTE(review): the inner predictor is built from the *unwrapped* reader —
    # presumably intentional, confirm against UdifyPredictor's expectations.
    self.predictor = UdifyPredictor(model, dataset_reader)
    self.output_conllu = output_conllu
示例5: __init__
# Required import: from allennlp import models
# Alternatively: from allennlp.models import Model
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
    """Deprecated predictor: construct normally, then emit a DeprecationWarning."""
    super().__init__(model, dataset_reader)
    deprecation_message = (
        "The 'copynet' predictor has been deprecated in favor of "
        "the 'seq2seq' predictor."
    )
    warnings.warn(deprecation_message, DeprecationWarning)
示例6: __init__
# Required import: from allennlp import models
# Alternatively: from allennlp.models import Model
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
    """
    Delegate to the base predictor, then build a spaCy-backed word tokenizer
    with POS tagging enabled.
    """
    super().__init__(model, dataset_reader)
    splitter = SpacyWordSplitter(pos_tags=True)
    self.tokenizer = WordTokenizer(word_splitter=splitter)
示例7: __init__
# Required import: from allennlp import models
# Alternatively: from allennlp.models import Model
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
    """
    Entailment predictor: cache the label indices for the three NLI classes
    ("entailment", "contradiction", "neutral") from the model's vocabulary.
    """
    super().__init__(model, dataset_reader)
    # Hoist the vocab lookup out of the three get_token_index calls.
    vocab = self._model.vocab
    self._entailment_idx = vocab.get_token_index("entailment", "labels")
    self._contradiction_idx = vocab.get_token_index("contradiction", "labels")
    self._neutral_idx = vocab.get_token_index("neutral", "labels")
示例8: __init__
# Required import: from allennlp import models
# Alternatively: from allennlp.models import Model
def __init__(self, model: Model, dataset_reader: DatasetReader, fix_subwords=True) -> None:
    """
    Delegate to the base predictor and remember whether subword pieces should
    be merged back together in the output.
    """
    super().__init__(model, dataset_reader)
    self._fix_subwords = fix_subwords
示例9: __init__
# Required import: from allennlp import models
# Alternatively: from allennlp.models import Model
def __init__(self, model: Model, dataset_reader: DatasetReader,
             top_n=3, border=None, fix_subwords=True) -> None:
    """
    Delegate to the base predictor and stash the ranking options:
    `top_n` candidates to keep, an optional score `border` (cutoff), and
    whether to merge subword pieces in the output.
    """
    super().__init__(model, dataset_reader)
    self._top_n = top_n
    self._border = border
    self._fix_subwords = fix_subwords
示例10: train_model_from_file
# Required import: from allennlp import models
# Alternatively: from allennlp.models import Model
def train_model_from_file(parameter_filename: str,
                          serialization_dir: str,
                          overrides: str = "",
                          file_friendly_logging: bool = False,
                          recover: bool = False,
                          force: bool = False,
                          ext_vars=None) -> Model:
    """
    A wrapper around :func:`train_model` which loads the params from a file.

    Parameters
    ----------
    parameter_filename : ``str``
        A json parameter file specifying an AllenNLP experiment.
    serialization_dir : ``str``
        The directory in which to save results and logs. We just pass this along to
        :func:`train_model`.
    overrides : ``str``
        A JSON string that we will use to override values in the input parameter file.
    file_friendly_logging : ``bool``, optional (default=False)
        If ``True``, we make our output more friendly to saved model files. We just pass this
        along to :func:`train_model`.
    recover : ``bool``, optional (default=False)
        If ``True``, we will try to recover a training run from an existing serialization
        directory. This is only intended for use when something actually crashed during the middle
        of a run. For continuing training a model on new data, see the ``fine-tune`` command.
    force : ``bool``, optional (default=False)
        Passed through to :func:`train_model`; presumably forces overwriting an existing
        serialization directory — confirm against ``train_model``'s own documentation.
    ext_vars : optional (default=None)
        External variables forwarded to ``Params.from_file`` for Jsonnet substitution.
    """
    # Load the experiment config from a file and pass it to ``train_model``.
    params = Params.from_file(parameter_filename, overrides, ext_vars=ext_vars)
    return train_model(params, serialization_dir, file_friendly_logging, recover, force)
示例11: eval_model
# Required import: from allennlp import models
# Alternatively: from allennlp.models import Model
def eval_model(db: FeverDocDB, args) -> Model:
    """
    Evaluate an archived FEVER model on a labelled data file.

    Loads the archive named by ``args.archive_file``, reads instances from
    ``args.in_file``, predicts a label for each instance (instances with an
    empty premise are labelled "NOT ENOUGH INFO" without calling the model),
    optionally appends one JSON line per instance to ``args.log``, and prints
    accuracy / classification report / confusion matrix when gold labels are
    present. Returns the loaded model.
    """
    archive = load_archive(args.archive_file, cuda_device=args.cuda_device)
    config = archive.config
    ds_params = config["dataset_reader"]

    model = archive.model
    model.eval()

    reader = FEVERReader(db,
                         sentence_level=ds_params.pop("sentence_level", False),
                         wiki_tokenizer=Tokenizer.from_params(ds_params.pop('wiki_tokenizer', {})),
                         claim_tokenizer=Tokenizer.from_params(ds_params.pop('claim_tokenizer', {})),
                         token_indexers=TokenIndexer.dict_from_params(ds_params.pop('token_indexers', {})))

    logger.info("Reading training data from %s", args.in_file)
    data = reader.read(args.in_file).instances

    actual = []
    predicted = []

    # FIX: the original left the log file open if prediction raised mid-loop;
    # try/finally guarantees it is closed on every exit path.
    log_file = open(args.log, "w+") if args.log is not None else None
    try:
        for item in tqdm(data):
            # Instances with no premise cannot be classified by the model.
            if item.fields["premise"] is None or item.fields["premise"].sequence_length() == 0:
                cls = "NOT ENOUGH INFO"
            else:
                prediction = model.forward_on_instance(item, args.cuda_device)
                cls = model.vocab._index_to_token["labels"][np.argmax(prediction["label_probs"])]

            if "label" in item.fields:
                actual.append(item.fields["label"].label)
            predicted.append(cls)

            if log_file is not None:
                if "label" in item.fields:
                    log_file.write(json.dumps({"actual": item.fields["label"].label, "predicted": cls}) + "\n")
                else:
                    log_file.write(json.dumps({"predicted": cls}) + "\n")
    finally:
        if log_file is not None:
            log_file.close()

    # Only meaningful when at least one gold label was seen.
    if len(actual) > 0:
        print(accuracy_score(actual, predicted))
        print(classification_report(actual, predicted))
        print(confusion_matrix(actual, predicted))

    return model
示例12: eval_model
# Required import: from allennlp import models
# Alternatively: from allennlp.models import Model
def eval_model(db: FeverDocDB, args) -> Model:
    """
    Interactive FEVER demo loop.

    Loads the archived model, then repeatedly: reads a claim from stdin
    (``q`` quits), retrieves the 5 closest pages with a TF-IDF ranker,
    scores every sentence on those pages against the claim, takes the top 5
    sentences as evidence, and prints the model's predicted label.
    """
    archive = load_archive(args.archive_file, cuda_device=args.cuda_device, overrides=args.overrides)
    config = archive.config
    ds_params = config["dataset_reader"]

    model = archive.model
    model.eval()

    reader = FEVERReader(db,
                         sentence_level=ds_params.pop("sentence_level", False),
                         wiki_tokenizer=Tokenizer.from_params(ds_params.pop('wiki_tokenizer', {})),
                         claim_tokenizer=Tokenizer.from_params(ds_params.pop('claim_tokenizer', {})),
                         token_indexers=TokenIndexer.dict_from_params(ds_params.pop('token_indexers', {})))

    # FIX: the ranker is loop-invariant — the original rebuilt (and reloaded)
    # the TF-IDF index on every claim; build it once before the loop.
    ranker = retriever.get_class('tfidf')(tfidf_path=args.model)

    while True:
        claim = input("enter claim (or q to quit) >>")
        if claim.lower() == "q":
            break

        pages, _ = ranker.closest_docs(claim, 5)

        # Collect (sentence, page, line-number) triples for every retrieved page.
        p_lines = []
        for page in pages:
            doc_lines = db.get_doc_lines(page)
            sentences = []
            for line in doc_lines.split("\n"):
                fields = line.split("\t")
                # FIX: the original indexed fields[1] unconditionally and raised
                # IndexError on lines without a tab; treat those as empty.
                sentences.append(fields[1] if len(fields) > 1 and len(fields[1]) > 1 else "")
            p_lines.extend(zip(sentences, [page] * len(sentences), range(len(sentences))))

        # Score each candidate sentence against the claim, drop blanks,
        # and keep the 5 best as evidence.
        scores = tf_idf_sim(claim, [pl[0] for pl in p_lines])
        scores = list(zip(scores, [pl[1] for pl in p_lines], [pl[2] for pl in p_lines], [pl[0] for pl in p_lines]))
        scores = list(filter(lambda score: len(score[3].strip()), scores))
        sentences_l = list(sorted(scores, reverse=True, key=lambda elem: elem[0]))

        sentences = [s[3] for s in sentences_l[:5]]
        evidence = " ".join(sentences)

        print("Best pages: {0}".format(repr(pages)))
        print("Evidence:")
        for idx, sentence in enumerate(sentences_l[:5]):
            print("{0}\t{1}\t\t{2}\t{3}".format(idx + 1, sentence[0], sentence[1], sentence[3]))

        item = reader.text_to_instance(evidence, claim)
        prediction = model.forward_on_instance(item, args.cuda_device)
        cls = model.vocab._index_to_token["labels"][np.argmax(prediction["label_probs"])]
        print("PREDICTED: {0}".format(cls))
        print()