This article collects typical usage examples of the Python method allennlp.data.token_indexers.TokenIndexer.dict_from_params. If you are wondering what TokenIndexer.dict_from_params does, how to call it, or what it looks like in practice, the curated examples below should help. You can also explore further usage examples of its containing class, allennlp.data.token_indexers.TokenIndexer.
Ten code examples of TokenIndexer.dict_from_params are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
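Before diving into the examples, the snippet below is a minimal sketch of calling the method directly. It assumes an older AllenNLP release (the 0.x series) in which TokenIndexer.dict_from_params still exists; the indexer names "tokens" and "token_characters" and their options are illustrative choices, not required ones.

from allennlp.common import Params
from allennlp.data.token_indexers import TokenIndexer

# A Params object mirroring a typical "token_indexers" block from a JSON config.
indexer_params = Params({
    "tokens": {"type": "single_id", "lowercase_tokens": True},
    "token_characters": {"type": "characters"},
})

# dict_from_params turns the nested Params into a Dict[str, TokenIndexer],
# keyed by the configuration names ("tokens" and "token_characters" here).
token_indexers = TokenIndexer.dict_from_params(indexer_params)
print(token_indexers.keys())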
Example 1: from_params
# Required module: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
def from_params(cls, params: Params) -> 'FEVERSentenceReader':
    claim_tokenizer = Tokenizer.from_params(params.pop('claim_tokenizer', {}))
    wiki_tokenizer = Tokenizer.from_params(params.pop('wiki_tokenizer', {}))
    token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    db = FeverDocDB(params.pop("db_path", "data/fever/fever.db"))
    params.assert_empty(cls.__name__)
    return FEVERSentenceReader(db=db,
                               claim_tokenizer=claim_tokenizer,
                               wiki_tokenizer=wiki_tokenizer,
                               token_indexers=token_indexers)
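For reference, a configuration along the following lines (a hypothetical sketch, not taken from the FEVER repository) is what the from_params above would consume: each top-level key is popped in turn, and params.assert_empty then rejects any leftover keys.

from allennlp.common import Params

# Hypothetical reader configuration; the keys mirror the params.pop calls above.
reader_params = Params({
    "claim_tokenizer": {"type": "word"},
    "wiki_tokenizer": {"type": "word"},
    "token_indexers": {"tokens": {"type": "single_id"}},
    "db_path": "data/fever/fever.db",
})
# reader = FEVERSentenceReader.from_params(reader_params)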
Example 2: from_params
# Required module: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
def from_params(cls, params: Params) -> 'FEVERReader':
    claim_tokenizer = Tokenizer.from_params(params.pop('claim_tokenizer', {}))
    wiki_tokenizer = Tokenizer.from_params(params.pop('wiki_tokenizer', {}))
    token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    sentence_level = params.pop("sentence_level", False)
    db = FeverDocDB(params.pop("db_path", "data/fever.db"))
    params.assert_empty(cls.__name__)
    return FEVERReader(db=db,
                       sentence_level=sentence_level,
                       claim_tokenizer=claim_tokenizer,
                       wiki_tokenizer=wiki_tokenizer,
                       token_indexers=token_indexers)
Example 3: from_params
# Required module: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
def from_params(cls, params: Params) -> 'EntailmentTupleReader':
    tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
    token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    max_tuples = params.pop('max_tuples', 30)
    max_tokens = params.pop('max_tokens', 200)
    params.assert_empty(cls.__name__)
    return EntailmentTupleReader(max_tokens=max_tokens,
                                 max_tuples=max_tuples,
                                 tokenizer=tokenizer,
                                 token_indexers=token_indexers)
Example 4: from_params
# Required module: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
def from_params(cls, params: Params) -> 'ProGlobalDatasetReader':
    token_indexers = TokenIndexer.dict_from_params(params.pop("token_indexers", {}))
    token_position_indexers = TokenIndexer.dict_from_params(params.pop("token_position_indexers", {}))
    sent_position_indexers = TokenIndexer.dict_from_params(params.pop("sent_position_indexers", {}))
    return ProGlobalDatasetReader(token_indexers=token_indexers,
                                  token_position_indexers=token_position_indexers,
                                  sent_position_indexers=sent_position_indexers)
Example 5: from_params
# Required module: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
def from_params(cls, params: Params) -> 'ProParaDatasetReader':
    token_indexers = TokenIndexer.dict_from_params(params.pop("token_indexers", {}))
    multiple_annotations = params.pop_bool("multiple_annotations", False)
    return ProParaDatasetReader(token_indexers=token_indexers, multiple_annotations=multiple_annotations)
Example 6: from_params
# Required module: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
def from_params(cls, params: Params) -> 'ProLocalDatasetReader':
    token_indexers = TokenIndexer.dict_from_params(params.pop("token_indexer", {}))
    params.assert_empty(cls.__name__)
    return ProLocalDatasetReader(token_indexers=token_indexers)
Example 7: from_params
# Required module: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
def from_params(cls, params: Params) -> 'SwagReader':
    tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
    token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    use_only_gold_examples = params.pop('use_only_gold_examples', False)
    params.assert_empty(cls.__name__)
    return cls(tokenizer=tokenizer,
               token_indexers=token_indexers,
               use_only_gold_examples=use_only_gold_examples)
Example 8: from_params
# Required module: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
def from_params(cls, params: Params) -> 'SwagReader':
    tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
    token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    use_only_gold_examples = params.pop('use_only_gold_examples', False)
    only_end = params.pop('only_end', False)
    params.assert_empty(cls.__name__)
    return cls(tokenizer=tokenizer,
               token_indexers=token_indexers,
               use_only_gold_examples=use_only_gold_examples,
               only_end=only_end)
Example 9: from_params
# Required module: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
def from_params(cls, params: Params) -> 'ArcMultiChoiceJsonReader':
    tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
    token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    return ArcMultiChoiceJsonReader(tokenizer=tokenizer,
                                    token_indexers=token_indexers)
Example 10: text_to_instance
# Required module: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
def text_to_instance(self,  # type: ignore
                     question_text: str,
                     passage_text: str,
                     question_id: str,
                     char_spans: List[Tuple[int, int]] = None,
                     answer_texts: List[str] = None,
                     passage_tokens: List[Token] = None) -> Instance:
    # pylint: disable=arguments-differ
    if not passage_tokens:
        passage_tokens = self._tokenizer.tokenize(passage_text)
    char_spans = char_spans or []
    # We need to convert character indices in `passage_text` to token indices in
    # `passage_tokens`, as the latter is what we'll actually use for supervision.
    token_spans: List[Tuple[int, int]] = []
    passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
    for char_span_start, char_span_end in char_spans:
        (span_start, span_end), error = util.char_span_to_token_span(passage_offsets,
                                                                     (char_span_start, char_span_end))
        if error:
            logger.debug("Passage: %s", passage_text)
            logger.debug("Passage tokens: %s", passage_tokens)
            logger.debug("Question text: %s", question_text)
            logger.debug("Answer span: (%d, %d)", char_span_start, char_span_end)
            logger.debug("Token span: (%d, %d)", span_start, span_end)
            logger.debug("Tokens in answer: %s", passage_tokens[span_start:span_end + 1])
            logger.debug("Answer: %s", passage_text[char_span_start:char_span_end])
        token_spans.append((span_start, span_end))
    return make_reading_comprehension_instance(
        self._tokenizer.tokenize(question_text),
        passage_tokens,
        self._token_indexers,
        passage_text,
        question_id,
        token_spans,
        answer_texts)
# @classmethod
# def from_params(cls, params: Params) -> 'Squad2Reader':
# tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
# token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
# lazy = params.pop('lazy', False)
# params.assert_empty(cls.__name__)
# return cls(tokenizer=tokenizer, token_indexers=token_indexers, lazy=lazy)
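To round out Example 10, here is a hypothetical usage sketch of the method; the reader object, the passage, and the character span are all illustrative, and the span follows the (start, start + len(answer)) convention implied by the passage_text[char_span_start:char_span_end] logging above.

# Hypothetical usage; `reader` is assumed to be an already constructed dataset
# reader whose self._tokenizer and self._token_indexers are set.
instance = reader.text_to_instance(
    question_text="Who wrote Hamlet?",
    passage_text="Hamlet is a tragedy written by William Shakespeare.",
    question_id="q1",
    char_spans=[(31, 50)],  # character offsets of "William Shakespeare"
    answer_texts=["William Shakespeare"])
print(instance.fields.keys())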