

Python TokenIndexer.dict_from_params method code examples

This article collects typical usage examples of the Python method allennlp.data.token_indexers.TokenIndexer.dict_from_params. If you are wondering what exactly TokenIndexer.dict_from_params does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the class it belongs to, allennlp.data.token_indexers.TokenIndexer.


Below are 10 code examples of the TokenIndexer.dict_from_params method, sorted by popularity by default.
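Before the examples, here is a minimal sketch of how TokenIndexer.dict_from_params is typically called in the older AllenNLP API these projects target: a Params object maps each indexer name to an indexer configuration, and dict_from_params instantiates that configuration into a dict of TokenIndexer objects. The configuration keys and indexer types used here ("single_id", "characters") are illustrative assumptions, not taken from any of the projects below.

from allennlp.common import Params
from allennlp.data.token_indexers import TokenIndexer

# Hypothetical reader configuration: each entry under "token_indexers"
# names one indexer and gives its config.
params = Params({
    "token_indexers": {
        "tokens": {"type": "single_id", "lowercase_tokens": True},
        "token_characters": {"type": "characters"}
    }
})

# dict_from_params turns the nested config into a dict of instantiated
# TokenIndexer objects, e.g. {"tokens": SingleIdTokenIndexer(...), ...}.
token_indexers = TokenIndexer.dict_from_params(params.pop("token_indexers", {}))

This is the pattern that the from_params methods below all repeat: pop the token-indexer sub-config, hand it to dict_from_params, and pass the resulting dict to the reader's constructor.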

Example 1: from_params

# Required import: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
@classmethod
def from_params(cls, params: Params) -> 'FEVERSentenceReader':
    # Build the claim/wiki tokenizers, the token indexers, and the FEVER document database from the config.
    claim_tokenizer = Tokenizer.from_params(params.pop('claim_tokenizer', {}))
    wiki_tokenizer = Tokenizer.from_params(params.pop('wiki_tokenizer', {}))

    token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    db = FeverDocDB(params.pop("db_path", "data/fever/fever.db"))
    params.assert_empty(cls.__name__)
    return FEVERSentenceReader(db=db,
                               claim_tokenizer=claim_tokenizer,
                               wiki_tokenizer=wiki_tokenizer,
                               token_indexers=token_indexers)
Developer: sheffieldnlp, Project: fever-naacl-2018, Lines: 13, Source: reader.py

Example 2: from_params

# Required import: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
@classmethod
def from_params(cls, params: Params) -> 'FEVERReader':
    # Like FEVERSentenceReader above, with an extra sentence-level flag.
    claim_tokenizer = Tokenizer.from_params(params.pop('claim_tokenizer', {}))
    wiki_tokenizer = Tokenizer.from_params(params.pop('wiki_tokenizer', {}))

    token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    sentence_level = params.pop("sentence_level", False)
    db = FeverDocDB(params.pop("db_path", "data/fever.db"))
    params.assert_empty(cls.__name__)
    return FEVERReader(db=db,
                       sentence_level=sentence_level,
                       claim_tokenizer=claim_tokenizer,
                       wiki_tokenizer=wiki_tokenizer,
                       token_indexers=token_indexers)
Developer: sheffieldnlp, Project: fever-naacl-2018, Lines: 15, Source: reader.py

Example 3: from_params

# Required import: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
@classmethod
def from_params(cls, params: Params) -> 'EntailmentTupleReader':
    # Tokenizer and indexers, plus limits on tuples and tokens per example.
    tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
    token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    max_tuples = params.pop('max_tuples', 30)
    max_tokens = params.pop('max_tokens', 200)
    params.assert_empty(cls.__name__)
    return EntailmentTupleReader(max_tokens=max_tokens,
                                 max_tuples=max_tuples,
                                 tokenizer=tokenizer,
                                 token_indexers=token_indexers)
Developer: allenai, Project: scitail, Lines: 12, Source: entailment_tuple_reader.py

Example 4: from_params

# Required import: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
@classmethod
def from_params(cls, params: Params) -> 'ProGlobalDatasetReader':
    # Separate indexer dicts for tokens, token positions, and sentence positions.
    token_indexers = TokenIndexer.dict_from_params(params.pop("token_indexers", {}))
    token_position_indexers = TokenIndexer.dict_from_params(params.pop("token_position_indexers", {}))
    sent_position_indexers = TokenIndexer.dict_from_params(params.pop("sent_position_indexers", {}))
    return ProGlobalDatasetReader(token_indexers=token_indexers,
                                  token_position_indexers=token_position_indexers,
                                  sent_position_indexers=sent_position_indexers)
Developer: allenai, Project: propara, Lines: 8, Source: proglobal_dataset_reader.py

Example 5: from_params

# Required import: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
@classmethod
def from_params(cls, params: Params) -> 'ProParaDatasetReader':
    # Token indexers plus a boolean flag for data with multiple annotations.
    token_indexers = TokenIndexer.dict_from_params(params.pop("token_indexers", {}))
    multiple_annotations = params.pop_bool("multiple_annotations", False)

    return ProParaDatasetReader(token_indexers=token_indexers, multiple_annotations=multiple_annotations)
Developer: allenai, Project: propara, Lines: 7, Source: propara_dataset_reader.py

Example 6: from_params

# Required import: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
@classmethod
def from_params(cls, params: Params) -> 'ProLocalDatasetReader':
    # Note: this reader reads its indexers from the "token_indexer" (singular) key.
    token_indexers = TokenIndexer.dict_from_params(params.pop("token_indexer", {}))
    params.assert_empty(cls.__name__)
    return ProLocalDatasetReader(token_indexers=token_indexers)
Developer: allenai, Project: propara, Lines: 6, Source: prolocal_dataset_reader.py

Example 7: from_params

# Required import: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
@classmethod
def from_params(cls, params: Params) -> 'SwagReader':
    # Tokenizer, indexers, and a flag restricting the reader to gold examples.
    tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
    token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    use_only_gold_examples = params.pop('use_only_gold_examples', False)
    params.assert_empty(cls.__name__)
    return cls(tokenizer=tokenizer,
               token_indexers=token_indexers,
               use_only_gold_examples=use_only_gold_examples)
Developer: rowanz, Project: swagaf, Lines: 10, Source: dataset_reader.py

Example 8: from_params

# Required import: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
@classmethod
def from_params(cls, params: Params) -> 'SwagReader':
    # Same as Example 7, with an additional only_end flag.
    tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
    token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    use_only_gold_examples = params.pop('use_only_gold_examples', False)
    only_end = params.pop('only_end', False)
    params.assert_empty(cls.__name__)
    return cls(tokenizer=tokenizer,
               token_indexers=token_indexers,
               use_only_gold_examples=use_only_gold_examples,
               only_end=only_end)
Developer: rowanz, Project: swagaf, Lines: 12, Source: dataset_reader.py

Example 9: from_params

# Required import: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
@classmethod
def from_params(cls, params: Params) -> 'ArcMultiChoiceJsonReader':
    # Build the tokenizer and token indexers for the ARC multiple-choice reader.
    tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
    token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))

    return ArcMultiChoiceJsonReader(tokenizer=tokenizer,
                                    token_indexers=token_indexers)
Developer: allenai, Project: ARC-Solvers, Lines: 8, Source: arc_multichoice_json_reader.py

Example 10: text_to_instance

# Required import: from allennlp.data.token_indexers import TokenIndexer [as alias]
# Or: from allennlp.data.token_indexers.TokenIndexer import dict_from_params [as alias]
def text_to_instance(self,  # type: ignore
                     question_text: str,
                     passage_text: str,
                     question_id: str,
                     char_spans: List[Tuple[int, int]] = None,
                     answer_texts: List[str] = None,
                     passage_tokens: List[Token] = None) -> Instance:
    # pylint: disable=arguments-differ
    if not passage_tokens:
        passage_tokens = self._tokenizer.tokenize(passage_text)
    char_spans = char_spans or []

    # We need to convert character indices in `passage_text` to token indices in
    # `passage_tokens`, as the latter is what we'll actually use for supervision.
    token_spans: List[Tuple[int, int]] = []
    passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
    for char_span_start, char_span_end in char_spans:
        (span_start, span_end), error = util.char_span_to_token_span(passage_offsets,
                                                                     (char_span_start, char_span_end))
        if error:
            logger.debug("Passage: %s", passage_text)
            logger.debug("Passage tokens: %s", passage_tokens)
            logger.debug("Question text: %s", question_text)
            logger.debug("Answer span: (%d, %d)", char_span_start, char_span_end)
            logger.debug("Token span: (%d, %d)", span_start, span_end)
            logger.debug("Tokens in answer: %s", passage_tokens[span_start:span_end + 1])
            logger.debug("Answer: %s", passage_text[char_span_start:char_span_end])
        token_spans.append((span_start, span_end))

    return make_reading_comprehension_instance(
            self._tokenizer.tokenize(question_text),
            passage_tokens,
            self._token_indexers,
            passage_text,
            question_id,
            token_spans,
            answer_texts)

# A from_params variant is left commented out in the source:
# @classmethod
# def from_params(cls, params: Params) -> 'Squad2Reader':
#     tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
#     token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
#     lazy = params.pop('lazy', False)
#     params.assert_empty(cls.__name__)
#     return cls(tokenizer=tokenizer, token_indexers=token_indexers, lazy=lazy)
Developer: mandarjoshi90, Project: pair2vec, Lines: 47, Source: squad2_reader.py
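The key step in Example 10 is mapping character-level answer spans onto token indices. The following standalone sketch illustrates that conversion; the import path and the exclusive-end offset convention are assumptions inferred from how the excerpt above builds passage_offsets and slices passage_text.

from allennlp.data.dataset_readers.reading_comprehension import util

# Token offsets for the toy passage "The quick brown", built the same way as
# passage_offsets above: (token.idx, token.idx + len(token.text)).
passage_offsets = [(0, 3), (4, 9), (10, 15)]

# The character span (4, 9) covers exactly the token "quick".
(span_start, span_end), error = util.char_span_to_token_span(passage_offsets, (4, 9))
print(span_start, span_end, error)  # expected: 1 1 False (error flags inexact matches)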


Note: the allennlp.data.token_indexers.TokenIndexer.dict_from_params examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by their original authors; copyright remains with those authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.