

Python fields.ListField Code Examples

This article collects typical usage examples of allennlp.data.fields.ListField in Python. If you are wondering how to use fields.ListField, what it does, or what working code looks like, the curated examples below should help. You can also explore further usage examples from the allennlp.data.fields module.


The sections below show 15 code examples of fields.ListField, sorted by popularity by default.
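
Before diving into the examples, here is a minimal, self-contained sketch of what ListField does: it wraps a homogeneous list of Field objects (TextField entries in this case) so that shorter entries are padded to a common length and the whole list can be converted to one tensor. The sentences and the "sentences" field name below are made up for illustration, and the sketch assumes allennlp 1.x or later; only the allennlp API calls themselves are taken from the library.

from allennlp.data import Instance, Token, Vocabulary
from allennlp.data.fields import ListField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer

indexers = {"tokens": SingleIdTokenIndexer()}
sentences = ["a short sentence", "a slightly longer example sentence"]

# One TextField per sentence, all sharing the same token indexers.
text_fields = [TextField([Token(w) for w in s.split()], indexers) for s in sentences]

# ListField requires all members to be the same Field type.
list_field = ListField(text_fields)
instance = Instance({"sentences": list_field})

vocab = Vocabulary.from_instances([instance])
instance.index_fields(vocab)

# Padding lengths cover both the number of fields and the longest token sequence.
padding_lengths = list_field.get_padding_lengths()
tensors = list_field.as_tensor(padding_lengths)  # shorter sentences are zero-padded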

Example 1: text_to_instance

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def text_to_instance(self,  # type: ignore
                         item_id: Any,
                         question_text: str,
                         choice_text_list: List[str],
                         answer_id: int
                         ) -> Instance:
        # pylint: disable=arguments-differ
        fields: Dict[str, Field] = {}
        question_tokens = self._tokenizer.tokenize(question_text)
        choices_tokens_list = [self._tokenizer.tokenize(x) for x in choice_text_list]
        fields['question'] = TextField(question_tokens, self._token_indexers)
        fields['choices_list'] = ListField([TextField(x, self._token_indexers) for x in choices_tokens_list])
        fields['label'] = LabelField(answer_id, skip_indexing=True)

        metadata = {
            "id": item_id,
            "question_text": question_text,
            "choice_text_list": choice_text_list,
            "question_tokens": [x.text for x in question_tokens],
            "choice_tokens_list": [[x.text for x in ct] for ct in choices_tokens_list],
        }

        fields["metadata"] = MetadataField(metadata)

        return Instance(fields) 
Developer: allenai | Project: OpenBookQA | Lines: 27 | Source: arc_multichoice_json_reader.py

Example 2: test_padding_handles_list_fields_with_padding_values

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_padding_handles_list_fields_with_padding_values(self):
        array1 = ArrayField(numpy.ones([2, 3]), padding_value=-1)
        array2 = ArrayField(numpy.ones([1, 5]), padding_value=-1)
        empty_array = array1.empty_field()
        list_field = ListField([array1, array2, empty_array])

        returned_tensor = (
            list_field.as_tensor(list_field.get_padding_lengths()).detach().cpu().numpy()
        )
        correct_tensor = numpy.array(
            [
                [[1.0, 1.0, 1.0, -1.0, -1.0], [1.0, 1.0, 1.0, -1.0, -1.0]],
                [[1.0, 1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0, -1.0]],
                [[-1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0]],
            ]
        )
        numpy.testing.assert_array_equal(returned_tensor, correct_tensor) 
Developer: allenai | Project: allennlp | Lines: 19 | Source: array_field_test.py

Example 3: test_fields_can_pad_to_greater_than_max_length

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_fields_can_pad_to_greater_than_max_length(self):
        list_field = ListField([self.field1, self.field2, self.field3])
        list_field.index(self.vocab)
        padding_lengths = list_field.get_padding_lengths()
        padding_lengths["list_num_tokens"] = 7
        padding_lengths["num_fields"] = 5
        tensor_dict = list_field.as_tensor(padding_lengths)
        numpy.testing.assert_array_almost_equal(tensor_dict["words"][0].detach().cpu().numpy(),
                                                numpy.array([2, 3, 4, 5, 0, 0, 0]))
        numpy.testing.assert_array_almost_equal(tensor_dict["words"][1].detach().cpu().numpy(),
                                                numpy.array([2, 3, 4, 1, 5, 0, 0]))
        numpy.testing.assert_array_almost_equal(tensor_dict["words"][2].detach().cpu().numpy(),
                                                numpy.array([2, 3, 1, 5, 0, 0, 0]))
        numpy.testing.assert_array_almost_equal(tensor_dict["words"][3].detach().cpu().numpy(),
                                                numpy.array([0, 0, 0, 0, 0, 0, 0]))
        numpy.testing.assert_array_almost_equal(tensor_dict["words"][4].detach().cpu().numpy(),
                                                numpy.array([0, 0, 0, 0, 0, 0, 0]))
Developer: plasticityai | Project: magnitude | Lines: 19 | Source: list_field_test.py

Example 4: text_to_instance

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def text_to_instance(self, text: str, sentences: List[str] = None, tags: List[int] = None) -> Instance:
        if sentences is None:
            if self._language == "ru":
                sentences = [s.text for s in razdel.sentenize(text)]
            else:
                sentences = nltk.tokenize.sent_tokenize(text)
        sentences_tokens = []
        for sentence in sentences[:self._max_sentences_count]:
            sentence = sentence.lower() if self._lowercase else sentence
            tokens = self._tokenizer.tokenize(sentence)[:self._sentence_max_tokens]
            tokens.insert(0, Token(START_SYMBOL))
            tokens.append(Token(END_SYMBOL))
            indexed_tokens = TextField(tokens, self._source_token_indexers)
            sentences_tokens.append(indexed_tokens)

        sentences_tokens_indexed = ListField(sentences_tokens)
        result = {'source_sentences': sentences_tokens_indexed}

        if tags:
            result["sentences_tags"] = SequenceLabelField(tags[:self._max_sentences_count], sentences_tokens_indexed)
        return Instance(result) 
Developer: IlyaGusev | Project: summarus | Lines: 23 | Source: summarization_sentence_tagger_reader.py

Example 5: text_to_instance

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def text_to_instance(self,  # type: ignore
                         item_id: Any,
                         question_text: str,
                         choice_text_list: List[str],
                         answer_id: int) -> Instance:
        # pylint: disable=arguments-differ
        fields: Dict[str, Field] = {}
        question_tokens = self._tokenizer.tokenize(question_text)
        choices_tokens_list = [self._tokenizer.tokenize(x) for x in choice_text_list]
        fields['question'] = TextField(question_tokens, self._token_indexers)
        fields['choices_list'] = ListField([TextField(x, self._token_indexers) for x in choices_tokens_list])
        fields['label'] = LabelField(answer_id, skip_indexing=True)

        metadata = {
           "id": item_id,
           "question_text": question_text,
           "choice_text_list": choice_text_list,
           "question_tokens": [x.text for x in question_tokens],
           "choice_tokens_list": [[x.text for x in ct] for ct in choices_tokens_list],
        }

        fields["metadata"] = MetadataField(metadata)

        return Instance(fields) 
Developer: allenai | Project: ARC-Solvers | Lines: 26 | Source: arc_multichoice_json_reader.py

Example 6: text_to_instance

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def text_to_instance(
        self,  # type: ignore
        context: List[List[str]],
        question: List[str],
        answer: str,
        supports: List[int],
    ) -> Instance:

        fields: Dict[str, Field] = {}

        if self._keep_sentences:
            context_field_ks = ListField(
                [
                    TextField([Token(word) for word in line], self._token_indexers)
                    for line in context
                ]
            )

            fields["supports"] = ListField(
                [IndexField(support, context_field_ks) for support in supports]
            )
        else:
            context_field = TextField(
                [Token(word) for line in context for word in line], self._token_indexers
            )

        fields["context"] = context_field_ks if self._keep_sentences else context_field
        fields["question"] = TextField([Token(word) for word in question], self._token_indexers)
        fields["answer"] = TextField([Token(answer)], self._token_indexers)

        return Instance(fields) 
Developer: allenai | Project: allennlp | Lines: 33 | Source: babi.py

Example 7: text_to_instance

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def text_to_instance(
        self, text: str, label: Union[str, int] = None
    ) -> Instance:  # type: ignore
        """
        # Parameters

        text : `str`, required.
            The text to classify
        label : `str`, optional, (default = `None`).
            The label for this text.

        # Returns

        An `Instance` containing the following fields:
            - tokens (`TextField`) :
              The tokens in the sentence or phrase.
            - label (`LabelField`) :
              The label of the sentence or phrase.
        """

        fields: Dict[str, Field] = {}
        if self._segment_sentences:
            sentences: List[Field] = []
            sentence_splits = self._sentence_segmenter.split_sentences(text)
            for sentence in sentence_splits:
                word_tokens = self._tokenizer.tokenize(sentence)
                if self._max_sequence_length is not None:
                    word_tokens = self._truncate(word_tokens)
                sentences.append(TextField(word_tokens, self._token_indexers))
            fields["tokens"] = ListField(sentences)
        else:
            tokens = self._tokenizer.tokenize(text)
            if self._max_sequence_length is not None:
                tokens = self._truncate(tokens)
            fields["tokens"] = TextField(tokens, self._token_indexers)
        if label is not None:
            fields["label"] = LabelField(label, skip_indexing=self._skip_label_indexing)
        return Instance(fields) 
Developer: allenai | Project: allennlp | Lines: 40 | Source: text_classification_json.py

Example 8: test_elmo_empty_token_list

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_elmo_empty_token_list(self):
        # Basic test
        indexer = ELMoTokenCharactersIndexer()
        assert {"elmo_tokens": []} == indexer.get_empty_token_list()
        # Real world test
        indexer = {"elmo": indexer}
        tokens_1 = TextField([Token("Apple")], indexer)
        targets_1 = ListField([TextField([Token("Apple")], indexer)])
        tokens_2 = TextField([Token("Screen"), Token("device")], indexer)
        targets_2 = ListField(
            [TextField([Token("Screen")], indexer), TextField([Token("Device")], indexer)]
        )
        instance_1 = Instance({"tokens": tokens_1, "targets": targets_1})
        instance_2 = Instance({"tokens": tokens_2, "targets": targets_2})
        a_batch = Batch([instance_1, instance_2])
        a_batch.index_instances(Vocabulary())
        batch_tensor = a_batch.as_tensor_dict()
        elmo_target_token_indices = batch_tensor["targets"]["elmo"]["elmo_tokens"]
        # The TextField that is empty should have been created using the
        # `get_empty_token_list` and then padded with zeros.
        empty_target = elmo_target_token_indices[0][1].numpy()
        np.testing.assert_array_equal(np.zeros((1, 50)), empty_target)
        non_empty_targets = [
            elmo_target_token_indices[0][0],
            elmo_target_token_indices[1][0],
            elmo_target_token_indices[1][1],
        ]
        for non_empty_target in non_empty_targets:
            with pytest.raises(AssertionError):
                np.testing.assert_array_equal(np.zeros((1, 50)), non_empty_target) 
Developer: allenai | Project: allennlp | Lines: 32 | Source: elmo_indexer_test.py

Example 9: test_get_padding_lengths

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_get_padding_lengths(self):
        list_field = ListField([self.field1, self.field2, self.field3])
        list_field.index(self.vocab)
        lengths = list_field.get_padding_lengths()
        assert lengths == {"num_fields": 3, "list_words___tokens": 5} 
Developer: allenai | Project: allennlp | Lines: 7 | Source: list_field_test.py

Example 10: test_list_field_can_handle_empty_index_fields

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_list_field_can_handle_empty_index_fields(self):
        list_field = ListField([self.index_field, self.index_field, self.empty_index_field])
        list_field.index(self.vocab)
        tensor = list_field.as_tensor(list_field.get_padding_lengths())
        numpy.testing.assert_array_equal(
            tensor.detach().cpu().numpy(), numpy.array([[1], [1], [-1]])
        ) 
Developer: allenai | Project: allennlp | Lines: 9 | Source: list_field_test.py

Example 11: test_list_field_can_handle_empty_sequence_label_fields

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_list_field_can_handle_empty_sequence_label_fields(self):
        list_field = ListField(
            [self.sequence_label_field, self.sequence_label_field, self.empty_sequence_label_field]
        )
        list_field.index(self.vocab)
        tensor = list_field.as_tensor(list_field.get_padding_lengths())
        numpy.testing.assert_array_equal(
            tensor.detach().cpu().numpy(), numpy.array([[1, 1, 0, 1], [1, 1, 0, 1], [0, 0, 0, 0]])
        ) 
Developer: allenai | Project: allennlp | Lines: 11 | Source: list_field_test.py

Example 12: test_all_fields_padded_to_max_length

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_all_fields_padded_to_max_length(self):
        list_field = ListField([self.field1, self.field2, self.field3])
        list_field.index(self.vocab)
        tensor_dict = list_field.as_tensor(list_field.get_padding_lengths())
        numpy.testing.assert_array_almost_equal(
            tensor_dict["words"]["tokens"][0].detach().cpu().numpy(), numpy.array([2, 3, 4, 5, 0])
        )
        numpy.testing.assert_array_almost_equal(
            tensor_dict["words"]["tokens"][1].detach().cpu().numpy(), numpy.array([2, 3, 4, 1, 5])
        )
        numpy.testing.assert_array_almost_equal(
            tensor_dict["words"]["tokens"][2].detach().cpu().numpy(), numpy.array([2, 3, 1, 5, 0])
        ) 
Developer: allenai | Project: allennlp | Lines: 15 | Source: list_field_test.py

Example 13: test_nested_list_fields_are_padded_correctly

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_nested_list_fields_are_padded_correctly(self):
        nested_field1 = ListField([LabelField(c) for c in ["a", "b", "c", "d", "e"]])
        nested_field2 = ListField([LabelField(c) for c in ["f", "g", "h", "i", "j", "k"]])
        list_field = ListField([nested_field1.empty_field(), nested_field1, nested_field2])
        list_field.index(self.vocab)
        padding_lengths = list_field.get_padding_lengths()
        assert padding_lengths == {"num_fields": 3, "list_num_fields": 6}
        tensor = list_field.as_tensor(padding_lengths).detach().cpu().numpy()
        numpy.testing.assert_almost_equal(
            tensor, [[-1, -1, -1, -1, -1, -1], [0, 1, 2, 3, 4, -1], [5, 6, 7, 8, 9, 10]]
        ) 
Developer: allenai | Project: allennlp | Lines: 13 | Source: list_field_test.py

Example 14: test_fields_can_pad_to_greater_than_max_length

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_fields_can_pad_to_greater_than_max_length(self):
        list_field = ListField([self.field1, self.field2, self.field3])
        list_field.index(self.vocab)
        padding_lengths = list_field.get_padding_lengths()
        padding_lengths["list_words___tokens"] = 7
        padding_lengths["num_fields"] = 5
        tensor_dict = list_field.as_tensor(padding_lengths)
        numpy.testing.assert_array_almost_equal(
            tensor_dict["words"]["tokens"][0].detach().cpu().numpy(),
            numpy.array([2, 3, 4, 5, 0, 0, 0]),
        )
        numpy.testing.assert_array_almost_equal(
            tensor_dict["words"]["tokens"][1].detach().cpu().numpy(),
            numpy.array([2, 3, 4, 1, 5, 0, 0]),
        )
        numpy.testing.assert_array_almost_equal(
            tensor_dict["words"]["tokens"][2].detach().cpu().numpy(),
            numpy.array([2, 3, 1, 5, 0, 0, 0]),
        )
        numpy.testing.assert_array_almost_equal(
            tensor_dict["words"]["tokens"][3].detach().cpu().numpy(),
            numpy.array([0, 0, 0, 0, 0, 0, 0]),
        )
        numpy.testing.assert_array_almost_equal(
            tensor_dict["words"]["tokens"][4].detach().cpu().numpy(),
            numpy.array([0, 0, 0, 0, 0, 0, 0]),
        ) 
Developer: allenai | Project: allennlp | Lines: 29 | Source: list_field_test.py

Example 15: test_printing_doesnt_crash

# Required import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_printing_doesnt_crash(self):
        list_field = ListField([self.field1, self.field2])
        print(list_field) 
Developer: allenai | Project: allennlp | Lines: 5 | Source: list_field_test.py


Note: The allennlp.data.fields.ListField examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; distribution and use should follow the License of the corresponding project. Please do not reproduce without permission.