

Python util.ensure_list Method: Code Examples

This article collects typical usage examples of the Python method allennlp.common.util.ensure_list. If you are unsure what util.ensure_list does, how to call it, or want to see it used in real code, the curated examples below should help. You can also browse further usage examples from the enclosing module, allennlp.common.util.


The following presents 15 code examples of the util.ensure_list method, drawn from open-source projects and sorted by popularity by default.
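
For context before the examples: ensure_list takes any iterable and returns it as a concrete list, materializing lazy generators (such as those produced by a DatasetReader constructed with lazy=True) while passing existing lists through unchanged. The minimal sketch below illustrates this behavior; it mirrors the documented semantics rather than quoting the library source verbatim.

from typing import Iterable, List, TypeVar

T = TypeVar("T")

def ensure_list(iterable: Iterable[T]) -> List[T]:
    """Return `iterable` as a list, materializing it if it is lazy."""
    if isinstance(iterable, list):
        return iterable        # already a list: passed through unchanged
    return list(iterable)      # generator/iterator: materialized once

# A generator stands in for the lazy output of reader.read(...):
lazy_instances = (n * n for n in range(3))
instances = ensure_list(lazy_instances)
assert instances == [0, 1, 4]
assert ensure_list(instances) is instances  # lists are returned as-is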

Example 1: test_read_from_file_ag_news_corpus_and_truncates_properly

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_read_from_file_ag_news_corpus_and_truncates_properly(self, lazy):
        reader = TextClassificationJsonReader(lazy=lazy, max_sequence_length=5)
        ag_path = (
            AllenNlpTestCase.FIXTURES_ROOT
            / "data"
            / "text_classification_json"
            / "ag_news_corpus.jsonl"
        )
        instances = reader.read(ag_path)
        instances = ensure_list(instances)

        instance1 = {"tokens": ["Memphis", "Rout", "Still", "Stings", "for"], "label": "2"}
        instance2 = {"tokens": ["AP", "-", "Eli", "Manning", "has"], "label": "2"}
        instance3 = {"tokens": ["A", "conference", "dedicated", "to", "online"], "label": "4"}

        assert len(instances) == 3
        fields = instances[0].fields
        assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
        assert fields["label"].label == instance1["label"]
        fields = instances[1].fields
        assert [t.text for t in fields["tokens"].tokens] == instance2["tokens"]
        assert fields["label"].label == instance2["label"]
        fields = instances[2].fields
        assert [t.text for t in fields["tokens"].tokens] == instance3["tokens"]
        assert fields["label"].label == instance3["label"] 
Author: allenai | Project: allennlp | Lines: 27 | Source: text_classification_json_test.py
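
Note that many of the test methods on this page accept a lazy argument (and Example 11 additionally a coding_scheme argument). In the original test suites these values are injected by a pytest parametrization that was stripped when the snippets were extracted, so the methods are not runnable exactly as shown. Below is a hypothetical, self-contained reconstruction of the pattern; the decorator, the read_data helper, and its values are assumptions for illustration, not code copied from the projects above.

import pytest
from allennlp.common.util import ensure_list

def read_data(lazy: bool):
    """Stand-in for DatasetReader.read(): a generator when lazy, a list otherwise."""
    data = ["a", "b", "c"]
    return (x for x in data) if lazy else list(data)

@pytest.mark.parametrize("lazy", (True, False))  # assumed decorator; stripped from the originals
def test_read(lazy):
    instances = ensure_list(read_data(lazy))
    # ensure_list lets the same assertions run in both lazy and eager modes.
    assert instances == ["a", "b", "c"]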

Example 2: test_brown_corpus_format

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_brown_corpus_format(self):
        reader = SequenceTaggingDatasetReader(word_tag_delimiter="/")
        instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / "data" / "brown_corpus.txt")
        instances = ensure_list(instances)

        assert len(instances) == 4
        fields = instances[0].fields
        assert [t.text for t in fields["tokens"].tokens] == ["cats", "are", "animals", "."]
        assert fields["tags"].labels == ["N", "V", "N", "N"]
        fields = instances[1].fields
        assert [t.text for t in fields["tokens"].tokens] == ["dogs", "are", "animals", "."]
        assert fields["tags"].labels == ["N", "V", "N", "N"]
        fields = instances[2].fields
        assert [t.text for t in fields["tokens"].tokens] == ["snakes", "are", "animals", "."]
        assert fields["tags"].labels == ["N", "V", "N", "N"]
        fields = instances[3].fields
        assert [t.text for t in fields["tokens"].tokens] == ["birds", "are", "animals", "."]
        assert fields["tags"].labels == ["N", "V", "N", "N"] 
Author: allenai | Project: allennlp | Lines: 20 | Source: sequence_tagging_test.py

Example 3: test_read_from_file

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_read_from_file(self, lazy):
        reader = LanguageModelingReader(tokens_per_instance=3, lazy=lazy)

        instances = ensure_list(reader.read(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'language_modeling.txt'))
        # The last potential instance is left out, which is ok, because we don't have an end token
        # in here, anyway.
        assert len(instances) == 5

        assert [t.text for t in instances[0].fields[u"input_tokens"].tokens] == [u"This", u"is", u"a"]
        assert [t.text for t in instances[0].fields[u"output_tokens"].tokens] == [u"is", u"a", u"sentence"]

        assert [t.text for t in instances[1].fields[u"input_tokens"].tokens] == [u"sentence", u"for", u"language"]
        assert [t.text for t in instances[1].fields[u"output_tokens"].tokens] == [u"for", u"language", u"modelling"]

        assert [t.text for t in instances[2].fields[u"input_tokens"].tokens] == [u"modelling", u".", u"Here"]
        assert [t.text for t in instances[2].fields[u"output_tokens"].tokens] == [u".", u"Here", u"'s"]

        assert [t.text for t in instances[3].fields[u"input_tokens"].tokens] == [u"'s", u"another", u"one"]
        assert [t.text for t in instances[3].fields[u"output_tokens"].tokens] == [u"another", u"one", u"for"]

        assert [t.text for t in instances[4].fields[u"input_tokens"].tokens] == [u"for", u"extra", u"language"]
        assert [t.text for t in instances[4].fields[u"output_tokens"].tokens] == [u"extra", u"language", u"modelling"] 
Author: plasticityai | Project: magnitude | Lines: 24 | Source: language_modeling_dataset_test.py

Example 4: test_read_from_file

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_read_from_file(self, lazy):
        reader = QuoraParaphraseDatasetReader(lazy=lazy)
        instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'quora_paraphrase.tsv')
        instances = ensure_list(instances)

        instance1 = {u"premise": u"What should I do to avoid sleeping in class ?".split(),
                     u"hypothesis": u"How do I not sleep in a boring class ?".split(),
                     u"label": u"1"}

        instance2 = {u"premise": u"Do women support each other more than men do ?".split(),
                     u"hypothesis": u"Do women need more compliments than men ?".split(),
                     u"label": u"0"}

        instance3 = {u"premise": u"How can one root android devices ?".split(),
                     u"hypothesis": u"How do I root an Android device ?".split(),
                     u"label": u"1"}

        assert len(instances) == 3

        for instance, expected_instance in izip(instances, [instance1, instance2, instance3]):
            fields = instance.fields
            assert [t.text for t in fields[u"premise"].tokens] == expected_instance[u"premise"]
            assert [t.text for t in fields[u"hypothesis"].tokens] == expected_instance[u"hypothesis"]
            assert fields[u"label"].label == expected_instance[u"label"] 
Author: plasticityai | Project: magnitude | Lines: 26 | Source: quora_paraphrase_test.py

Example 5: test_read_from_file

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_read_from_file(self, lazy):
        reader = StanfordSentimentTreeBankDatasetReader(lazy=lazy)
        instances = reader.read(self.sst_path)
        instances = ensure_list(instances)

        instance1 = {u"tokens": [u"The", u"actors", u"are", u"fantastic", u"."],
                     u"label": u"4"}
        instance2 = {u"tokens": [u"It", u"was", u"terrible", u"."],
                     u"label": u"0"}
        instance3 = {u"tokens": [u"Chomp", u"chomp", u"!"],
                     u"label": u"2"}

        assert len(instances) == 3
        fields = instances[0].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance1[u"tokens"]
        assert fields[u"label"].label == instance1[u"label"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance2[u"tokens"]
        assert fields[u"label"].label == instance2[u"label"]
        fields = instances[2].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance3[u"tokens"]
        assert fields[u"label"].label == instance3[u"label"] 
Author: plasticityai | Project: magnitude | Lines: 24 | Source: stanford_sentiment_tree_bank_test.py

Example 6: test_use_subtrees

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_use_subtrees(self):
        reader = StanfordSentimentTreeBankDatasetReader(use_subtrees=True)
        instances = reader.read(self.sst_path)
        instances = ensure_list(instances)

        instance1 = {u"tokens": [u"The", u"actors", u"are", u"fantastic", u"."],
                     u"label": u"4"}
        instance2 = {u"tokens": [u"The", u"actors"],
                     u"label": u"2"}
        instance3 = {u"tokens": [u"The"],
                     u"label": u"2"}

        assert len(instances) == 21
        fields = instances[0].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance1[u"tokens"]
        assert fields[u"label"].label == instance1[u"label"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance2[u"tokens"]
        assert fields[u"label"].label == instance2[u"label"]
        fields = instances[2].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance3[u"tokens"]
        assert fields[u"label"].label == instance3[u"label"] 
Author: plasticityai | Project: magnitude | Lines: 24 | Source: stanford_sentiment_tree_bank_test.py

Example 7: test_3_class

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_3_class(self):
        reader = StanfordSentimentTreeBankDatasetReader(granularity=u"3-class")
        instances = reader.read(self.sst_path)
        instances = ensure_list(instances)

        instance1 = {u"tokens": [u"The", u"actors", u"are", u"fantastic", u"."],
                     u"label": u"2"}
        instance2 = {u"tokens": [u"It", u"was", u"terrible", u"."],
                     u"label": u"0"}
        instance3 = {u"tokens": [u"Chomp", u"chomp", u"!"],
                     u"label": u"1"}

        assert len(instances) == 3
        fields = instances[0].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance1[u"tokens"]
        assert fields[u"label"].label == instance1[u"label"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance2[u"tokens"]
        assert fields[u"label"].label == instance2[u"label"]
        fields = instances[2].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance3[u"tokens"]
        assert fields[u"label"].label == instance3[u"label"] 
Author: plasticityai | Project: magnitude | Lines: 24 | Source: stanford_sentiment_tree_bank_test.py

Example 8: test_2_class

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_2_class(self):
        reader = StanfordSentimentTreeBankDatasetReader(granularity=u"2-class")
        instances = reader.read(self.sst_path)
        instances = ensure_list(instances)

        instance1 = {u"tokens": [u"The", u"actors", u"are", u"fantastic", u"."],
                     u"label": u"1"}
        instance2 = {u"tokens": [u"It", u"was", u"terrible", u"."],
                     u"label": u"0"}

        assert len(instances) == 2
        fields = instances[0].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance1[u"tokens"]
        assert fields[u"label"].label == instance1[u"label"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance2[u"tokens"]
        assert fields[u"label"].label == instance2[u"label"] 
Author: plasticityai | Project: magnitude | Lines: 19 | Source: stanford_sentiment_tree_bank_test.py

Example 9: test_default_format

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_default_format(self, lazy):
        reader = SequenceTaggingDatasetReader(lazy=lazy)
        instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'sequence_tagging.tsv')
        instances = ensure_list(instances)

        assert len(instances) == 4
        fields = instances[0].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"cats", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"dogs", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"]
        fields = instances[2].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"snakes", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"]
        fields = instances[3].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"birds", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"] 
Author: plasticityai | Project: magnitude | Lines: 20 | Source: sequence_tagging_test.py

Example 10: test_brown_corpus_format

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_brown_corpus_format(self):
        reader = SequenceTaggingDatasetReader(word_tag_delimiter=u'/')
        instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'brown_corpus.txt')
        instances = ensure_list(instances)

        assert len(instances) == 4
        fields = instances[0].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"cats", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"dogs", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"]
        fields = instances[2].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"snakes", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"]
        fields = instances[3].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"birds", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"] 
Author: plasticityai | Project: magnitude | Lines: 20 | Source: sequence_tagging_test.py

Example 11: test_read_from_file

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_read_from_file(self, lazy, coding_scheme):
        conll_reader = Conll2003DatasetReader(lazy=lazy, coding_scheme=coding_scheme)
        instances = conll_reader.read(unicode(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'conll2003.txt'))
        instances = ensure_list(instances)

        if coding_scheme == u'IOB1':
            expected_labels = [u'I-ORG', u'O', u'I-PER', u'O', u'O', u'I-LOC', u'O']
        else:
            expected_labels = [u'U-ORG', u'O', u'U-PER', u'O', u'O', u'U-LOC', u'O']

        fields = instances[0].fields
        tokens = [t.text for t in fields[u'tokens'].tokens]
        assert tokens == [u'U.N.', u'official', u'Ekeus', u'heads', u'for', u'Baghdad', u'.']
        assert fields[u"tags"].labels == expected_labels

        fields = instances[1].fields
        tokens = [t.text for t in fields[u'tokens'].tokens]
        assert tokens == [u'AI2', u'engineer', u'Joel', u'lives', u'in', u'Seattle', u'.']
        assert fields[u"tags"].labels == expected_labels 
Author: plasticityai | Project: magnitude | Lines: 21 | Source: conll2003_dataset_reader_test.py

Example 12: test_default_format

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_default_format(self, lazy):
        reader = Seq2SeqDatasetReader(lazy=lazy)
        instances = reader.read(unicode(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'seq2seq_copy.tsv'))
        instances = ensure_list(instances)

        assert len(instances) == 3
        fields = instances[0].fields
        assert [t.text for t in fields[u"source_tokens"].tokens] == [u"@start@", u"this", u"is",
                                                                    u"a", u"sentence", u"@end@"]
        assert [t.text for t in fields[u"target_tokens"].tokens] == [u"@start@", u"this", u"is",
                                                                    u"a", u"sentence", u"@end@"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"source_tokens"].tokens] == [u"@start@", u"this", u"is",
                                                                    u"another", u"@end@"]
        assert [t.text for t in fields[u"target_tokens"].tokens] == [u"@start@", u"this", u"is",
                                                                    u"another", u"@end@"]
        fields = instances[2].fields
        assert [t.text for t in fields[u"source_tokens"].tokens] == [u"@start@", u"all", u"these", u"sentences",
                                                                    u"should", u"get", u"copied", u"@end@"]
        assert [t.text for t in fields[u"target_tokens"].tokens] == [u"@start@", u"all", u"these", u"sentences",
                                                                    u"should", u"get", u"copied", u"@end@"] 
Author: plasticityai | Project: magnitude | Lines: 23 | Source: seq2seq_test.py

Example 13: test_read_from_file

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_read_from_file(self):
        # pylint: disable=R0201
        reader = IMDBLanguageModelingReader()
        dataset = reader.read(TestIMDBReader.DATASET_PATH)
        instances = ensure_list(dataset)

        assert len(instances) == 10
        fields = instances[0].fields
        assert [t.text for t in fields["source"].tokens] == TestIMDBReader.INSTANCE_0["source"]
        assert [t.text for t in fields["target"].tokens] == TestIMDBReader.INSTANCE_0["target"]
        fields = instances[1].fields
        assert [t.text for t in fields["source"].tokens] == TestIMDBReader.INSTANCE_1["source"]
        assert [t.text for t in fields["target"].tokens] == TestIMDBReader.INSTANCE_1["target"]
        fields = instances[7].fields
        assert [t.text for t in fields["source"].tokens] == TestIMDBReader.INSTANCE_7["source"]
        assert [t.text for t in fields["target"].tokens] == TestIMDBReader.INSTANCE_7["target"] 
Author: dangitstam | Project: topic-rnn | Lines: 18 | Source: imdb_test.py

Example 14: test_tacred_dataset_reader

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_tacred_dataset_reader(self):
        reader = get_reader()
        instances = ensure_list(reader.read('tests/fixtures/tacred/LDC2018T24.json'))

        # Check number of instances is correct
        self.assertEqual(len(instances), 3)

        # Check that first instance's tokens are correct
        tokens_0 = [x.text for x in instances[0]['tokens']]

        initial_tokens_0 = tokens_0[:6]
        expected_initial_tokens_0 = ['[CLS]', 'douglas', 'flint', '[SEP]', 'chairman', '[SEP]']
        self.assertListEqual(initial_tokens_0, expected_initial_tokens_0)

        final_tokens_0 = tokens_0[-6:]
        expected_final_tokens_0 = ['a', 'govern', '##ment', '[UNK]', '.', '[SEP]']
        self.assertListEqual(final_tokens_0, expected_final_tokens_0)

        # Check that first instances label is correct
        label_0 = instances[0]['label_ids'].label
        expected_label_0 = LABEL_MAP['per:title']
        self.assertEqual(label_0, expected_label_0) 
Author: allenai | Project: kb | Lines: 24 | Source: test_tacred_reader.py

Example 15: test_entity_mask

# Required import: from allennlp.common import util
# Or: from allennlp.common.util import ensure_list
def test_entity_mask(self):
        # Check 'mask' mode has expected behavior
        reader = get_reader()
        reader.entity_masking = 'mask'
        instances = ensure_list(reader.read('tests/fixtures/tacred/LDC2018T24.json'))

        tokens_0 = [x.text for x in instances[0]['tokens']]
        subj_tokens_0 = tokens_0[14]
        self.assertEqual(subj_tokens_0, '[MASK]')

        tokens_0 = [x.text for x in instances[0]['tokens']]
        obj_tokens_0 = tokens_0[17]
        self.assertEqual(obj_tokens_0, '[MASK]')

        # Check 'type/role' mode has expected behavior
        reader.entity_masking = 'type/role'
        instances = ensure_list(reader.read('tests/fixtures/tacred/LDC2018T24.json'))

        tokens_0 = [x.text for x in instances[0]['tokens']]
        subj_tokens_0 = tokens_0[14]
        self.assertEqual(subj_tokens_0, '[s-person]')

        tokens_0 = [x.text for x in instances[0]['tokens']]
        obj_tokens_0 = tokens_0[17]
        self.assertEqual(obj_tokens_0, '[o-title]') 
Author: allenai | Project: kb | Lines: 27 | Source: test_tacred_reader.py


Note: The allennlp.common.util.ensure_list examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; for redistribution and use, refer to the corresponding project's license. Please do not reproduce this article without permission.