

Python util.ensure_list Method Code Examples

This article collects typical usage examples of the Python method allennlp.common.util.ensure_list. If you are wondering what util.ensure_list does, or how and where to use it, the curated examples below should help. You can also explore further usage examples from its containing module, allennlp.common.util.


The sections below present 15 code examples of the util.ensure_list method, sorted by popularity by default.
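For context: in AllenNLP, a DatasetReader built with lazy=True may return a generator from read() rather than a list, which is why the tests below call ensure_list before indexing instances or taking len(). The following is a minimal sketch of the behavior ensure_list provides (a simplified rendering for orientation, not a verbatim copy of the library source):

from typing import Iterable, List, TypeVar

A = TypeVar("A")

def ensure_list(iterable: Iterable[A]) -> List[A]:
    # A list is returned unchanged, avoiding an unnecessary copy;
    # any other iterable (e.g. a lazy generator) is exhausted into a new list.
    if isinstance(iterable, list):
        return iterable
    return list(iterable)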

Example 1: test_read_from_file_ag_news_corpus_and_truncates_properly

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_read_from_file_ag_news_corpus_and_truncates_properly(self, lazy):
        reader = TextClassificationJsonReader(lazy=lazy, max_sequence_length=5)
        ag_path = (
            AllenNlpTestCase.FIXTURES_ROOT
            / "data"
            / "text_classification_json"
            / "ag_news_corpus.jsonl"
        )
        instances = reader.read(ag_path)
        instances = ensure_list(instances)

        instance1 = {"tokens": ["Memphis", "Rout", "Still", "Stings", "for"], "label": "2"}
        instance2 = {"tokens": ["AP", "-", "Eli", "Manning", "has"], "label": "2"}
        instance3 = {"tokens": ["A", "conference", "dedicated", "to", "online"], "label": "4"}

        assert len(instances) == 3
        fields = instances[0].fields
        assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
        assert fields["label"].label == instance1["label"]
        fields = instances[1].fields
        assert [t.text for t in fields["tokens"].tokens] == instance2["tokens"]
        assert fields["label"].label == instance2["label"]
        fields = instances[2].fields
        assert [t.text for t in fields["tokens"].tokens] == instance3["tokens"]
        assert fields["label"].label == instance3["label"] 
Developer: allenai | Project: allennlp | Lines of code: 27 | Source: text_classification_json_test.py
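Since every example on this page repeats the same pattern, it is worth spelling out once: read() may hand back a lazy iterable, and ensure_list makes the result safe to index and measure. A hedged sketch of that pattern, reusing the names from Example 1 (the path here is illustrative):

reader = TextClassificationJsonReader(lazy=True, max_sequence_length=5)
instances = reader.read("data/ag_news_corpus.jsonl")  # may be a generator, not a list
instances = ensure_list(instances)                    # materialize it
assert len(instances) == 3                            # len() and indexing now work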

Example 2: test_brown_corpus_format

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_brown_corpus_format(self):
        reader = SequenceTaggingDatasetReader(word_tag_delimiter="/")
        instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / "data" / "brown_corpus.txt")
        instances = ensure_list(instances)

        assert len(instances) == 4
        fields = instances[0].fields
        assert [t.text for t in fields["tokens"].tokens] == ["cats", "are", "animals", "."]
        assert fields["tags"].labels == ["N", "V", "N", "N"]
        fields = instances[1].fields
        assert [t.text for t in fields["tokens"].tokens] == ["dogs", "are", "animals", "."]
        assert fields["tags"].labels == ["N", "V", "N", "N"]
        fields = instances[2].fields
        assert [t.text for t in fields["tokens"].tokens] == ["snakes", "are", "animals", "."]
        assert fields["tags"].labels == ["N", "V", "N", "N"]
        fields = instances[3].fields
        assert [t.text for t in fields["tokens"].tokens] == ["birds", "are", "animals", "."]
        assert fields["tags"].labels == ["N", "V", "N", "N"] 
Developer: allenai | Project: allennlp | Lines of code: 20 | Source: sequence_tagging_test.py

Example 3: test_read_from_file

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_read_from_file(self, lazy):
        reader = LanguageModelingReader(tokens_per_instance=3, lazy=lazy)

        instances = ensure_list(reader.read(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'language_modeling.txt'))
        # The last potential instance is left out, which is ok, because we don't have an end token
        # in here, anyway.
        assert len(instances) == 5

        assert [t.text for t in instances[0].fields[u"input_tokens"].tokens] == [u"This", u"is", u"a"]
        assert [t.text for t in instances[0].fields[u"output_tokens"].tokens] == [u"is", u"a", u"sentence"]

        assert [t.text for t in instances[1].fields[u"input_tokens"].tokens] == [u"sentence", u"for", u"language"]
        assert [t.text for t in instances[1].fields[u"output_tokens"].tokens] == [u"for", u"language", u"modelling"]

        assert [t.text for t in instances[2].fields[u"input_tokens"].tokens] == [u"modelling", u".", u"Here"]
        assert [t.text for t in instances[2].fields[u"output_tokens"].tokens] == [u".", u"Here", u"'s"]

        assert [t.text for t in instances[3].fields[u"input_tokens"].tokens] == [u"'s", u"another", u"one"]
        assert [t.text for t in instances[3].fields[u"output_tokens"].tokens] == [u"another", u"one", u"for"]

        assert [t.text for t in instances[4].fields[u"input_tokens"].tokens] == [u"for", u"extra", u"language"]
        assert [t.text for t in instances[4].fields[u"output_tokens"].tokens] == [u"extra", u"language", u"modelling"] 
Developer: plasticityai | Project: magnitude | Lines of code: 24 | Source: language_modeling_dataset_test.py

Example 4: test_read_from_file

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_read_from_file(self, lazy):
        reader = QuoraParaphraseDatasetReader(lazy=lazy)
        instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'quora_paraphrase.tsv')
        instances = ensure_list(instances)

        instance1 = {u"premise": u"What should I do to avoid sleeping in class ?".split(),
                     u"hypothesis": u"How do I not sleep in a boring class ?".split(),
                     u"label": u"1"}

        instance2 = {u"premise": u"Do women support each other more than men do ?".split(),
                     u"hypothesis": u"Do women need more compliments than men ?".split(),
                     u"label": u"0"}

        instance3 = {u"premise": u"How can one root android devices ?".split(),
                     u"hypothesis": u"How do I root an Android device ?".split(),
                     u"label": u"1"}

        assert len(instances) == 3

        for instance, expected_instance in zip(instances, [instance1, instance2, instance3]):  # izip (Python 2) in the original source; zip under Python 3
            fields = instance.fields
            assert [t.text for t in fields[u"premise"].tokens] == expected_instance[u"premise"]
            assert [t.text for t in fields[u"hypothesis"].tokens] == expected_instance[u"hypothesis"]
            assert fields[u"label"].label == expected_instance[u"label"] 
Developer: plasticityai | Project: magnitude | Lines of code: 26 | Source: quora_paraphrase_test.py

Example 5: test_read_from_file

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_read_from_file(self, lazy):
        reader = StanfordSentimentTreeBankDatasetReader(lazy=lazy)
        instances = reader.read(self.sst_path)
        instances = ensure_list(instances)

        instance1 = {u"tokens": [u"The", u"actors", u"are", u"fantastic", u"."],
                     u"label": u"4"}
        instance2 = {u"tokens": [u"It", u"was", u"terrible", u"."],
                     u"label": u"0"}
        instance3 = {u"tokens": [u"Chomp", u"chomp", u"!"],
                     u"label": u"2"}

        assert len(instances) == 3
        fields = instances[0].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance1[u"tokens"]
        assert fields[u"label"].label == instance1[u"label"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance2[u"tokens"]
        assert fields[u"label"].label == instance2[u"label"]
        fields = instances[2].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance3[u"tokens"]
        assert fields[u"label"].label == instance3[u"label"] 
Developer: plasticityai | Project: magnitude | Lines of code: 24 | Source: stanford_sentiment_tree_bank_test.py

Example 6: test_use_subtrees

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_use_subtrees(self):
        reader = StanfordSentimentTreeBankDatasetReader(use_subtrees=True)
        instances = reader.read(self.sst_path)
        instances = ensure_list(instances)

        instance1 = {u"tokens": [u"The", u"actors", u"are", u"fantastic", u"."],
                     u"label": u"4"}
        instance2 = {u"tokens": [u"The", u"actors"],
                     u"label": u"2"}
        instance3 = {u"tokens": [u"The"],
                     u"label": u"2"}

        assert len(instances) == 21
        fields = instances[0].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance1[u"tokens"]
        assert fields[u"label"].label == instance1[u"label"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance2[u"tokens"]
        assert fields[u"label"].label == instance2[u"label"]
        fields = instances[2].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance3[u"tokens"]
        assert fields[u"label"].label == instance3[u"label"] 
Developer: plasticityai | Project: magnitude | Lines of code: 24 | Source: stanford_sentiment_tree_bank_test.py

Example 7: test_3_class

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_3_class(self):
        reader = StanfordSentimentTreeBankDatasetReader(granularity=u"3-class")
        instances = reader.read(self.sst_path)
        instances = ensure_list(instances)

        instance1 = {u"tokens": [u"The", u"actors", u"are", u"fantastic", u"."],
                     u"label": u"2"}
        instance2 = {u"tokens": [u"It", u"was", u"terrible", u"."],
                     u"label": u"0"}
        instance3 = {u"tokens": [u"Chomp", u"chomp", u"!"],
                     u"label": u"1"}

        assert len(instances) == 3
        fields = instances[0].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance1[u"tokens"]
        assert fields[u"label"].label == instance1[u"label"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance2[u"tokens"]
        assert fields[u"label"].label == instance2[u"label"]
        fields = instances[2].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance3[u"tokens"]
        assert fields[u"label"].label == instance3[u"label"] 
Developer: plasticityai | Project: magnitude | Lines of code: 24 | Source: stanford_sentiment_tree_bank_test.py

Example 8: test_2_class

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_2_class(self):
        reader = StanfordSentimentTreeBankDatasetReader(granularity=u"2-class")
        instances = reader.read(self.sst_path)
        instances = ensure_list(instances)

        instance1 = {u"tokens": [u"The", u"actors", u"are", u"fantastic", u"."],
                     u"label": u"1"}
        instance2 = {u"tokens": [u"It", u"was", u"terrible", u"."],
                     u"label": u"0"}

        assert len(instances) == 2
        fields = instances[0].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance1[u"tokens"]
        assert fields[u"label"].label == instance1[u"label"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"tokens"].tokens] == instance2[u"tokens"]
        assert fields[u"label"].label == instance2[u"label"] 
Developer: plasticityai | Project: magnitude | Lines of code: 19 | Source: stanford_sentiment_tree_bank_test.py

Example 9: test_default_format

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_default_format(self, lazy):
        reader = SequenceTaggingDatasetReader(lazy=lazy)
        instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'sequence_tagging.tsv')
        instances = ensure_list(instances)

        assert len(instances) == 4
        fields = instances[0].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"cats", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"dogs", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"]
        fields = instances[2].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"snakes", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"]
        fields = instances[3].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"birds", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"] 
Developer: plasticityai | Project: magnitude | Lines of code: 20 | Source: sequence_tagging_test.py

Example 10: test_brown_corpus_format

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_brown_corpus_format(self):
        reader = SequenceTaggingDatasetReader(word_tag_delimiter=u'/')
        instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'brown_corpus.txt')
        instances = ensure_list(instances)

        assert len(instances) == 4
        fields = instances[0].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"cats", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"dogs", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"]
        fields = instances[2].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"snakes", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"]
        fields = instances[3].fields
        assert [t.text for t in fields[u"tokens"].tokens] == [u"birds", u"are", u"animals", u"."]
        assert fields[u"tags"].labels == [u"N", u"V", u"N", u"N"] 
Developer: plasticityai | Project: magnitude | Lines of code: 20 | Source: sequence_tagging_test.py

Example 11: test_read_from_file

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_read_from_file(self, lazy, coding_scheme):
        conll_reader = Conll2003DatasetReader(lazy=lazy, coding_scheme=coding_scheme)
        instances = conll_reader.read(str(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'conll2003.txt'))  # unicode() in the original Python 2 source; str under Python 3
        instances = ensure_list(instances)

        if coding_scheme == u'IOB1':
            expected_labels = [u'I-ORG', u'O', u'I-PER', u'O', u'O', u'I-LOC', u'O']
        else:
            expected_labels = [u'U-ORG', u'O', u'U-PER', u'O', u'O', u'U-LOC', u'O']

        fields = instances[0].fields
        tokens = [t.text for t in fields[u'tokens'].tokens]
        assert tokens == [u'U.N.', u'official', u'Ekeus', u'heads', u'for', u'Baghdad', u'.']
        assert fields[u"tags"].labels == expected_labels

        fields = instances[1].fields
        tokens = [t.text for t in fields[u'tokens'].tokens]
        assert tokens == [u'AI2', u'engineer', u'Joel', u'lives', u'in', u'Seattle', u'.']
        assert fields[u"tags"].labels == expected_labels 
Developer: plasticityai | Project: magnitude | Lines of code: 21 | Source: conll2003_dataset_reader_test.py

Example 12: test_default_format

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_default_format(self, lazy):
        reader = Seq2SeqDatasetReader(lazy=lazy)
        instances = reader.read(str(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'seq2seq_copy.tsv'))  # unicode() in the original Python 2 source; str under Python 3
        instances = ensure_list(instances)

        assert len(instances) == 3
        fields = instances[0].fields
        assert [t.text for t in fields[u"source_tokens"].tokens] == [u"@start@", u"this", u"is",
                                                                    u"a", u"sentence", u"@end@"]
        assert [t.text for t in fields[u"target_tokens"].tokens] == [u"@start@", u"this", u"is",
                                                                    u"a", u"sentence", u"@end@"]
        fields = instances[1].fields
        assert [t.text for t in fields[u"source_tokens"].tokens] == [u"@start@", u"this", u"is",
                                                                    u"another", u"@end@"]
        assert [t.text for t in fields[u"target_tokens"].tokens] == [u"@start@", u"this", u"is",
                                                                    u"another", u"@end@"]
        fields = instances[2].fields
        assert [t.text for t in fields[u"source_tokens"].tokens] == [u"@start@", u"all", u"these", u"sentences",
                                                                    u"should", u"get", u"copied", u"@end@"]
        assert [t.text for t in fields[u"target_tokens"].tokens] == [u"@start@", u"all", u"these", u"sentences",
                                                                    u"should", u"get", u"copied", u"@end@"] 
Developer: plasticityai | Project: magnitude | Lines of code: 23 | Source: seq2seq_test.py

Example 13: test_read_from_file

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_read_from_file(self):
        # pylint: disable=R0201
        reader = IMDBLanguageModelingReader()
        dataset = reader.read(TestIMDBReader.DATASET_PATH)
        instances = ensure_list(dataset)

        assert len(instances) == 10
        fields = instances[0].fields
        assert [t.text for t in fields["source"].tokens] == TestIMDBReader.INSTANCE_0["source"]
        assert [t.text for t in fields["target"].tokens] == TestIMDBReader.INSTANCE_0["target"]
        fields = instances[1].fields
        assert [t.text for t in fields["source"].tokens] == TestIMDBReader.INSTANCE_1["source"]
        assert [t.text for t in fields["target"].tokens] == TestIMDBReader.INSTANCE_1["target"]
        fields = instances[7].fields
        assert [t.text for t in fields["source"].tokens] == TestIMDBReader.INSTANCE_7["source"]
        assert [t.text for t in fields["target"].tokens] == TestIMDBReader.INSTANCE_7["target"] 
Developer: dangitstam | Project: topic-rnn | Lines of code: 18 | Source: imdb_test.py

Example 14: test_tacred_dataset_reader

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_tacred_dataset_reader(self):
        reader = get_reader()
        instances = ensure_list(reader.read('tests/fixtures/tacred/LDC2018T24.json'))

        # Check number of instances is correct
        self.assertEqual(len(instances), 3)

        # Check that first instance's tokens are correct
        tokens_0 = [x.text for x in instances[0]['tokens']]

        initial_tokens_0 = tokens_0[:6]
        expected_initial_tokens_0 = ['[CLS]', 'douglas', 'flint', '[SEP]', 'chairman', '[SEP]']
        self.assertListEqual(initial_tokens_0, expected_initial_tokens_0)

        final_tokens_0 = tokens_0[-6:]
        expected_final_tokens_0 = ['a', 'govern', '##ment', '[UNK]', '.', '[SEP]']
        self.assertListEqual(final_tokens_0, expected_final_tokens_0)

        # Check that first instances label is correct
        label_0 = instances[0]['label_ids'].label
        expected_label_0 = LABEL_MAP['per:title']
        self.assertEqual(label_0, expected_label_0) 
Developer: allenai | Project: kb | Lines of code: 24 | Source: test_tacred_reader.py

Example 15: test_entity_mask

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import ensure_list [as alias]
def test_entity_mask(self):
        # Check 'mask' mode has expected behavior
        reader = get_reader()
        reader.entity_masking = 'mask'
        instances = ensure_list(reader.read('tests/fixtures/tacred/LDC2018T24.json'))

        tokens_0 = [x.text for x in instances[0]['tokens']]
        subj_tokens_0 = tokens_0[14]
        self.assertEqual(subj_tokens_0, '[MASK]')

        tokens_0 = [x.text for x in instances[0]['tokens']]
        obj_tokens_0 = tokens_0[17]
        self.assertEqual(obj_tokens_0, '[MASK]')

        # Check 'type/role' mode has expected behavior
        reader.entity_masking = 'type/role'
        instances = ensure_list(reader.read('tests/fixtures/tacred/LDC2018T24.json'))

        tokens_0 = [x.text for x in instances[0]['tokens']]
        subj_tokens_0 = tokens_0[14]
        self.assertEqual(subj_tokens_0, '[s-person]')

        tokens_0 = [x.text for x in instances[0]['tokens']]
        obj_tokens_0 = tokens_0[17]
        self.assertEqual(obj_tokens_0, '[o-title]') 
Developer: allenai | Project: kb | Lines of code: 27 | Source: test_tacred_reader.py


Note: the allennlp.common.util.ensure_list examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their developers; copyright remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.