This article collects typical usage examples of the Python method allennlp.data.vocabulary.Vocabulary.get_token_from_index. If you have been wondering how Vocabulary.get_token_from_index works or how to use it, the hand-picked code examples below may help. You can also read more about its containing class, allennlp.data.vocabulary.Vocabulary.
Below are 6 code examples of Vocabulary.get_token_from_index, ordered by popularity by default.
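Before the examples, here is a minimal sketch of the round trip between add_token_to_namespace, get_token_index, and get_token_from_index, assuming AllenNLP's default "tokens" namespace (where indices 0 and 1 are reserved for the padding and OOV tokens):

from allennlp.data.vocabulary import Vocabulary

vocab = Vocabulary()
index = vocab.add_token_to_namespace("hello")  # first real token lands at index 2
assert vocab.get_token_from_index(index) == "hello"  # index -> token
assert vocab.get_token_index("hello") == index       # token -> index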
Example 1: test_set_from_file_reads_padded_files
# Required module import: from allennlp.data.vocabulary import Vocabulary [as alias]
# Or: from allennlp.data.vocabulary.Vocabulary import get_token_from_index [as alias]
def test_set_from_file_reads_padded_files(self):
    # pylint: disable=protected-access
    vocab_filename = self.TEST_DIR / 'vocab_file'
    with codecs.open(vocab_filename, 'w', 'utf-8') as vocab_file:
        vocab_file.write('<S>\n')
        vocab_file.write('</S>\n')
        vocab_file.write('<UNK>\n')
        vocab_file.write('a\n')
        vocab_file.write('tricky\x0bchar\n')
        vocab_file.write('word\n')
        vocab_file.write('another\n')

    vocab = Vocabulary()
    vocab.set_from_file(vocab_filename, is_padded=True, oov_token="<UNK>")

    # With is_padded=True, the padding token takes index 0, every line in the
    # file is shifted up by one, and the file's oov_token line ("<UNK>") is
    # replaced internally by DEFAULT_OOV_TOKEN.
    assert vocab._oov_token == DEFAULT_OOV_TOKEN
    assert vocab.get_token_index("random string") == 3
    assert vocab.get_token_index("<S>") == 1
    assert vocab.get_token_index("</S>") == 2
    assert vocab.get_token_index(DEFAULT_OOV_TOKEN) == 3
    assert vocab.get_token_index("a") == 4
    assert vocab.get_token_index("tricky\x0bchar") == 5
    assert vocab.get_token_index("word") == 6
    assert vocab.get_token_index("another") == 7
    assert vocab.get_token_from_index(0) == vocab._padding_token
    assert vocab.get_token_from_index(1) == "<S>"
    assert vocab.get_token_from_index(2) == "</S>"
    assert vocab.get_token_from_index(3) == DEFAULT_OOV_TOKEN
    assert vocab.get_token_from_index(4) == "a"
    assert vocab.get_token_from_index(5) == "tricky\x0bchar"
    assert vocab.get_token_from_index(6) == "word"
    assert vocab.get_token_from_index(7) == "another"
Example 2: test_add_word_to_index_gives_consistent_results
# Required module import: from allennlp.data.vocabulary import Vocabulary [as alias]
# Or: from allennlp.data.vocabulary.Vocabulary import get_token_from_index [as alias]
def test_add_word_to_index_gives_consistent_results(self):
    vocab = Vocabulary()
    initial_vocab_size = vocab.get_vocab_size()
    word_index = vocab.add_token_to_namespace("word")
    assert "word" in vocab.get_index_to_token_vocabulary().values()
    assert vocab.get_token_index("word") == word_index
    assert vocab.get_token_from_index(word_index) == "word"
    assert vocab.get_vocab_size() == initial_vocab_size + 1

    # Now add it again, and make sure nothing changes.
    vocab.add_token_to_namespace("word")
    assert "word" in vocab.get_index_to_token_vocabulary().values()
    assert vocab.get_token_index("word") == word_index
    assert vocab.get_token_from_index(word_index) == "word"
    assert vocab.get_vocab_size() == initial_vocab_size + 1
Example 3: test_namespaces
# Required module import: from allennlp.data.vocabulary import Vocabulary [as alias]
# Or: from allennlp.data.vocabulary.Vocabulary import get_token_from_index [as alias]
def test_namespaces(self):
    vocab = Vocabulary()
    initial_vocab_size = vocab.get_vocab_size()
    word_index = vocab.add_token_to_namespace("word", namespace='1')
    assert "word" in vocab.get_index_to_token_vocabulary(namespace='1').values()
    assert vocab.get_token_index("word", namespace='1') == word_index
    assert vocab.get_token_from_index(word_index, namespace='1') == "word"
    assert vocab.get_vocab_size(namespace='1') == initial_vocab_size + 1

    # Now add the same word and a new word in a different namespace, and make
    # sure the new namespace starts fresh.
    word2_index = vocab.add_token_to_namespace("word2", namespace='2')
    word_index = vocab.add_token_to_namespace("word", namespace='2')
    assert "word" in vocab.get_index_to_token_vocabulary(namespace='2').values()
    assert "word2" in vocab.get_index_to_token_vocabulary(namespace='2').values()
    assert vocab.get_token_index("word", namespace='2') == word_index
    assert vocab.get_token_index("word2", namespace='2') == word2_index
    assert vocab.get_token_from_index(word_index, namespace='2') == "word"
    assert vocab.get_token_from_index(word2_index, namespace='2') == "word2"
    assert vocab.get_vocab_size(namespace='2') == initial_vocab_size + 2
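A common reason for separate namespaces is keeping word tokens apart from label strings, since only the former usually needs padding and OOV entries. A brief sketch under that assumption (the namespace names here are illustrative):

vocab = Vocabulary(non_padded_namespaces=["labels"])
word_index = vocab.add_token_to_namespace("apple", namespace="tokens")
label_index = vocab.add_token_to_namespace("POSITIVE", namespace="labels")
# "tokens" reserves indices 0 and 1 for padding and OOV, so "apple" lands at 2;
# "labels" is non-padded, so "POSITIVE" gets index 0.
assert vocab.get_token_from_index(word_index, namespace="tokens") == "apple"
assert vocab.get_token_from_index(label_index, namespace="labels") == "POSITIVE"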
Example 4: _get_vocab_index_mapping
# Required module import: from allennlp.data.vocabulary import Vocabulary [as alias]
# Or: from allennlp.data.vocabulary.Vocabulary import get_token_from_index [as alias]
def _get_vocab_index_mapping(self, archived_vocab: Vocabulary) -> List[Tuple[int, int]]:
    vocab_index_mapping: List[Tuple[int, int]] = []
    for index in range(self.vocab.get_vocab_size(namespace='tokens')):
        token = self.vocab.get_token_from_index(index=index, namespace='tokens')
        archived_token_index = archived_vocab.get_token_index(token, namespace='tokens')
        # Check whether we got the UNK token index, because we don't want all
        # new token representations initialized to the UNK token's
        # representation. We do that by checking whether the two tokens are the
        # same; they will not be if the token at the archived index is UNK.
        if archived_vocab.get_token_from_index(archived_token_index, namespace="tokens") == token:
            vocab_index_mapping.append((index, archived_token_index))
    return vocab_index_mapping
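For context, a mapping like this is typically consumed when warm-starting from an archived model: rows of the old embedding matrix are copied into the new one wherever the two vocabularies agree on a token. A hedged sketch of that idea (the helper below is illustrative, not part of the AllenNLP API):

import torch

def copy_shared_embeddings(new_weight: torch.Tensor,
                           archived_weight: torch.Tensor,
                           vocab_index_mapping) -> None:
    # Hypothetical helper: for every (new_index, archived_index) pair produced
    # by _get_vocab_index_mapping, reuse the archived representation; rows for
    # genuinely new tokens keep whatever initialization new_weight already has.
    for new_index, archived_index in vocab_index_mapping:
        new_weight[new_index] = archived_weight[archived_index]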
Example 5: test_set_from_file_reads_non_padded_files
# Required module import: from allennlp.data.vocabulary import Vocabulary [as alias]
# Or: from allennlp.data.vocabulary.Vocabulary import get_token_from_index [as alias]
def test_set_from_file_reads_non_padded_files(self):
    # pylint: disable=protected-access
    vocab_filename = self.TEST_DIR / 'vocab_file'
    with codecs.open(vocab_filename, 'w', 'utf-8') as vocab_file:
        vocab_file.write('B-PERS\n')
        vocab_file.write('I-PERS\n')
        vocab_file.write('O\n')
        vocab_file.write('B-ORG\n')
        vocab_file.write('I-ORG\n')

    vocab = Vocabulary()
    vocab.set_from_file(vocab_filename, is_padded=False, namespace='tags')

    assert vocab.get_token_index("B-PERS", namespace='tags') == 0
    assert vocab.get_token_index("I-PERS", namespace='tags') == 1
    assert vocab.get_token_index("O", namespace='tags') == 2
    assert vocab.get_token_index("B-ORG", namespace='tags') == 3
    assert vocab.get_token_index("I-ORG", namespace='tags') == 4
    assert vocab.get_token_from_index(0, namespace='tags') == "B-PERS"
    assert vocab.get_token_from_index(1, namespace='tags') == "I-PERS"
    assert vocab.get_token_from_index(2, namespace='tags') == "O"
    assert vocab.get_token_from_index(3, namespace='tags') == "B-ORG"
    assert vocab.get_token_from_index(4, namespace='tags') == "I-ORG"
Example 6: test_from_params_valid_vocab_extension_thoroughly
# Required module import: from allennlp.data.vocabulary import Vocabulary [as alias]
# Or: from allennlp.data.vocabulary.Vocabulary import get_token_from_index [as alias]
def test_from_params_valid_vocab_extension_thoroughly(self):
    '''
    Tests valid vocab extension thoroughly: vocab extension is valid when
    overlapping namespaces have the same padding behaviour (padded/non-padded).
    Summary of namespace paddings in this test:
        original_vocab namespaces
            tokens0    padded
            tokens1    non-padded
            tokens2    padded
            tokens3    non-padded
        instances namespaces
            tokens0    padded
            tokens1    non-padded
            tokens4    padded
            tokens5    non-padded
    Typical extension example (for the tokens1 namespace):
        -> original_vocab index2token
            apple    #0->apple
            bat      #1->bat
            cat      #2->cat
        -> tokens to extend with: cat, an, apple, banana, atom, bat
        -> extended_vocab index2token
            apple    #0->apple
            bat      #1->bat
            cat      #2->cat
            an       #3->an
            atom     #4->atom
            banana   #5->banana
    '''
    vocab_dir = self.TEST_DIR / 'vocab_save'
    original_vocab = Vocabulary(non_padded_namespaces=["tokens1", "tokens3"])
    original_vocab.add_token_to_namespace("apple", namespace="tokens0")  # index:2
    original_vocab.add_token_to_namespace("bat", namespace="tokens0")    # index:3
    original_vocab.add_token_to_namespace("cat", namespace="tokens0")    # index:4
    original_vocab.add_token_to_namespace("apple", namespace="tokens1")  # index:0
    original_vocab.add_token_to_namespace("bat", namespace="tokens1")    # index:1
    original_vocab.add_token_to_namespace("cat", namespace="tokens1")    # index:2
    original_vocab.add_token_to_namespace("a", namespace="tokens2")      # index:2 (padded namespace)
    original_vocab.add_token_to_namespace("b", namespace="tokens2")      # index:3
    original_vocab.add_token_to_namespace("c", namespace="tokens2")      # index:4
    original_vocab.add_token_to_namespace("p", namespace="tokens3")      # index:0
    original_vocab.add_token_to_namespace("q", namespace="tokens3")      # index:1
    original_vocab.save_to_files(vocab_dir)

    text_field0 = TextField([Token(t) for t in ["cat", "an", "apple", "banana", "atom", "bat"]],
                            {"tokens0": SingleIdTokenIndexer("tokens0")})
    text_field1 = TextField([Token(t) for t in ["cat", "an", "apple", "banana", "atom", "bat"]],
                            {"tokens1": SingleIdTokenIndexer("tokens1")})
    text_field4 = TextField([Token(t) for t in ["l", "m", "n", "o"]],
                            {"tokens4": SingleIdTokenIndexer("tokens4")})
    text_field5 = TextField([Token(t) for t in ["x", "y", "z"]],
                            {"tokens5": SingleIdTokenIndexer("tokens5")})
    instances = Batch([Instance({"text0": text_field0, "text1": text_field1,
                                 "text4": text_field4, "text5": text_field5})])

    params = Params({"directory_path": vocab_dir,
                     "extend": True,
                     "non_padded_namespaces": ["tokens1", "tokens5"]})
    extended_vocab = Vocabulary.from_params(params, instances)

    # Namespaces tokens0 and tokens1 are common to both; tokens2 and tokens3
    # exist only in the original vocab, tokens4 and tokens5 only in the instances.
    extended_namespaces = {*extended_vocab._token_to_index}
    assert extended_namespaces == {"tokens{}".format(i) for i in range(6)}

    # Check that the _non_padded_namespaces set is consistent after extension.
    assert extended_vocab._non_padded_namespaces == {"tokens1", "tokens3", "tokens5"}

    # original_vocab["tokens1"] has 3 tokens; the instances contribute 6 tokens,
    # 3 of which overlap, so the extended namespace has 6.
    assert extended_vocab.get_vocab_size("tokens1") == 6
    # tokens0 is padded, so it carries 2 extra entries: 2 (padding, OOV) + 3 + 3 = 8.
    assert extended_vocab.get_vocab_size("tokens0") == 8

    # Namespaces tokens2 and tokens3 were only in original_vocab, so their
    # token counts should be unchanged in extended_vocab.
    assert extended_vocab.get_vocab_size("tokens2") == original_vocab.get_vocab_size("tokens2")
    assert extended_vocab.get_vocab_size("tokens3") == original_vocab.get_vocab_size("tokens3")

    # Namespaces tokens4 and tokens5 were only in the instances.
    assert extended_vocab.get_vocab_size("tokens4") == 6  # l, m, n, o + padding + OOV
    assert extended_vocab.get_vocab_size("tokens5") == 3  # x, y, z

    # The token-to-index mapping of every token in every namespace of
    # original_vocab should be preserved in extended_vocab.
    for namespace, token2index in original_vocab._token_to_index.items():
        for token, _ in token2index.items():
            vocab_index = original_vocab.get_token_index(token, namespace)
            extended_vocab_index = extended_vocab.get_token_index(token, namespace)
            assert vocab_index == extended_vocab_index

    # And the same for the index-to-token mapping.
    for namespace, index2token in original_vocab._index_to_token.items():
        for index, _ in index2token.items():
            vocab_token = original_vocab.get_token_from_index(index, namespace)
            extended_vocab_token = extended_vocab.get_token_from_index(index, namespace)
            assert vocab_token == extended_vocab_token