This article collects typical usage examples of allennlp.data.fields.ListField in Python. If you are wondering what fields.ListField does or how to use it in practice, the curated code examples below may help. You can also explore further usage examples of the allennlp.data.fields module.
The 15 code examples of fields.ListField shown below are sorted by popularity by default. You can vote for the examples you like or find useful; your votes help the system recommend better Python code examples.
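Before diving into the examples, here is a minimal, self-contained sketch of the typical ListField workflow that most of the snippets below exercise: build a ListField of TextFields, index it against a Vocabulary, compute padding lengths, and convert it to tensors. The sentences, namespace, and indexer choices are illustrative assumptions, and import paths can vary slightly between AllenNLP versions.

from allennlp.data import Vocabulary
from allennlp.data.fields import ListField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import Token

# One indexer shared by every TextField inside the ListField.
indexers = {"tokens": SingleIdTokenIndexer(namespace="tokens")}

sentences = ["this is a sentence", "this is a longer example sentence"]
text_fields = [TextField([Token(w) for w in s.split()], indexers) for s in sentences]
list_field = ListField(text_fields)

# Build a vocabulary, index the field, then pad every inner TextField to the
# longest sentence and stack the results.
vocab = Vocabulary()
for s in sentences:
    for w in s.split():
        vocab.add_token_to_namespace(w, namespace="tokens")
list_field.index(vocab)

padding_lengths = list_field.get_padding_lengths()
tensor_dict = list_field.as_tensor(padding_lengths)
# In AllenNLP >= 1.0 this is a nested dict of shape (num_fields, max_sentence_length).
print(tensor_dict["tokens"]["tokens"].shape)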
Example 1: text_to_instance
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def text_to_instance(self, # type: ignore
item_id: Any,
question_text: str,
choice_text_list: List[str],
answer_id: int
) -> Instance:
# pylint: disable=arguments-differ
fields: Dict[str, Field] = {}
question_tokens = self._tokenizer.tokenize(question_text)
choices_tokens_list = [self._tokenizer.tokenize(x) for x in choice_text_list]
fields['question'] = TextField(question_tokens, self._token_indexers)
fields['choices_list'] = ListField([TextField(x, self._token_indexers) for x in choices_tokens_list])
fields['label'] = LabelField(answer_id, skip_indexing=True)
metadata = {
"id": item_id,
"question_text": question_text,
"choice_text_list": choice_text_list,
"question_tokens": [x.text for x in question_tokens],
"choice_tokens_list": [[x.text for x in ct] for ct in choices_tokens_list],
}
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
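As a complement to the reader method above, the following hedged sketch shows how such a multiple-choice Instance can be assembled and batched directly, without the surrounding DatasetReader class. The indexer and the example question and choices are assumptions for illustration only, not part of the original code.

from typing import Dict
from allennlp.data import Batch, Instance, Vocabulary
from allennlp.data.fields import Field, LabelField, ListField, MetadataField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import Token

token_indexers = {"tokens": SingleIdTokenIndexer()}
question = [Token(t) for t in "What is the capital of France ?".split()]
choices = [[Token(t) for t in c.split()] for c in ["Paris", "London", "Berlin"]]

fields: Dict[str, Field] = {
    "question": TextField(question, token_indexers),
    # One TextField per answer option; they are padded together when batched.
    "choices_list": ListField([TextField(c, token_indexers) for c in choices]),
    "label": LabelField(0, skip_indexing=True),
    "metadata": MetadataField({"id": "q1"}),
}
instance = Instance(fields)

vocab = Vocabulary.from_instances([instance])
batch = Batch([instance])
batch.index_instances(vocab)
tensors = batch.as_tensor_dict()
# In AllenNLP >= 1.0: shape (batch_size, num_choices, max_choice_length).
print(tensors["choices_list"]["tokens"]["tokens"].shape)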
Example 2: test_padding_handles_list_fields_with_padding_values
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_padding_handles_list_fields_with_padding_values(self):
array1 = ArrayField(numpy.ones([2, 3]), padding_value=-1)
array2 = ArrayField(numpy.ones([1, 5]), padding_value=-1)
empty_array = array1.empty_field()
list_field = ListField([array1, array2, empty_array])
returned_tensor = (
list_field.as_tensor(list_field.get_padding_lengths()).detach().cpu().numpy()
)
correct_tensor = numpy.array(
[
[[1.0, 1.0, 1.0, -1.0, -1.0], [1.0, 1.0, 1.0, -1.0, -1.0]],
[[1.0, 1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0, -1.0]],
[[-1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0]],
]
)
numpy.testing.assert_array_equal(returned_tensor, correct_tensor)
Example 3: test_fields_can_pad_to_greater_than_max_length
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_fields_can_pad_to_greater_than_max_length(self):
    list_field = ListField([self.field1, self.field2, self.field3])
    list_field.index(self.vocab)
    padding_lengths = list_field.get_padding_lengths()
    padding_lengths["list_num_tokens"] = 7
    padding_lengths["num_fields"] = 5
    tensor_dict = list_field.as_tensor(padding_lengths)
    numpy.testing.assert_array_almost_equal(
        tensor_dict["words"][0].detach().cpu().numpy(), numpy.array([2, 3, 4, 5, 0, 0, 0])
    )
    numpy.testing.assert_array_almost_equal(
        tensor_dict["words"][1].detach().cpu().numpy(), numpy.array([2, 3, 4, 1, 5, 0, 0])
    )
    numpy.testing.assert_array_almost_equal(
        tensor_dict["words"][2].detach().cpu().numpy(), numpy.array([2, 3, 1, 5, 0, 0, 0])
    )
    numpy.testing.assert_array_almost_equal(
        tensor_dict["words"][3].detach().cpu().numpy(), numpy.array([0, 0, 0, 0, 0, 0, 0])
    )
    numpy.testing.assert_array_almost_equal(
        tensor_dict["words"][4].detach().cpu().numpy(), numpy.array([0, 0, 0, 0, 0, 0, 0])
    )
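Note that this test and several of the later ones rely on fixtures such as self.field1, self.field2, self.field3, self.vocab, self.index_field, and self.sequence_label_field that are created in the test class's setup method, which these excerpts omit. The sketch below is a plausible reconstruction of that setup, consistent with the token ids in the expected tensors (0 is padding and 1 is the OOV token, so out-of-vocabulary words map to 1); the exact fixture in the original test suite may differ.

from allennlp.data import Vocabulary
from allennlp.data.fields import IndexField, SequenceLabelField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import Token


class TestListField:
    def setup_method(self):
        self.vocab = Vocabulary()
        self.vocab.add_token_to_namespace("this", "words")      # id 2 (0 = padding, 1 = OOV)
        self.vocab.add_token_to_namespace("is", "words")        # id 3
        self.vocab.add_token_to_namespace("a", "words")         # id 4
        self.vocab.add_token_to_namespace("sentence", "words")  # id 5
        self.word_indexer = {"words": SingleIdTokenIndexer("words")}

        # "different" and "another" are not in the vocabulary, so they index to 1.
        self.field1 = TextField(
            [Token(t) for t in ["this", "is", "a", "sentence"]], self.word_indexer
        )
        self.field2 = TextField(
            [Token(t) for t in ["this", "is", "a", "different", "sentence"]], self.word_indexer
        )
        self.field3 = TextField(
            [Token(t) for t in ["this", "is", "another", "sentence"]], self.word_indexer
        )

        self.index_field = IndexField(1, self.field1)
        self.empty_index_field = self.index_field.empty_field()
        self.sequence_label_field = SequenceLabelField([1, 1, 0, 1], self.field1)
        self.empty_sequence_label_field = self.sequence_label_field.empty_field()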
Example 4: text_to_instance
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def text_to_instance(self, text: str, sentences: List[str] = None, tags: List[int] = None) -> Instance:
if sentences is None:
if self._language == "ru":
sentences = [s.text for s in razdel.sentenize(text)]
else:
sentences = nltk.tokenize.sent_tokenize(text)
sentences_tokens = []
for sentence in sentences[:self._max_sentences_count]:
sentence = sentence.lower() if self._lowercase else sentence
tokens = self._tokenizer.tokenize(sentence)[:self._sentence_max_tokens]
tokens.insert(0, Token(START_SYMBOL))
tokens.append(Token(END_SYMBOL))
indexed_tokens = TextField(tokens, self._source_token_indexers)
sentences_tokens.append(indexed_tokens)
sentences_tokens_indexed = ListField(sentences_tokens)
result = {'source_sentences': sentences_tokens_indexed}
if tags:
result["sentences_tags"] = SequenceLabelField(tags[:self._max_sentences_count], sentences_tokens_indexed)
return Instance(result)
Example 5: text_to_instance
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def text_to_instance(self, # type: ignore
item_id: Any,
question_text: str,
choice_text_list: List[str],
answer_id: int) -> Instance:
# pylint: disable=arguments-differ
fields: Dict[str, Field] = {}
question_tokens = self._tokenizer.tokenize(question_text)
choices_tokens_list = [self._tokenizer.tokenize(x) for x in choice_text_list]
fields['question'] = TextField(question_tokens, self._token_indexers)
fields['choices_list'] = ListField([TextField(x, self._token_indexers) for x in choices_tokens_list])
fields['label'] = LabelField(answer_id, skip_indexing=True)
metadata = {
"id": item_id,
"question_text": question_text,
"choice_text_list": choice_text_list,
"question_tokens": [x.text for x in question_tokens],
"choice_tokens_list": [[x.text for x in ct] for ct in choices_tokens_list],
}
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
Example 6: text_to_instance
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def text_to_instance(
self, # type: ignore
context: List[List[str]],
question: List[str],
answer: str,
supports: List[int],
) -> Instance:
fields: Dict[str, Field] = {}
if self._keep_sentences:
context_field_ks = ListField(
[
TextField([Token(word) for word in line], self._token_indexers)
for line in context
]
)
fields["supports"] = ListField(
[IndexField(support, context_field_ks) for support in supports]
)
else:
context_field = TextField(
[Token(word) for line in context for word in line], self._token_indexers
)
fields["context"] = context_field_ks if self._keep_sentences else context_field
fields["question"] = TextField([Token(word) for word in question], self._token_indexers)
fields["answer"] = TextField([Token(answer)], self._token_indexers)
return Instance(fields)
Example 7: text_to_instance
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def text_to_instance(
self, text: str, label: Union[str, int] = None
) -> Instance: # type: ignore
"""
# Parameters
text : `str`, required.
The text to classify
label : `str`, optional, (default = `None`).
The label for this text.
# Returns
An `Instance` containing the following fields:
- tokens (`TextField`) :
The tokens in the sentence or phrase.
- label (`LabelField`) :
The label of the sentence or phrase.
"""
fields: Dict[str, Field] = {}
if self._segment_sentences:
sentences: List[Field] = []
sentence_splits = self._sentence_segmenter.split_sentences(text)
for sentence in sentence_splits:
word_tokens = self._tokenizer.tokenize(sentence)
if self._max_sequence_length is not None:
word_tokens = self._truncate(word_tokens)
sentences.append(TextField(word_tokens, self._token_indexers))
fields["tokens"] = ListField(sentences)
else:
tokens = self._tokenizer.tokenize(text)
if self._max_sequence_length is not None:
tokens = self._truncate(tokens)
fields["tokens"] = TextField(tokens, self._token_indexers)
if label is not None:
fields["label"] = LabelField(label, skip_indexing=self._skip_label_indexing)
return Instance(fields)
Example 8: test_elmo_empty_token_list
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_elmo_empty_token_list(self):
# Basic test
indexer = ELMoTokenCharactersIndexer()
assert {"elmo_tokens": []} == indexer.get_empty_token_list()
# Real world test
indexer = {"elmo": indexer}
tokens_1 = TextField([Token("Apple")], indexer)
targets_1 = ListField([TextField([Token("Apple")], indexer)])
tokens_2 = TextField([Token("Screen"), Token("device")], indexer)
targets_2 = ListField(
[TextField([Token("Screen")], indexer), TextField([Token("Device")], indexer)]
)
instance_1 = Instance({"tokens": tokens_1, "targets": targets_1})
instance_2 = Instance({"tokens": tokens_2, "targets": targets_2})
a_batch = Batch([instance_1, instance_2])
a_batch.index_instances(Vocabulary())
batch_tensor = a_batch.as_tensor_dict()
elmo_target_token_indices = batch_tensor["targets"]["elmo"]["elmo_tokens"]
# The TextField that is empty should have been created using the
# `get_empty_token_list` and then padded with zeros.
empty_target = elmo_target_token_indices[0][1].numpy()
np.testing.assert_array_equal(np.zeros((1, 50)), empty_target)
non_empty_targets = [
elmo_target_token_indices[0][0],
elmo_target_token_indices[1][0],
elmo_target_token_indices[1][1],
]
for non_empty_target in non_empty_targets:
with pytest.raises(AssertionError):
np.testing.assert_array_equal(np.zeros((1, 50)), non_empty_target)
Example 9: test_get_padding_lengths
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_get_padding_lengths(self):
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
lengths = list_field.get_padding_lengths()
assert lengths == {"num_fields": 3, "list_words___tokens": 5}
Example 10: test_list_field_can_handle_empty_index_fields
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_list_field_can_handle_empty_index_fields(self):
list_field = ListField([self.index_field, self.index_field, self.empty_index_field])
list_field.index(self.vocab)
tensor = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_equal(
tensor.detach().cpu().numpy(), numpy.array([[1], [1], [-1]])
)
Example 11: test_list_field_can_handle_empty_sequence_label_fields
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_list_field_can_handle_empty_sequence_label_fields(self):
list_field = ListField(
[self.sequence_label_field, self.sequence_label_field, self.empty_sequence_label_field]
)
list_field.index(self.vocab)
tensor = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_equal(
tensor.detach().cpu().numpy(), numpy.array([[1, 1, 0, 1], [1, 1, 0, 1], [0, 0, 0, 0]])
)
Example 12: test_all_fields_padded_to_max_length
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_all_fields_padded_to_max_length(self):
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
tensor_dict = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][0].detach().cpu().numpy(), numpy.array([2, 3, 4, 5, 0])
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][1].detach().cpu().numpy(), numpy.array([2, 3, 4, 1, 5])
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][2].detach().cpu().numpy(), numpy.array([2, 3, 1, 5, 0])
)
Example 13: test_nested_list_fields_are_padded_correctly
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_nested_list_fields_are_padded_correctly(self):
nested_field1 = ListField([LabelField(c) for c in ["a", "b", "c", "d", "e"]])
nested_field2 = ListField([LabelField(c) for c in ["f", "g", "h", "i", "j", "k"]])
list_field = ListField([nested_field1.empty_field(), nested_field1, nested_field2])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
assert padding_lengths == {"num_fields": 3, "list_num_fields": 6}
tensor = list_field.as_tensor(padding_lengths).detach().cpu().numpy()
numpy.testing.assert_almost_equal(
tensor, [[-1, -1, -1, -1, -1, -1], [0, 1, 2, 3, 4, -1], [5, 6, 7, 8, 9, 10]]
)
Example 14: test_fields_can_pad_to_greater_than_max_length
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_fields_can_pad_to_greater_than_max_length(self):
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
padding_lengths["list_words___tokens"] = 7
padding_lengths["num_fields"] = 5
tensor_dict = list_field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][0].detach().cpu().numpy(),
numpy.array([2, 3, 4, 5, 0, 0, 0]),
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][1].detach().cpu().numpy(),
numpy.array([2, 3, 4, 1, 5, 0, 0]),
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][2].detach().cpu().numpy(),
numpy.array([2, 3, 1, 5, 0, 0, 0]),
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][3].detach().cpu().numpy(),
numpy.array([0, 0, 0, 0, 0, 0, 0]),
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][4].detach().cpu().numpy(),
numpy.array([0, 0, 0, 0, 0, 0, 0]),
)
Example 15: test_printing_doesnt_crash
# Module to import: from allennlp.data import fields [as alias]
# Or: from allennlp.data.fields import ListField [as alias]
def test_printing_doesnt_crash(self):
list_field = ListField([self.field1, self.field2])
print(list_field)