本文整理汇总了Python中allennlp.data.fields.LabelField方法的典型用法代码示例。如果您正苦于以下问题:Python fields.LabelField方法的具体用法?Python fields.LabelField怎么用?Python fields.LabelField使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类allennlp.data.fields
的用法示例。
在下文中一共展示了fields.LabelField方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self, # type: ignore
                     tokens: List[str],
                     entity_1: Tuple[int, int],
                     entity_2: Tuple[int, int],
                     label: str = None) -> Instance:
    """Build a relation-classification ``Instance`` in the OpenAI transformer format.

    The two entity spans are spliced in front of the sentence, wrapped in the
    special marker tokens ``__start__``/``__del1__``/``__del2__``, and the whole
    sequence ends with the ``__clf__`` classification token.

    Parameters
    ----------
    tokens : the pre-tokenized sentence.
    entity_1, entity_2 : inclusive ``(start, end)`` token indices of each entity.
    label : optional relation label; omitted at prediction time.
    """
    # pylint: disable=arguments-differ
    fields: Dict[str, Field] = {}
    # Normalize every token the same way the OpenAI BPE vocabulary expects.
    tokens = [OpenAISplitter._standardize(token) for token in tokens]
    # Layout: __start__ <entity 1 tokens> __del1__ <entity 2 tokens> __del2__ <sentence> __clf__
    # (+1 because the span end indices are inclusive).
    tokens = ['__start__'] + tokens[entity_1[0]:entity_1[1]+1] + ['__del1__'] + tokens[entity_2[0]:entity_2[1]+1] + ['__del2__'] + tokens + ['__clf__']
    sentence = TextField([Token(text=t) for t in tokens], self._token_indexers)
    fields['sentence'] = sentence
    #fields['entity1'] = SpanField(*entity_1, sequence_field=sentence)
    #fields['entity2'] = SpanField(*entity_2, sequence_field=sentence)
    if label:
        fields['label'] = LabelField(label)
    return Instance(fields)
示例2: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self, tokens: List[Token], tags: List[str] = None, domain: str = None,
                     intent: str = None, dialog_act: Dict[str, Any] = None) -> Instance:  # type: ignore
    """
    Build an ``Instance`` from `pre-tokenized` input — this class has no tokenizer
    of its own. Tags, domain and intent are attached only when supplied.
    """
    # pylint: disable=arguments-differ
    token_field = TextField(tokens, self._token_indexers)
    instance_fields: Dict[str, Field] = {"tokens": token_field}
    if tags:
        instance_fields["tags"] = SequenceLabelField(tags, token_field)
    if domain:
        instance_fields["domain"] = LabelField(domain, label_namespace="domain_labels")
    if intent:
        instance_fields["intent"] = LabelField(intent, label_namespace="intent_labels")
    # A missing dialog act is represented as an empty dict in the metadata.
    words = [token.text for token in tokens]
    instance_fields["metadata"] = MetadataField(
        {"words": words,
         'dialog_act': dialog_act if dialog_act is not None else {}})
    return Instance(instance_fields)
示例3: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self, query_id:str, doc_id:str, query_sequence: str, doc_sequence: str) -> Instance: # type: ignore
    # pylint: disable=arguments-differ
    """Create a ranking instance: numeric query/doc ids plus their (optionally clipped) token sequences."""
    def _tokenize_and_clip(text, limit):
        # A negative limit means "no truncation".
        pieces = self._tokenizer.tokenize(text)
        return pieces[:limit] if limit > -1 else pieces

    query_tokens = _tokenize_and_clip(query_sequence, self.max_query_length)
    doc_tokens = _tokenize_and_clip(doc_sequence, self.max_doc_length)
    return Instance({
        # Ids are already integers in disguise, so indexing is skipped.
        "query_id": LabelField(int(query_id), skip_indexing=True),
        "doc_id": LabelField(int(doc_id), skip_indexing=True),
        "query_tokens": TextField(query_tokens, self._token_indexers),
        "doc_tokens": TextField(doc_tokens, self._token_indexers)})
示例4: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self, # type: ignore
                     item_id: Any,
                     question_text: str,
                     choice_text_list: List[str],
                     answer_id: int
                     ) -> Instance:
    # pylint: disable=arguments-differ
    """Encode one multiple-choice question: the question text, every answer
    choice as its own text field, and the gold answer index as the label."""
    question_tokens = self._tokenizer.tokenize(question_text)
    choice_token_lists = [self._tokenizer.tokenize(choice) for choice in choice_text_list]
    instance_fields: Dict[str, Field] = {
        'question': TextField(question_tokens, self._token_indexers),
        'choices_list': ListField(
            [TextField(choice_tokens, self._token_indexers) for choice_tokens in choice_token_lists]),
        # answer_id is already an integer index, so no vocabulary lookup is needed.
        'label': LabelField(answer_id, skip_indexing=True),
        'metadata': MetadataField({
            "id": item_id,
            "question_text": question_text,
            "choice_text_list": choice_text_list,
            "question_tokens": [t.text for t in question_tokens],
            "choice_tokens_list": [[t.text for t in ct] for ct in choice_token_lists],
        }),
    }
    return Instance(instance_fields)
示例5: test_duplicate
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def test_duplicate(self):
    """``Instance.duplicate()`` must produce an equal but independent copy, even
    when a ``PretrainedTransformerIndexer`` lives inside a ``TextField``."""
    # Verify the `duplicate()` method works with a `PretrainedTransformerIndexer` in
    # a `TextField`. See https://github.com/allenai/allennlp/issues/4270.
    instance = Instance(
        {
            "words": TextField(
                [Token("hello")], {"tokens": PretrainedTransformerIndexer("bert-base-uncased")}
            )
        }
    )
    other = instance.duplicate()
    assert other == instance
    # Adding new fields to the original instance should not affect the duplicate.
    instance.add_field("labels", LabelField("some_label"))
    assert "labels" not in other.fields
    assert other != instance  # sanity check on the '__eq__' method.
示例6: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self,
                     premise: str,
                     hypothesis: str,
                     hypothesis_structure: str,
                     label: str = None) -> Instance:
    """Tokenize an entailment pair (keeping only the last ``_max_tokens`` tokens
    of each side), attach raw-text metadata, structure fields, and the optional label."""
    # Negative slicing keeps the *tail* of an over-long sequence.
    premise_tokens = self._tokenizer.tokenize(premise)[-self._max_tokens:]
    hypothesis_tokens = self._tokenizer.tokenize(hypothesis)[-self._max_tokens:]
    instance_fields: Dict[str, Field] = {
        'premise': TextField(premise_tokens, self._token_indexers),
        'hypothesis': TextField(hypothesis_tokens, self._token_indexers),
        'metadata': MetadataField({
            'premise': premise,
            'hypothesis': hypothesis,
            'premise_tokens': [tok.text for tok in premise_tokens],
            'hypothesis_tokens': [tok.text for tok in hypothesis_tokens],
        }),
    }
    self._add_structure_to_fields(hypothesis_structure, instance_fields)
    if label:
        instance_fields['label'] = LabelField(label)
    return Instance(instance_fields)
示例7: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self, # type: ignore
                     premise: str,
                     hypothesis: str,
                     pid: str = None,
                     label: str = None) -> Instance:
    """Whitespace-tokenize an NLI pair, optionally clipping each side to
    ``max_l`` tokens, for the evidence-selection task."""
    premise_tokens = [Token(word) for word in premise.split(' ')]  # Removing code for parentheses in NLI
    hypothesis_tokens = [Token(word) for word in hypothesis.split(' ')]
    if self.max_l is not None:
        # Clip both sides to the configured maximum length.
        premise_tokens, hypothesis_tokens = premise_tokens[:self.max_l], hypothesis_tokens[:self.max_l]
    instance_fields: Dict[str, Field] = {
        'premise': TextField(premise_tokens, self._token_indexers),
        'hypothesis': TextField(hypothesis_tokens, self._token_indexers),
    }
    if label:
        instance_fields['selection_label'] = LabelField(label, label_namespace='selection_labels')
    if pid:
        instance_fields['pid'] = IdField(pid)
    return Instance(instance_fields)
示例8: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self, # type: ignore
                     premise: str,
                     hypothesis: str,
                     pid: str = None,
                     label: str = None) -> Instance:
    """Turn a space-separated premise/hypothesis pair into an ``Instance`` with
    an optional NLI label and an optional pair id."""
    fields: Dict[str, Field] = {}
    # Both sides get identical treatment: split on spaces, then clip to max_l.
    for field_name, text in (('premise', premise), ('hypothesis', hypothesis)):
        pieces = [Token(piece) for piece in text.split(' ')]  # Removing code for parentheses in NLI
        if self.max_l is not None:
            pieces = pieces[:self.max_l]
        fields[field_name] = TextField(pieces, self._token_indexers)
    if label:
        fields['label'] = LabelField(label, label_namespace='labels')
    if pid:
        fields['pid'] = IdField(pid)
    return Instance(fields)
示例9: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self, # type: ignore
                     premise: str,
                     hypothesis: str,
                     label: str = None):
    """Tokenize an NLI pair into premise/hypothesis text fields, attach the
    optional gold label, and record the token strings as metadata.

    Returns an ``Instance``. The ``u''`` string prefixes were Python-2
    conversion artifacts (``u'x' == 'x'`` in Python 3) and have been removed;
    type annotations were added to match the sibling readers in this file.
    """
    # pylint: disable=arguments-differ
    fields: Dict[str, Field] = {}
    premise_tokens = self._tokenizer.tokenize(premise)
    hypothesis_tokens = self._tokenizer.tokenize(hypothesis)
    fields['premise'] = TextField(premise_tokens, self._token_indexers)
    fields['hypothesis'] = TextField(hypothesis_tokens, self._token_indexers)
    if label:
        fields['label'] = LabelField(label)
    metadata = {"premise_tokens": [x.text for x in premise_tokens],
                "hypothesis_tokens": [x.text for x in hypothesis_tokens]}
    fields["metadata"] = MetadataField(metadata)
    return Instance(fields)
示例10: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self, # type: ignore
                     sentence_tokens: List[str],
                     verb_vector: List[int],
                     entity_vector: List[int],
                     state_change_types: Optional[List[str]] = None,
                     state_change_tags: Optional[List[str]] = None) -> Instance:
    """Encode one sentence for state-change prediction: the tokens, 0/1
    indicator vectors marking the verb and entity positions, and — when
    training labels are available — the state-change type and per-token tags.
    """
    # pylint: disable=arguments-differ
    fields: Dict[str, Field] = {}
    # encode inputs
    token_field = TextField([Token(word) for word in sentence_tokens], self._token_indexers)
    fields['tokens'] = token_field
    # Indicator vectors are aligned with the tokens, one tag per token.
    fields['verb_span'] = SequenceLabelField(verb_vector, token_field, 'indicator_tags')
    fields['entity_span'] = SequenceLabelField(entity_vector, token_field, 'indicator_tags')
    # encode outputs
    if state_change_types:
        # NOTE(review): LabelField's second positional argument is the label
        # namespace, and it normally takes a single label, yet
        # `state_change_types` is annotated List[str] — confirm against the
        # caller that this is the intended value here.
        fields['state_change_type_labels'] = LabelField(state_change_types, 'state_change_type_labels')
    if state_change_tags:
        fields['state_change_tags'] = SequenceLabelField(state_change_tags, token_field, 'state_change_tags')
    return Instance(fields)
示例11: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(
    self,  # type: ignore
    query: List[str],
    slot_tags: List[str] = None,
    sql_template: str = None,
) -> Instance:
    """Encode a tokenized query plus, when both are supplied, its per-token
    slot tags and the SQL template label."""
    token_field = TextField([Token(word) for word in query], self._token_indexers)
    fields: Dict[str, Field] = {"tokens": token_field}
    # Slot tags and the template only make sense together, so require both.
    if slot_tags is not None and sql_template is not None:
        fields["slot_tags"] = SequenceLabelField(slot_tags, token_field, label_namespace="slot_tags")
        fields["template"] = LabelField(sql_template, label_namespace="template_labels")
    return Instance(fields)
示例12: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self, # pylint: disable=arguments-differ
                     premise: str,
                     hypothesis: str,
                     label: str = None) -> Instance:
    """Tokenize both sides of an NLI pair, keeping only the last
    ``_max_tokens`` tokens of each, and attach the optional gold label."""
    def _clip_and_copy(text):
        # Re-wrap as plain Tokens so only the surface text is carried along.
        return [Token(tok.text) for tok in self._tokenizer.tokenize(text)[-self._max_tokens:]]

    fields: Dict[str, Field] = {
        'premise': TextField(_clip_and_copy(premise), self._token_indexers),
        'hypothesis': TextField(_clip_and_copy(hypothesis), self._token_indexers),
    }
    if label:
        fields['label'] = LabelField(label)
    return Instance(fields)
示例13: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self,
                     citation_text: str,
                     citing_paper_id: str,
                     cited_paper_id: str,
                     intent: List[str] = None,
                     venue: str = None,
                     section_name: str = None) -> Instance:
    """Build an instance for section-title classification of a citation context.

    ``intent`` and ``venue`` are accepted for interface compatibility but are
    not used by this reader.
    """
    tokens = self._tokenizer.tokenize(citation_text)
    fields: Dict[str, Field] = {'citation_text': TextField(tokens, self._token_indexers)}
    if section_name is not None:
        fields['section_label'] = LabelField(section_name, label_namespace="section_labels")
    # Paper ids travel as metadata only — they are not model inputs.
    fields['citing_paper_id'] = MetadataField(citing_paper_id)
    fields['cited_paper_id'] = MetadataField(cited_paper_id)
    return Instance(fields)
示例14: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self,
                     citation_text: str,
                     citing_paper_id: str,
                     cited_paper_id: str,
                     intent: List[str] = None,
                     section_name: str = None) -> Instance:
    """Create a section-label instance from a citation context.

    ``intent`` is accepted for interface compatibility but unused here.
    """
    tokenized = self._tokenizer.tokenize(citation_text)
    instance_fields: Dict[str, Field] = {}
    instance_fields['citation_text'] = TextField(tokenized, self._token_indexers)
    if section_name is not None:
        instance_fields['section_label'] = LabelField(section_name, label_namespace="section_labels")
    # The two paper ids are bookkeeping metadata, not model inputs.
    instance_fields['citing_paper_id'] = MetadataField(citing_paper_id)
    instance_fields['cited_paper_id'] = MetadataField(cited_paper_id)
    return Instance(instance_fields)
示例15: text_to_instance
# 需要导入模块: from allennlp.data import fields [as 别名]
# 或者: from allennlp.data.fields import LabelField [as 别名]
def text_to_instance(self, # type: ignore
                     premise: str,
                     hypotheses: List[str],
                     label: int = None) -> Instance:
    # pylint: disable=arguments-differ
    """One premise against several hypotheses; the optional integer label is
    the index of the correct hypothesis."""
    fields: Dict[str, Field] = {
        'premise': TextField(self._tokenizer.tokenize(premise), self._token_indexers),
    }
    # This could be another way to get randomness
    # Each hypothesis becomes its own numbered field: hypothesis0, hypothesis1, ...
    for index, hypothesis in enumerate(hypotheses):
        fields['hypothesis%d' % index] = TextField(
            self._tokenizer.tokenize(hypothesis), self._token_indexers)
    if label is not None:
        # The label is already an index, so no vocabulary lookup is needed.
        fields['label'] = LabelField(label, skip_indexing=True)
    return Instance(fields)