當前位置: 首頁>>代碼示例>>Python>>正文


Python fields.Field方法代碼示例

本文整理匯總了Python中allennlp.data.fields.Field方法的典型用法代碼示例。如果您正苦於以下問題:Python fields.Field方法的具體用法?Python fields.Field怎麽用?Python fields.Field使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在allennlp.data.fields的用法示例。


在下文中一共展示了fields.Field方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self, context_tokens: List[Token], tokens: List[Token], tags: List[str] = None,
                     intents: List[str] = None, dialog_act: Dict[str, Any] = None) -> Instance:  # type: ignore
    """
    Build an ``Instance`` from pre-tokenized input (this reader has no tokenizer).

    Parameters
    ----------
    context_tokens : dialogue-context tokens, stored under ``context_tokens``.
    tokens : current-utterance tokens, stored under ``tokens``.
    tags : optional per-token labels aligned with ``tokens``.
    intents : optional multi-label intents (``intent_labels`` namespace).
    dialog_act : optional dialog-act annotation carried in metadata.
    """
    # pylint: disable=arguments-differ
    fields: Dict[str, Field] = {}
    fields["context_tokens"] = TextField(context_tokens, self._token_indexers)
    fields["tokens"] = TextField(tokens, self._token_indexers)
    if tags is not None:
        fields["tags"] = SequenceLabelField(tags, fields["tokens"])
    if intents is not None:
        fields["intents"] = MultiLabelField(intents, label_namespace="intent_labels")
    # Bug fix: the original assigned fields["metadata"] unconditionally and then
    # overwrote it in a later if/else — the first assignment was dead code.
    # Build the metadata exactly once instead.
    fields["metadata"] = MetadataField({
        "words": [x.text for x in tokens],
        'dialog_act': dialog_act if dialog_act is not None else {},
    })
    return Instance(fields)
開發者ID:ConvLab,項目名稱:ConvLab,代碼行數:23,代碼來源:dataset_reader.py

示例2: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self, tokens: List[Token], tags: List[str] = None, domain: str = None,
                     intent: str = None, dialog_act: Dict[str, Any] = None) -> Instance:  # type: ignore
    """
    Convert pre-tokenized input into an ``Instance`` (no tokenizer in this class).

    Optional tags / domain / intent / dialog_act annotations are attached only
    when provided.
    """
    # pylint: disable=arguments-differ
    token_field = TextField(tokens, self._token_indexers)
    fields: Dict[str, Field] = {"tokens": token_field}
    if tags:
        fields["tags"] = SequenceLabelField(tags, token_field)
    if domain:
        fields["domain"] = LabelField(domain, label_namespace="domain_labels")
    if intent:
        fields["intent"] = LabelField(intent, label_namespace="intent_labels")
    words = [x.text for x in tokens]
    if dialog_act is not None:
        fields["metadata"] = MetadataField({"words": words, 'dialog_act': dialog_act})
    else:
        fields["metadata"] = MetadataField({"words": words, 'dialog_act': {}})
    return Instance(fields)
開發者ID:ConvLab,項目名稱:ConvLab,代碼行數:23,代碼來源:dataset_reader.py

示例3: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self,  # type: ignore
                     tokens: List[str],
                     entity_1: Tuple[int],
                     entity_2: Tuple[int],
                     label: str = None) -> Instance:
    """
    Build a relation-classification instance: both entity spans are copied to
    the front of the sentence between special delimiter tokens.
    """
    # pylint: disable=arguments-differ
    normalized = [OpenAISplitter._standardize(token) for token in tokens]
    span_1 = normalized[entity_1[0]:entity_1[1] + 1]
    span_2 = normalized[entity_2[0]:entity_2[1] + 1]
    marked = (['__start__'] + span_1 + ['__del1__'] + span_2 + ['__del2__']
              + normalized + ['__clf__'])

    fields: Dict[str, Field] = {
        'sentence': TextField([Token(text=t) for t in marked], self._token_indexers)
    }
    if label:
        fields['label'] = LabelField(label)
    return Instance(fields)
開發者ID:DFKI-NLP,項目名稱:DISTRE,代碼行數:22,代碼來源:semeval_2010_task_8_reader.py

示例4: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self,  # type: ignore
                     item_id: Any,
                     question_text: str,
                     choice_text_list: List[str],
                     answer_id: int
                     ) -> Instance:
    """
    Build a multiple-choice QA instance: tokenized question, a list of
    tokenized choices, the gold answer index, and raw-text metadata.
    """
    # pylint: disable=arguments-differ
    question_tokens = self._tokenizer.tokenize(question_text)
    choices_tokens_list = [self._tokenizer.tokenize(x) for x in choice_text_list]

    fields: Dict[str, Field] = {
        'question': TextField(question_tokens, self._token_indexers),
        'choices_list': ListField([TextField(x, self._token_indexers)
                                   for x in choices_tokens_list]),
        'label': LabelField(answer_id, skip_indexing=True),
        "metadata": MetadataField({
            "id": item_id,
            "question_text": question_text,
            "choice_text_list": choice_text_list,
            "question_tokens": [x.text for x in question_tokens],
            "choice_tokens_list": [[x.text for x in ct] for ct in choices_tokens_list],
        }),
    }
    return Instance(fields)
開發者ID:allenai,項目名稱:OpenBookQA,代碼行數:27,代碼來源:arc_multichoice_json_reader.py

示例5: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self,
                     premise: str,
                     hypothesis: str,
                     hypothesis_structure: str,
                     label: str = None) -> Instance:
    """
    Build an entailment instance, keeping only the trailing ``_max_tokens``
    tokens of premise and hypothesis, and attach structure fields.
    """
    premise_tokens = self._tokenizer.tokenize(premise)[-self._max_tokens:]
    hypothesis_tokens = self._tokenizer.tokenize(hypothesis)[-self._max_tokens:]

    fields: Dict[str, Field] = {
        'premise': TextField(premise_tokens, self._token_indexers),
        'hypothesis': TextField(hypothesis_tokens, self._token_indexers),
        'metadata': MetadataField({
            'premise': premise,
            'hypothesis': hypothesis,
            'premise_tokens': [token.text for token in premise_tokens],
            'hypothesis_tokens': [token.text for token in hypothesis_tokens],
        }),
    }
    self._add_structure_to_fields(hypothesis_structure, fields)
    if label:
        fields['label'] = LabelField(label)
    return Instance(fields)
開發者ID:allenai,項目名稱:scitail,代碼行數:24,代碼來源:entailment_tuple_reader.py

示例6: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self,  # type: ignore
                     premise: str,
                     hypothesis: str,
                     pid: str = None,
                     label: str = None) -> Instance:
    """
    Space-split a premise/hypothesis pair into a sentence-selection instance,
    truncating both sides to ``self.max_l`` tokens when configured.
    """

    def _tokenize(text: str) -> List[Token]:
        # Plain whitespace split; parenthesis normalization was removed for NLI.
        toks = [Token(t) for t in text.split(' ')]
        return toks if self.max_l is None else toks[:self.max_l]

    fields: Dict[str, Field] = {
        'premise': TextField(_tokenize(premise), self._token_indexers),
        'hypothesis': TextField(_tokenize(hypothesis), self._token_indexers),
    }
    if label:
        fields['selection_label'] = LabelField(label, label_namespace='selection_labels')
    if pid:
        fields['pid'] = IdField(pid)
    return Instance(fields)
開發者ID:easonnie,項目名稱:combine-FEVER-NSMN,代碼行數:27,代碼來源:fever_sselection_reader.py

示例7: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self,  # type: ignore
                     premise: str,
                     hypothesis: str,
                     pid: str = None,
                     label: str = None) -> Instance:
    """
    Space-split a FEVER premise/hypothesis pair into an entailment instance;
    both sides are optionally truncated to ``self.max_l`` tokens.
    """
    p_tokens = [Token(t) for t in premise.split(' ')]  # Removing code for parentheses in NLI
    h_tokens = [Token(t) for t in hypothesis.split(' ')]

    if self.max_l is not None:
        # Truncate both sides to the configured maximum length.
        p_tokens, h_tokens = p_tokens[:self.max_l], h_tokens[:self.max_l]

    fields: Dict[str, Field] = {
        'premise': TextField(p_tokens, self._token_indexers),
        'hypothesis': TextField(h_tokens, self._token_indexers),
    }
    if label:
        fields['label'] = LabelField(label, label_namespace='labels')
    if pid:
        fields['pid'] = IdField(pid)
    return Instance(fields)
開發者ID:easonnie,項目名稱:combine-FEVER-NSMN,代碼行數:27,代碼來源:fever_reader.py

示例8: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(
    self,  # type: ignore
    query: List[str],
    slot_tags: List[str] = None,
    sql_template: str = None,
) -> Instance:
    """
    Build a text-to-SQL instance from a pre-split query; slot tags and the
    SQL template label are attached only when both are supplied.
    """
    token_field = TextField([Token(t) for t in query], self._token_indexers)
    fields: Dict[str, Field] = {"tokens": token_field}

    if slot_tags is not None and sql_template is not None:
        fields["slot_tags"] = SequenceLabelField(slot_tags, token_field,
                                                 label_namespace="slot_tags")
        fields["template"] = LabelField(sql_template, label_namespace="template_labels")

    return Instance(fields)
開發者ID:allenai,項目名稱:allennlp-semparse,代碼行數:19,代碼來源:template_text2sql.py

示例9: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self, # pylint: disable=arguments-differ
                     premise: str,
                     hypothesis: str,
                     label: str = None) -> Instance:
    """
    Tokenize a premise/hypothesis pair, keeping only the trailing
    ``_max_tokens`` tokens of each side, and re-wrap the raw token text.
    """
    fields: Dict[str, Field] = {}
    for name, text in (('premise', premise), ('hypothesis', hypothesis)):
        window = self._tokenizer.tokenize(text)[-self._max_tokens:]
        fields[name] = TextField([Token(token.text) for token in window],
                                 self._token_indexers)

    if label:
        fields['label'] = LabelField(label)
    return Instance(fields)
開發者ID:StonyBrookNLP,項目名稱:multee,代碼行數:22,代碼來源:entailment_pair.py

示例10: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self,  # type: ignore
                     premise: str,
                     hypotheses: List[str],
                     label: int = None) -> Instance:
    """
    Encode a premise plus an enumerated set of candidate hypotheses
    (``hypothesis0``, ``hypothesis1``, ...); ``label`` is the gold index.
    """
    # pylint: disable=arguments-differ
    fields: Dict[str, Field] = {
        'premise': TextField(self._tokenizer.tokenize(premise), self._token_indexers)
    }

    # This could be another way to get randomness
    for idx, hypothesis in enumerate(hypotheses):
        fields['hypothesis{}'.format(idx)] = TextField(
            self._tokenizer.tokenize(hypothesis), self._token_indexers)

    if label is not None:
        fields['label'] = LabelField(label, skip_indexing=True)
    return Instance(fields)
開發者ID:rowanz,項目名稱:swagaf,代碼行數:19,代碼來源:dataset_reader.py

示例11: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self,  # type: ignore
                     premise: str,
                     hypotheses: List[str],
                     label: int = None) -> Instance:
    """
    Encode only the candidate hypotheses (``hypothesis0``, ``hypothesis1``, ...);
    the premise is accepted for interface compatibility but intentionally not
    encoded. ``label`` is the gold candidate index.
    """
    # pylint: disable=arguments-differ
    fields: Dict[str, Field] = {}

    # This could be another way to get randomness
    for idx, hypothesis in enumerate(hypotheses):
        tokenized = self._tokenizer.tokenize(hypothesis)
        fields['hypothesis{}'.format(idx)] = TextField(tokenized, self._token_indexers)

    if label is not None:
        fields['label'] = LabelField(label, skip_indexing=True)
    return Instance(fields)
開發者ID:rowanz,項目名稱:swagaf,代碼行數:19,代碼來源:dataset_reader.py

示例12: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self, vec: str = None) -> Instance:  # type: ignore
        """
        Wrap a pre-computed vector in an ``Instance``.

        Parameters
        ----------
        vec : required despite the default.
            The pre-computed representation to classify; it is stored directly
            in an ``ArrayField``, so it must be acceptable to ``ArrayField``
            (NOTE(review): the ``str`` annotation looks wrong — confirm the
            actual expected type against callers).

        Returns
        -------
        An ``Instance`` with a single field:
            tokens : ``ArrayField``
                The vector passed in as ``vec``.
        """
        # pylint: disable=arguments-differ
        fields: Dict[str, Field] = {}
        fields['tokens'] = ArrayField(vec)
        return Instance(fields)
開發者ID:allenai,項目名稱:vampire,代碼行數:23,代碼來源:vampire_reader.py

示例13: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self,  # type: ignore
                     item_id: Any,
                     question_text: str,
                     choice_text_list: List[str],
                     answer_id: int) -> Instance:
    """
    Assemble a multiple-choice QA instance: tokenized question, the list of
    tokenized answer choices, the gold answer index, and raw-text metadata.
    """
    # pylint: disable=arguments-differ
    q_tokens = self._tokenizer.tokenize(question_text)
    choice_tokens = [self._tokenizer.tokenize(choice) for choice in choice_text_list]

    fields: Dict[str, Field] = {}
    fields['question'] = TextField(q_tokens, self._token_indexers)
    fields['choices_list'] = ListField(
        [TextField(c, self._token_indexers) for c in choice_tokens])
    fields['label'] = LabelField(answer_id, skip_indexing=True)
    fields["metadata"] = MetadataField({
       "id": item_id,
       "question_text": question_text,
       "choice_text_list": choice_text_list,
       "question_tokens": [t.text for t in q_tokens],
       "choice_tokens_list": [[t.text for t in ct] for ct in choice_tokens],
    })

    return Instance(fields)
開發者ID:allenai,項目名稱:ARC-Solvers,代碼行數:26,代碼來源:arc_multichoice_json_reader.py

示例14: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self, state: np.ndarray, action: int = None) -> Instance:  # type: ignore
    """
    Wrap an environment state (and, when given, the discrete action taken)
    into an ``Instance`` with ``states`` / ``actions`` fields.
    """
    # pylint: disable=arguments-differ
    fields: Dict[str, Field] = {"states": ArrayField(state)}
    if action is not None:
        fields["actions"] = LabelField(action, skip_indexing=True)
    return Instance(fields)
開發者ID:ConvLab,項目名稱:ConvLab,代碼行數:11,代碼來源:dataset_reader.py

示例15: text_to_instance

# 需要導入模塊: from allennlp.data import fields [as 別名]
# 或者: from allennlp.data.fields import Field [as 別名]
def text_to_instance(self,  # type: ignore
                         sentence: str,
                         head: str,
                         tail: str,
                         head_type: str=None,
                         tail_type: str=None,
                         label: str=None) -> Instance:
        """
        Build a relation-extraction instance: the head and tail entity mentions
        are tokenized and spliced, between special delimiter tokens, in front of
        the tokenized sentence. A lowercased ``head#tail[#label]`` id is stored
        in metadata.
        """
        # pylint: disable=arguments-differ
        fields: Dict[str, Field] = {}
        
        instance_id = f'{head}#{tail}'
        if label:
            instance_id = f'{instance_id}#{label}'

        fields['metadata'] = MetadataField({'instance_id': instance_id.lower()})

        tokens = self._token_splitter.split_words(sentence)
        # NOTE: `head` and `tail` are rebound here from raw strings to token
        # lists; from this point on they no longer hold the original text.
        head = self._token_splitter.split_words(head)
        tail = self._token_splitter.split_words(tail)

        # TODO: this should not be done here

        if self._masking_mode == 'ner_least_specific':
            logger.info(f"Using masking mode 'ner_least_specific'.")
            # NOTE(review): this branch concatenates head_type/tail_type into the
            # token list — it appears to assume both are token lists (a None
            # default would raise TypeError here). Confirm against callers.
            tokens = ([Token('__start__')]
                      + head + [Token('__del1__')] + head_type + [Token('__ent1__')]
                      + tail + [Token('__del2__')] + tail_type + [Token('__ent2__')]
                      + tokens + [Token('__clf__')])
        else:
            # Default layout: __start__ head __del1__ tail __del2__ sentence __clf__
            tokens = [Token('__start__')] + head + [Token('__del1__')] + tail + [Token('__del2__')] + tokens + [Token('__clf__')]

        fields['sentence'] = TextField(tokens, self._token_indexers)
        
        if label:
            fields['label'] = LabelField(label)

        return Instance(fields)
開發者ID:DFKI-NLP,項目名稱:DISTRE,代碼行數:39,代碼來源:open_nre_nyt_reader.py


注:本文中的allennlp.data.fields.Field方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。