

Python util.JsonDict code examples

This article collects typical usage examples of allennlp.common.util.JsonDict drawn from open-source Python projects. If you are unsure what util.JsonDict is, how it is used, or what it looks like in real code, the curated examples below should help. You can also browse the other members of allennlp.common.util for further usage examples.


Fifteen code examples of util.JsonDict are shown below, ordered by popularity by default.
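
For context, JsonDict is not a class but a type alias: allennlp.common.util defines it (approximately) as Dict[str, Any], so any plain dictionary produced by json.loads or passed to a Predictor satisfies the annotation. The minimal sketch below illustrates this; the "sentence" field name and the commented predictor call are assumptions for demonstration, not taken from the examples that follow.

from allennlp.common.util import JsonDict

# In allennlp.common.util the alias is (approximately) defined as
#     JsonDict = Dict[str, Any]
# so an ordinary dictionary already satisfies the annotation.
inputs: JsonDict = {"sentence": "George wants to warm his hands quickly by rubbing them."}

# A typical round trip through a Predictor (construction omitted; the
# "sentence" field name is illustrative, not required by the alias):
#     outputs: JsonDict = predictor.predict_json(inputs)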

Example 1: saliency_interpret_from_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
        """
        This function finds saliency values for each input token.

        # Parameters

        inputs : `JsonDict`
            The input you want to interpret (the same as the argument to a Predictor, e.g., predict_json()).

        # Returns

        interpretation : `JsonDict`
            Contains the normalized saliency values for each input token. The dict has entries for
            each instance in the inputs JsonDict, e.g., `{instance_1: ..., instance_2: ..., ...}`.
            Each one of those entries has entries for the saliency of the inputs, e.g.,
            `{grad_input_1: ..., grad_input_2: ... }`.
        """
        raise NotImplementedError("Implement this for saliency interpretations") 
Developer ID: allenai, Project: allennlp, Lines of code: 20, Source file: saliency_interpreter.py
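
Examples 2 and 3 below give concrete implementations of this abstract method. As a hedged usage sketch, assuming the standard allennlp interpreter API, a subclass such as SmoothGradient wraps an existing Predictor and is then called with a JsonDict; the archive path and predictor name here are placeholders, not values from the examples.

from allennlp.interpret.saliency_interpreters import SmoothGradient
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor

# Placeholder archive path and predictor name; substitute your own model.
archive = load_archive("model.tar.gz")
predictor = Predictor.from_archive(archive, "text_classifier")

interpreter = SmoothGradient(predictor)
saliency = interpreter.saliency_interpret_from_json({"sentence": "a very enjoyable film"})
# Expected shape, per the docstring above (values are illustrative):
# {"instance_1": {"grad_input_1": [0.12, 0.07, ...], ...}}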

Example 2: saliency_interpret_from_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
        # Convert inputs to labeled instances
        labeled_instances = self.predictor.json_to_labeled_instances(inputs)

        instances_with_grads = dict()
        for idx, instance in enumerate(labeled_instances):
            # Run smoothgrad
            grads = self._smooth_grads(instance)

            # Normalize results
            for key, grad in grads.items():
                # TODO (@Eric-Wallace), SmoothGrad is not using times input normalization.
                # Fine for now, but should fix for consistency.

                # The [0] here is undo-ing the batching that happens in get_gradients.
                embedding_grad = numpy.sum(grad[0], axis=1)
                norm = numpy.linalg.norm(embedding_grad, ord=1)
                normalized_grad = [math.fabs(e) / norm for e in embedding_grad]
                grads[key] = normalized_grad

            instances_with_grads["instance_" + str(idx + 1)] = grads

        return sanitize(instances_with_grads) 
Developer ID: allenai, Project: allennlp, Lines of code: 25, Source file: smooth_gradient.py

Example 3: saliency_interpret_from_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
        # Convert inputs to labeled instances
        labeled_instances = self.predictor.json_to_labeled_instances(inputs)

        instances_with_grads = dict()
        for idx, instance in enumerate(labeled_instances):
            # Run integrated gradients
            grads = self._integrate_gradients(instance)

            # Normalize results
            for key, grad in grads.items():
                # The [0] here is undo-ing the batching that happens in get_gradients.
                embedding_grad = numpy.sum(grad[0], axis=1)
                norm = numpy.linalg.norm(embedding_grad, ord=1)
                normalized_grad = [math.fabs(e) / norm for e in embedding_grad]
                grads[key] = normalized_grad

            instances_with_grads["instance_" + str(idx + 1)] = grads

        return sanitize(instances_with_grads) 
Developer ID: allenai, Project: allennlp, Lines of code: 22, Source file: integrated_gradient.py

Example 4: attack_from_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def attack_from_json(
        self,
        inputs: JsonDict = None,
        input_field_to_attack: str = "tokens",
        grad_input_field: str = "grad_input_1",
        ignore_tokens: List[str] = None,
        target: JsonDict = None,
    ):
        if target is not None:
            raise ValueError("Input reduction does not implement targeted attacks")
        ignore_tokens = ["@@NULL@@"] if ignore_tokens is None else ignore_tokens
        original_instances = self.predictor.json_to_labeled_instances(inputs)
        original_text_field: TextField = original_instances[0][  # type: ignore
            input_field_to_attack
        ]
        original_tokens = deepcopy(original_text_field.tokens)
        final_tokens = []
        for instance in original_instances:
            final_tokens.append(
                self._attack_instance(
                    inputs, instance, input_field_to_attack, grad_input_field, ignore_tokens
                )
            )
        return sanitize({"final": final_tokens, "original": original_tokens}) 
Developer ID: allenai, Project: allennlp, Lines of code: 26, Source file: input_reduction.py

Example 5: convert_qajson_to_entailment

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def convert_qajson_to_entailment(qa_json: JsonDict):
    question_text = qa_json["question"]["stem"]
    choices = qa_json["question"]["choices"]
    for choice in choices:
        choice_text = choice["text"]

        statement = create_hypothesis(get_fitb_from_question(question_text), choice_text)
        create_output_dict(qa_json, statement,  choice["label"] == qa_json.get("answerKey", "Z"))

    return qa_json


# Get a Fill-In-The-Blank (FITB) statement from the question text. E.g. "George wants to warm his
# hands quickly by rubbing them. Which skin surface will produce the most heat?" ->
# "George wants to warm his hands quickly by rubbing them. ___ skin surface will produce the most
# heat?"
Developer ID: INK-USC, Project: KagNet, Lines of code: 18, Source file: convert_csqa.py
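
The expected shape of qa_json can be read directly off the code above; a minimal illustrative input (the question text, labels, and choices are invented for demonstration) looks like this:

# Illustrative multiple-choice question in the format consumed by
# convert_qajson_to_entailment; the values are made up for demonstration.
qa_json = {
    "question": {
        "stem": "Which skin surface will produce the most heat?",
        "choices": [
            {"label": "A", "text": "dry palms"},
            {"label": "B", "text": "wet palms"},
        ],
    },
    "answerKey": "A",
}

# convert_qajson_to_entailment(qa_json) builds one hypothesis statement per
# choice (via the create_hypothesis / get_fitb_from_question helpers defined
# elsewhere in the project) and records whether that choice matches answerKey.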

Example 6: _json_to_instance

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
        sentence = json_dict["sentence"]
        if "worlds" in json_dict:
            # This is grouped data
            worlds = json_dict["worlds"]
            if isinstance(worlds, str):
                worlds = json.loads(worlds)
        else:
            structured_rep = json_dict["structured_rep"]
            if isinstance(structured_rep, str):
                structured_rep = json.loads(structured_rep)
            worlds = [structured_rep]
        identifier = json_dict["identifier"] if "identifier" in json_dict else None
        instance = self._dataset_reader.text_to_instance(
            sentence=sentence,  # type: ignore
            structured_representations=worlds,
            identifier=identifier,
        )
        return instance 
Developer ID: allenai, Project: allennlp-semparse, Lines of code: 21, Source file: nlvr_parser.py

Example 7: predict

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict(self, question: str, passage: str, question_id: str) -> JsonDict:
        """
        Make a machine comprehension prediction on the supplied input.
        See https://rajpurkar.github.io/SQuAD-explorer/ for more information about the machine comprehension task.

        Parameters
        ----------
        question : ``str``
            A question about the content in the supplied paragraph.  The question must be answerable by a
            span in the paragraph.
        passage : ``str``
            A paragraph of information relevant to the question.

        Returns
        -------
        A dictionary that represents the prediction made by the system.  The answer string will be under the
        "best_span_str" key.
        """
        return self.predict_json({"passage" : passage, "question" : question, "question_id": question_id}) 
Developer ID: mandarjoshi90, Project: pair2vec, Lines of code: 21, Source file: squad_predictor.py

Example 8: predict_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict_json(self, input: JsonDict):
        instance = self._json_to_instance(input)
        output = self._model.forward_on_instance(instance)
        return_json = {}
        return_json["input"] = input

        label_probs = output["label_probs"]
        predicted_answer_indices = [index for index, prob in enumerate(list(label_probs)) if prob >= 0.5]
        premises_attentions = output.get("premises_attentions", None)
        premises_aggregation_attentions = output.get("premises_aggregation_attentions", None)

        return_json["label_probs"] = label_probs
        return_json["predicted_answer_indices"] = predicted_answer_indices
        if premises_attentions is not None:
            return_json["premises_attentions"] = premises_attentions
            return_json["premises_aggregation_attentions"] = premises_aggregation_attentions
        return sanitize(return_json) 
Developer ID: StonyBrookNLP, Project: multee, Lines of code: 19, Source file: multiple_correct_mcq_entailment.py

Example 9: predict_batch_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
        instances = self._batch_json_to_instances(inputs)
        outputs = self._model.forward_on_instances(instances)
        return_jsons = []
        for input, output in zip(inputs, outputs):
            return_json = {}
            return_json["input"] = input
            premises_count = len(input["premises"])

            label_probs = output["label_probs"]
            predicted_answer_indices = [index for index, prob in enumerate(list(label_probs)) if prob >= 0.5]
            premises_attentions = output.get("premises_attentions", None)
            premises_aggregation_attentions = output.get("premises_aggregation_attentions", None)

            return_json["label_probs"] = label_probs
            return_json["predicted_answer_indices"] = predicted_answer_indices
            if premises_attentions is not None:
                return_json["premises_attentions"] = premises_attentions[:, :premises_count]
                return_json["premises_aggregation_attentions"] = premises_aggregation_attentions[:, :premises_count]

            return_jsons.append(return_json)
        return sanitize(return_jsons) 
Developer ID: StonyBrookNLP, Project: multee, Lines of code: 24, Source file: multiple_correct_mcq_entailment.py

Example 10: predict_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict_json(self, input: JsonDict):
        instance = self._json_to_instance(input)
        output = self._model.forward_on_instance(instance)
        return_json = {}
        return_json["input"] = input

        label_probs = output["label_probs"]
        predicted_answer_index = list(label_probs).index(max(label_probs))
        premises_attentions = output.get("premises_attentions", None)
        premises_aggregation_attentions = output.get("premises_aggregation_attentions", None)

        return_json["label_probs"] = label_probs
        return_json["predicted_answer_index"] = predicted_answer_index
        if premises_attentions is not None:
            return_json["premises_attentions"] = premises_attentions
            return_json["premises_aggregation_attentions"] = premises_aggregation_attentions
        return sanitize(return_json) 
Developer ID: StonyBrookNLP, Project: multee, Lines of code: 19, Source file: single_correct_mcq_entailment.py

Example 11: predict_batch_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
        instances = self._batch_json_to_instances(inputs)
        outputs = self._model.forward_on_instances(instances)
        return_jsons = []
        for input, output in zip(inputs, outputs):
            return_json = {}
            return_json["input"] = input
            premises_count = len(input["premises"])

            label_probs = output["label_probs"]
            predicted_answer_index = list(label_probs).index(max(label_probs))
            premises_attentions = output.get("premises_attentions", None)
            premises_aggregation_attentions = output.get("premises_aggregation_attentions", None)

            return_json["label_probs"] = label_probs
            return_json["predicted_answer_index"] = predicted_answer_index
            if premises_attentions is not None:
                return_json["premises_attentions"] = premises_attentions[:, :premises_count]
                return_json["premises_aggregation_attentions"] = premises_aggregation_attentions[:, :premises_count]

            return_jsons.append(return_json)
        return sanitize(return_jsons) 
Developer ID: StonyBrookNLP, Project: multee, Lines of code: 24, Source file: single_correct_mcq_entailment.py

Example 12: predict_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict_json(self, inputs: JsonDict) -> JsonDict:
        return_dict = {}
        citation = read_jurgens_jsonline(inputs)
        if len(citation.text) == 0:
            print('empty context, skipping')
            return {}
        print(self._dataset_reader)
        instance = self._dataset_reader.text_to_instance(
            citation_text=citation.text,
            intent=citation.intent,
            citing_paper_id=citation.citing_paper_id,
            cited_paper_id=citation.cited_paper_id,
            citation_excerpt_index=citation.citation_excerpt_index
        )
        outputs = self._model.forward_on_instance(instance)

        return_dict['citation_id'] = citation.citation_id
        return_dict['citingPaperId'] = outputs['citing_paper_id']
        return_dict['citedPaperId'] = outputs['cited_paper_id']
        return_dict['probabilities'] = outputs['probabilities']
        return_dict['prediction'] = outputs['prediction']
        return return_dict 
Developer ID: allenai, Project: scicite, Lines of code: 24, Source file: predictor_acl_arc.py

Example 13: predict

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict(self, sentence: str) -> JsonDict:
        """
        Predict a dependency parse for the given sentence.
        Parameters
        ----------
        sentence : ``str``
            The sentence to parse.

        Returns
        -------
        A dictionary representation of the dependency tree.
        """
        return self.predict_json({"sentence": sentence})

    # def predict_json(self, inputs: JsonDict) -> JsonDict:
    #     instance = self._json_to_instance(inputs)
    #     return self.predict_instance(instance) 
Developer ID: DreamerDeo, Project: HIT-SCIR-CoNLL2019, Lines of code: 18, Source file: transition_ucca_predictor.py

Example 14: _json_to_instance

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
        sentence = json_dict['sentence']
        head = json_dict['head']
        tail = json_dict['tail']
        instance = self._dataset_reader.text_to_instance(sentence=sentence, head=head, tail=tail)
        return instance 
Developer ID: DFKI-NLP, Project: DISTRE, Lines of code: 8, Source file: predictor.py

Example 15: predict_batch_instance

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
        model = self._model

        with torch.no_grad():
            cuda_device = model._get_prediction_device()
            dataset = Batch(instances)
            dataset.index_instances(model.vocab)
            model_input = util.move_to_device(dataset.as_tensor_dict(), cuda_device)
            outputs = model.decode(model(**model_input))

        return sanitize(outputs) 
Developer ID: DFKI-NLP, Project: DISTRE, Lines of code: 13, Source file: predictor.py


Note: the allennlp.common.util.JsonDict examples in this article were compiled by 純淨天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright over the source code; consult each project's license before redistributing or reusing it, and do not republish without permission.