

Python util.JsonDict Code Examples

This article collects typical usage examples of allennlp.common.util.JsonDict in Python. If you are wondering what util.JsonDict is, how it is used, or are looking for concrete examples, the hand-picked code samples below may help. You can also explore other usage examples from the allennlp.common.util module.


The following presents 15 code examples of util.JsonDict, ordered by popularity by default.
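
For reference, JsonDict is not a method but a type alias defined in allennlp.common.util; in recent releases it amounts to a plain string-keyed dictionary, roughly as shown below:

from typing import Any, Dict

# Approximate definition from allennlp/common/util.py: a JSON-style dictionary.
JsonDict = Dict[str, Any]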

Example 1: saliency_interpret_from_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
        """
        This function finds saliency values for each input token.

        # Parameters

        inputs : `JsonDict`
            The input you want to interpret (the same as the argument to a Predictor, e.g., predict_json()).

        # Returns

        interpretation : `JsonDict`
            Contains the normalized saliency values for each input token. The dict has entries for
            each instance in the inputs JsonDict, e.g., `{instance_1: ..., instance_2:, ... }`.
            Each one of those entries has entries for the saliency of the inputs, e.g.,
            `{grad_input_1: ..., grad_input_2: ... }`.
        """
        raise NotImplementedError("Implement this for saliency interpretations") 
Developer: allenai, Project: allennlp, Lines: 20, Source: saliency_interpreter.py
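
As a hedged illustration of how a concrete saliency interpreter is typically invoked (the model archive path and the "sentence" input key below are assumptions, not part of the example above):

# Minimal usage sketch. Assumes an AllenNLP model archive and a predictor whose
# JSON input uses a "sentence" key; both are illustrative assumptions.
from allennlp.predictors import Predictor
from allennlp.interpret.saliency_interpreters import SmoothGradient

predictor = Predictor.from_path("path/to/model.tar.gz")  # hypothetical archive
interpreter = SmoothGradient(predictor)
saliency = interpreter.saliency_interpret_from_json({"sentence": "This movie was great."})
# saliency looks like {"instance_1": {"grad_input_1": [...]}, ...}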

Example 2: saliency_interpret_from_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
        # Convert inputs to labeled instances
        labeled_instances = self.predictor.json_to_labeled_instances(inputs)

        instances_with_grads = dict()
        for idx, instance in enumerate(labeled_instances):
            # Run smoothgrad
            grads = self._smooth_grads(instance)

            # Normalize results
            for key, grad in grads.items():
                # TODO (@Eric-Wallace): SmoothGrad does not apply gradient-times-input
                # normalization here. Fine for now, but should be fixed for consistency.

                # The [0] here is undo-ing the batching that happens in get_gradients.
                embedding_grad = numpy.sum(grad[0], axis=1)
                norm = numpy.linalg.norm(embedding_grad, ord=1)
                normalized_grad = [math.fabs(e) / norm for e in embedding_grad]
                grads[key] = normalized_grad

            instances_with_grads["instance_" + str(idx + 1)] = grads

        return sanitize(instances_with_grads) 
Developer: allenai, Project: allennlp, Lines: 25, Source: smooth_gradient.py

Example 3: saliency_interpret_from_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
        # Convert inputs to labeled instances
        labeled_instances = self.predictor.json_to_labeled_instances(inputs)

        instances_with_grads = dict()
        for idx, instance in enumerate(labeled_instances):
            # Run integrated gradients
            grads = self._integrate_gradients(instance)

            # Normalize results
            for key, grad in grads.items():
                # The [0] here is undo-ing the batching that happens in get_gradients.
                embedding_grad = numpy.sum(grad[0], axis=1)
                norm = numpy.linalg.norm(embedding_grad, ord=1)
                normalized_grad = [math.fabs(e) / norm for e in embedding_grad]
                grads[key] = normalized_grad

            instances_with_grads["instance_" + str(idx + 1)] = grads

        return sanitize(instances_with_grads) 
Developer: allenai, Project: allennlp, Lines: 22, Source: integrated_gradient.py

Example 4: attack_from_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def attack_from_json(
        self,
        inputs: JsonDict = None,
        input_field_to_attack: str = "tokens",
        grad_input_field: str = "grad_input_1",
        ignore_tokens: List[str] = None,
        target: JsonDict = None,
    ) -> JsonDict:
        if target is not None:
            raise ValueError("Input reduction does not implement targeted attacks")
        ignore_tokens = ["@@NULL@@"] if ignore_tokens is None else ignore_tokens
        original_instances = self.predictor.json_to_labeled_instances(inputs)
        original_text_field: TextField = original_instances[0][  # type: ignore
            input_field_to_attack
        ]
        original_tokens = deepcopy(original_text_field.tokens)
        final_tokens = []
        for instance in original_instances:
            final_tokens.append(
                self._attack_instance(
                    inputs, instance, input_field_to_attack, grad_input_field, ignore_tokens
                )
            )
        return sanitize({"final": final_tokens, "original": original_tokens}) 
Developer: allenai, Project: allennlp, Lines: 26, Source: input_reduction.py

Example 5: convert_qajson_to_entailment

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def convert_qajson_to_entailment(qa_json: JsonDict):
    question_text = qa_json["question"]["stem"]
    choices = qa_json["question"]["choices"]
    for choice in choices:
        choice_text = choice["text"]

        statement = create_hypothesis(get_fitb_from_question(question_text), choice_text)
        create_output_dict(qa_json, statement, choice["label"] == qa_json.get("answerKey", "Z"))

    return qa_json


# Get a Fill-In-The-Blank (FITB) statement from the question text. E.g., "George wants to warm his
# hands quickly by rubbing them. Which skin surface will produce the most heat?" ->
# "George wants to warm his hands quickly by rubbing them. ___ skin surface will produce the most
# heat?"
Developer: INK-USC, Project: KagNet, Lines: 18, Source: convert_csqa.py
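
For context, the qa_json argument follows the multiple-choice QA format that the function above indexes into; a hedged, illustrative input (all values are made up) might look like this:

# Illustrative input for convert_qajson_to_entailment. Only the keys the function
# actually reads are shown; the values are invented for demonstration.
qa_json = {
    "question": {
        "stem": "Which skin surface will produce the most heat?",
        "choices": [
            {"label": "A", "text": "dry palms"},
            {"label": "B", "text": "wet palms"},
        ],
    },
    "answerKey": "A",
}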

Example 6: _json_to_instance

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
        sentence = json_dict["sentence"]
        if "worlds" in json_dict:
            # This is grouped data
            worlds = json_dict["worlds"]
            if isinstance(worlds, str):
                worlds = json.loads(worlds)
        else:
            structured_rep = json_dict["structured_rep"]
            if isinstance(structured_rep, str):
                structured_rep = json.loads(structured_rep)
            worlds = [structured_rep]
        identifier = json_dict["identifier"] if "identifier" in json_dict else None
        instance = self._dataset_reader.text_to_instance(
            sentence=sentence,  # type: ignore
            structured_representations=worlds,
            identifier=identifier,
        )
        return instance 
Developer: allenai, Project: allennlp-semparse, Lines: 21, Source: nlvr_parser.py

Example 7: predict

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict(self, question: str, passage: str, question_id: str) -> JsonDict:
        """
        Make a machine comprehension prediction on the supplied input.
        See https://rajpurkar.github.io/SQuAD-explorer/ for more information about the machine comprehension task.

        Parameters
        ----------
        question : ``str``
            A question about the content in the supplied paragraph.  The question must be answerable by a
            span in the paragraph.
        passage : ``str``
            A paragraph of information relevant to the question.
        question_id : ``str``
            An identifier for the question.

        Returns
        -------
        A dictionary that represents the prediction made by the system.  The answer string will be under the
        "best_span_str" key.
        """
        return self.predict_json({"passage" : passage, "question" : question, "question_id": question_id}) 
Developer: mandarjoshi90, Project: pair2vec, Lines: 21, Source: squad_predictor.py
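
A minimal usage sketch of the method above (the predictor instance and all argument values are assumptions):

# Assumes `predictor` is an instance of the predictor class shown above.
result = predictor.predict(
    question="Which city is the capital of France?",
    passage="Paris is the capital and most populous city of France.",
    question_id="q1",
)
print(result["best_span_str"])  # the predicted answer span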

Example 8: predict_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict_json(self, input: JsonDict):
        instance = self._json_to_instance(input)
        output = self._model.forward_on_instance(instance)
        return_json = {}
        return_json["input"] = input

        label_probs = output["label_probs"]
        predicted_answer_indices = [index for index, prob in enumerate(list(label_probs)) if prob >= 0.5]
        premises_attentions = output.get("premises_attentions", None)
        premises_aggregation_attentions = output.get("premises_aggregation_attentions", None)

        return_json["label_probs"] = label_probs
        return_json["predicted_answer_indices"] = predicted_answer_indices
        if premises_attentions is not None:
            return_json["premises_attentions"] = premises_attentions
            return_json["premises_aggregation_attentions"] = premises_aggregation_attentions
        return sanitize(return_json) 
Developer: StonyBrookNLP, Project: multee, Lines: 19, Source: multiple_correct_mcq_entailment.py

Example 9: predict_batch_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
        instances = self._batch_json_to_instances(inputs)
        outputs = self._model.forward_on_instances(instances)
        return_jsons = []
        for input, output in zip(inputs, outputs):
            return_json = {}
            return_json["input"] = input
            premises_count = len(input["premises"])

            label_probs = output["label_probs"]
            predicted_answer_indices = [index for index, prob in enumerate(list(label_probs)) if prob >= 0.5]
            premises_attentions = output.get("premises_attentions", None)
            premises_aggregation_attentions = output.get("premises_aggregation_attentions", None)

            return_json["label_probs"] = label_probs
            return_json["predicted_answer_indices"] = predicted_answer_indices
            if premises_attentions is not None:
                return_json["premises_attentions"] = premises_attentions[:, :premises_count]
                return_json["premises_aggregation_attentions"] = premises_aggregation_attentions[:, :premises_count]

            return_jsons.append(return_json)
        return sanitize(return_jsons) 
Developer: StonyBrookNLP, Project: multee, Lines: 24, Source: multiple_correct_mcq_entailment.py
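
A hedged sketch of how this batch method might be called; "premises" is the only field read directly in the code above, so the remaining field ("hypotheses") is an assumption that depends on the predictor's _json_to_instance / dataset reader:

# Illustrative batch input; only "premises" is referenced by the method above,
# and the "hypotheses" field is an assumed requirement of the dataset reader.
batch_inputs = [
    {
        "premises": ["Rubbing hands produces friction.", "Friction produces heat."],
        "hypotheses": ["Dry palms produce the most heat.", "Wet palms produce the most heat."],
    }
]
results = predictor.predict_batch_json(batch_inputs)
print(results[0]["predicted_answer_indices"])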

Example 10: predict_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict_json(self, input: JsonDict):
        instance = self._json_to_instance(input)
        output = self._model.forward_on_instance(instance)
        return_json = {}
        return_json["input"] = input

        label_probs = output["label_probs"]
        predicted_answer_index = list(label_probs).index(max(label_probs))
        premises_attentions = output.get("premises_attentions", None)
        premises_aggregation_attentions = output.get("premises_aggregation_attentions", None)

        return_json["label_probs"] = label_probs
        return_json["predicted_answer_index"] = predicted_answer_index
        if premises_attentions is not None:
            return_json["premises_attentions"] = premises_attentions
            return_json["premises_aggregation_attentions"] = premises_aggregation_attentions
        return sanitize(return_json) 
Developer: StonyBrookNLP, Project: multee, Lines: 19, Source: single_correct_mcq_entailment.py

Example 11: predict_batch_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
        instances = self._batch_json_to_instances(inputs)
        outputs = self._model.forward_on_instances(instances)
        return_jsons = []
        for input, output in zip(inputs, outputs):
            return_json = {}
            return_json["input"] = input
            premises_count = len(input["premises"])

            label_probs = output["label_probs"]
            predicted_answer_index = list(label_probs).index(max(label_probs))
            premises_attentions = output.get("premises_attentions", None)
            premises_aggregation_attentions = output.get("premises_aggregation_attentions", None)

            return_json["label_probs"] = label_probs
            return_json["predicted_answer_index"] = predicted_answer_index
            if premises_attentions is not None:
                return_json["premises_attentions"] = premises_attentions[:, :premises_count]
                return_json["premises_aggregation_attentions"] = premises_aggregation_attentions[:, :premises_count]

            return_jsons.append(return_json)
        return sanitize(return_jsons) 
Developer: StonyBrookNLP, Project: multee, Lines: 24, Source: single_correct_mcq_entailment.py

Example 12: predict_json

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict_json(self, inputs: JsonDict) -> JsonDict:
        return_dict = {}
        citation = read_jurgens_jsonline(inputs)
        if len(citation.text) == 0:
            print('empty context, skipping')
            return {}
        print(self._dataset_reader)
        instance = self._dataset_reader.text_to_instance(
            citation_text=citation.text,
            intent=citation.intent,
            citing_paper_id=citation.citing_paper_id,
            cited_paper_id=citation.cited_paper_id,
            citation_excerpt_index=citation.citation_excerpt_index
        )
        outputs = self._model.forward_on_instance(instance)

        return_dict['citation_id'] = citation.citation_id
        return_dict['citingPaperId'] = outputs['citing_paper_id']
        return_dict['citedPaperId'] = outputs['cited_paper_id']
        return_dict['probabilities'] = outputs['probabilities']
        return_dict['prediction'] = outputs['prediction']
        return return_dict 
Developer: allenai, Project: scicite, Lines: 24, Source: predictor_acl_arc.py

Example 13: predict

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict(self, sentence: str) -> JsonDict:
        """
        Predict a dependency parse for the given sentence.
        Parameters
        ----------
        sentence : ``str``
            The sentence to parse.

        Returns
        -------
        A dictionary representation of the dependency tree.
        """
        return self.predict_json({"sentence": sentence})

    # def predict_json(self, inputs: JsonDict) -> JsonDict:
    #     instance = self._json_to_instance(inputs)
    #     return self.predict_instance(instance) 
Developer: DreamerDeo, Project: HIT-SCIR-CoNLL2019, Lines: 18, Source: transition_ucca_predictor.py

Example 14: _json_to_instance

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
        sentence = json_dict['sentence']
        head = json_dict['head']
        tail = json_dict['tail']
        instance = self._dataset_reader.text_to_instance(sentence=sentence, head=head, tail=tail)
        return instance 
Developer: DFKI-NLP, Project: DISTRE, Lines: 8, Source: predictor.py

Example 15: predict_batch_instance

# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import JsonDict [as alias]
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
        model = self._model

        with torch.no_grad():
            cuda_device = model._get_prediction_device()
            dataset = Batch(instances)
            dataset.index_instances(model.vocab)
            model_input = util.move_to_device(dataset.as_tensor_dict(), cuda_device)
            outputs = model.decode(model(**model_input))

        return sanitize(outputs) 
Developer: DFKI-NLP, Project: DISTRE, Lines: 13, Source: predictor.py
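
For completeness, a hedged sketch of calling this instance-level method directly, building instances with the same sentence/head/tail fields used by _json_to_instance in Example 14 (the access to the predictor's dataset reader and the example values are assumptions):

# Assumes `predictor` is an instance of the class above and exposes the dataset
# reader used in Example 14; the sentence and entity values are illustrative.
instances = [
    predictor._dataset_reader.text_to_instance(
        sentence="Barack Obama was born in Hawaii.", head="Barack Obama", tail="Hawaii"
    )
]
outputs = predictor.predict_batch_instance(instances)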


Note: The allennlp.common.util.JsonDict examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to its original authors. Please follow each project's license when distributing or using the code, and do not reproduce this article without permission.