This article collects typical usage examples of the Python method allennlp.common.util.sanitize. If you have been wondering what util.sanitize does, how to call it, or where it is used in practice, the curated examples below may help. You can also explore the containing module, allennlp.common.util, for further context.
The following shows 15 code examples of util.sanitize, sorted by popularity (user votes) by default.
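Before the examples, here is a minimal sketch of what sanitize is typically used for: converting model outputs (torch tensors, numpy arrays, nested dicts and lists) into plain, JSON-serializable Python values. The example values and the printed result are illustrative only; the exact set of supported types depends on the AllenNLP version.

import numpy
import torch
from allennlp.common.util import sanitize

outputs = {
    "label_probs": torch.tensor([0.1, 0.9]),  # torch.Tensor -> list of Python floats
    "token_ids": numpy.array([3, 17, 42]),    # numpy.ndarray -> list of Python ints
    "metadata": {"id": "example-1"},          # plain JSON-friendly values pass through unchanged
}
print(sanitize(outputs))
# e.g. {'label_probs': [0.1..., 0.9...], 'token_ids': [3, 17, 42], 'metadata': {'id': 'example-1'}}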
Example 1: saliency_interpret_from_json
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
    # Convert inputs to labeled instances
    labeled_instances = self.predictor.json_to_labeled_instances(inputs)
    instances_with_grads = dict()
    for idx, instance in enumerate(labeled_instances):
        # Run SmoothGrad
        grads = self._smooth_grads(instance)

        # Normalize results
        for key, grad in grads.items():
            # TODO (@Eric-Wallace), SmoothGrad is not using times input normalization.
            # Fine for now, but should fix for consistency.

            # The [0] here is undo-ing the batching that happens in get_gradients.
            embedding_grad = numpy.sum(grad[0], axis=1)
            norm = numpy.linalg.norm(embedding_grad, ord=1)
            normalized_grad = [math.fabs(e) / norm for e in embedding_grad]
            grads[key] = normalized_grad

        instances_with_grads["instance_" + str(idx + 1)] = grads

    return sanitize(instances_with_grads)
Example 2: saliency_interpret_from_json
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
    # Convert inputs to labeled instances
    labeled_instances = self.predictor.json_to_labeled_instances(inputs)
    instances_with_grads = dict()
    for idx, instance in enumerate(labeled_instances):
        # Run integrated gradients
        grads = self._integrate_gradients(instance)

        # Normalize results
        for key, grad in grads.items():
            # The [0] here is undo-ing the batching that happens in get_gradients.
            embedding_grad = numpy.sum(grad[0], axis=1)
            norm = numpy.linalg.norm(embedding_grad, ord=1)
            normalized_grad = [math.fabs(e) / norm for e in embedding_grad]
            grads[key] = normalized_grad

        instances_with_grads["instance_" + str(idx + 1)] = grads

    return sanitize(instances_with_grads)
Example 3: attack_from_json
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def attack_from_json(
    self,
    inputs: JsonDict = None,
    input_field_to_attack: str = "tokens",
    grad_input_field: str = "grad_input_1",
    ignore_tokens: List[str] = None,
    target: JsonDict = None,
):
    if target is not None:
        raise ValueError("Input reduction does not implement targeted attacks")
    ignore_tokens = ["@@NULL@@"] if ignore_tokens is None else ignore_tokens
    original_instances = self.predictor.json_to_labeled_instances(inputs)
    original_text_field: TextField = original_instances[0][  # type: ignore
        input_field_to_attack
    ]
    original_tokens = deepcopy(original_text_field.tokens)
    final_tokens = []
    for instance in original_instances:
        # Reduce each instance to the smallest set of tokens that preserves the prediction.
        final_tokens.append(
            self._attack_instance(
                inputs, instance, input_field_to_attack, grad_input_field, ignore_tokens
            )
        )
    return sanitize({"final": final_tokens, "original": original_tokens})
Example 4: predict_json
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def predict_json(self, input: JsonDict):
    instance = self._json_to_instance(input)
    output = self._model.forward_on_instance(instance)
    return_json = {}
    return_json["input"] = input

    label_probs = output["label_probs"]
    # Multi-label setting: every choice with probability >= 0.5 counts as a predicted answer.
    predicted_answer_indices = [
        index for index, prob in enumerate(list(label_probs)) if prob >= 0.5
    ]
    premises_attentions = output.get("premises_attentions", None)
    premises_aggregation_attentions = output.get("premises_aggregation_attentions", None)

    return_json["label_probs"] = label_probs
    return_json["predicted_answer_indices"] = predicted_answer_indices
    if premises_attentions is not None:
        return_json["premises_attentions"] = premises_attentions
        return_json["premises_aggregation_attentions"] = premises_aggregation_attentions

    return sanitize(return_json)
Example 5: predict_batch_json
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
    instances = self._batch_json_to_instances(inputs)
    outputs = self._model.forward_on_instances(instances)
    return_jsons = []
    for input, output in zip(inputs, outputs):
        return_json = {}
        return_json["input"] = input

        premises_count = len(input["premises"])
        label_probs = output["label_probs"]
        predicted_answer_indices = [
            index for index, prob in enumerate(list(label_probs)) if prob >= 0.5
        ]
        premises_attentions = output.get("premises_attentions", None)
        premises_aggregation_attentions = output.get("premises_aggregation_attentions", None)

        return_json["label_probs"] = label_probs
        return_json["predicted_answer_indices"] = predicted_answer_indices
        if premises_attentions is not None:
            # Trim the attention matrices to this input's actual number of premises.
            return_json["premises_attentions"] = premises_attentions[:, :premises_count]
            return_json["premises_aggregation_attentions"] = premises_aggregation_attentions[:, :premises_count]
        return_jsons.append(return_json)
    return sanitize(return_jsons)
Example 6: predict_json
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def predict_json(self, input: JsonDict):
    instance = self._json_to_instance(input)
    output = self._model.forward_on_instance(instance)
    return_json = {}
    return_json["input"] = input

    label_probs = output["label_probs"]
    # Single-label setting: the predicted answer is the argmax over label probabilities.
    predicted_answer_index = list(label_probs).index(max(label_probs))
    premises_attentions = output.get("premises_attentions", None)
    premises_aggregation_attentions = output.get("premises_aggregation_attentions", None)

    return_json["label_probs"] = label_probs
    return_json["predicted_answer_index"] = predicted_answer_index
    if premises_attentions is not None:
        return_json["premises_attentions"] = premises_attentions
        return_json["premises_aggregation_attentions"] = premises_aggregation_attentions

    return sanitize(return_json)
Example 7: predict_batch_json
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
    instances = self._batch_json_to_instances(inputs)
    outputs = self._model.forward_on_instances(instances)
    return_jsons = []
    for input, output in zip(inputs, outputs):
        return_json = {}
        return_json["input"] = input

        premises_count = len(input["premises"])
        label_probs = output["label_probs"]
        predicted_answer_index = list(label_probs).index(max(label_probs))
        premises_attentions = output.get("premises_attentions", None)
        premises_aggregation_attentions = output.get("premises_aggregation_attentions", None)

        return_json["label_probs"] = label_probs
        return_json["predicted_answer_index"] = predicted_answer_index
        if premises_attentions is not None:
            # Trim the attention matrices to this input's actual number of premises.
            return_json["premises_attentions"] = premises_attentions[:, :premises_count]
            return_json["premises_aggregation_attentions"] = premises_aggregation_attentions[:, :premises_count]
        return_jsons.append(return_json)
    return sanitize(return_jsons)
Example 8: predict_batch_instance
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
    model = self._model
    with torch.no_grad():
        cuda_device = model._get_prediction_device()
        dataset = Batch(instances)
        dataset.index_instances(model.vocab)
        model_input = util.move_to_device(dataset.as_tensor_dict(), cuda_device)
        outputs = model.decode(model(**model_input))
    return sanitize(outputs)
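A hedged note on this snippet's context: besides sanitize, it relies on several other names that the example header does not list. The imports below are a best-guess reconstruction based on the AllenNLP 0.9-era predictor source; the module paths, especially for Batch and move_to_device, may differ in other releases.

from typing import List

import torch

from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.data.dataset import Batch  # moved to allennlp.data.batch in later releases
from allennlp.nn import util             # provides util.move_to_device used above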
Example 9: saliency_interpret_from_json
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
    """
    Interprets the model's prediction for inputs. Gets the gradients of the loss with respect
    to the input and returns those gradients normalized and sanitized.
    """
    labeled_instances = self.predictor.json_to_labeled_instances(inputs)

    # List of embedding inputs, used for multiplying gradient by the input for normalization
    embeddings_list: List[numpy.ndarray] = []

    instances_with_grads = dict()
    for idx, instance in enumerate(labeled_instances):
        # Hook used for saving embeddings
        handle = self._register_forward_hook(embeddings_list)
        grads = self.predictor.get_gradients([instance])[0]
        handle.remove()

        # Gradients come back in the reverse order that they were sent into the network
        embeddings_list.reverse()
        for key, grad in grads.items():
            # Get the number at the end of every gradient key (they look like grad_input_[int];
            # we're getting this [int] part and subtracting 1 for zero-based indexing).
            # This is then used as an index into the reversed input array to match up the
            # gradient and its respective embedding.
            input_idx = int(key[-1]) - 1
            # The [0] here is undo-ing the batching that happens in get_gradients.
            emb_grad = numpy.sum(grad[0] * embeddings_list[input_idx], axis=1)
            norm = numpy.linalg.norm(emb_grad, ord=1)
            normalized_grad = [math.fabs(e) / norm for e in emb_grad]
            grads[key] = normalized_grad

        instances_with_grads["instance_" + str(idx + 1)] = grads

    return sanitize(instances_with_grads)
Example 10: capture_model_internals
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
@contextmanager  # decorated with contextlib.contextmanager in the upstream predictor source
def capture_model_internals(self) -> Iterator[dict]:
    """
    Context manager that captures the internal-module outputs of
    this predictor's model. The idea is that you could use it as follows:

    ```
    with predictor.capture_model_internals() as internals:
        outputs = predictor.predict_json(inputs)

    return {**outputs, "model_internals": internals}
    ```
    """
    results = {}
    hooks = []

    # First we'll register hooks to add the outputs of each module to the results dict.
    def add_output(idx: int):
        def _add_output(mod, _, outputs):
            results[idx] = {"name": str(mod), "output": sanitize(outputs)}

        return _add_output

    for idx, module in enumerate(self._model.modules()):
        if module != self._model:
            hook = module.register_forward_hook(add_output(idx))
            hooks.append(hook)

    # If you capture the return value of the context manager, you get the results dict.
    yield results

    # And then when you exit the context we remove all the hooks.
    for hook in hooks:
        hook.remove()
Example 11: predict_batch_instance
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
    outputs = self._model.forward_on_instances(instances)
    return sanitize(outputs)
Example 12: test_sanitize
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def test_sanitize(self):
    assert util.sanitize(torch.Tensor([1, 2])) == [1, 2]
    assert util.sanitize(torch.LongTensor([1, 2])) == [1, 2]

    with pytest.raises(ValueError):
        util.sanitize(Unsanitizable())

    assert util.sanitize(Sanitizable()) == {"sanitizable": True}
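The Unsanitizable and Sanitizable classes referenced in this test are helpers defined elsewhere in the test module. A minimal sketch of what they plausibly look like, assuming sanitize falls back to calling an object's to_json() method and raises ValueError for objects it cannot convert:

class Unsanitizable:
    # Provides no to_json() method, so sanitize cannot convert it and raises ValueError.
    pass


class Sanitizable:
    # sanitize is expected to call to_json() on objects that expose it.
    def to_json(self):
        return {"sanitizable": True}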
Example 13: predict_json
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def predict_json(self, inputs: JsonDict, cuda_device: int = -1):
    instance = self._json_to_instance(inputs)
    outputs = self._model.forward_on_instance(instance, cuda_device)
    # Note: this reuses (and mutates) the caller's input dict as the output payload.
    json_output = inputs
    json_output["score"] = outputs["label_probs"][self._entailment_idx]
    json_output["label_probs"] = outputs["label_probs"]
    json_output["label_logits"] = outputs["label_logits"]
    return sanitize(json_output)
Example 14: predict_batch_instance
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
    if "@@UNKNOWN@@" not in self._model.vocab._token_to_index["lemmas"]:
        # Handle cases where the labels are present in the test set but not the training set
        for instance in instances:
            self._predict_unknown(instance)
    outputs = self._model.forward_on_instances(instances)
    return sanitize(outputs)
Example 15: predict_instance
# Required import: from allennlp.common import util [as alias]
# Or: from allennlp.common.util import sanitize [as alias]
def predict_instance(self, instance: Instance) -> JsonDict:
    if "@@UNKNOWN@@" not in self._model.vocab._token_to_index["lemmas"]:
        # Handle cases where the labels are present in the test set but not the training set
        self._predict_unknown(instance)
    outputs = self._model.forward_on_instance(instance)
    return sanitize(outputs)