This article collects typical usage examples of Python's tensorflow.metrics. If you are wondering what tensorflow.metrics does and how to use it in practice, the curated code examples below may help. You can also explore further usage examples from the enclosing module, tensorflow.
The following 15 code examples of tensorflow.metrics are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: variance
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def variance(data, weights=None):
  """Returns the variance of the input tensor `data`, each entry weighted by
  the corresponding entry in `weights`.

  Follows the tf.metrics API for an idempotent tensor and an update tensor.

  Args:
    data: input tensor of arbitrary shape.
    weights: input tensor of the same shape as `data`. When None, use a weight
      of 1 for all inputs.

  Returns:
    variance_value: idempotent tensor containing the variance of `data`, whose
      shape is `[1]`.
    update_op: a (non-idempotent) op to update the variance value.
  """
  if weights is None:
    weights = tf.ones(shape=data.shape, dtype=tf.float32)
  tsquared_mean, tsquared_update = tf.metrics.mean(tf.square(data), weights)
  mean_t, t_update = tf.metrics.mean(data, weights)
  variance_value = tsquared_mean - mean_t * mean_t
  update_op = tf.group(tsquared_update, t_update)
  return variance_value, update_op
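A minimal usage sketch of the function above, assuming TF 1.x graph mode and an illustrative input tensor. tf.metrics-style ops keep their accumulators in local variables, so those must be initialized before the first update; `update_op` is then run once per batch and the idempotent value can be read at any time.
import tensorflow as tf

data = tf.constant([1.0, 2.0, 3.0, 4.0])
variance_value, update_op = variance(data)

with tf.Session() as sess:
  # tf.metrics accumulators live in local variables and must be initialized.
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)               # accumulate statistics for this "batch"
  print(sess.run(variance_value))   # idempotent read of the running variance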
Example 2: r2_metric
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def r2_metric(preds, targets, weights):
  """Returns ops for the R2 statistic following the tf.metrics API.

  Args:
    preds: predictions (arbitrary shape).
    targets: targets (same shape as predictions).
    weights: per-instance weights (same shape as predictions).

  Returns:
    r2: idempotent tensor containing the R2 value.
    update_op: op for updating the value given new data.
  """
  res_ss, res_ss_update = tf.metrics.mean(tf.square(preds - targets), weights)
  tot_ss, tot_ss_update = variance(targets, weights)
  r2 = 1. - res_ss / tot_ss
  update_op = tf.group(res_ss_update, tot_ss_update)
  return r2, update_op
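Note that `r2` is built from the two streaming value tensors rather than from a single batch, so it can be evaluated at any point and reflects all batches accumulated so far. Run `update_op` once per batch and read `r2` only after at least one update; before any updates both streaming means are zero and the ratio is not meaningful.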
Example 3: _per_target_variance
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def _per_target_variance(data, weights=None):
  """Returns the variance of the input tensor `data`, each entry weighted by
  the corresponding entry in `weights`.

  Follows the tf.metrics API for an idempotent tensor and an update tensor.

  Args:
    data: input tensor of arbitrary shape, with the final dimension indexing
      distinct targets.
    weights: input tensor of the same shape as `data`. When None, use a weight
      of 1 for all inputs.

  Returns:
    variance_value: idempotent tensor containing the per-target variance of
      `data`, whose shape is `[num_targets]`.
    update_op: a (non-idempotent) op to update the variance value.
  """
  if weights is None:
    weights = tf.ones(shape=data.shape, dtype=tf.float32)
  tsquared_mean, tsquared_update = _per_target_mean(tf.square(data), weights)
  mean_t, t_update = _per_target_mean(data, weights)
  variance_value = tsquared_mean - mean_t * mean_t
  update_op = tf.group(tsquared_update, t_update)
  return variance_value, update_op
Example 4: per_target_r2
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def per_target_r2(preds, targets, weights):
  """Returns ops for the per-target R2 statistic following the tf.metrics API.

  Args:
    preds: arbitrarily shaped predictions, with the final dimension indexing
      distinct targets.
    targets: targets (same shape as predictions).
    weights: per-instance weights (same shape as predictions).

  Returns:
    r2: idempotent `[preds.shape[-1]]` tensor of R2 values for each target.
    update_op: op for updating the value given new data.
  """
  res_ss, res_ss_update = _per_target_mean(tf.square(preds - targets), weights)
  tot_ss, tot_ss_update = _per_target_variance(targets, weights)
  r2 = 1. - res_ss / tot_ss
  update_op = tf.group(res_ss_update, tot_ss_update)
  return r2, update_op
Example 5: eval_metric_fn
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def eval_metric_fn(labels, predictions, params):
  """Returns a dict of <metric name>: <tf.metrics metric>.

  Args:
    labels: ground-truth values in [survival array | failure array] format.
    predictions: conditional likelihoods of surviving each interval.
    params: dict containing model parameters, including the classification
      threshold.
  """
  metrics = {}
  num_unbounded_intervals = metadata.NUM_INTERVALS + 1
  labels_value = get_label(labels)
  class_preds = get_class(predictions, params['threshold'])
  accuracy = tf.metrics.accuracy(labels_value, class_preds, name='acc_op')
  metrics['accuracy'] = accuracy
  accuracy_per_class = tf.metrics.mean_per_class_accuracy(
      labels=labels_value,
      predictions=class_preds,
      num_classes=num_unbounded_intervals,
      name='accuracy_per_class_op')
  metrics['accuracy_per_class'] = accuracy_per_class
  return metrics
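A hedged sketch of how a metric dict like this is typically consumed by a TF 1.x Estimator: the dict is passed as `eval_metric_ops` in the EVAL branch of a model_fn. The `build_model` helper and the surrounding model_fn are hypothetical; only the eval_metric_ops wiring is the point here.
# Hypothetical model_fn fragment (TF 1.x Estimator API); build_model is an
# assumed helper returning (predictions, loss).
def model_fn(features, labels, mode, params):
  predictions, loss = build_model(features, params)
  if mode == tf.estimator.ModeKeys.EVAL:
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        eval_metric_ops=eval_metric_fn(labels, predictions, params))
  # TRAIN and PREDICT branches omitted for brevity.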
Example 6: reconstruction_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def reconstruction_loss(self, x_input, x_target, x_length, z=None,
                        c_input=None):
  """Reconstruction loss calculation.

  Args:
    x_input: Batch of decoder input sequences for teacher forcing, sized
      `[batch_size, max(x_length), output_depth]`.
    x_target: Batch of expected output sequences to compute loss against,
      sized `[batch_size, max(x_length), output_depth]`.
    x_length: Length of input/output sequences, sized `[batch_size]`.
    z: (Optional) Latent vectors. Required if the model is conditional. Sized
      `[n, z_size]`.
    c_input: (Optional) Batch of control sequences, sized
      `[batch_size, max(x_length), control_depth]`. Required if conditioning
      on control sequences.

  Returns:
    r_loss: The reconstruction loss for each sequence in the batch.
    metric_map: Map from metric name to tf.metrics return values for logging.
  """
  pass
Example 7: _flat_reconstruction_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
  b_enc, b_dec = tf.split(
      flat_rnn_output,
      [self._nade.num_hidden, self._output_depth], axis=1)
  ll, cond_probs = self._nade.log_prob(
      flat_x_target, b_enc=b_enc, b_dec=b_dec)
  r_loss = -ll
  flat_truth = tf.cast(flat_x_target, tf.bool)
  flat_predictions = tf.greater_equal(cond_probs, 0.5)
  metric_map = {
      'metrics/accuracy':
          tf.metrics.mean(
              tf.reduce_all(tf.equal(flat_truth, flat_predictions), axis=-1)),
      'metrics/recall':
          tf.metrics.recall(flat_truth, flat_predictions),
      'metrics/precision':
          tf.metrics.precision(flat_truth, flat_predictions),
  }
  return r_loss, metric_map
Example 8: _make_logging_ops
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def _make_logging_ops(true_keys, predicted_keys, ranks, log_dir):
  """tf.metrics-compatible ops for saving and logging results."""
  all_true_keys = []
  all_predicted_keys = []
  all_ranks = []

  def _extend_keys(true_batch_keys, predicted_batch_keys, batch_ranks):
    all_true_keys.extend(true_batch_keys)
    all_predicted_keys.extend(predicted_batch_keys)
    all_ranks.extend(batch_ranks)
    return np.int32(0)

  update_op = tf.py_func(_extend_keys, [true_keys, predicted_keys, ranks],
                         [tf.int32])[0]

  def _write_log_to_file(global_step):
    return _log_predictions(all_true_keys, all_predicted_keys, all_ranks,
                            global_step, log_dir)

  value_op = tf.py_func(_write_log_to_file,
                        [tf.train.get_or_create_global_step()], [tf.int32])[0]
  return (value_op, update_op)
Example 9: get_metric_ops
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def get_metric_ops(
    self, features_dict: types.TensorTypeMaybeDict,
    predictions_dict: types.TensorTypeMaybeDict,
    labels_dict: types.TensorTypeMaybeDict
) -> Dict[Text, Tuple[types.TensorType, types.TensorType]]:
  """Returns the metric_ops entry for this metric.

  Note that the metric will be added to metric_ops via
  metric_ops.update(metric.get_metric_ops()).

  Args:
    features_dict: Dictionary containing references to the features Tensors
      for the model.
    predictions_dict: Dictionary containing references to the predictions
      Tensors for the model.
    labels_dict: Dictionary containing references to the labels Tensors for
      the model.

  Returns:
    A metric op dictionary, i.e. a dictionary[metric_name] = (value_op,
    update_op) containing all the metrics and ops for this metric.
  """
  raise NotImplementedError('not implemented')
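For illustration only, a hedged sketch of what a concrete implementation of this interface could look like. The class name, metric key, and the 'logistic' prediction key are hypothetical assumptions; a real implementation would extend the metric base class that declares the abstract method above.
# Hypothetical implementation sketch (names and keys are illustrative only).
class _MeanPredictionMetric(object):

  def get_metric_ops(self, features_dict, predictions_dict, labels_dict):
    value_op, update_op = tf.metrics.mean(predictions_dict['logistic'])
    return {'post_export_metrics/mean_prediction': (value_op, update_op)}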
Example 10: populate_stats_and_pop
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def populate_stats_and_pop(
    self, slice_key: slicer.SliceKeyType, combined_metrics: Dict[Text, Any],
    output_metrics: Dict[Text, metrics_pb2.MetricValue]) -> None:
  """Converts the metric in `combined_metrics` to `output_metrics` and pops it.

  Please override this method if the metric is NOT a plot type and should be
  converted into a non-float type. The metric should also be popped out of
  `combined_metrics` after conversion. By default, this method does nothing.
  The metric, along with the remaining metrics in `combined_metrics`, will be
  converted into float values afterwards.

  Args:
    slice_key: The name of the slice.
    combined_metrics: The dict containing the raw TFMA metrics.
    output_metrics: The dict that the metrics are converted into.
  """
  pass
Example 11: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def __init__(self,
             example_weight_key: Optional[Text] = None,
             target_prediction_keys: Optional[List[Text]] = None,
             labels_key: Optional[Text] = None,
             metric_tag: Optional[Text] = None,
             tensor_index: Optional[int] = None):
  """Creates a metric that computes calibration.

  Args:
    example_weight_key: The key of the example weight column in the features
      dict. If None, all predictions are given a weight of 1.0.
    target_prediction_keys: If provided, the prediction keys to look for, in
      order.
    labels_key: If provided, a custom label key.
    metric_tag: If provided, a custom metric tag. Only necessary to
      disambiguate instances of the same metric on different predictions.
    tensor_index: Optional index specifying which class's predictions to
      compute metrics on in the case of multi-class models.
  """
  self._example_weight_key = example_weight_key
  super(_Calibration, self).__init__(
      target_prediction_keys=target_prediction_keys,
      labels_key=labels_key,
      metric_tag=metric_tag)
Example 12: evaluate
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def evaluate(self, iou_threshold=0.5):
  self.metrics = {}
  for label in range(self.num_classes):
    self.metrics[label] = evaluate_detector(
        self.groundtruth[label],
        self.detections[label],
        iou_threshold
    )
  if self.num_classes > 1:
    APs = [
        self.metrics[label]['AP']
        for label in range(self.num_classes)
    ]
    self.metrics['mAP'] = np.mean(APs)
Example 13: _initialize_metrics
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def _initialize_metrics(self, metrics):
  '''
  Initializes/resets the metrics before every call to `train` and `evaluate`.
  '''
  # Reset the lists of previously tracked metrics.
  self.metric_names = []
  self.best_metric_values = []
  self.metric_update_ops = []
  self.metric_value_tensors = []
  # Set the metrics that will be evaluated.
  if 'loss' in metrics:
    self.metric_names.append('loss')
    self.best_metric_values.append(99999999.9)
    self.metric_update_ops.append(self.mean_loss_update_op)
    self.metric_value_tensors.append(self.mean_loss_value)
  if 'mean_iou' in metrics:
    self.metric_names.append('mean_iou')
    self.best_metric_values.append(0.0)
    self.metric_update_ops.append(self.mean_iou_update_op)
    self.metric_value_tensors.append(self.mean_iou_value)
  if 'accuracy' in metrics:
    self.metric_names.append('accuracy')
    self.best_metric_values.append(0.0)
    self.metric_update_ops.append(self.acc_update_op)
    self.metric_value_tensors.append(self.acc_value)
Example 14: _evaluate
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def _evaluate(self, data_generator, metrics, num_batches, l2_regularization,
              description='Running evaluation'):
  '''
  Internal method used by both `evaluate()` and `train()` that performs
  the actual evaluation. For the first three arguments, please refer
  to the documentation of the public `evaluate()` method.

  Arguments:
    description (string, optional): A description string that will be prepended
      to the progress bar while the evaluation is being processed. During
      training, this description is used to clarify whether the evaluation
      is being performed on the training or the validation dataset.
  '''
  # Reset all metrics' accumulator variables.
  self.sess.run(self.metrics_reset_op)
  # Set up the progress bar.
  tr = trange(num_batches, file=sys.stdout)
  tr.set_description(description)
  # Accumulate the metrics over the batches.
  for step in tr:
    batch_images, batch_labels = next(data_generator)
    self.sess.run(self.metric_update_ops,
                  feed_dict={self.image_input: batch_images,
                             self.labels: batch_labels,
                             self.keep_prob: 1.0,
                             self.l2_regularization_rate: l2_regularization})
  # Compute the final metric values.
  self.metric_values = self.sess.run(self.metric_value_tensors)
  evaluation_results_string = ''
  for i, metric_name in enumerate(self.metric_names):
    evaluation_results_string += metric_name + ': {:.4f} '.format(self.metric_values[i])
  print(evaluation_results_string)
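The `*_value`, `*_update_op`, and `metrics_reset_op` tensors used in Examples 13 and 14 are created elsewhere in that model class. A hedged sketch of one common way to build them follows; the attribute names match the examples above, while `self.labels`, `self.predictions`, `self.loss`, and `self.num_classes` are assumed tensors/attributes of the surrounding class.
# Hedged sketch of how such metric ops are typically constructed.
def _build_metrics(self):
  self.mean_loss_value, self.mean_loss_update_op = tf.metrics.mean(self.loss)
  self.mean_iou_value, self.mean_iou_update_op = tf.metrics.mean_iou(
      labels=self.labels, predictions=self.predictions,
      num_classes=self.num_classes)
  self.acc_value, self.acc_update_op = tf.metrics.accuracy(
      labels=self.labels, predictions=self.predictions)
  # tf.metrics accumulators live in local variables, so "resetting" the metrics
  # between evaluation runs amounts to re-initializing those variables.
  self.metrics_reset_op = tf.variables_initializer(tf.local_variables())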
Example 15: _per_target_mean
# Required import: import tensorflow [as alias]
# Or: from tensorflow import metrics [as alias]
def _per_target_mean(values, weights, name='per-target-mean'):
  """Computes a weighted mean across all but the final dimension.

  Args:
    values: `[..., num_targets]` Tensor.
    weights: Tensor. Either the same shape as `values` or broadcastable to it.
    name: string.

  Returns:
    Tuple containing a tf.metrics-compatible value op and update_op.
    The value_op has shape `[num_targets]`.
  """
  # First, reduce over all but the final dimension.
  values = tf.convert_to_tensor(values)
  weights = tf.convert_to_tensor(weights)
  weights_dtype = tf.float64 if values.dtype == tf.float64 else tf.float32
  weights = tf.cast(weights, weights_dtype)
  reduction_axes = list(range(values.shape.ndims - 1))
  reduced_weights = tf.reduce_mean(weights, axis=reduction_axes)
  reduced_weighted_values = tf.reduce_mean(
      values * weights, axis=reduction_axes)
  return tf.metrics.mean_tensor(reduced_weighted_values *
                                (1. / reduced_weights), reduced_weights)
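As with the single-target version, the per-target ops from Examples 3, 4, and 15 follow the standard streaming pattern. A minimal sketch of `per_target_r2` in use, assuming TF 1.x graph mode, three target columns, and a hypothetical `batches` iterator of NumPy (preds, targets) pairs:
# Minimal usage sketch; `batches` is an assumed data iterator.
preds_ph = tf.placeholder(tf.float32, shape=[None, 3])
targets_ph = tf.placeholder(tf.float32, shape=[None, 3])
weights = tf.ones_like(targets_ph)

r2, r2_update = per_target_r2(preds_ph, targets_ph, weights)  # r2 has shape [3]

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  for batch_preds, batch_targets in batches:
    sess.run(r2_update, feed_dict={preds_ph: batch_preds,
                                   targets_ph: batch_targets})
  print(sess.run(r2))  # one R2 value per target column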