This article collects typical usage examples of the Python function tensorflow.python.keras.backend.mean. If you are unsure what mean does or how to call it, the curated snippets below may help.
The following 15 code examples of the mean function are shown, sorted by popularity by default. Note that they are excerpted from TensorFlow/Keras internals and open-source projects, so they rely on their surrounding context and imports (for example, K or backend for tensorflow.python.keras.backend, and math_ops for tensorflow.python.ops.math_ops); they are not standalone modules.
Example 1: weighted

def weighted(y_true, y_pred, weights, mask=None):
  """Wrapper function.

  Arguments:
      y_true: `y_true` argument of `fn`.
      y_pred: `y_pred` argument of `fn`.
      weights: Weights tensor.
      mask: Mask tensor.

  Returns:
      Scalar tensor.
  """
  # score_array has ndim >= 2
  score_array = fn(y_true, y_pred)
  if mask is not None:
    # Cast the mask to floatX to avoid float64 upcasting in theano.
    mask = math_ops.cast(mask, K.floatx())
    # mask should have the same shape as score_array
    score_array *= mask
    # The loss per batch should be proportional
    # to the number of unmasked samples.
    score_array /= K.mean(mask)
  # Apply sample weighting.
  if weights is not None:
    # Reduce score_array to the same ndim as the weight array.
    ndim = K.ndim(score_array)
    weight_ndim = K.ndim(weights)
    score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
    score_array *= weights
    score_array /= K.mean(
        math_ops.cast(math_ops.not_equal(weights, 0), K.floatx()))
  return K.mean(score_array)
Example 2: call

def call(self, x):
  if len(x) != 2:
    raise Exception('input layers must be a list: mean and stddev')
  if len(x[0].shape) != 2 or len(x[1].shape) != 2:
    raise Exception('input shape is not a vector [batchSize, latentSize]')

  mean = x[0]
  stddev = x[1]  # consumed as a log-variance by the KL term below

  if self.reg == 'bvae':
    # kl divergence:
    latent_loss = -0.5 * K.mean(1 + stddev
                                - K.square(mean)
                                - K.exp(stddev), axis=-1)
    # use beta to force less usage of vector space:
    # also try to use <capacity> dimensions of the space:
    latent_loss = self.beta * K.abs(latent_loss - self.capacity / self.shape.as_list()[1])
    self.add_loss(latent_loss, x)
  elif self.reg == 'vae':
    # kl divergence:
    latent_loss = -0.5 * K.mean(1 + stddev
                                - K.square(mean)
                                - K.exp(stddev), axis=-1)
    self.add_loss(latent_loss, x)

  epsilon = K.random_normal(shape=self.shape,
                            mean=0., stddev=1.)
  if self.random:
    # 'reparameterization trick':
    return mean + K.exp(stddev) * epsilon
  else:  # do not perform random sampling; simply pass the mean through
    return mean + 0 * stddev  # Keras needs the *0 so the gradient is not None
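This call belongs to a custom VAE sampling layer; self.reg, self.beta, self.capacity, self.shape, and self.random are attributes set in its constructor. The KL term is the standard closed-form divergence between N(mu, sigma^2) and N(0, 1) with the second input read as log sigma^2, averaged (rather than summed) over latent dimensions. A quick numeric check of that formula in plain NumPy:

import numpy as np

# Closed-form KL(N(mu, sigma^2) || N(0, 1)) with logvar = log(sigma^2),
# averaged over latent dimensions as in the layer above.
mu = np.array([0.0, 0.5])
logvar = np.array([0.0, 0.0])  # sigma = 1 in both dimensions
kl = -0.5 * np.mean(1 + logvar - np.square(mu) - np.exp(logvar), axis=-1)
print(kl)  # 0.0625: the nonzero-mean dimension contributes 0.5*mu^2 = 0.125,
           # averaged over the 2 dimensions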
Example 3: weighted

def weighted(y_true, y_pred, weights, mask=None):
  """Wrapper function.

  Arguments:
      y_true: `y_true` argument of `fn`.
      y_pred: `y_pred` argument of `fn`.
      weights: Weights tensor.
      mask: Mask tensor.

  Returns:
      Scalar tensor.
  """
  # score_array has ndim >= 2
  score_array = fn(y_true, y_pred)
  if mask is not None:
    mask = math_ops.cast(mask, y_pred.dtype)
    # Update weights with mask.
    if weights is None:
      weights = mask
    else:
      # Update dimensions of weights to match with mask if possible.
      mask, _, weights = metrics_module.squeeze_or_expand_dimensions(
          mask, None, weights)
      try:
        # Broadcast weights if possible.
        weights = weights_broadcast_ops.broadcast_weights(weights, mask)
        weights *= mask
      except ValueError:
        score_array *= mask
        score_array /= K.mean(mask)
        # TODO(psv): Handle case when mask and weight shapes are not
        # compatible.

  # Apply sample weighting.
  if weights is not None:
    # Update dimensions of weights to match with values if possible.
    score_array, _, weights = metrics_module.squeeze_or_expand_dimensions(
        score_array, None, weights)
    try:
      # Broadcast weights if possible.
      weights = weights_broadcast_ops.broadcast_weights(weights, score_array)
    except ValueError:
      # Reduce values to same ndim as weight array.
      ndim = K.ndim(score_array)
      weight_ndim = K.ndim(weights)
      score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))

    score_array = math_ops.multiply(score_array, weights)
    score_array = math_ops.reduce_sum(score_array)
    weights = math_ops.reduce_sum(weights)
    score_array = metrics_module.safe_div(score_array, weights)
  return K.mean(score_array)
Example 4: correlation_coefficient_loss

def correlation_coefficient_loss(y_true, y_pred):
  x = y_true
  y = y_pred
  mx = K.mean(x)
  my = K.mean(y)
  xm, ym = x - mx, y - my
  r_num = K.sum(tf.multiply(xm, ym))
  r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
  r = r_num / r_den
  r = K.maximum(K.minimum(r, 1.0), -1.0)
  return 1 - K.square(r)
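Since the loss is 1 - r^2, it is minimized by perfect linear correlation in either direction (r = 1 and r = -1 both give zero loss) and maximized by uncorrelated predictions. A quick sanity check, assuming the definition above is in scope:

from tensorflow.python.keras import backend as K

y_true = K.constant([1.0, 2.0, 3.0, 4.0])
perfectly_correlated = K.constant([2.0, 4.0, 6.0, 8.0])
uncorrelated = K.constant([1.0, -1.0, -1.0, 1.0])

print(K.eval(correlation_coefficient_loss(y_true, perfectly_correlated)))  # ~0.0
print(K.eval(correlation_coefficient_loss(y_true, uncorrelated)))          # ~1.0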
Example 5: logloss

def logloss(y_true, y_pred):
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  losses = math_ops.multiply(y_true, math_ops.log(y_pred + K.epsilon()))
  losses += math_ops.multiply((1 - y_true),
                              math_ops.log(1 - y_pred + K.epsilon()))
  return K.mean(-losses, axis=-1)
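This is elementwise binary cross-entropy written out by hand, with K.epsilon() added inside the logs for numerical stability. On probability inputs it should roughly match the built-in K.binary_crossentropy (which clips rather than adding epsilon). A quick comparison, assuming the definition above plus standard imports:

from tensorflow.python.keras import backend as K

y_true = K.constant([[1.0, 0.0, 1.0]])
y_pred = K.constant([[0.9, 0.1, 0.8]])

print(K.eval(logloss(y_true, y_pred)))
# The built-in clips probabilities instead of adding epsilon, so the two
# values agree to several decimal places:
print(K.eval(K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)))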
Example 6: _eager_metrics_fn

def _eager_metrics_fn(model,
                      outputs,
                      targets,
                      sample_weights=None,
                      masks=None,
                      return_stateful_result=True):
  """Calculates the metrics for each output of the given model.

  Arguments:
      model: The model on which metrics are being calculated.
      outputs: The outputs of the given model.
      targets: The predictions or targets of the given model.
      sample_weights: Optional list of sample weights for each output.
      masks: Optional list of masks for each output.
      return_stateful_result: Boolean, indicates whether the stateful
        (aggregated)/stateless metric result should be returned.

  Returns:
      Returns the metric results for each output of the model.
  """
  outputs = nest.flatten(outputs)
  targets = nest.flatten(targets)
  # TODO(psv): Consider supporting skip target indices in eager mode?
  metric_results = model._handle_metrics(
      outputs,
      targets=targets,
      sample_weights=sample_weights,
      masks=masks,
      return_stateful_result=return_stateful_result)
  return [backend.mean(t) for t in metric_results]
Example 7: compute_weighted_loss

def compute_weighted_loss(losses,
                          sample_weight=None,
                          reduction=ReductionV2.SUM_OVER_BATCH_SIZE,
                          name=None):
  """Computes the weighted loss.

  Args:
      losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
      sample_weight: Optional `Tensor` whose rank is either 0, or the same
        rank as `losses`, or be broadcastable to `losses`.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `SUM_OVER_BATCH_SIZE`.
      name: Optional name for the op.

  Raises:
      ValueError: If the shape of `sample_weight` is not compatible with
        `losses`.

  Returns:
      Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
      `NONE`, this has the same shape as `losses`; otherwise, it is scalar.
  """
  ReductionV2.validate(reduction)
  # If this function is called directly, then we just default 'AUTO' to
  # 'SUM_OVER_BATCH_SIZE'. Eg. Canned estimator use cases.
  if reduction == ReductionV2.AUTO:
    reduction = ReductionV2.SUM_OVER_BATCH_SIZE
  if sample_weight is None:
    sample_weight = 1.0
  with K.name_scope(name or 'weighted_loss'):
    # Save the `reduction` argument for loss normalization when distributing
    # to multiple replicas. Used only for estimator + v1 optimizer flow.
    ops.get_default_graph()._last_loss_reduction = reduction  # pylint: disable=protected-access

    # Update dimensions of `sample_weight` to match with `losses` if possible.
    losses, _, sample_weight = squeeze_or_expand_dimensions(
        losses, None, sample_weight)
    losses = ops.convert_to_tensor(losses)
    input_dtype = losses.dtype
    losses = math_ops.cast(losses, dtypes.float32)
    sample_weight = math_ops.cast(sample_weight, dtypes.float32)

    try:
      # Broadcast weights if possible.
      sample_weight = weights_broadcast_ops.broadcast_weights(
          sample_weight, losses)
    except ValueError:
      # Reduce values to same ndim as weight array.
      ndim = K.ndim(losses)
      weight_ndim = K.ndim(sample_weight)
      losses = K.mean(losses, axis=list(range(weight_ndim, ndim)))

    sample_weight.shape.assert_is_compatible_with(losses.shape)
    weighted_losses = math_ops.multiply(losses, sample_weight)
    # Apply reduction function to the individual weighted losses.
    loss = reduce_weighted_loss(weighted_losses, reduction)
    # Convert the result back to the input type.
    loss = math_ops.cast(loss, input_dtype)
    return loss
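The key detail is the default SUM_OVER_BATCH_SIZE reduction: the weighted sum is divided by the total number of loss elements, not by the sum of the weights. A small sketch of that behavior, assuming this function is in scope (in recent TF versions a helper like it lives in tensorflow.python.keras.utils.losses_utils; module paths vary by version):

from tensorflow.python.keras import backend as K

losses = K.constant([1.0, 2.0, 3.0, 4.0])
weights = K.constant([1.0, 1.0, 0.0, 0.0])  # zero out the last two samples

# SUM_OVER_BATCH_SIZE divides by the number of elements (4), not by the
# sum of the weights (2): (1 + 2 + 0 + 0) / 4 = 0.75.
print(K.eval(compute_weighted_loss(losses, sample_weight=weights)))  # ~0.75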
Example 8: binary_crossentropy

def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
  # Without these conversions (present in the fuller variant in Example 11),
  # smart_cond rejects a plain Python float for `label_smoothing`.
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())

  def _smooth_labels():
    return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing

  y_true = smart_cond.smart_cond(label_smoothing,
                                 _smooth_labels, lambda: y_true)
  return K.mean(
      K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
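Label smoothing interpolates hard targets toward 0.5; with label_smoothing = 0.1, a 1 becomes 0.95 and a 0 becomes 0.05, so the loss stops rewarding fully saturated predictions. The transform itself, checked in plain NumPy:

import numpy as np

label_smoothing = 0.1
y_true = np.array([1.0, 0.0])
smoothed = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
print(smoothed)  # [0.95 0.05]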
Example 9: weighted

def weighted(y_true, y_pred, weights, mask=None):
  """Wrapper function.

  Arguments:
      y_true: `y_true` argument of `fn`.
      y_pred: `y_pred` argument of `fn`.
      weights: Weights tensor.
      mask: Mask tensor.

  Returns:
      Scalar tensor.
  """
  # score_array has ndim >= 2
  score_array = fn(y_true, y_pred)
  if mask is not None:
    mask = math_ops.cast(mask, y_pred.dtype)
    # Update weights with mask.
    if weights is None:
      weights = mask
    else:
      # Update dimensions of weights to match with mask if possible.
      mask, _, weights = squeeze_or_expand_dimensions(mask, None, weights)
      weights *= mask

  # Apply sample weighting.
  if weights is not None:
    # Update dimensions of weights to match with values if possible.
    score_array, _, weights = squeeze_or_expand_dimensions(
        score_array, None, weights)
    try:
      # Broadcast weights if possible.
      weights = weights_broadcast_ops.broadcast_weights(weights, score_array)
    except ValueError:
      # Reduce values to same ndim as weight array.
      ndim = K.ndim(score_array)
      weight_ndim = K.ndim(weights)
      score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))

    score_array = math_ops.multiply(score_array, weights)
    score_array = math_ops.reduce_sum(score_array)
    weights = math_ops.reduce_sum(weights)
    score_array = math_ops.div_no_nan(score_array, weights)
  return K.mean(score_array)
Example 10: compute_weighted_loss

def compute_weighted_loss(losses,
                          sample_weight=None,
                          reduction=losses_impl.ReductionV2.SUM_OVER_BATCH_SIZE,
                          name=None):
  """Computes the weighted loss.

  Args:
      losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
      sample_weight: Optional `Tensor` whose rank is either 0, or the same
        rank as `losses`, or be broadcastable to `losses`.
      reduction: Type of `tf.losses.Reduction` to apply to loss. Default value
        is `SUM_OVER_BATCH_SIZE`.
      name: Optional name for the op.

  Raises:
      ValueError: If the shape of `sample_weight` is not compatible with
        `losses`.

  Returns:
      Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
      `NONE`, this has the same shape as `losses`; otherwise, it is scalar.
  """
  losses_impl.ReductionV2.validate(reduction)
  if sample_weight is None:
    sample_weight = 1.0
  with ops.name_scope(name, 'weighted_loss', (losses, sample_weight)):
    # Save the `reduction` argument for loss normalization when distributing
    # to multiple replicas.
    # TODO(josh11b): Associate it with the returned op for more precision.
    ops.get_default_graph()._last_loss_reduction = reduction  # pylint: disable=protected-access

    # Update dimensions of `sample_weight` to match with `losses` if possible.
    losses, _, sample_weight = squeeze_or_expand_dimensions(
        losses, None, sample_weight)
    losses = ops.convert_to_tensor(losses)
    input_dtype = losses.dtype
    losses = math_ops.to_float(losses)
    sample_weight = math_ops.to_float(sample_weight)

    try:
      # Broadcast weights if possible.
      sample_weight = weights_broadcast_ops.broadcast_weights(
          sample_weight, losses)
    except ValueError:
      # Reduce values to same ndim as weight array.
      ndim = K.ndim(losses)
      weight_ndim = K.ndim(sample_weight)
      losses = K.mean(losses, axis=list(range(weight_ndim, ndim)))

    sample_weight.get_shape().assert_is_compatible_with(losses.get_shape())
    weighted_losses = math_ops.multiply(losses, sample_weight)
    # Apply reduction function to the individual weighted losses.
    loss = _reduce_weighted_loss(weighted_losses, reduction)
    # Convert the result back to the input type.
    loss = math_ops.cast(loss, input_dtype)
    return loss
Example 11: binary_crossentropy

def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):  # pylint: disable=missing-docstring
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())

  def _smooth_labels():
    return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing

  y_true = smart_cond.smart_cond(label_smoothing,
                                 _smooth_labels, lambda: y_true)
  return K.mean(
      K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
Example 12: call

def call(self, inputs, mask=None):
  steps_axis = 1 if self.data_format == 'channels_last' else 2
  if mask is not None:
    mask = math_ops.cast(mask, backend.floatx())
    input_shape = inputs.shape.as_list()
    broadcast_shape = [-1, input_shape[steps_axis], 1]
    mask = array_ops.reshape(mask, broadcast_shape)
    inputs *= mask
    return backend.sum(inputs, axis=steps_axis) / math_ops.reduce_sum(
        mask, axis=steps_axis)
  else:
    return backend.mean(inputs, axis=steps_axis)
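This is masked global average pooling: masked timesteps are zeroed out and the sum is divided by the number of unmasked steps instead of the full sequence length. A usage sketch, assuming a TF 2.x GlobalAveragePooling1D whose mask support is implemented by this call (as when fed from a Masking layer):

import numpy as np
import tensorflow as tf

# The all-zero third timestep is masked out and excluded from the average.
x = np.array([[[1.0], [3.0], [0.0]]], dtype=np.float32)
model = tf.keras.Sequential([
    tf.keras.layers.Masking(mask_value=0.0, input_shape=(3, 1)),
    tf.keras.layers.GlobalAveragePooling1D(),
])
print(model.predict(x))  # [[2.]] -- (1 + 3) / 2, not (1 + 3 + 0) / 3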
Example 13: compute_weighted_loss

def compute_weighted_loss(losses,
                          sample_weight=None,
                          reduction=ReductionV2.SUM_OVER_BATCH_SIZE,
                          name=None):
  """Computes the weighted loss.

  Args:
      losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
      sample_weight: Optional `Tensor` whose rank is either 0, or the same
        rank as `losses`, or be broadcastable to `losses`.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `SUM_OVER_BATCH_SIZE`.
      name: Optional name for the op.

  Raises:
      ValueError: If the shape of `sample_weight` is not compatible with
        `losses`.

  Returns:
      Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
      `NONE`, this has the same shape as `losses`; otherwise, it is scalar.
  """
  ReductionV2.validate(reduction)
  if sample_weight is None:
    sample_weight = 1.0
  with ops.name_scope(name, 'weighted_loss', (losses, sample_weight)):
    # Update dimensions of `sample_weight` to match with `losses` if possible.
    losses, _, sample_weight = squeeze_or_expand_dimensions(
        losses, None, sample_weight)
    losses = ops.convert_to_tensor(losses)
    input_dtype = losses.dtype
    losses = math_ops.cast(losses, dtypes.float32)
    sample_weight = math_ops.cast(sample_weight, dtypes.float32)

    try:
      # Broadcast weights if possible.
      sample_weight = weights_broadcast_ops.broadcast_weights(
          sample_weight, losses)
    except ValueError:
      # Reduce values to same ndim as weight array.
      ndim = K.ndim(losses)
      weight_ndim = K.ndim(sample_weight)
      losses = K.mean(losses, axis=list(range(weight_ndim, ndim)))

    sample_weight.shape.assert_is_compatible_with(losses.shape)
    weighted_losses = math_ops.multiply(losses, sample_weight)
    # Apply reduction function to the individual weighted losses.
    loss = reduce_weighted_loss(weighted_losses, reduction)
    # Convert the result back to the input type.
    loss = math_ops.cast(loss, input_dtype)
    return loss
Example 14: hinge

def hinge(y_true, y_pred):
  """Computes the hinge loss between `y_true` and `y_pred`.

  Args:
      y_true: The ground truth values. `y_true` values are expected to be
        -1 or 1. If binary (0 or 1) labels are provided, we will convert
        them to -1 or 1.
      y_pred: The predicted values.

  Returns:
      Tensor with one scalar loss entry per sample.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  y_true = _maybe_convert_labels(y_true)
  return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
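Here _maybe_convert_labels maps binary {0, 1} labels to {-1, 1} before the margin is applied. A worked check, assuming the public tf.keras.losses.hinge wraps this implementation:

import tensorflow as tf

y_true = tf.constant([[0.0, 1.0]])  # binary labels; converted to [-1, 1]
y_pred = tf.constant([[0.6, 0.4]])
# max(1 - (-1)(0.6), 0) = 1.6 and max(1 - (1)(0.4), 0) = 0.6; mean = 1.1.
print(tf.keras.losses.hinge(y_true, y_pred))  # ~[1.1]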
Example 15: _eager_metrics_fn

def _eager_metrics_fn(model, outputs, targets, sample_weights=None, masks=None):
  """Calculates the metrics for each output of the given model.

  Arguments:
      model: The model on which metrics are being calculated.
      outputs: The outputs of the given model.
      targets: The predictions or targets of the given model.
      sample_weights: Optional list of sample weights for each output.
      masks: Optional list of masks for each output.

  Returns:
      Returns the metric results for each output of the model.
  """
  outputs = generic_utils.to_list(outputs)
  targets = generic_utils.to_list(targets)
  # TODO(psv): Consider supporting skip target indices in eager mode?
  metric_results = model._handle_metrics(
      outputs, targets=targets, sample_weights=sample_weights, masks=masks)
  return [backend.mean(t) for t in metric_results]