This page collects typical usage examples of the Python method tensorflow.python.ops.math_ops.squared_difference. If you are unsure what math_ops.squared_difference does, how to call it, or where to find examples, the curated code samples below may help. You can also browse the containing module, tensorflow.python.ops.math_ops, for related usage.
The 12 code examples of math_ops.squared_difference below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
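As a quick orientation before the examples: squared_difference(x, y) computes the elementwise value (x - y)**2 under the usual broadcasting rules. The short NumPy sketch below is an illustration written for this page, not drawn from any of the examples:

import numpy as np

x = np.array([[1., 2., 3.], [4., 5., 6.]])
y = np.array([-3., -2., -1.])    # broadcasts across the rows of x

expected = (x - y) ** 2          # what math_ops.squared_difference(x, y) returns
print(expected)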
Example 1: setUp
# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import squared_difference [as alias]
def setUp(self):
  super(FloatBinaryOpsTest, self).setUp()
  self.ops = [
      ('igamma', None, math_ops.igamma, core.igamma),
      ('igammac', None, math_ops.igammac, core.igammac),
      ('zeta', None, math_ops.zeta, core.zeta),
      ('polygamma', None, math_ops.polygamma, core.polygamma),
      ('maximum', None, math_ops.maximum, core.maximum),
      ('minimum', None, math_ops.minimum, core.minimum),
      ('squared_difference', None, math_ops.squared_difference,
       core.squared_difference),
  ]
  total_size = np.prod([v.size for v in self.original_lt.axes.values()])
  test_lt = core.LabeledTensor(
      math_ops.cast(self.original_lt, dtypes.float32) / total_size,
      self.original_lt.axes)
  self.test_lt_1 = test_lt
  self.test_lt_2 = 1.0 - test_lt
  self.test_lt_1_broadcast = self.test_lt_1.tensor
  self.test_lt_2_broadcast = self.test_lt_2.tensor
  self.broadcast_axes = self.test_lt_1.axes
Example 2: mean_squared_error
# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import squared_difference [as alias]
def mean_squared_error(predictions, labels=None, weights=1.0, scope=None):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`,
      or if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "mean_squared_error",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.cast(predictions, dtypes.float32)
    labels = math_ops.cast(labels, dtypes.float32)
    losses = math_ops.squared_difference(predictions, labels)
    return compute_weighted_loss(losses, weights, scope=scope)
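To make the weighting rule in the docstring concrete, here is a rough NumPy sketch of the [batch_size]-weights case. It mimics a SUM_BY_NONZERO_WEIGHTS-style reduction; the exact arithmetic inside compute_weighted_loss may differ in detail:

import numpy as np

predictions = np.array([[0.5, 1.0], [2.0, 2.0]])
labels = np.array([[0.0, 1.0], [1.0, 1.0]])
weights = np.array([1.0, 0.5])        # one coefficient per sample

losses = (predictions - labels) ** 2  # elementwise squared_difference
weighted = losses * weights[:, None]  # rescale each sample's loss
num_present = np.count_nonzero(np.broadcast_to(weights[:, None], losses.shape))
print(weighted.sum() / num_present)   # mean over elements with nonzero weight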
Example 3: contrastive_loss
# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import squared_difference [as alias]
def contrastive_loss(labels, embeddings_anchor, embeddings_positive,
                     margin=1.0):
  """Computes the contrastive loss.

  This loss encourages the embeddings of samples with the same label to be
  close to each other, and the embeddings of samples with different labels
  to be at least `margin` apart.

  See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

  Args:
    labels: 1-D tf.int32 `Tensor` with shape [batch_size] of binary labels
      indicating a positive vs. negative pair.
    embeddings_anchor: 2-D float `Tensor` of embedding vectors for the anchor
      images. Embeddings should be l2 normalized.
    embeddings_positive: 2-D float `Tensor` of embedding vectors for the
      positive images. Embeddings should be l2 normalized.
    margin: margin term in the loss definition.

  Returns:
    contrastive_loss: tf.float32 scalar.
  """
  # Get per-pair distances.
  distances = math_ops.sqrt(
      math_ops.reduce_sum(
          math_ops.squared_difference(embeddings_anchor, embeddings_positive),
          1))

  # Add contrastive loss for the siamese network.
  # label here is {0, 1} for a negative or positive pair, respectively.
  return math_ops.reduce_mean(
      math_ops.cast(labels, distances.dtype) * math_ops.square(distances) +
      (1. - math_ops.cast(labels, distances.dtype)) *
      math_ops.square(math_ops.maximum(margin - distances, 0.)),
      name='contrastive_loss')
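Restated compactly: for the distance d between an anchor/positive pair, a positive pair (label 1) contributes d**2 to the loss and a negative pair (label 0) contributes max(margin - d, 0)**2. A small NumPy illustration on made-up l2-normalized embeddings:

import numpy as np

labels = np.array([1.0, 0.0])                  # one positive pair, one negative pair
anchor = np.array([[1.0, 0.0], [0.0, 1.0]])
positive = np.array([[0.6, 0.8], [1.0, 0.0]])
margin = 1.0

d = np.sqrt(((anchor - positive) ** 2).sum(axis=1))   # per-pair distances
loss = labels * d ** 2 + (1 - labels) * np.maximum(margin - d, 0.0) ** 2
print(loss.mean())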
Example 4: testSquaredDifference
# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import squared_difference [as alias]
def testSquaredDifference(self):
  for dtype in [np.int32, np.float16]:
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
    y = np.array([-3, -2, -1], dtype=dtype)
    z = (x - y) * (x - y)
    with self.test_session(use_gpu=True):
      z_tf = math_ops.squared_difference(x, y).eval()
      self.assertAllClose(z, z_tf)
Example 5: _mean_squared_error
# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import squared_difference [as alias]
def _mean_squared_error(self, targets, outputs, mask):
  loss = math_ops.squared_difference(targets, outputs)
  # TODO: Make the division below safe against a zero-sum mask.
  mse = tf.reduce_sum(loss) / tf.reduce_sum(mask)
  return mse
Example 6: _mean_squared_error
# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import squared_difference [as alias]
def _mean_squared_error(targets, outputs, mask):
  loss = math_ops.squared_difference(targets, outputs)
  # TODO: Make the division below safe against a zero-sum mask.
  mse = tf.reduce_sum(loss) / tf.reduce_sum(mask)
  return mse
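Both variants above carry a TODO about dividing by a zero-sum mask. One hedged way to close that gap, assuming TensorFlow 1.13 or later, is tf.math.divide_no_nan, which returns 0 when the denominator is 0; the function name below is hypothetical:

import tensorflow as tf

def _safe_mean_squared_error(targets, outputs, mask):
  loss = tf.math.squared_difference(targets, outputs)
  # divide_no_nan yields 0.0 instead of NaN when the mask sums to zero.
  return tf.math.divide_no_nan(tf.reduce_sum(loss), tf.reduce_sum(mask))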
Example 7: _test_squared_difference
# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import squared_difference [as alias]
def _test_squared_difference(data):
  """One iteration of squared_difference."""
  return _test_elemwise(math_ops.squared_difference, data)
Example 8: sufficient_statistics
# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import squared_difference [as alias]
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one-pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected, or None if `shift` is
      None.
  """
  axes = list(set(axes))
  with ops.name_scope(name, "sufficient_statistics", [x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if all(x_shape[d].value is not None for d in axes):
      counts = 1
      for d in axes:
        counts *= x_shape[d].value
      counts = constant_op.constant(counts, dtype=x.dtype)
    else:  # shape needs to be inferred at runtime.
      x_dims = array_ops.gather(
          math_ops.cast(array_ops.shape(x), x.dtype), axes)
      counts = math_ops.reduce_prod(x_dims, name="count")
    if shift is not None:
      shift = ops.convert_to_tensor(shift, name="shift")
      m_ss = math_ops.subtract(x, shift)
      v_ss = math_ops.squared_difference(x, shift)
    else:  # no shift.
      m_ss = x
      v_ss = math_ops.square(x)
    m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss")
    return counts, m_ss, v_ss, shift
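The returned statistics recover the mean and variance via the shifted-data identities mean = m_ss / counts + shift and variance = v_ss / counts - (m_ss / counts)**2 (see the Wikipedia link in the docstring). A NumPy check on made-up data:

import numpy as np

x = np.array([10.0, 12.0, 11.0, 13.0])
shift = 11.0                     # any constant near the true mean
counts = x.size
m_ss = (x - shift).sum()         # shifted sum of elements
v_ss = ((x - shift) ** 2).sum()  # shifted sum of squares

mean = m_ss / counts + shift
variance = v_ss / counts - (m_ss / counts) ** 2
print(mean, variance)            # matches x.mean() and x.var()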
Example 9: moments
# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import squared_difference [as alias]
def moments(x, axes, shift=None, name=None, keep_dims=False):
  """Calculate the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and
  variance of a vector.

  Note: for numerical stability, when shift=None, the true mean is computed
  and used as the shift.

  When using these moments for batch normalization (see
  `tf.nn.batch_normalization`):

  * for so-called "global normalization", used with convolutional filters
    with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
  * for simple batch normalization pass `axes=[0]` (batch only).

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None`, in which case the true mean of the data
      is used as the shift. A shift close to the true mean provides the most
      numerically stable results.
    name: Name used to scope the operations that compute the moments.
    keep_dims: produce moments with the same dimensionality as the input.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "moments", [x, axes, shift]):
    # The dynamic range of fp16 is too limited to support the collection of
    # sufficient statistics. As a workaround we simply perform the operations
    # on 32-bit floats before converting the mean and variance back to fp16.
    y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x
    if shift is None:
      # Compute true mean while keeping the dims for proper broadcasting.
      shift = array_ops.stop_gradient(
          math_ops.reduce_mean(y, axes, keep_dims=True))
    else:
      shift = math_ops.cast(shift, y.dtype)
    shifted_mean = math_ops.reduce_mean(
        math_ops.subtract(y, shift), axes, keep_dims=True, name="shifted_mean")
    variance = math_ops.subtract(
        math_ops.reduce_mean(
            math_ops.squared_difference(y, shift), axes, keep_dims=True),
        math_ops.square(shifted_mean),
        name="variance")
    mean = math_ops.add(shifted_mean, shift, name="mean")
    if not keep_dims:
      mean = array_ops.squeeze(mean, axes)
      variance = array_ops.squeeze(variance, axes)
    if x.dtype == dtypes.float16:
      return (math_ops.cast(mean, dtypes.float16),
              math_ops.cast(variance, dtypes.float16))
    else:
      return (mean, variance)
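The variance computation above relies on the identity Var(y) = E[(y - shift)**2] - (E[y - shift])**2, which holds for any constant shift; shifting by a value near the true mean merely reduces floating-point cancellation. A quick NumPy verification:

import numpy as np

y = np.random.RandomState(0).randn(1000).astype(np.float32)
shift = 0.3   # an arbitrary constant shift
lhs = y.var()
rhs = np.mean((y - shift) ** 2) - np.mean(y - shift) ** 2
print(np.allclose(lhs, rhs, atol=1e-6))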
Example 10: sufficient_statistics
# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import squared_difference [as alias]
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one-pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected, or None if `shift` is
      None.
  """
  axes = list(set(axes))
  with ops.name_scope(name, "sufficient_statistics", [x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if x_shape.is_fully_defined():
      counts = 1
      for d in axes:
        counts *= x_shape[d].value
      counts = constant_op.constant(counts, dtype=x.dtype)
    else:  # shape needs to be inferred at runtime.
      x_dims = array_ops.gather(array_ops.shape(x), axes)
      counts = math_ops.cast(
          math_ops.reduce_prod(x_dims), x.dtype, name="count")
    if shift is not None:
      shift = ops.convert_to_tensor(shift, name="shift")
      m_ss = math_ops.sub(x, shift)
      v_ss = math_ops.squared_difference(x, shift)
    else:  # no shift.
      m_ss = x
      v_ss = math_ops.square(x)
    m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss")
    return counts, m_ss, v_ss, shift
Example 11: mean_squared_error
# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import squared_difference [as alias]
def mean_squared_error(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses`
      dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to the loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is a scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`,
      or if the shape of `weights` is invalid, or if `labels` or `predictions`
      is None.
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if predictions is None:
    raise ValueError("predictions must not be None.")
  with ops.name_scope(scope, "mean_squared_error",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    losses = math_ops.squared_difference(predictions, labels)
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
Developer: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 47 | Source file: losses_impl.py
Example 12: moments
# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import squared_difference [as alias]
def moments(x, axes,
            shift=None,  # pylint: disable=unused-argument
            name=None, keep_dims=False):
  """Calculate the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and
  variance of a vector.

  Note: shift is currently not used; the true mean is computed and used.

  When using these moments for batch normalization (see
  `tf.nn.batch_normalization`):

  * for so-called "global normalization", used with convolutional filters
    with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
  * for simple batch normalization pass `axes=[0]` (batch only).

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: Not used in the current implementation.
    name: Name used to scope the operations that compute the moments.
    keep_dims: produce moments with the same dimensionality as the input.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "moments", [x, axes]):
    # The dynamic range of fp16 is too limited to support the collection of
    # sufficient statistics. As a workaround we simply perform the operations
    # on 32-bit floats before converting the mean and variance back to fp16.
    y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x
    # Compute true mean while keeping the dims for proper broadcasting.
    mean = math_ops.reduce_mean(y, axes, keep_dims=True, name="mean")
    # Sample variance, not unbiased variance.
    variance = math_ops.reduce_mean(
        math_ops.squared_difference(y, array_ops.stop_gradient(mean)),
        axes,
        keep_dims=True,
        name="variance")
    if not keep_dims:
      mean = array_ops.squeeze(mean, axes)
      variance = array_ops.squeeze(variance, axes)
    if x.dtype == dtypes.float16:
      return (math_ops.cast(mean, dtypes.float16),
              math_ops.cast(variance, dtypes.float16))
    else:
      return (mean, variance)
Developer: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 52 | Source file: nn_impl.py
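The "sample variance, not unbiased variance" comment in Example 12 means the reduction divides by N rather than N - 1. In NumPy terms:

import numpy as np

y = np.array([1.0, 2.0, 3.0, 4.0])
print(y.var())        # ddof=0: divides by N, matching moments()
print(y.var(ddof=1))  # ddof=1: the unbiased estimator, not what moments() computes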