This page collects typical usage examples of the Python method tensorflow.python.ops.standard_ops.reduce_sum. If you are unsure what standard_ops.reduce_sum does or how to use it, the curated code examples below may help. You can also explore the module it belongs to, tensorflow.python.ops.standard_ops.
The following 15 code examples of standard_ops.reduce_sum are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
Example 1: mean_squared_error
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def mean_squared_error(output, target, is_mean=False):
    """Return the TensorFlow expression of mean-square-error (MSE) of two distributions.
    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    is_mean : boolean. If True, use ``tf.reduce_mean`` to average the per-example loss; otherwise use ``tf.reduce_sum`` (default).
    References
    ------------
    - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`_
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:  # [batch_size, n_feature]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
        elif output.get_shape().ndims == 4:  # [batch_size, w, h, c]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]))
    return mse
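A minimal usage sketch, assuming TensorFlow 1.x; the placeholder shapes and feed values are illustrative only:

import numpy as np
import tensorflow as tf

outputs = tf.placeholder(tf.float32, [None, 10])  # predictions
targets = tf.placeholder(tf.float32, [None, 10])  # ground truth
mse = mean_squared_error(outputs, targets, is_mean=False)

with tf.Session() as sess:
    # squared difference is 1 for each of the 10 features, summed per example
    print(sess.run(mse, feed_dict={
        outputs: np.zeros((4, 10), dtype=np.float32),
        targets: np.ones((4, 10), dtype=np.float32),
    }))  # 10.0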
Example 2: normalized_mean_square_error
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def normalized_mean_square_error(output, target):
    """Return the TensorFlow expression of normalized mean-square-error (NMSE) of two distributions.
    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    """
    with tf.name_scope("normalized_mean_squared_error_loss"):
        if output.get_shape().ndims == 2:  # [batch_size, n_feature]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
        elif output.get_shape().ndims == 4:  # [batch_size, w, h, c]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1, 2, 3]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1, 2, 3]))
        nmse = tf.reduce_mean(nmse_a / nmse_b)
    return nmse
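A quick sanity-check sketch, assuming TensorFlow 1.x (feed values are illustrative):

import tensorflow as tf

outputs = tf.placeholder(tf.float32, [None, 3])
targets = tf.placeholder(tf.float32, [None, 3])
nmse = normalized_mean_square_error(outputs, targets)

with tf.Session() as sess:
    # per-example L2 error divided by the L2 norm of the target, then batch-averaged
    print(sess.run(nmse, feed_dict={
        outputs: [[0., 0., 0.]],
        targets: [[3., 4., 0.]],
    }))  # sqrt(9 + 16) / sqrt(9 + 16) = 1.0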
Example 3: cosine_similarity
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def cosine_similarity(v1, v2):
    """Cosine similarity in [-1, 1], `wiki <https://en.wikipedia.org/wiki/Cosine_similarity>`_.
    Parameters
    -----------
    v1, v2 : tensors of [batch_size, n_feature], with the same number of features.
    Returns
    -----------
    a tensor of [batch_size, ]
    """
    try:  # TF 1.0+
        cost = tf.reduce_sum(tf.multiply(v1, v2), 1) / (tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) * tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1)))
    except AttributeError:  # TF 0.12 uses tf.mul and reduction_indices
        cost = tf.reduce_sum(tf.mul(v1, v2), reduction_indices=1) / (tf.sqrt(tf.reduce_sum(tf.mul(v1, v1), reduction_indices=1)) * tf.sqrt(tf.reduce_sum(tf.mul(v2, v2), reduction_indices=1)))
    return cost
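A minimal usage sketch, assuming TensorFlow 1.x:

import tensorflow as tf

v1 = tf.placeholder(tf.float32, [None, 2])
v2 = tf.placeholder(tf.float32, [None, 2])
sim = cosine_similarity(v1, v2)

with tf.Session() as sess:
    print(sess.run(sim, feed_dict={
        v1: [[1., 0.], [1., 0.]],
        v2: [[1., 0.], [0., 1.]],
    }))  # [1.0, 0.0]: identical vectors vs. orthogonal vectors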
Example 4: testIndexedSlicesGradient
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def testIndexedSlicesGradient(self):
    with ops.Graph().as_default():
        embedding_matrix = tf.get_variable(
            "embedding_matrix", [5, 5],
            initializer=tf.random_normal_initializer())

        def Cond(it, _):
            return it < 5

        def Body(it, cost):
            embedding = embedding_ops.embedding_lookup(embedding_matrix + 0.0, [0])
            cost += tf.reduce_sum(embedding)
            return it + 1, cost

        _, cost = control_flow_ops.while_loop(
            Cond, Body, [tf.constant(0), tf.constant(0.0)])
        optimizer = momentum.MomentumOptimizer(0.1, 0.9)
        train_op = optimizer.minimize(cost)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            for _ in range(10):
                sess.run([train_op])
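What this test exercises, in a standalone sketch (assuming TensorFlow 1.x): the gradient of an embedding lookup with respect to the embedding matrix is a tf.IndexedSlices rather than a dense tensor, because only the looked-up rows receive gradient.

import tensorflow as tf

emb = tf.get_variable("emb_demo", [5, 5],
                      initializer=tf.random_normal_initializer())
loss = tf.reduce_sum(tf.nn.embedding_lookup(emb, [0]))
grad = tf.gradients(loss, [emb])[0]
print(type(grad))  # tf.IndexedSlices: sparse rows, not a dense [5, 5] gradient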
Example 5: testIndexedSlicesWithDynamicShapeGradientInWhileLoop
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def testIndexedSlicesWithDynamicShapeGradientInWhileLoop(self):
    for dtype in [dtypes.float32, dtypes.float64]:
        with self.test_session() as sess:
            inputs = tf.placeholder(dtype=dtype)
            initial_outputs = tf.TensorArray(dtype=dtype, dynamic_size=True,
                                             size=1)
            initial_i = tf.constant(0, dtype=dtypes.int32)

            def Cond(i, _):
                return i < tf.size(inputs)  # pylint: disable=cell-var-from-loop

            def Body(i, outputs):
                x = tf.gather(inputs, i)  # pylint: disable=cell-var-from-loop
                outputs = outputs.write(i, x)
                return i + 1, outputs

            _, outputs = tf.while_loop(Cond, Body, [initial_i, initial_outputs])
            outputs = tf.reduce_sum(outputs.pack())
            r = tf.gradients([outputs], [inputs])[0]
            grad_wr_inputs = ops.convert_to_tensor(r)
            o, grad = sess.run([outputs, grad_wr_inputs],
                               feed_dict={inputs: [1, 3, 2]})
            self.assertEqual(o, 6)
            self.assertAllEqual(grad, [1] * 3)
Example 6: l1_regularizer
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def l1_regularizer(scale, scope=None):
    """Returns a function that can be used to apply L1 regularization to weights.
    L1 regularization encourages sparsity.
    Args:
        scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
        scope: An optional scope name.
    Returns:
        A function with signature `l1(weights)` that applies L1 regularization.
    Raises:
        ValueError: If scale is negative or if scale is not a float.
    """
    if isinstance(scale, numbers.Integral):
        raise ValueError('scale cannot be an integer: %s' % scale)
    if isinstance(scale, numbers.Real):
        if scale < 0.:
            raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
                             scale)
        if scale == 0.:
            logging.info('Scale of 0 disables regularizer.')
            return lambda _: None

    def l1(weights, name=None):
        """Applies L1 regularization to weights."""
        with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
            my_scale = ops.convert_to_tensor(scale,
                                             dtype=weights.dtype.base_dtype,
                                             name='scale')
            return standard_ops.multiply(
                my_scale,
                standard_ops.reduce_sum(standard_ops.abs(weights)),
                name=name)

    return l1
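A minimal usage sketch, assuming TensorFlow 1.x and that the imports the snippet relies on (numbers, logging, ops, standard_ops) are in scope:

import tensorflow as tf

weights = tf.constant([[-1.0, 2.0], [0.5, -0.5]])
l1 = l1_regularizer(scale=0.01)
penalty = l1(weights)

with tf.Session() as sess:
    print(sess.run(penalty))  # 0.01 * (1 + 2 + 0.5 + 0.5) = 0.04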
Example 7: binary_cross_entropy
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def binary_cross_entropy(output, target, epsilon=1e-8, name='bce_loss'):
    """Computes binary cross entropy given `output`.
    For brevity, let `x = output`, `z = target`. The binary cross entropy loss is
        loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))
    Parameters
    ----------
    output : tensor of type `float32` or `float64`.
    target : tensor of the same type and shape as `output`.
    epsilon : float
        A small value added inside the logs to avoid log(0).
    name : string
        An optional name to attach to this layer.
    References
    -----------
    - `DRAW <https://github.com/ericjang/draw/blob/master/draw.py#L73>`_
    """
    # from tensorflow.python.framework import ops
    # with ops.op_scope([output, target], name, "bce_loss") as name:
    #     output = ops.convert_to_tensor(output, name="preds")
    #     target = ops.convert_to_tensor(targets, name="target")
    with tf.name_scope(name):
        return tf.reduce_mean(tf.reduce_sum(-(target * tf.log(output + epsilon) +
                              (1. - target) * tf.log(1. - output + epsilon)), axis=1))
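A quick sanity check, assuming TensorFlow 1.x (feed values are illustrative):

import tensorflow as tf

preds = tf.placeholder(tf.float32, [None, 2])
labels = tf.placeholder(tf.float32, [None, 2])
bce = binary_cross_entropy(preds, labels)

with tf.Session() as sess:
    print(sess.run(bce, feed_dict={
        preds: [[0.9, 0.1]],
        labels: [[1.0, 0.0]],
    }))  # -log(0.9) - log(0.9) ≈ 0.21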
Example 8: dice_coe
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def dice_coe(output, target, epsilon=1e-10):
    """Sørensen–Dice coefficient for comparing the similarity of two distributions,
    usually used for binary image segmentation, i.e. when the labels are binary.
    The coefficient is in [0, 1]; 1 means a perfect match.
    Parameters
    -----------
    output : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    epsilon : float
        A small value; the coefficient is clipped to [0, 1 - epsilon]. Pass 0 to disable clipping.
    Examples
    ---------
    >>> outputs = tl.act.pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_, epsilon=1e-5)
    References
    -----------
    - `wiki-dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`_
    """
    # inse = tf.reduce_sum(tf.mul(output, target))  # TF 0.12
    # l = tf.reduce_sum(tf.mul(output, output))
    # r = tf.reduce_sum(tf.mul(target, target))
    inse = tf.reduce_sum(output * target)
    l = tf.reduce_sum(output * output)
    r = tf.reduce_sum(target * target)
    dice = 2 * inse / (l + r)
    if epsilon == 0:
        return dice
    else:
        return tf.clip_by_value(dice, 0, 1.0 - epsilon)
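A minimal usage sketch, assuming TensorFlow 1.x; the dice loss used for training is simply one minus the coefficient:

import tensorflow as tf

outputs = tf.placeholder(tf.float32, [None, 4])
targets = tf.placeholder(tf.float32, [None, 4])
dice = dice_coe(outputs, targets)
dice_loss = 1 - dice  # typical training objective

with tf.Session() as sess:
    print(sess.run(dice, feed_dict={
        outputs: [[1., 1., 0., 0.]],
        targets: [[1., 0., 0., 0.]],
    }))  # 2 * 1 / (2 + 1) ≈ 0.667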
Example 9: dice_hard_coe
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def dice_hard_coe(output, target, epsilon=1e-10):
    """Non-differentiable Sørensen–Dice coefficient for comparing the similarity of two distributions,
    usually used for binary image segmentation, i.e. when the labels are binary.
    The coefficient is in [0, 1]; 1 means a perfect match.
    Parameters
    -----------
    output : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    epsilon : float
        A small value; the coefficient is clipped to [0, 1 - epsilon]. Pass 0 to disable clipping.
    Examples
    ---------
    >>> outputs = pixel_wise_softmax(network.outputs)
    >>> dice = dice_hard_coe(outputs, y_, epsilon=1e-5)
    References
    -----------
    - `wiki-dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`_
    """
    output = tf.cast(output > 0.5, dtype=tf.float32)
    target = tf.cast(target > 0.5, dtype=tf.float32)
    inse = tf.reduce_sum(output * target)
    l = tf.reduce_sum(output * output)
    r = tf.reduce_sum(target * target)
    dice = 2 * inse / (l + r)
    if epsilon == 0:
        return dice
    else:
        return tf.clip_by_value(dice, 0, 1.0 - epsilon)
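The hard variant binarizes both inputs before computing the coefficient, so it scores decisions rather than confidences. A sketch, assuming TensorFlow 1.x:

import tensorflow as tf

outputs = tf.placeholder(tf.float32, [None, 4])
targets = tf.placeholder(tf.float32, [None, 4])
hard_dice = dice_hard_coe(outputs, targets)

with tf.Session() as sess:
    # 0.6 and 0.9 both binarize to 1, so they count the same
    print(sess.run(hard_dice, feed_dict={
        outputs: [[0.6, 0.9, 0.1, 0.4]],
        targets: [[1.0, 1.0, 0.0, 0.0]],
    }))  # 2 * 2 / (2 + 2) = 1.0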
Example 10: cross_entropy_seq
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def cross_entropy_seq(logits, target_seqs, batch_size=None):
    """Returns the expression of cross-entropy of two sequences, implementing
    softmax internally. Normally used for fixed-length RNN outputs.
    Parameters
    ----------
    logits : Tensorflow variable
        2D tensor, ``network.outputs``, [batch_size*n_steps (n_examples), number of output units]
    target_seqs : Tensorflow variable
        2D tensor [batch_size, n_steps]; if the number of steps is dynamic, use ``cross_entropy_seq_with_mask`` instead.
    batch_size : None or int.
        If not None, the returned cost is divided by batch_size.
    Examples
    --------
    >>> # see the PTB tutorial for more details
    >>> input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
    >>> targets = tf.placeholder(tf.int32, [batch_size, num_steps])
    >>> cost = tl.cost.cross_entropy_seq(network.outputs, targets)
    """
    try:  # TF 1.0+
        sequence_loss_by_example_fn = tf.contrib.legacy_seq2seq.sequence_loss_by_example
    except AttributeError:  # TF 0.12
        sequence_loss_by_example_fn = tf.nn.seq2seq.sequence_loss_by_example
    loss = sequence_loss_by_example_fn(
        [logits],
        [tf.reshape(target_seqs, [-1])],
        [tf.ones_like(tf.reshape(target_seqs, [-1]), dtype=tf.float32)])
    cost = tf.reduce_sum(loss)
    if batch_size is not None:
        cost = cost / batch_size
    return cost
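A minimal usage sketch, assuming TensorFlow 1.x with tf.contrib available; uniform logits make the expected value easy to verify:

import numpy as np
import tensorflow as tf

batch_size, num_steps, vocab = 2, 3, 5
logits = tf.placeholder(tf.float32, [batch_size * num_steps, vocab])
targets = tf.placeholder(tf.int32, [batch_size, num_steps])
cost = cross_entropy_seq(logits, targets, batch_size=batch_size)

with tf.Session() as sess:
    print(sess.run(cost, feed_dict={
        logits: np.zeros((6, 5), dtype=np.float32),  # uniform distribution over 5 classes
        targets: np.zeros((2, 3), dtype=np.int32),
    }))  # 6 * log(5) / 2 ≈ 4.83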
Example 11: cross_entropy_seq_with_mask
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None):
    """Returns the expression of cross-entropy of two sequences, implementing
    softmax internally. Normally used for dynamic RNN outputs.
    Parameters
    -----------
    logits : network identity outputs
        2D tensor, ``network.outputs``, [batch_size, number of output units].
    target_seqs : int tensor, e.g. word IDs.
        [batch_size, ?]
    input_mask : the mask to compute the loss
        The same size as target_seqs, normally 0 and 1.
    return_details : boolean
        - If False (default), only returns the loss.
        - If True, returns the loss, losses, weights and targets (reshaped to one vector).
    Examples
    --------
    - see the Image Captioning example.
    """
    targets = tf.reshape(target_seqs, [-1])  # to one vector
    weights = tf.to_float(tf.reshape(input_mask, [-1]))  # to one vector, like targets
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights
    try:  # TF 1.0+
        loss = tf.divide(tf.reduce_sum(losses),   # sum of masked per-step losses
                         tf.reduce_sum(weights),  # normalized by the number of unmasked steps
                         name="seq_loss_with_mask")
    except AttributeError:  # TF 0.12
        loss = tf.div(tf.reduce_sum(losses),
                      tf.reduce_sum(weights),
                      name="seq_loss_with_mask")
    if return_details:
        return loss, losses, weights, targets
    else:
        return loss
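A sketch of the masking behavior, assuming TensorFlow 1.x; the padded step contributes to neither the numerator nor the denominator:

import numpy as np
import tensorflow as tf

logits = tf.placeholder(tf.float32, [None, 5])        # [batch*steps, vocab]
target_seqs = tf.placeholder(tf.int32, [None, None])  # [batch, steps]
input_mask = tf.placeholder(tf.int32, [None, None])   # 1 = real token, 0 = padding
loss = cross_entropy_seq_with_mask(logits, target_seqs, input_mask)

with tf.Session() as sess:
    print(sess.run(loss, feed_dict={
        logits: np.zeros((4, 5), dtype=np.float32),  # uniform logits
        target_seqs: [[1, 2], [3, 0]],
        input_mask: [[1, 1], [1, 0]],                # last step of second sequence is padding
    }))  # log(5) ≈ 1.609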
Example 12: testIndexedSlicesGradientInCondInWhileLoop
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def testIndexedSlicesGradientInCondInWhileLoop(self):
    with ops.Graph().as_default():
        embedding_matrix = tf.get_variable(
            "embedding_matrix", [5, 5],
            initializer=tf.random_normal_initializer())

        def Cond(it, _):
            return it < 5

        def Body(it, cost):
            embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
            cost = tf.cond(tf.equal(it, 3),
                           lambda: tf.square(cost),
                           lambda: cost + tf.reduce_sum(embedding))
            return it + 1, cost

        _, cost = control_flow_ops.while_loop(
            Cond, Body, [tf.constant(0), tf.constant(0.0)])
        dynamic_grads = tf.gradients(cost, [embedding_matrix])[0]
        dynamic_grads = tf.segment_sum(dynamic_grads.values,
                                       dynamic_grads.indices)
        embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
        static = tf.square(
            tf.reduce_sum(embedding) +
            tf.reduce_sum(embedding) +
            tf.reduce_sum(embedding)) + tf.reduce_sum(embedding)
        static_grads = tf.gradients(static, [embedding_matrix])[0]
        static_grads = tf.segment_sum(static_grads.values, static_grads.indices)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            self.assertAllEqual(*sess.run([static_grads, dynamic_grads]))
Example 13: testIndexedSlicesWithShapeGradientInWhileLoop
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def testIndexedSlicesWithShapeGradientInWhileLoop(self):
    for dtype in [dtypes.float32, dtypes.float64]:
        with self.test_session() as sess:
            num_steps = 9
            inputs = tf.placeholder(dtype=dtype, shape=[num_steps])
            initial_outputs = tf.TensorArray(dtype=dtype, size=num_steps)
            initial_i = tf.constant(0, dtype=dtypes.int32)

            def Cond(i, _):
                return i < num_steps  # pylint: disable=cell-var-from-loop

            def Body(i, outputs):
                x = tf.gather(inputs, i)  # pylint: disable=cell-var-from-loop
                outputs = outputs.write(i, x)
                return i + 1, outputs

            _, outputs = tf.while_loop(Cond, Body, [initial_i, initial_outputs])
            outputs = tf.reduce_sum(outputs.pack())
            r = tf.gradients([outputs], [inputs])[0]
            grad_wr_inputs = ops.convert_to_tensor(r)
            o, grad = sess.run([outputs, grad_wr_inputs],
                               feed_dict={inputs: [4, 6, 0, 7, 0, 0, 1, 2, 0]})
            self.assertEqual(o, 20)
            self.assertAllEqual(grad, [1] * num_steps)
Example 14: l1_regularizer
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def l1_regularizer(scale, scope=None):
    """Returns a function that can be used to apply L1 regularization to weights.
    L1 regularization encourages sparsity.
    Args:
        scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
        scope: An optional scope name.
    Returns:
        A function with signature `l1(weights)` that applies L1 regularization.
    Raises:
        ValueError: If scale is negative or if scale is not a float.
    """
    if isinstance(scale, numbers.Integral):
        raise ValueError('scale cannot be an integer: %s' % scale)
    if isinstance(scale, numbers.Real):
        if scale < 0.:
            raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
                             scale)
        if scale == 0.:
            logging.info('Scale of 0 disables regularizer.')
            return lambda _: None

    def l1(weights, name=None):
        """Applies L1 regularization to weights."""
        with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
            my_scale = ops.convert_to_tensor(scale,
                                             dtype=weights.dtype.base_dtype,
                                             name='scale')
            return standard_ops.mul(  # TF 0.x variant of Example 6; newer TF uses standard_ops.multiply
                my_scale,
                standard_ops.reduce_sum(standard_ops.abs(weights)),
                name=name)

    return l1
Example 15: iou_coe
# Required import: from tensorflow.python.ops import standard_ops [as alias]
# Or: from tensorflow.python.ops.standard_ops import reduce_sum [as alias]
def iou_coe(output, target, threshold=0.5, epsilon=1e-10):
    """Non-differentiable Intersection over Union (IoU), usually used for evaluating binary image segmentation.
    The coefficient is in [0, 1]; 1 means a perfect match.
    Parameters
    -----------
    output : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    threshold : float
        The threshold above which a value counts as true.
    epsilon : float
        A small value to avoid a zero denominator when both output and target are empty.
    Examples
    ---------
    >>> outputs = tl.act.pixel_wise_softmax(network.outputs)
    >>> iou = tl.cost.iou_coe(outputs[:,:,:,0], y_[:,:,:,0])
    Notes
    ------
    - IoU is not differentiable, so it cannot be used as a training loss; the dice coefficient is usually used for training and IoU for evaluation.
    """
    pre = tf.cast(output > threshold, dtype=tf.float32)
    truth = tf.cast(target > threshold, dtype=tf.float32)
    intersection = tf.reduce_sum(pre * truth)
    union = tf.reduce_sum(tf.cast((pre + truth) > threshold, dtype=tf.float32))
    return intersection / (union + epsilon)  # both terms are already scalars
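A minimal usage sketch, assuming TensorFlow 1.x:

import tensorflow as tf

outputs = tf.placeholder(tf.float32, [None, 4])
targets = tf.placeholder(tf.float32, [None, 4])
iou = iou_coe(outputs, targets, threshold=0.5)

with tf.Session() as sess:
    print(sess.run(iou, feed_dict={
        outputs: [[0.9, 0.8, 0.1, 0.2]],
        targets: [[1.0, 0.0, 0.0, 1.0]],
    }))  # intersection 1, union 3 -> ≈ 0.333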