This article collects typical usage examples of the Python method tensorflow.python.ops.math_ops.scalar_mul. If you are wondering what math_ops.scalar_mul does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples for the module that contains the method, tensorflow.python.ops.math_ops.
The following shows 7 code examples of math_ops.scalar_mul, sorted by popularity by default.
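Before the examples, here is a minimal sketch of what scalar_mul does, written against the public tf.math.scalar_mul alias (assuming TensorFlow 2.x with eager execution; the examples below call the internal math_ops module directly and some still use TF 1.x sessions):

import tensorflow as tf

# scalar_mul(scalar, x) multiplies every element of x by a 0-D scalar.
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y = tf.math.scalar_mul(2.0, x)
print(y.numpy())  # [[2. 4.] [6. 8.]]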
Example 1: _SquaredDifferenceGrad

# Required import: from tensorflow.python.ops import math_ops [as an alias]
# Or: from tensorflow.python.ops.math_ops import scalar_mul [as an alias]
def _SquaredDifferenceGrad(op, grad):
  """Returns the gradient for (x-y)^2."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  # .op works with Tensors or IndexedSlices
  with ops.control_dependencies([grad.op]):
    # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
    # a Tensor (not a number like 2.0), which causes it to convert to a Tensor.
    x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
  return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
          -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))

# Logical operations have no gradients.
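For context, _SquaredDifferenceGrad implements d/dx (x - y)^2 = 2 * (x - y) * grad (and the negated value for y), with reduce_sum/reshape undoing any broadcasting. A hedged sanity check of that formula using the public TF 2.x API (tf.GradientTape and tf.math.squared_difference) might look like the sketch below; it exercises TensorFlow's registered gradient rather than calling the function above directly:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([0.5])  # broadcast against x
with tf.GradientTape() as tape:
  tape.watch([x, y])
  loss = tf.reduce_sum(tf.math.squared_difference(x, y))
dx, dy = tape.gradient(loss, [x, y])
print(dx.numpy())  # 2 * (x - y)        -> [1. 3. 5.]
print(dy.numpy())  # -sum(2 * (x - y))  -> [-9.]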
Example 2: _SquaredDifferenceGrad

# Required import: from tensorflow.python.ops import math_ops [as an alias]
# Or: from tensorflow.python.ops.math_ops import scalar_mul [as an alias]
def _SquaredDifferenceGrad(op, grad):
  """Returns the gradient for (x-y)^2."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  with ops.control_dependencies([grad]):
    # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
    # a Tensor (not a number like 2.0), which causes it to convert to a Tensor.
    x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
  return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
          -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))

# Logical operations have no gradients.

Developer: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 20 | Source file: math_grad.py
Example 3: testAcceptsRefs

# Required import: from tensorflow.python.ops import math_ops [as an alias]
# Or: from tensorflow.python.ops.math_ops import scalar_mul [as an alias]
def testAcceptsRefs(self):
  var = variables.Variable(10)
  result = math_ops.scalar_mul(3, var)
  init = variables.global_variables_initializer()
  with self.test_session(use_gpu=True) as sess:
    sess.run(init)
    self.assertEqual(30, result.eval())
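For reference, a rough TF 2.x equivalent of this variable/ref test, using only the public API (tf.Variable and tf.math.scalar_mul, no sessions or initializers), could be:

import tensorflow as tf

var = tf.Variable(10)
result = tf.math.scalar_mul(3, var)  # variables are read like tensors
assert int(result) == 30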
Example 4: testAcceptsConstant

# Required import: from tensorflow.python.ops import math_ops [as an alias]
# Or: from tensorflow.python.ops.math_ops import scalar_mul [as an alias]
def testAcceptsConstant(self):
  const = constant_op.constant(10)
  result = math_ops.scalar_mul(3, const)
  with self.test_session(use_gpu=True):
    self.assertEqual(30, result.eval())
Example 5: testAcceptsTensor

# Required import: from tensorflow.python.ops import math_ops [as an alias]
# Or: from tensorflow.python.ops.math_ops import scalar_mul [as an alias]
def testAcceptsTensor(self):
  tensor = array_ops.ones([10, 10])
  result = math_ops.scalar_mul(3, tensor)
  expected = array_ops.ones([10, 10]) * 3
  with self.test_session(use_gpu=True):
    self.assertAllEqual(expected.eval(), result.eval())
Example 6: testAcceptsIndexedSlices

# Required import: from tensorflow.python.ops import math_ops [as an alias]
# Or: from tensorflow.python.ops.math_ops import scalar_mul [as an alias]
def testAcceptsIndexedSlices(self):
  values = constant_op.constant([2, 3, 5, 7, 0, -1], shape=[3, 2])
  indices = constant_op.constant([0, 2, 5])
  x = math_ops.scalar_mul(-3, ops.IndexedSlices(values, indices))
  with self.test_session(use_gpu=True):
    self.assertAllEqual(x.values.eval(), [[-6, -9], [-15, -21], [0, 3]])
    self.assertAllEqual(x.indices.eval(), [0, 2, 5])
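The same IndexedSlices behavior can be reproduced eagerly with the public API; a brief sketch (tf.IndexedSlices and tf.math.scalar_mul, assuming TF 2.x):

import tensorflow as tf

values = tf.constant([[2, 3], [5, 7], [0, -1]])
indices = tf.constant([0, 2, 5])
scaled = tf.math.scalar_mul(-3, tf.IndexedSlices(values, indices))
print(scaled.values.numpy())   # [[-6 -9] [-15 -21] [0 3]]
print(scaled.indices.numpy())  # [0 2 5]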
Example 7: clip_norm

# Required import: from tensorflow.python.ops import math_ops [as an alias]
# Or: from tensorflow.python.ops.math_ops import scalar_mul [as an alias]
def clip_norm(g, c, n):
  """Clip a tensor by norm.

  Arguments:
      g: gradient tensor to clip.
      c: clipping threshold.
      n: norm of gradient tensor.

  Returns:
      Clipped gradient tensor.
  """
  if c > 0:
    condition = n >= c
    then_expression = lambda: math_ops.scalar_mul(c / n, g)
    else_expression = lambda: g

    # saving the shape to avoid converting sparse tensor to dense
    if isinstance(g, ops.Tensor):
      g_shape = copy.copy(g.get_shape())
    elif isinstance(g, ops.IndexedSlices):
      g_shape = copy.copy(g.dense_shape)
    if condition.dtype != dtypes_module.bool:
      condition = math_ops.cast(condition, 'bool')
    g = control_flow_ops.cond(condition, then_expression, else_expression)
    if isinstance(g, ops.Tensor):
      g.set_shape(g_shape)
    elif isinstance(g, ops.IndexedSlices):
      g._dense_shape = g_shape  # pylint: disable=protected-access
  return g

Developer: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 31 | Source file: optimizers.py
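As a usage note, clip_norm rescales g so its norm equals c whenever the supplied norm n reaches or exceeds c, and leaves it untouched otherwise. A hedged eager sketch of the same idea for a dense tensor (the built-in tf.clip_by_norm is assumed to produce an equivalent result in this dense case):

import tensorflow as tf

g = tf.constant([3.0, 4.0])  # L2 norm is 5.0
c = 2.0
n = tf.norm(g)
clipped = tf.math.scalar_mul(c / n, g) if n >= c else g
print(clipped.numpy())                # [1.2 1.6], rescaled so the norm equals c
print(tf.clip_by_norm(g, c).numpy())  # the public helper gives the same values here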