This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.negative. If you have been wondering what exactly math_ops.negative does, how to call it, or what real-world uses look like, the curated code samples below should help.
Shown below are 15 code examples of the negative function, sorted by popularity by default. You can upvote the examples you find helpful or interesting; your votes help the system surface better Python code samples.
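Before the collected examples, a minimal self-contained sketch of the op itself may be useful (this snippet is our own illustration, not one of the examples below): negative(x) computes the elementwise negation -x.

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops

x = constant_op.constant([1.0, -2.0, 3.0])
y = math_ops.negative(x)  # Elementwise negation, equivalent to -x.
# Under eager execution (TF 2.x), y evaluates immediately to [-1.0, 2.0, -3.0];
# under graph mode (TF 1.x), it must be run inside a session.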
Example 1: testSideEffect
def testSideEffect(self):
  a = constant_op.constant(1)
  b = constant_op.constant(1)
  c = math_ops.add(a, b)
  with ops.control_dependencies([c]):
    d = constant_op.constant(42)
  n = math_ops.negative(c)

  shared = []

  def sub(t):
    shared.append(t)
    return t

  c = subscribe.subscribe(c,
                          lambda t: script_ops.py_func(sub, [t], [t.dtype]))

  with self.test_session() as sess:
    c_out = sess.run([c])
    n_out = sess.run([n])
    d_out = sess.run([d])

  self.assertEquals(n_out, [-2])
  self.assertEquals(c_out, [2])
  self.assertEquals(d_out, [42])
  self.assertEquals(shared, [2, 2, 2])
Example 2: testSideEffect
def testSideEffect(self):
  a = constant_op.constant(1)
  b = constant_op.constant(1)
  c = math_ops.add(a, b)
  with ops.control_dependencies([c]):
    d = constant_op.constant(42)
  n = math_ops.negative(c)

  shared = []

  def sub(t):
    shared.append(t)
    return t

  c0 = c
  self.assertTrue(c0.op in d.op.control_inputs)
  c = subscribe.subscribe(c,
                          lambda t: script_ops.py_func(sub, [t], [t.dtype]))
  # Verify that control dependencies are correctly moved to the subscription.
  self.assertFalse(c0.op in d.op.control_inputs)
  self.assertTrue(c.op in d.op.control_inputs)

  with self.cached_session() as sess:
    c_out = self.evaluate([c])
    n_out = self.evaluate([n])
    d_out = self.evaluate([d])

  self.assertEqual(n_out, [-2])
  self.assertEqual(c_out, [2])
  self.assertEqual(d_out, [42])
  self.assertEqual(shared, [2, 2, 2])
Example 3: testInitializerFunction
def testInitializerFunction(self):
  value = [[-42], [133.7]]
  shape = [2, 1]
  with self.test_session():
    initializer = lambda: constant_op.constant(value)

    v1 = variables.Variable(initializer, dtype=dtypes.float32)
    self.assertEqual(shape, v1.get_shape())
    self.assertEqual(shape, v1.shape)
    self.assertAllClose(value, v1.initial_value.eval())
    with self.assertRaises(errors_impl.FailedPreconditionError):
      v1.eval()

    v2 = variables.Variable(
        math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
    self.assertEqual(v1.get_shape(), v2.get_shape())
    self.assertEqual(v1.shape, v2.shape)
    self.assertAllClose(np.negative(value), v2.initial_value.eval())
    # Once v2.initial_value.eval() has been called, v1 has effectively been
    # initialized.
    self.assertAllClose(value, v1.eval())

    with self.assertRaises(errors_impl.FailedPreconditionError):
      v2.eval()
    variables.global_variables_initializer().run()
    self.assertAllClose(np.negative(value), v2.eval())
Example 4: setUp
def setUp(self):
  self.a = variables.VariableV1(2.0, name="a")
  self.b = variables.VariableV1(3.0, name="b")
  self.c = math_ops.multiply(self.a, self.b, name="c")  # Should be 6.0.
  self.d = math_ops.multiply(self.a, self.a, name="d")  # Should be 4.0.
  self.e = math_ops.multiply(self.d, self.c, name="e")  # Should be 24.0.
  self.f_y = constant_op.constant(0.30, name="f_y")
  self.f = math_ops.div(self.b, self.f_y, name="f")  # Should be 10.0.

  # The three nodes x, y and z form a graph with "cross-links": x and y are
  # both direct inputs to z, but x is also a direct input to y.
  self.x = variables.VariableV1(2.0, name="x")  # Should be 2.0.
  self.y = math_ops.negative(self.x, name="y")  # Should be -2.0.
  self.z = math_ops.multiply(self.x, self.y, name="z")  # Should be -4.0.

  rewriter_config = rewriter_config_pb2.RewriterConfig(
      disable_model_pruning=True,
      arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
      constant_folding=rewriter_config_pb2.RewriterConfig.OFF)
  graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
  config = config_pb2.ConfigProto(graph_options=graph_options)
  self.sess = session.Session(config=config)
  self.sess.run(variables.global_variables_initializer())
Example 5: decayed_lr
# Note: this helper is a closure; learning_rate, global_step, decay_steps,
# decay_rate, staircase, dtype and name are captured from the enclosing
# function (see Example 9 for a standalone variant).
def decayed_lr():
  """Helper to recompute learning rate; most helpful in eager-mode."""
  global_step_recomp = math_ops.cast(global_step, dtype)
  p = global_step_recomp / decay_steps
  if staircase:
    p = math_ops.floor(p)
  exponent = math_ops.exp(
      math_ops.multiply(math_ops.negative(decay_rate), p))
  return math_ops.multiply(learning_rate, exponent, name=name)
Example 6: _FloorModGrad
def _FloorModGrad(op, grad):
  """Returns grad * (1, -floor(x/y))."""
  x = math_ops.conj(op.inputs[0])
  y = math_ops.conj(op.inputs[1])

  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  floor_xy = math_ops.floor_div(x, y)
  gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
  gy = array_ops.reshape(
      math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)
  return gx, gy
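As a quick sanity check of the docstring's claim: away from the discontinuities of floor, floormod(x, y) = x - floor(x/y) * y has partial derivative 1 with respect to x and -floor(x/y) with respect to y. A small NumPy finite-difference sketch (the sample point is our own choice, picked away from a discontinuity):

import numpy as np

floormod = lambda a, b: a - np.floor(a / b) * b
x, y, eps = 7.3, 2.0, 1e-4
d_dx = (floormod(x + eps, y) - floormod(x - eps, y)) / (2 * eps)  # ~ 1.0
d_dy = (floormod(x, y + eps) - floormod(x, y - eps)) / (2 * eps)  # ~ -3.0 == -floor(7.3 / 2.0)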
Example 7: _XDivyGrad
def _XDivyGrad(op, grad):
  """Returns gradient of xdivy(x, y) with respect to x and y."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  with ops.control_dependencies([grad]):
    not_zero_x = math_ops.cast(
        math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
    partial_x = gen_math_ops.xdivy(not_zero_x, y)
    partial_y = gen_math_ops.xdivy(math_ops.negative(x), y**2)
    return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
            array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
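The two partials match the closed form: for xdivy(x, y) = x / y (defined as 0 when x == 0), d/dx is 1/y wherever x != 0 and 0 at x == 0, and d/dy is -x / y**2. A small check through the public API (a sketch of our own, assuming a TF 2.x environment where tf.math.xdivy is available):

import tensorflow as tf

x = tf.constant([0.0, 2.0])
y = tf.constant([3.0, 4.0])
with tf.GradientTape(persistent=True) as tape:
  tape.watch([x, y])
  z = tf.math.xdivy(x, y)
print(tape.gradient(z, x))  # [0.0, 0.25]: 1/y where x != 0, 0 where x == 0.
print(tape.gradient(z, y))  # [0.0, -0.125]: -x / y**2.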
Example 8: _createGraph
def _createGraph(self):
  """Create graph for testing.

  Returns:
    Python Graph object.
  """
  with ops.Graph().as_default() as graph:
    with ops.device("/job:worker/task:0/cpu:0"):
      self.a = variables.VariableV1(10.0, name="a")
      self.b = variables.VariableV1(100.0, name="b")
      self.inc_a = state_ops.assign_add(self.a, 2.0, name="inc_a")
      self.dec_b = state_ops.assign_add(self.b, -5.0, name="dec_b")
      self.p = math_ops.multiply(self.inc_a, self.dec_b, name="p")
      self.q = math_ops.negative(self.p, name="q")
  return graph
Example 9: decayed_lr
def decayed_lr(learning_rate, global_step, decay_steps, decay_rate, staircase,
               name):
  """Helper to recompute learning rate; most helpful in eager-mode."""
  with ops.name_scope(name, "NaturalExpDecay",
                      [learning_rate, global_step, decay_rate]) as name:
    learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    dtype = learning_rate.dtype
    decay_steps = math_ops.cast(decay_steps, dtype)
    decay_rate = math_ops.cast(decay_rate, dtype)
    global_step_recomp = math_ops.cast(global_step, dtype)
    p = global_step_recomp / decay_steps
    if staircase:
      p = math_ops.floor(p)
    exponent = math_ops.exp(
        math_ops.multiply(math_ops.negative(decay_rate), p))
    return math_ops.multiply(learning_rate, exponent, name=name)
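Plugging in concrete numbers makes the effect of the staircase flag visible; a small pure-Python check with values of our own choosing:

import math

learning_rate, decay_rate, decay_steps, global_step = 0.1, 0.5, 5.0, 7
p = global_step / decay_steps                                 # 1.4
print(learning_rate * math.exp(-decay_rate * p))              # ~ 0.0497 (continuous)
print(learning_rate * math.exp(-decay_rate * math.floor(p)))  # ~ 0.0607 (staircase)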
Example 10: setUp
def setUp(self):
  self.a = variables.Variable(2.0, name="a")
  self.b = variables.Variable(3.0, name="b")
  self.c = math_ops.multiply(self.a, self.b, name="c")  # Should be 6.0.
  self.d = math_ops.multiply(self.a, self.a, name="d")  # Should be 4.0.
  self.e = math_ops.multiply(self.d, self.c, name="e")  # Should be 24.0.
  self.f_y = constant_op.constant(0.30, name="f_y")
  self.f = math_ops.div(self.b, self.f_y, name="f")  # Should be 10.0.

  # The three nodes x, y and z form a graph with "cross-links": x and y are
  # both direct inputs to z, but x is also a direct input to y.
  self.x = variables.Variable(2.0, name="x")  # Should be 2.0.
  self.y = math_ops.negative(self.x, name="y")  # Should be -2.0.
  self.z = math_ops.multiply(self.x, self.y, name="z")  # Should be -4.0.

  self.sess = session.Session()
  self.sess.run(variables.global_variables_initializer())
Example 11: testInitializerFunction
def testInitializerFunction(self):
  value = [[-42], [133.7]]
  shape = [2, 1]
  with self.cached_session():
    initializer = lambda: constant_op.constant(value)

    v1 = variables.Variable(initializer, dtype=dtypes.float32)
    self.assertEqual(shape, v1.get_shape())
    self.assertEqual(shape, v1.shape)
    self.assertAllClose(value, self.evaluate(v1.initial_value))
    with self.assertRaises(errors_impl.FailedPreconditionError):
      self.evaluate(v1)

    v2 = variables.Variable(
        math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
    self.assertEqual(v1.get_shape(), v2.get_shape())
    self.assertEqual(v1.shape, v2.shape)
    self.assertAllClose(np.negative(value), self.evaluate(v2.initial_value))

    with self.assertRaises(errors_impl.FailedPreconditionError):
      self.evaluate(v2)
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(np.negative(value), self.evaluate(v2))
Example 12: GetParams
def GetParams(self):
  """Test for unary operations in TF-TRT."""
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [12, 5, 8, 1, 1, 12]
  input2_name = "input_2"
  input2_dims = [12, 5, 8, 1, 12, 1, 1]
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
    q = math_ops.abs(x)
    q = q + 1.0
    q = gen_math_ops.exp(q)
    q = gen_math_ops.log(q)
    q = array_ops.squeeze(q, axis=-2)
    q = math_ops.abs(q)
    q = q + 2.2
    q = gen_math_ops.sqrt(q)
    q = gen_math_ops.rsqrt(q)
    q = math_ops.negative(q)
    q = array_ops.squeeze(q, axis=3)
    q = math_ops.abs(q)
    q = q + 3.0
    a = gen_math_ops.reciprocal(q)

    x = constant_op.constant(np.random.randn(5, 8, 12), dtype=dtype)
    q = math_ops.abs(x)
    q = q + 2.0
    q = gen_math_ops.exp(q)
    q = gen_math_ops.log(q)
    q = math_ops.abs(q)
    q = q + 2.1
    q = gen_math_ops.sqrt(q)
    q = gen_math_ops.rsqrt(q)
    q = math_ops.negative(q)
    q = math_ops.abs(q)
    q = q + 4.0
    b = gen_math_ops.reciprocal(q)

    # TODO(jie): this one will break, broadcasting on batch.
    x = array_ops.placeholder(
        dtype=dtype, shape=input2_dims, name=input2_name)
    q = math_ops.abs(x)
    q = q + 5.0
    q = gen_math_ops.exp(q)
    q = array_ops.squeeze(q, axis=[-1, -2, 3])
    q = gen_math_ops.log(q)
    q = math_ops.abs(q)
    q = q + 5.1
    q = gen_array_ops.reshape(q, [12, 5, 1, 1, 8, 1, 12])
    q = array_ops.squeeze(q, axis=[5, 2, 3])
    q = gen_math_ops.sqrt(q)
    q = math_ops.abs(q)
    q = q + 5.2
    q = gen_math_ops.rsqrt(q)
    q = math_ops.negative(q)
    q = math_ops.abs(q)
    q = q + 5.3
    c = gen_math_ops.reciprocal(q)

    q = a * b
    q = q / c
    array_ops.squeeze(q, name=self.output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name, input2_name],
      input_dims=[input_dims, input2_dims],
      num_expected_engines=5,
      expected_output_dims=(12, 5, 8, 12),
      allclose_atol=1.e-03,
      allclose_rtol=1.e-03)
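The expected_output_dims value follows from shape bookkeeping on the first input alone; a quick NumPy sketch of the two squeezes applied to input_dims (our own illustration):

import numpy as np

x = np.zeros([12, 5, 8, 1, 1, 12])
x = np.squeeze(x, axis=-2)  # -> (12, 5, 8, 1, 12)
x = np.squeeze(x, axis=3)   # -> (12, 5, 8, 12), matching expected_output_dims.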
Example 13: natural_exp_decay
def natural_exp_decay(learning_rate,
                      global_step,
                      decay_steps,
                      decay_rate,
                      staircase=False,
                      name=None):
  """Applies natural exponential decay to the initial learning rate.

  When training a model, it is often recommended to lower the learning rate as
  the training progresses. This function applies an exponential decay function
  to a provided initial learning rate. It requires a `global_step` value to
  compute the decayed learning rate. You can just pass a TensorFlow variable
  that you increment at each training step.

  The function returns the decayed learning rate. It is computed as:

  ```python
  decayed_learning_rate = learning_rate * exp(-decay_rate * global_step /
                                              decay_steps)
  ```

  or, if `staircase` is `True`, as:

  ```python
  decayed_learning_rate = learning_rate * exp(-decay_rate * floor(global_step /
                                              decay_steps))
  ```

  Example: decay exponentially with a decay rate of 0.5:

  ```python
  ...
  global_step = tf.Variable(0, trainable=False)
  learning_rate = 0.1
  decay_steps = 5
  k = 0.5
  learning_rate = tf.compat.v1.train.natural_exp_decay(learning_rate,
                                                       global_step,
                                                       decay_steps, k)

  # Passing global_step to minimize() will increment it at each step.
  learning_step = (
      tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
      .minimize(...my loss..., global_step=global_step)
  )
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
      The initial learning rate.
    global_step: A Python number. Global step to use for the decay computation.
      Must not be negative.
    decay_steps: How often to apply decay.
    decay_rate: A Python number. The decay rate.
    staircase: Whether to apply decay in a discrete staircase, as opposed to
      continuous, fashion.
    name: String. Optional name of the operation. Defaults to
      'ExponentialTimeDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate`. The decayed
    learning rate.

  Raises:
    ValueError: if `global_step` is not supplied.

  @compatibility(eager)
  When eager execution is enabled, this function returns a function which in
  turn returns the decayed learning rate Tensor. This can be useful for
  changing the learning rate value across different invocations of optimizer
  functions.
  @end_compatibility
  """
  natural_exp_rate = math_ops.exp(math_ops.negative(decay_rate))
  decayed_lr = learning_rate_schedule.ExponentialDecay(
      learning_rate,
      decay_steps,
      natural_exp_rate,
      staircase=staircase,
      name=name)

  if not context.executing_eagerly():
    decayed_lr = decayed_lr(global_step)
  else:
    decayed_lr = functools.partial(decayed_lr, global_step)

  return decayed_lr
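Note why reusing ExponentialDecay works here: that schedule computes learning_rate * rate ** (step / decay_steps), so passing rate = exp(-decay_rate) reproduces the natural-exponential form of Example 9, since exp(-r) ** p == exp(-r * p). A one-line numeric check (values are our own):

import math

decay_rate, p = 0.5, 1.4
assert abs(math.exp(-decay_rate) ** p - math.exp(-decay_rate * p)) < 1e-12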
Example 14: training_loss
def training_loss(self, features, labels, name='training_loss'):
  return math_ops.negative(self.average_size(), name=name)
Example 15: validation_loss
def validation_loss(self, features, labels):
  return math_ops.negative(self.average_size())
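Examples 14 and 15 both rely on the common idiom of negating a quantity so that a minimizer effectively maximizes it (here, the forest's average tree size). A generic sketch of the idiom, where model and optimizer are hypothetical stand-ins:

metric = model.average_size()                  # hypothetical scalar tensor to maximize
loss = math_ops.negative(metric, name='loss')  # minimizing loss maximizes metric
train_op = optimizer.minimize(loss)            # optimizer assumed, e.g. a GradientDescentOptimizer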