This article collects typical usage examples of the Python method tensorflow.python.framework.function.Defun. If you are wondering what function.Defun does, how to call it, or what real-world uses look like, the curated code samples below may help. You can also explore further usage examples of the module this method belongs to, tensorflow.python.framework.function.
Shown below are 15 code examples of the function.Defun method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code samples.
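Before the examples, here is a minimal sketch of the basic pattern, assuming TensorFlow 1.x graph mode; the names my_add and "MyAdd" are purely illustrative. function.Defun is applied as a decorator that takes the input dtypes, and calling the decorated function inserts a function-call op into the current graph.

import tensorflow as tf
from tensorflow.python.framework import function


# Illustrative sketch (TF 1.x graph mode assumed): Defun wraps a Python
# function into a named graph function with fixed input dtypes.
@function.Defun(tf.float32, tf.float32, func_name="MyAdd")
def my_add(x, y):
    return x + y


with tf.Graph().as_default():
    # Calling the wrapped function adds a single function-call op to the graph.
    out = my_add(tf.constant(1.0), tf.constant(2.0))
    with tf.Session() as sess:
        print(sess.run(out))  # -> 3.0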
Example 1: BatchMatMul
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def BatchMatMul(a, b):
    use_fp32_batch_matmul = (os.environ["use_fp32_batch_matmul"] == "true")
    xla_compile = (os.environ["xla_compile"] == "true")
    if use_fp32_batch_matmul:
        def DoFn(a, b):
            dtype = a.dtype
            a = tf.to_float(a)
            b = tf.to_float(b)
            return tf.cast(tf.matmul(a, b), dtype)
        # If using xla_compile, the fwd and bwd passes per tower are wrapped
        # in xla_compile.
        if not xla_compile:
            DoFn = function.Defun(noinline=True)(DoFn)
            res = DoFn(a, b)
            res.set_shape((None, None, b.shape[-1].value))
        else:
            # If xla_compile, leave it to XLA to handle the casts.
            res = DoFn(a, b)
    else:
        res = tf.matmul(a, b)
    return res
Example 2: testWhileFuncBasic
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def testWhileFuncBasic(self):
    @function.Defun(tf.float32)
    def func(x):
        return tf.square(tf.square(x))

    with self.test_session():
        x = tf.constant(2.0, tf.float32)
        r = tf.while_loop(
            lambda i, v: i < 2,
            lambda i, v: [i + 1, func(v)],
            [tf.constant(0), x],
            [tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
        self.assertEqual(r[1].eval(), 65536.0)
        r = tf.gradients(r, x)[0]
        self.assertEqual(r.eval(), 524288.0)
        self.assertEqual(len([op for op in x.graph.get_operations()
                              if op.type == "Stack"]),
                         1)
Example 3: testGradientFunc
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def testGradientFunc(self):
    @function.Defun(tf.float32, func_name="XSquarePlusOneFn")
    def XSquarePlusOne(x):
        return x * x + 1.0

    @function.Defun(tf.float32, tf.float32)
    def XSquarePlusOneGrad(x, dy):
        dx = functional_ops._symbolic_gradient(
            input=[x, dy], Tout=[tf.float32], f="XSquarePlusOneFn", name="dx")
        return dx

    g = tf.Graph()
    with g.as_default():
        call_f = XSquarePlusOne([2.0])
        call_g = XSquarePlusOneGrad([2.0], [0.1])

        with tf.Session() as sess:
            self.assertAllClose([5.0], sess.run(call_f))
            self.assertAllClose([0.4], sess.run(call_g))
Example 4: testTanhSymGrad
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def testTanhSymGrad(self):
    @function.Defun(tf.float32)
    def Forward(x):
        return tf.reduce_sum(tf.tanh(x))

    g = tf.Graph()
    with g.as_default():
        x = tf.placeholder(tf.float32)
        y = Forward(x)
        dx = tf.gradients([y], [x])

    inp = np.array([-1, 1, 2, -2], dtype=np.float32)
    feed = {x: inp}
    cfg = tf.ConfigProto(graph_options=tf.GraphOptions(
        optimizer_options=tf.OptimizerOptions(
            opt_level=tf.OptimizerOptions.L1, do_function_inlining=True)))
    with tf.Session(graph=g, config=cfg) as sess:
        out, = sess.run(dx, feed)
        self.assertAllClose(1 - np.square(np.tanh(inp)), out)
Example 5: testCustomGradientError
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def testCustomGradientError(self):
    dtype = tf.float32

    @function.Defun(dtype, dtype, dtype)
    def Grad(x, dy, dz):
        # Should have returned 1 result.
        return x, dy + dz

    @function.Defun(dtype, grad_func=Grad)
    def Forward(x):
        return x, x

    g = tf.Graph()
    with g.as_default():
        inp = tf.placeholder(dtype)
        out = tf.add_n(Forward(inp))
        dinp = tf.gradients(out, [inp])

    x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
    with tf.Session(graph=g) as sess:
        with self.assertRaisesRegexp(
                tf.errors.InvalidArgumentError,
                "SymGrad expects to return 1.*but get 2.*instead"):
            _ = sess.run(dinp, {inp: x})
Example 6: testSymGradAttr
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def testSymGradAttr(self):
    @function.Defun(noinline=True)
    def Foo(x):
        return x * 2

    self.assertTrue(
        Foo.instantiate([tf.float32]).definition.attr["_noinline"].b)

    g = tf.Graph()
    with g.as_default():
        x = tf.constant(3.0)
        y = Foo(x)
        dx, = tf.gradients(y, [x])

    cfg = tf.ConfigProto(graph_options=tf.GraphOptions(
        optimizer_options=tf.OptimizerOptions(
            opt_level=tf.OptimizerOptions.L0,
            do_common_subexpression_elimination=True,
            do_function_inlining=True,
            do_constant_folding=True)))
    with self.test_session(graph=g, config=cfg):
        self.assertAllClose(y.eval(), 6.)
        self.assertAllClose(dx.eval(), 2.)
Example 7: testDefineFunctionNames
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def testDefineFunctionNames(self):
    @function.Defun(tf.float32, func_name="Foo")
    def Foo(a):
        return a + 1

    with tf.Graph().as_default():
        call1 = Foo([1.0])
        self.assertEqual("Foo", call1.op.name)
        call2 = Foo([1.0])
        self.assertEqual("Foo_1", call2.op.name)
        # pylint: disable=unexpected-keyword-arg
        call3 = Foo([1.0], name="mine")
        self.assertEqual("mine", call3.op.name)
        with tf.name_scope("my"):
            call4 = Foo([1.0], name="precious")
            self.assertEqual("my/precious", call4.op.name)
Example 8: testFunctionDecorator
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def testFunctionDecorator(self):
    @function.Defun(tf.float32, func_name="Minus1")
    def Minus1(b):
        return b - 1.0

    with tf.Graph().as_default():
        call1 = Minus1([2.])
        self.assertTrue(isinstance(Minus1, function._DefinedFunction))
        self.assertEqual(Minus1.name, "Minus1")
        # pylint: disable=unexpected-keyword-arg
        call2 = Minus1(call1, name="next")
        # pylint: enable=unexpected-keyword-arg
        self.assertEqual("next", call2.op.name)
        with tf.Session() as sess:
            self.assertAllEqual([1], sess.run(call1))
            self.assertAllEqual([0], sess.run(call2))
Example 9: testCapture
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def testCapture(self):
    g = tf.Graph()
    with g.as_default():
        w = tf.Variable(tf.constant([[1.0]]))
        b = tf.Variable(tf.constant([2.0]))

        # Foo() captures w and b.
        @function.Defun(tf.float32)
        def Foo(x):
            # Plus() captures b.
            @function.Defun(tf.float32)
            def Plus(y):
                return y + b

            return Plus(tf.matmul(w, x))

        y = Foo(tf.constant([[10.]]))

    with self.test_session(graph=g):
        tf.global_variables_initializer().run()
        self.assertAllEqual(y.eval(), [[12.0]])
Example 10: testGradient
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def testGradient(self):
    @function.Defun(func_name="Spec")
    def G(x, dy):
        return x * dy

    @function.Defun(grad_func=G)
    def F(x):
        return tf.exp(x) - tf.exp(-x)

    for dtype in [tf.float32, tf.float64]:
        g = tf.Graph()
        with g.as_default():
            x = tf.constant(0.25, dtype)
            y = F(x)
            dx, = tf.gradients(y, x)

        with self.test_session(graph=g):
            self.assertAllClose(dx.eval(), 0.25)
Example 11: testTPUModuleInitializeOnceWithDefun
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def testTPUModuleInitializeOnceWithDefun(self):
    spec = hub.create_module_spec(stateful_random_rv_module_fn)

    @function.Defun()
    def import_computation():
        context = TPUReplicateContext()
        context.Enter()
        m = hub.Module(spec, name="module_", trainable=True)
        return [m(), m()]

    with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
        x = import_computation()
        sess.run(tf_v1.global_variables_initializer())
        got = sess.run(x)
        # Check the values are equal. If the initializer ran on each call,
        # the values would be different.
        self.assertEqual(got[0], got[1])
Example 12: testTPUPruneWithUnusedInput
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def testTPUPruneWithUnusedInput(self):
    spec = hub.create_module_spec(unused_input_module_fn)

    @function.Defun()
    def import_computation(x):
        context = TPUReplicateContext()
        context.Enter()
        m = hub.Module(spec, name="module_", trainable=True)
        return m({
            "x": tf.cast(x, dtype=tf.int64),
            "unused": tf.constant(2, dtype=tf.int64)
        })

    with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
        x = import_computation(5)
        got = sess.run(x)
        self.assertEqual(got, 25)
Example 13: testTPUModuleDoesntPruneControlDependencies
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def testTPUModuleDoesntPruneControlDependencies(self):
    spec = hub.create_module_spec(control_dependency_module_fn)

    @function.Defun()
    def import_computation():
        context = TPUReplicateContext()
        context.Enter()
        m = hub.Module(spec, name="module_", trainable=True)
        return m()

    with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
        x = import_computation()
        got = sess.run(x)
        self.assertEqual(got, 5.0)
        # If the op got pruned, the following get_operation_by_name should fail
        # with a dependency error.
        tf_v1.get_default_graph().get_operation_by_name("module_/dependency_op")
Example 14: _test_spop_placeholder_without_shape_info
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def _test_spop_placeholder_without_shape_info():
    with tf.Graph().as_default():
        @function.Defun(*[tf.int32] * 2)
        def Forward(x, y):
            print(x.name)
            print(y.name)
            b = tf.add(x, y)
            return b

        pl1 = tf.placeholder(tf.int32, name="pl1")
        pl2 = tf.placeholder(tf.int32, name="pl2")
        pl3 = tf.placeholder(tf.int32, name="pl3")
        data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
        data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        z1 = gen_functional_ops.StatefulPartitionedCall(
            args=[pl1, pl2], Tout=[tf.int32], f=Forward)
        z2 = z1 + pl3
        compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'],
                            ['StatefulPartitionedCall:0', z2.name], mode='vm',
                            init_global_variables=True)
Example 15: _test_spop_function_invocation_defun
# Required import: from tensorflow.python.framework import function [as alias]
# Or: from tensorflow.python.framework.function import Defun [as alias]
def _test_spop_function_invocation_defun():
    with tf.Graph().as_default():
        def fun1(a):
            return tf.multiply(a, a)

        def fun2(b):
            return tf.multiply(b, b)

        @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
        def fun3(x, y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x, y)
            return z

        op = gen_functional_ops.StatefulPartitionedCall(
            args=[tf.constant(10.5), tf.constant(20.4)],
            Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation")
        compare_tf_with_tvm([], [], 'SpopFnInvocation:0', mode='vm',
                            init_global_variables=True)