This article collects typical usage examples of the Python method tensorflow.compat.v1.function. If you have been wondering what v1.function does, how to call it, or what real code using it looks like, the curated examples below may help. You can also explore further usage examples from the containing module, tensorflow.compat.v1.
A total of 15 code examples of v1.function are shown below, sorted by popularity by default.
Example 1: get_stylize_fn
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def get_stylize_fn():
  """Creates a tf.function for stylization."""
  input_spec = [
      tf.TensorSpec((None, None, None, 3), tf.float32),
      tf.TensorSpec((None, None, None, 3), tf.float32)
  ]
  predict_feeds = []
  predict_fetches = []

  def umbrella_function(content_img, style_img):
    predict_feeds.extend([content_img, style_img])
    predict_result = build_network(content_img, style_img)
    predict_fetches.extend([
        predict_result,
    ])
    return predict_result

  umbrella_wrapped = tf.compat.v1.wrap_function(umbrella_function, input_spec)
  fn = umbrella_wrapped.prune(predict_feeds, predict_fetches)
  return fn
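The feed/fetch bookkeeping above is easier to see in isolation. Below is a minimal, self-contained sketch of the same tf.compat.v1.wrap_function + prune idiom, with a trivial computation standing in for build_network; the function and variable names are illustrative, not part of the original project.

import tensorflow as tf

feeds = []
fetches = []

def build(x):
  # Runs once while tracing: record the traced input and output tensors.
  feeds.append(x)
  y = tf.square(x) + 1.0
  fetches.append(y)
  return y

wrapped = tf.compat.v1.wrap_function(build, [tf.TensorSpec((None,), tf.float32)])
pruned = wrapped.prune(feeds, fetches)

# The pruned ConcreteFunction takes the recorded feeds and returns results in
# the structure of `fetches` (here a one-element list): [[2.0, 5.0]].
print(pruned(tf.constant([1.0, 2.0])))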
Example 2: test_context_manager
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def test_context_manager(self, noise, epsilon, class_weights):
  """Tests the context manager functionality of the optimizer.

  Args:
    noise: noise distribution to pick
    epsilon: epsilon privacy parameter to use
    class_weights: class_weights to use
  """
  @tf.function
  def test_run():
    loss = TestLoss(1, 1, 1)
    bolton = opt.BoltOn(TestOptimizer(), loss)
    model = TestModel(1, (1,), 1)
    model.compile(bolton, loss)
    model.layers[0].kernel = \
        model.layers[0].kernel_initializer((model.layer_input_shape[0],
                                            model.n_outputs))
    with bolton(noise, epsilon, model.layers, class_weights, 1, 1) as _:
      pass
    return _ops.convert_to_tensor_v2(bolton.epsilon, dtype=tf.float32)
  epsilon = test_run()
  self.assertEqual(epsilon.numpy(), -1)
Example 3: test_context_domains
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def test_context_domains(self, noise, epsilon, err_msg):
  """Tests the context domains.

  Args:
    noise: noise distribution to pick
    epsilon: epsilon privacy parameter to use
    err_msg: the expected error message
  """
  @tf.function
  def test_run(noise, epsilon):
    loss = TestLoss(1, 1, 1)
    bolton = opt.BoltOn(TestOptimizer(), loss)
    model = TestModel(1, (1,), 1)
    model.compile(bolton, loss)
    model.layers[0].kernel = \
        model.layers[0].kernel_initializer((model.layer_input_shape[0],
                                            model.n_outputs))
    with bolton(noise, epsilon, model.layers, 1, 1, 1) as _:
      pass
  with self.assertRaisesRegexp(ValueError, err_msg):  # pylint: disable=deprecated-method
    test_run(noise, epsilon)
Example 4: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def __init__(self, weight_path):
  helpers.ensure_lpips_weights_exist(weight_path)

  def wrap_frozen_graph(graph_def, inputs, outputs):
    def _imports_graph_def():
      tf.graph_util.import_graph_def(graph_def, name="")
    wrapped_import = tf.wrap_function(_imports_graph_def, [])
    import_graph = wrapped_import.graph
    return wrapped_import.prune(
        tf.nest.map_structure(import_graph.as_graph_element, inputs),
        tf.nest.map_structure(import_graph.as_graph_element, outputs))

  # Pack the LPIPS network into a tf.function.
  graph_def = tf.GraphDef()
  with open(weight_path, "rb") as f:
    graph_def.ParseFromString(f.read())
  self._lpips_func = tf.function(
      wrap_frozen_graph(
          graph_def, inputs=("0:0", "1:0"), outputs="Reshape_10:0"))
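The same frozen-graph idiom can be exercised without the LPIPS weights file. The sketch below is illustrative only: it builds a GraphDef in-process from a small traced function, then re-imports and prunes it by tensor name, mirroring wrap_frozen_graph above. The function and tensor names are assumptions (derived programmatically), not the original LPIPS graph.

import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec((None,), tf.float32)])
def double(x):
  return x * 2.0

concrete = double.get_concrete_function()
graph_def = concrete.graph.as_graph_def()
input_name = concrete.inputs[0].name    # e.g. "x:0"
output_name = concrete.outputs[0].name  # e.g. "mul:0"

def wrap_frozen_graph(graph_def, inputs, outputs):
  def _imports_graph_def():
    tf.graph_util.import_graph_def(graph_def, name="")
  wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
  import_graph = wrapped_import.graph
  return wrapped_import.prune(
      tf.nest.map_structure(import_graph.as_graph_element, inputs),
      tf.nest.map_structure(import_graph.as_graph_element, outputs))

frozen_double = wrap_frozen_graph(graph_def, inputs=input_name, outputs=output_name)
print(frozen_double(tf.constant([1.0, 2.0])))  # -> [2. 4.]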
Example 5: test_blackout_pixel_weights_by_box_regions
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def test_blackout_pixel_weights_by_box_regions(self):
  def graph_fn():
    boxes = tf.constant(
        [[0.0, 0.0, 5, 5], [0.0, 0.0, 10.0, 20.0], [6.0, 12.0, 8.0, 18.0]],
        dtype=tf.float32)
    blackout = tf.constant([True, False, True], dtype=tf.bool)
    blackout_pixel_weights_by_box_regions = tf.function(
        ta_utils.blackout_pixel_weights_by_box_regions)
    output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)
    return output

  output = self.execute(graph_fn, [])
  # All zeros in region [0:6, 0:6].
  self.assertAlmostEqual(np.sum(output[0:6, 0:6]), 0.0)
  # All zeros in region [6:9, 12:19].
  self.assertAlmostEqual(np.sum(output[6:9, 12:19]), 0.0)
  # All other pixel weights should be 1.0.
  # 20 * 10 - 6 * 6 - 3 * 7 = 143.0
  self.assertAlmostEqual(np.sum(output), 143.0)
Example 6: execute_cpu
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def execute_cpu(self, compute_fn, inputs, graph=None):
  """Executes compute_fn on CPU.

  Depending on the underlying TensorFlow installation (build deps) runs in
  either TF 1.X or TF 2.X style.

  Args:
    compute_fn: a function containing Tensorflow computation that takes a list
      of input numpy tensors, performs computation and returns output numpy
      tensors.
    inputs: a list of numpy arrays to feed input to the `compute_fn`.
    graph: (optional) If not None, provided `graph` is used for computation
      instead of a brand new tf.Graph().

  Returns:
    A list of numpy arrays or a single tensor.
  """
  if self.is_tf2():
    return self.execute_cpu_tf2(compute_fn, inputs)
  else:
    return self.execute_cpu_tf1(compute_fn, inputs, graph)
Example 7: execute_tpu
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def execute_tpu(self, compute_fn, inputs, graph=None):
  """Executes compute_fn on TPU.

  Depending on the underlying TensorFlow installation (build deps) runs in
  either TF 1.X or TF 2.X style.

  Args:
    compute_fn: a function containing Tensorflow computation that takes a list
      of input numpy tensors, performs computation and returns output numpy
      tensors.
    inputs: a list of numpy arrays to feed input to the `compute_fn`.
    graph: (optional) If not None, provided `graph` is used for computation
      instead of a brand new tf.Graph().

  Returns:
    A list of numpy arrays or a single tensor.
  """
  if not self.has_tpu():
    raise ValueError('No TPU Device found.')
  if self.is_tf2():
    return self.execute_tpu_tf2(compute_fn, inputs)
  else:
    return self.execute_tpu_tf1(compute_fn, inputs, graph)
Example 8: execute_tf2
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def execute_tf2(self, compute_fn, inputs):
  """Runs compute_fn with TensorFlow 2.0.

  Executes on TPU if available, otherwise executes on CPU.

  Args:
    compute_fn: a function containing Tensorflow computation that takes a list
      of input numpy tensors, performs computation and returns output numpy
      tensors.
    inputs: a list of numpy arrays to feed input to the `compute_fn`.

  Returns:
    A list of numpy arrays or a single tensor.
  """
  if not self.is_tf2():
    raise ValueError('Required version TensorFlow 2.0 is not available.')
  if self.has_tpu():
    return self.execute_tpu_tf2(compute_fn, inputs)
  else:
    return self.execute_cpu_tf2(compute_fn, inputs)
Example 9: execute_tf1
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def execute_tf1(self, compute_fn, inputs, graph=None):
  """Runs compute_fn with TensorFlow 1.X.

  Executes on TPU if available, otherwise executes on CPU.

  Args:
    compute_fn: a function containing Tensorflow computation that takes a list
      of input numpy tensors, performs computation and returns output numpy
      tensors.
    inputs: a list of numpy arrays to feed input to the `compute_fn`.
    graph: (optional) If not None, provided `graph` is used for computation
      instead of a brand new tf.Graph().

  Returns:
    A list of numpy arrays or a single tensor.
  """
  if self.is_tf2():
    raise ValueError('Required version TensorFlow 1.X is not available.')
  if self.has_tpu():
    return self.execute_tpu_tf1(compute_fn, inputs, graph)
  else:
    return self.execute_cpu_tf1(compute_fn, inputs, graph)
Example 10: execute
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def execute(self, compute_fn, inputs, graph=None):
  """Runs compute_fn with inputs and returns results.

  * Executes in either TF1.X or TF2.X style based on the TensorFlow version.
  * Executes on TPU if available, otherwise executes on CPU.

  Args:
    compute_fn: a function containing Tensorflow computation that takes a list
      of input numpy tensors, performs computation and returns output numpy
      tensors.
    inputs: a list of numpy arrays to feed input to the `compute_fn`.
    graph: (optional) If not None, provided `graph` is used for computation
      instead of a brand new tf.Graph().

  Returns:
    A list of numpy arrays or a single tensor.
  """
  if self.has_tpu() and tf2.enabled():
    return self.execute_tpu_tf2(compute_fn, inputs)
  elif not self.has_tpu() and tf2.enabled():
    return self.execute_cpu_tf2(compute_fn, inputs)
  elif self.has_tpu() and not tf2.enabled():
    return self.execute_tpu_tf1(compute_fn, inputs, graph)
  else:
    return self.execute_cpu_tf1(compute_fn, inputs, graph)
Example 11: _test_spop_placeholder_without_shape_info
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def _test_spop_placeholder_without_shape_info():
    with tf.Graph().as_default():
        @function.Defun(*[tf.int32] * 2)
        def Forward(x, y):
            print(x.name)
            print(y.name)
            b = tf.add(x, y)
            return b

        pl1 = tf.placeholder(tf.int32, name="pl1")
        pl2 = tf.placeholder(tf.int32, name="pl2")
        pl3 = tf.placeholder(tf.int32, name="pl3")
        data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
        data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward)
        z2 = z1 + pl3
        compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'],
                            ['StatefulPartitionedCall:0', z2.name], mode='vm',
                            init_global_variables=True)
Example 12: _test_spop_function_invocation_basic
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def _test_spop_function_invocation_basic():
    with tf.Graph().as_default():

        def fun1(a):
            return tf.multiply(a, a)

        def fun2(b):
            return tf.multiply(b, 10)

        @tf.function
        def fun3(x, y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x, y)
            return z

        t3 = fun3(tf.constant(10.5), tf.constant(20.4))

        compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True)
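The tensor names fetched in these tests (t3.name here, 'StatefulPartitionedCall:0' in Example 11) exist because a traced tf.function shows up in the enclosing graph as a single call op. Below is a minimal, illustrative sketch of inspecting that outside the TVM test harness; the names used are assumptions, not part of the test suite.

import tensorflow as tf

@tf.function
def add_one(x):
    return x + 1.0

with tf.Graph().as_default() as g:
    inp = tf.compat.v1.placeholder(tf.float32, shape=(), name="inp")
    out = add_one(inp)
    # The traced function appears in the outer graph as one call node,
    # which is what the TVM tests fetch by name.
    print([op.type for op in g.get_operations()])
    # Expected to include 'Placeholder' and a '(Stateful)PartitionedCall' op.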
Example 13: _test_spop_function_invocation_nested
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def _test_spop_function_invocation_nested():
    with tf.Graph().as_default():
        t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1")
        t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
        t2 = tf.placeholder(tf.int32, name="t2")
        t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))

        @tf.function
        def myfunc(x, y):
            return tf.add(x, y, "myfunc")

        @tf.function
        def myfunc2(x, y):
            z = myfunc(x, y)
            l = myfunc(z, y)
            m = myfunc(l, z)
            return tf.add(l, m, "myfunc2")

        res1 = myfunc(t1, t2)
        res2 = myfunc2(res1, t1)

        compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [res2.name],
                            mode='vm', init_global_variables=True)
Example 14: _test_spop_function_invocation_no_autograph
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def _test_spop_function_invocation_no_autograph():
    with tf.Graph().as_default():

        @tf.function(autograph=False)
        def fun1(a):
            return tf.multiply(a, a)

        @tf.function(autograph=False)
        def fun2(b):
            return tf.multiply(b, 10)

        @tf.function
        def fun3(x, y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x, y)
            return z

        t3 = fun3(tf.constant(10.5), tf.constant(20.4))

        compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True)
Example 15: _test_spop_function_invocation_defun
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import function [as alias]
def _test_spop_function_invocation_defun():
    with tf.Graph().as_default():

        def fun1(a):
            return tf.multiply(a, a)

        def fun2(b):
            return tf.multiply(b, b)

        @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
        def fun3(x, y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x, y)
            return z

        op = gen_functional_ops.StatefulPartitionedCall(
            args=[tf.constant(10.5), tf.constant(20.4)],
            Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation")

        compare_tf_with_tvm([], [], 'SpopFnInvocation:0', mode='vm',
                            init_global_variables=True)
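compare_tf_with_tvm hides the plain TensorFlow side of these checks. As an illustrative sketch only (the session setup and expected value are assumptions, not part of the TVM test suite), the Defun-based call above can be evaluated directly in a TF1-style session:

import tensorflow.compat.v1 as tf
from tensorflow.python.framework import dtypes, function
from tensorflow.python.ops import gen_functional_ops

tf.disable_eager_execution()

with tf.Graph().as_default():
    @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
    def fun3(x, y):
        # Equivalent to fun3 above: x*x + y*y.
        return tf.add(tf.multiply(x, x), tf.multiply(y, y))

    op = gen_functional_ops.StatefulPartitionedCall(
        args=[tf.constant(10.5), tf.constant(20.4)],
        Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation")

    with tf.Session() as sess:
        # 10.5**2 + 20.4**2 = 526.41
        print(sess.run("SpopFnInvocation:0"))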