This page collects typical usage examples of the Python method tensorflow.compat.v1.executing_eagerly. If you have been wondering what v1.executing_eagerly does, how to use it, or where to find usage examples, the curated code samples below may help. You can also explore further examples from its containing module, tensorflow.compat.v1.
The following sections present 15 code examples of v1.executing_eagerly, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
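Before diving in, here is a minimal sketch of what the method reports (assuming a stock TensorFlow 2.x install, where eager execution is enabled by default):

import tensorflow.compat.v1 as tf

if tf.executing_eagerly():
  print("eager mode: ops run immediately and return concrete values")
else:
  print("graph mode: ops are added to a graph and run in a tf.Session")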
Example 1: test_run_in_graph_and_eager_modes
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def test_run_in_graph_and_eager_modes(self):
  l = []

  def inc(self, with_brackets):
    del self  # self argument is required by run_in_graph_and_eager_modes.
    mode = "eager" if tf.executing_eagerly() else "graph"
    with_brackets = "with_brackets" if with_brackets else "without_brackets"
    l.append((with_brackets, mode))

  f = test_utils.run_in_graph_and_eager_modes(inc)
  f(self, with_brackets=False)
  f = test_utils.run_in_graph_and_eager_modes()(inc)
  f(self, with_brackets=True)

  self.assertEqual(len(l), 4)
  self.assertEqual(set(l), {
      ("with_brackets", "graph"),
      ("with_brackets", "eager"),
      ("without_brackets", "graph"),
      ("without_brackets", "eager"),
  })
Example 2: test_run_in_graph_and_eager_modes_setup_in_same_mode
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
  modes = []
  mode_name = lambda: "eager" if tf.executing_eagerly() else "graph"

  class ExampleTest(tf.test.TestCase):

    def runTest(self):
      pass

    def setUp(self):
      modes.append("setup_" + mode_name())

    @test_utils.run_in_graph_and_eager_modes
    def testBody(self):
      modes.append("run_" + mode_name())

  e = ExampleTest()
  e.setUp()
  e.testBody()

  self.assertEqual(modes[0:2], ["setup_eager", "run_eager"])
  self.assertEqual(modes[2:], ["setup_graph", "run_graph"])
Example 3: remove
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def remove(self, x):
  """Remove padding from the given tensor.

  Args:
    x (tf.Tensor): of shape [dim_origin, ...]

  Returns:
    A tensor of shape [dim_compressed, ...] with dim_compressed <= dim_origin.
  """
  with tf.name_scope("pad_reduce/remove"):
    x_shape = x.get_shape().as_list()
    x = tf.gather_nd(
        x,
        indices=self.nonpad_ids,
    )
    if not tf.executing_eagerly():
      # This is a hack: for some reason gather_nd returns a tensor of
      # undefined shape, so the shape is set manually.
      x.set_shape([None] + x_shape[1:])
  return x
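A minimal, self-contained sketch of the same set_shape workaround, with hypothetical tensors standing in for the PadRemover state (x and nonpad_ids here are illustrative, not from the original class):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # graph mode, where the workaround applies

x = tf.random_uniform([6, 8])                                # [dim_origin, ...]
nonpad_ids = tf.where(tf.reduce_sum(tf.abs(x), axis=1) > 0)  # indices, [k, 1]
y = tf.gather_nd(x, nonpad_ids)
y.set_shape([None, 8])  # restore the statically known inner dimension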
Example 4: summarize_video
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def summarize_video(video, prefix, max_outputs=1):
  """Summarize the video using image summaries starting with prefix."""
  video_shape = shape_list(video)
  if len(video_shape) != 5:
    raise ValueError("Assuming videos given as tensors in the format "
                     "[batch, time, height, width, channels] but got one "
                     "of shape: %s" % str(video_shape))
  if tf.executing_eagerly():
    return
  if video.get_shape().as_list()[1] is None:
    tf.summary.image(
        "%s_last_frame" % prefix,
        tf.cast(video[:, -1, :, :, :], tf.uint8),
        max_outputs=max_outputs)
  else:
    for k in range(video_shape[1]):
      tf.summary.image(
          "%s_frame_%d" % (prefix, k),
          tf.cast(video[:, k, :, :, :], tf.uint8),
          max_outputs=max_outputs)
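The early return matters because the v1 tf.summary.image op is graph-only; under eager execution the function simply bails out. A condensed sketch of that guard pattern (the helper name is illustrative):

import tensorflow.compat.v1 as tf

def maybe_image_summary(name, images, max_outputs=1):
  # The v1 summary ops assume graph mode; skip them under eager execution.
  if tf.executing_eagerly():
    return
  tf.summary.image(name, tf.cast(images, tf.uint8), max_outputs=max_outputs)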
Example 5: softmax
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def softmax(logits, scope=None):
  """Performs softmax on Nth dimension of N-dimensional logit tensor.

  For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
  needs to have a specified number of elements (number of classes).

  Args:
    logits: N-dimensional `Tensor` with logits, where N > 1.
    scope: Optional scope for variable_scope.

  Returns:
    A `Tensor` with same shape and type as logits.
  """
  with variable_scope.variable_scope(scope, 'softmax', [logits]):
    num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
    logits_2d = array_ops.reshape(logits, [-1, num_logits])
    predictions = nn.softmax(logits_2d)
    predictions = array_ops.reshape(predictions, array_ops.shape(logits))
    if not tf.executing_eagerly():
      predictions.set_shape(logits.get_shape())
    return predictions
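The reshape round-trip above is equivalent to a softmax over the last axis. A quick sanity check using plain tf ops (shapes are illustrative; assumes eager execution, the TF 2.x default):

import tensorflow.compat.v1 as tf

logits = tf.random.uniform([2, 3, 5])
a = tf.reshape(tf.nn.softmax(tf.reshape(logits, [-1, 5])), tf.shape(logits))
b = tf.nn.softmax(logits, axis=-1)  # identical to `a` up to float error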
Example 6: _get_weights
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def _get_weights(model_hparams, vocab_size, hidden_dim=None):
  """Copied from tensor2tensor/layers/modalities.py but uses total vocab."""
  if hidden_dim is None:
    hidden_dim = model_hparams.hidden_size
  num_shards = model_hparams.symbol_modality_num_shards
  shards = []
  for i in range(num_shards):
    shard_size = (sum(vocab_size) // num_shards) + (
        1 if i < sum(vocab_size) % num_shards else 0)
    var_name = 'weights_%d' % i
    shards.append(
        tf.get_variable(
            var_name, [shard_size, hidden_dim],
            initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5)))
  if num_shards == 1:
    ret = shards[0]
  else:
    ret = tf.concat(shards, 0)
  # Convert ret to tensor.
  if not tf.executing_eagerly():
    ret = common_layers.convert_gradient_to_tensor(ret)
  return ret
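The shard-size arithmetic splits the total vocabulary as evenly as possible, giving the first (total % num_shards) shards one extra row. In isolation, with illustrative numbers:

total, num_shards = 11, 4
sizes = [total // num_shards + (1 if i < total % num_shards else 0)
         for i in range(num_shards)]
assert sizes == [3, 3, 3, 2] and sum(sizes) == total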
Example 7: pretrained_visual_encoder
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def pretrained_visual_encoder(self, features, hparams):
  # We want the exact hparams that were used to train this VAE.
  vae_hparams = trainer_lib.create_hparams(
      hparams.vae_hparam_set, hparams.vae_hparams,
      data_dir=hparams.vae_data_dir, problem_name=hparams.vae_problem)
  # Go back to the root variable scope.
  with tf.variable_scope(tf.VariableScope(tf.AUTO_REUSE, ''),
                         reuse=tf.AUTO_REUSE, auxiliary_name_scope=False):
    vae = image_vae.ImageVAE(vae_hparams, mode=self._hparams.mode,
                             problem_hparams=vae_hparams.problem_hparams)
    # The real input to the VAE will be features['rendered_targets'].
    vae_features = copy.copy(features)
    vae_features['inputs'] = tf.reshape(vae_features['targets_psr'][:, -1, :],
                                        [-1, 64, 64, 1])
    vae_features['targets'] = vae_features['inputs']
    # We want the VAE to return the bottleneck.
    vae_features['bottleneck'] = tf.zeros((0, 128))
    sampled_bottleneck, _ = vae(vae_features)
    vae.initialize_from_ckpt(hparams.vae_ckpt_dir)
    if tf.executing_eagerly():
      # In eager mode the first call only built the variables; call again so
      # the bottleneck is computed with the restored checkpoint weights.
      sampled_bottleneck, _ = vae(vae_features)
  return sampled_bottleneck
Example 8: inference_network_fn
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def inference_network_fn(self,
                         features,
                         labels,
                         mode,
                         config=None,
                         params=None):
  """See base class documentation."""
  del mode, config, params
  if not self._model:
    self._build_model()

  if self._multi_dataset:
    if tf.executing_eagerly():
      x1 = tf.convert_to_tensor(features.x1)
      x2 = tf.convert_to_tensor(features.x2)
    else:
      x1 = features.x1
      x2 = features.x2
    net = x1 + x2
  else:
    net = features.x

  net = self._model(net)
  return dict(logits=net)
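A minimal stand-in for the eager branch above (the numpy arrays and shapes are illustrative, not from the original model):

import numpy as np
import tensorflow.compat.v1 as tf

x1 = np.ones((2, 3), np.float32)
x2 = np.ones((2, 3), np.float32)
if tf.executing_eagerly():
  # Convert explicitly so the addition below is a tensor op with a fixed dtype.
  x1, x2 = tf.convert_to_tensor(x1), tf.convert_to_tensor(x2)
net = x1 + x2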
Example 9: call
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def call(self, inputs):
  inputs = tf.convert_to_tensor(inputs)
  # Use the static rank: tf.rank() returns a tensor, which cannot drive a
  # Python `if` in graph mode.
  rank = inputs.get_shape().ndims
  if rank > 2:
    outputs = tf.einsum("aki,aij->akj", inputs, self.kernel)
    # Reshape the output back to the original ndim of the input.
    if not tf.executing_eagerly():
      shape = inputs.get_shape().as_list()
      output_shape = shape[:-1] + [self.units]
      outputs.set_shape(output_shape)
  else:
    # Only the batched (rank > 2) case is implemented.
    raise NotImplementedError("rank <= 2 inputs are not supported")
    # outputs = tf.matmul(inputs, self.kernel)
  if self.use_bias:
    outputs = tf.nn.bias_add(outputs, self.bias)
  if self.activation is not None:
    return self.activation(outputs)  # pylint: disable=not-callable
  return outputs
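The einsum "aki,aij->akj" is a batched matmul: each batch element multiplies its [seq, in_dim] slice by its own [in_dim, out_dim] kernel slice. A shape-level sketch with made-up dimensions:

import tensorflow.compat.v1 as tf

x = tf.random.uniform([4, 7, 16])    # [batch, seq, in_dim]
w = tf.random.uniform([4, 16, 32])   # per-batch kernel [batch, in_dim, out_dim]
y = tf.einsum("aki,aij->akj", x, w)  # same result as tf.matmul(x, w): [4, 7, 32]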
Example 10: get_global_variables_safely
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def get_global_variables_safely():
  """If not executing eagerly, returns tf.global_variables().

  Raises a ValueError if eager execution is enabled,
  because the variables are not tracked when executing eagerly.
  If executing eagerly, use a Keras model's .variables property instead.

  Returns:
    The result of tf.global_variables().
  """
  with tf.init_scope():
    if tf.executing_eagerly():
      raise ValueError("Global variables collection is not tracked when "
                       "executing eagerly. Use a Keras model's `.variables` "
                       "attribute instead.")
    return tf.global_variables()
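Usage is straightforward in graph mode and fails fast in eager mode. A quick sketch, assuming the helper above is in scope:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

v = tf.get_variable("v", shape=[3])
assert v in get_global_variables_safely()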
Example 11: get_variable_initializer
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def get_variable_initializer(hparams):
  """Get variable initializer from hparams."""
  if not hparams.initializer:
    return None

  mlperf_log.transformer_print(key=mlperf_log.MODEL_HP_INITIALIZER_GAIN,
                               value=hparams.initializer_gain,
                               hparams=hparams)

  if not tf.executing_eagerly():
    tf.logging.info("Using variable initializer: %s", hparams.initializer)
  if hparams.initializer == "orthogonal":
    return tf.orthogonal_initializer(gain=hparams.initializer_gain)
  elif hparams.initializer == "uniform":
    max_val = 0.1 * hparams.initializer_gain
    return tf.random_uniform_initializer(-max_val, max_val)
  elif hparams.initializer == "normal_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="normal")
  elif hparams.initializer == "uniform_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="uniform")
  elif hparams.initializer == "xavier":
    return tf.initializers.glorot_uniform()
  else:
    raise ValueError("Unrecognized initializer: %s" % hparams.initializer)
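For instance, the "uniform" branch boils down to the following (the gain value is illustrative):

import tensorflow.compat.v1 as tf

gain = 1.0
init = tf.random_uniform_initializer(-0.1 * gain, 0.1 * gain)
print(init([2, 2]))  # a 2x2 sample drawn from [-0.1, 0.1]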
Example 12: flatten_all_but_last
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def flatten_all_but_last(a):
  """Flatten all dimensions of a except the last."""
  ret = tf.reshape(a, [-1, tf.shape(a)[-1]])
  if not tf.executing_eagerly():
    ret.set_shape([None] + a.get_shape().as_list()[-1:])
  return ret
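Quick usage, with a concrete input shape (assumes the function above is in scope):

import tensorflow.compat.v1 as tf

a = tf.zeros([2, 3, 5])
b = flatten_all_but_last(a)  # runtime shape [6, 5]; static shape [None, 5] in graph mode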
Example 13: _eager_log
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def _eager_log(level, *args):
  if tf.executing_eagerly() and args in _already_logged:
    return
  _already_logged.add(args)
  getattr(tf.logging, level)(*args)
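_already_logged is a module-level set in the original source; with a minimal stand-in you can see the eager-mode deduplication (the messages here are made up):

import tensorflow.compat.v1 as tf

_already_logged = set()
_eager_log("info", "step %d", 1)  # logged
_eager_log("info", "step %d", 1)  # suppressed when executing eagerly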
Example 14: _get_beta_accumulators
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def _get_beta_accumulators(self):
  with tf.init_scope():
    if tf.executing_eagerly():
      graph = None
    else:
      graph = tf.get_default_graph()
    return (self._get_non_slot_variable("beta1_power", graph=graph),
            self._get_non_slot_variable("beta2_power", graph=graph))
Example 15: _get_iter_variable
# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import executing_eagerly [as alias]
def _get_iter_variable(self):
  graph = (None if tf.executing_eagerly() else tf.get_default_graph())
  return self._get_non_slot_variable("iter", graph=graph)
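Examples 14 and 15 hinge on the same detail: tf.train.Optimizer keys its non-slot variables by graph in graph mode but by None under eager execution. The key computation in isolation, no optimizer required:

import tensorflow.compat.v1 as tf

graph = None if tf.executing_eagerly() else tf.get_default_graph()
print(graph)  # None under eager execution, a tf.Graph otherwise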