This article collects typical usage examples of the Python method tensorflow.python.eager.context.executing_eagerly. If you are wondering what context.executing_eagerly does, how to call it, or want concrete examples of its use, the curated code samples below may help. You can also explore the containing module, tensorflow.python.eager.context, for further usage examples.
The following presents 15 code examples of context.executing_eagerly, sorted by popularity by default.
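Before the examples, a minimal sketch of what the method reports (the explicit ops.Graph() block is our illustration, not taken from any example below): executing_eagerly() returns True while operations run immediately, and False while a graph is being built.

from tensorflow.python.eager import context
from tensorflow.python.framework import ops

print(context.executing_eagerly())  # True under TF 2.x eager execution (the default)

with ops.Graph().as_default():
  # Inside an explicit graph, ops are recorded for later execution
  # rather than run immediately.
  print(context.executing_eagerly())  # False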
Example 1: softmax
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def softmax(logits, scope=None):
  """Performs softmax on Nth dimension of N-dimensional logit tensor.

  For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
  needs to have a specified number of elements (number of classes).

  Args:
    logits: N-dimensional `Tensor` with logits, where N > 1.
    scope: Optional scope for variable_scope.

  Returns:
    A `Tensor` with the same shape and type as logits.
  """
  # TODO(jrru): Add axis argument which defaults to last dimension.
  with variable_scope.variable_scope(scope, 'softmax', [logits]):
    num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
    logits_2d = array_ops.reshape(logits, [-1, num_logits])
    predictions = nn.softmax(logits_2d)
    predictions = array_ops.reshape(predictions, array_ops.shape(logits))
    if not context.executing_eagerly():
      predictions.set_shape(logits.get_shape())
    return predictions
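For reference, the same reshape-softmax-reshape pattern expressed with public TF APIs (a sketch; softmax_nd is our name, not part of the example above):

import tensorflow as tf

def softmax_nd(logits):
  # Flatten all leading dimensions, softmax over the last one, restore shape.
  # The last dimension must be statically known, mirroring the original's
  # utils.last_dimension requirement.
  num_classes = logits.shape[-1]
  logits_2d = tf.reshape(logits, [-1, num_classes])
  predictions = tf.reshape(tf.nn.softmax(logits_2d), tf.shape(logits))
  if not tf.executing_eagerly():
    # The dynamic reshape drops static shape info; reattach it in graph mode.
    predictions.set_shape(logits.shape)
  return predictions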
Example 2: call
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def call(self, inputs):
  w = self.compute_spectral_norm()
  inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
  rank = common_shapes.rank(inputs)
  if rank > 2:
    # Broadcasting is required for the inputs.
    outputs = standard_ops.tensordot(inputs, w, [[rank - 1], [0]])
    # Reshape the output back to the original ndim of the input.
    if not context.executing_eagerly():
      shape = inputs.get_shape().as_list()
      output_shape = shape[:-1] + [self.units]
      outputs.set_shape(output_shape)
  else:
    outputs = gen_math_ops.mat_mul(inputs, w)
  if self.use_bias:
    outputs = nn.bias_add(outputs, self.bias)
  if self.activation is not None:
    return self.activation(outputs)  # pylint: disable=not-callable
  return outputs
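The graph-mode set_shape above compensates for tensordot, whose internal dynamic reshape can leave the static output shape only partially known. A small graph-mode sketch (TF 1.x compatibility APIs; the concrete shapes are our assumptions):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[None, 5, 8])
w = tf.get_variable('w', shape=[8, 3], dtype=tf.float32)
y = tf.tensordot(x, w, [[2], [0]])
# Depending on the TF version, y's static shape may be only partially known
# here; set_shape reattaches the full [None, 5, 3] shape for downstream ops.
y.set_shape(x.shape[:-1].concatenate([3]))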
Example 3: testRaggedPadDimensionErrors
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def testRaggedPadDimensionErrors(self):
  ragged_data = ragged_factory_ops.constant([[1, 2], [3, 4]])
  self.assertRaisesRegexp(
      errors.InvalidArgumentError,
      'axis must be between -k <= axis <= -1 OR 0 <= axis < k',
      pad_along_dimension_op.pad_along_dimension,
      ragged_data,
      left_pad=[0],
      axis=2)
  self.assertRaisesRegexp(
      ValueError,
      r'Shapes .* are incompatible',
      pad_along_dimension_op.pad_along_dimension,
      ragged_data,
      axis=1,
      left_pad=ragged_data)
  if not context.executing_eagerly():
    self.assertRaisesRegexp(
        ValueError, 'axis may not be negative if data is ragged '
        'and data.ndims is not statically known.',
        pad_along_dimension_op.pad_along_dimension,
        ragged_tensor.RaggedTensor.from_tensor(
            array_ops.placeholder_with_default([[1, 2], [3, 4]], shape=None)),
        left_pad=[0],
        axis=-1)
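The eager guard in this test exists because the error it provokes ("ndims is not statically known") requires a tensor whose static shape is unknown, which only placeholders in graph mode can produce. Under eager execution every tensor carries a fully-defined static shape, as this small sketch (ours, not from the test) illustrates:

import tensorflow as tf
from tensorflow.python.eager import context

if context.executing_eagerly():
  t = tf.constant([[1, 2], [3, 4]])
  print(t.shape.is_fully_defined())  # True: eager tensors always carry
                                     # fully-defined static shapes.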
Example 4: _create_slots
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)
  if StrictVersion(tf.__version__) >= StrictVersion('1.10.0'):
    graph = None if context.executing_eagerly() else ops.get_default_graph()
  else:
    graph = ops.get_default_graph()
  create_new = self._get_non_slot_variable("beta1_power", graph) is None
  # in_graph_mode() is the older API spelling of `not executing_eagerly()`.
  if not create_new and context.in_graph_mode():
    create_new = (self._get_non_slot_variable("beta1_power", graph).graph
                  is not first_var.graph)
  if create_new:
    self._create_non_slot_variable(initial_value=self._beta1,
                                   name="beta1_power",
                                   colocate_with=first_var)
    self._create_non_slot_variable(initial_value=self._beta2,
                                   name="beta2_power",
                                   colocate_with=first_var)
    self._create_non_slot_variable(initial_value=self._gamma,
                                   name="gamma_multi",
                                   colocate_with=first_var)
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
# 需要导入模块: from tensorflow.python.eager import context [as 别名]
# 或者: from tensorflow.python.eager.context import executing_eagerly [as 别名]
def _create_slots(self, var_list):
first_var = min(var_list, key=lambda x: x.name)
graph = None if context.executing_eagerly() else ops.get_default_graph()
create_new = self._get_non_slot_variable("beta1_power", graph) is None
if not create_new and context.in_graph_mode():
create_new = (self._get_non_slot_variable("beta1_power", graph).graph is not first_var.graph)
if create_new:
self._create_non_slot_variable(initial_value=self._beta1,
name="beta1_power",
colocate_with=first_var)
self._create_non_slot_variable(initial_value=self._beta2,
name="beta2_power",
colocate_with=first_var)
self._create_non_slot_variable(initial_value=self._gamma,
name="gamma_multi",
colocate_with=first_var)
# Create slots for the first and second moments.
for v in var_list :
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
self._zeros_slot(v, "vhat", self._name)
Example 6: _finish
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def _finish(self, update_ops, name_scope):
  # Update the power accumulators.
  with ops.control_dependencies(update_ops):
    graph = None if context.executing_eagerly() else ops.get_default_graph()
    beta1_power = self._get_non_slot_variable("beta1_power", graph=graph)
    beta2_power = self._get_non_slot_variable("beta2_power", graph=graph)
    gamma_multi = self._get_non_slot_variable("gamma_multi", graph=graph)
    with ops.colocate_with(beta1_power):
      update_beta1 = beta1_power.assign(
          beta1_power * self._beta1_t, use_locking=self._use_locking)
      update_beta2 = beta2_power.assign(
          beta2_power * self._beta2_t, use_locking=self._use_locking)
      update_gamma = gamma_multi.assign(
          gamma_multi + self._gamma_t, use_locking=self._use_locking)
  return control_flow_ops.group(
      *update_ops + [update_beta1, update_beta2, update_gamma],
      name=name_scope)
Example 7: __init__
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def __init__(self, layer, data_init=False, **kwargs):
  if not isinstance(layer, Layer):
    raise ValueError(
        "Please initialize `WeightNorm` layer with a "
        "`Layer` instance. You passed: {input}".format(input=layer))
  if not context.executing_eagerly() and data_init:
    raise NotImplementedError(
        "Data dependent variable initialization is not available for "
        "graph execution")
  self.initialized = True
  if data_init:
    self.initialized = False
  self.layer_depth = None
  self.norm_axes = None
  super(WeightNorm, self).__init__(layer, **kwargs)
  self._track_trackable(layer, name="layer")
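A hypothetical construction sketch (the wrapped Dense layer is our choice, not from the original repository): data_init=True defers initialization to the first call, which this implementation supports only under eager execution, hence the executing_eagerly check in the constructor.

import tensorflow as tf

dense = tf.keras.layers.Dense(16)
wrapped = WeightNorm(dense, data_init=False)  # data_init=True needs eager mode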
Example 8: _create_slots
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def _create_slots(self, var_list):
  # Note: first_var and graph are computed but unused in this variant.
  first_var = min(var_list, key=lambda x: x.name)
  graph = None if context.executing_eagerly() else ops.get_default_graph()
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example 9: _get_beta_weights
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def _get_beta_weights(self):
  with ops.init_scope():
    if context.executing_eagerly():
      graph = None
    else:
      graph = ops.get_default_graph()
    return (
        self._get_non_slot_variable("beta1_weight", graph=graph),
        self._get_non_slot_variable("beta2_weight", graph=graph),
    )
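This idiom recurs in Examples 11, 14, and 15 below. ops.init_scope() lifts execution out of any function-building graph (for instance, inside tf.function) before the eager check runs, so the non-slot variables are looked up in the context where they were created. A condensed sketch of the shared idiom (the helper name is ours):

from tensorflow.python.eager import context
from tensorflow.python.framework import ops

def _lookup_graph():
  with ops.init_scope():
    # Inside init_scope, eager mode is restored if the outermost context is
    # eager; otherwise the outer graph becomes the default graph again.
    return None if context.executing_eagerly() else ops.get_default_graph()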
Example 10: testErrors
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def testErrors(self):
  t = [10, 20, 30, 40, 50]
  with self.assertRaisesRegexp(TypeError, 'contains must be bool.'):
    pointer_ops.span_overlaps(t, t, t, t, contains='x')
  with self.assertRaisesRegexp(TypeError, 'contained_by must be bool.'):
    pointer_ops.span_overlaps(t, t, t, t, contained_by='x')
  with self.assertRaisesRegexp(TypeError, 'partial_overlap must be bool.'):
    pointer_ops.span_overlaps(t, t, t, t, partial_overlap='x')
  with self.assertRaisesRegexp(
      TypeError, 'source_start, source_limit, target_start, and '
      'target_limit must all have the same dtype'):
    pointer_ops.span_overlaps(t, t, t, [1.0, 2.0, 3.0, 4.0, 5.0])
  with self.assertRaisesRegexp(ValueError,
                               r'Shapes \(5,\) and \(4,\) are incompatible'):
    pointer_ops.span_overlaps(t, t[:4], t, t)
  with self.assertRaisesRegexp(ValueError,
                               r'Shapes \(4,\) and \(5,\) are incompatible'):
    pointer_ops.span_overlaps(t, t, t[:4], t)
  with self.assertRaisesRegexp(
      ValueError, r'Shapes \(1, 5\) and \(5,\) must have the same rank'):
    pointer_ops.span_overlaps([t], [t], t, t)
  if not context.executing_eagerly():
    with self.assertRaisesRegexp(
        ValueError, 'For ragged inputs, the shape.ndims of at least one '
        'span tensor must be statically known.'):
      x = ragged_tensor.RaggedTensor.from_row_splits(
          array_ops.placeholder(dtypes.int32), [0, 3, 8])
      pointer_ops.span_overlaps(x, x, x, x)
  with self.assertRaisesRegexp(
      ValueError, 'Span tensors must all have the same ragged_rank'):
    a = [[10, 20, 30], [40, 50, 60]]
    pointer_ops.span_overlaps(a, a, a, ragged_factory_ops.constant(a))
  with self.assertRaisesRegexp(
      errors.InvalidArgumentError,
      'Mismatched ragged shapes for batch dimensions'):
    rt1 = ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5]]])
    rt2 = ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5], [6]]])
    pointer_ops.span_overlaps(rt1, rt1, rt2, rt2)
Example 11: _get_beta_accumulators
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def _get_beta_accumulators(self):
  with ops.init_scope():
    if context.executing_eagerly():
      graph = None
    else:
      graph = ops.get_default_graph()
    return (self._get_non_slot_variable("step", graph=graph),
            self._get_non_slot_variable("beta1_power", graph=graph),
            self._get_non_slot_variable("beta2_power", graph=graph))
Example 12: __op
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def __op(self, kernel, inputs, shape):
  if len(shape) > 2:
    # Broadcasting is required for the inputs.
    outputs = tf.tensordot(inputs, kernel, [[len(shape) - 1], [0]])
    # Reshape the output back to the original ndim of the input.
    # For TF > 1.5.0; older versions used `context.in_graph_mode()` here.
    if not context.executing_eagerly():
      output_shape = shape[:-1] + [self.units]
      outputs.set_shape(output_shape)
  else:
    outputs = tf.matmul(inputs, kernel)
  return outputs
Example 13: call
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def call(self, inputs):
  inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
  ndim = self._input_rank
  if self.rectify:
    inputs = nn.relu(inputs)

  # Compute normalization pool.
  if ndim == 2:
    norm_pool = math_ops.matmul(math_ops.square(inputs), self.gamma)
    norm_pool = nn.bias_add(norm_pool, self.beta)
  elif self.data_format == "channels_last" and ndim <= 5:
    shape = self.gamma.shape.as_list()
    gamma = array_ops.reshape(self.gamma, (ndim - 2) * [1] + shape)
    norm_pool = nn.convolution(math_ops.square(inputs), gamma, "VALID")
    norm_pool = nn.bias_add(norm_pool, self.beta)
  else:  # generic implementation
    # This puts channels in the last dimension regardless of input.
    norm_pool = math_ops.tensordot(
        math_ops.square(inputs), self.gamma, [[self._channel_axis()], [0]])
    norm_pool += self.beta
    if self.data_format == "channels_first":
      # Return to channels_first format if necessary.
      axes = list(range(ndim - 1))
      axes.insert(1, ndim - 1)
      norm_pool = array_ops.transpose(norm_pool, axes)

  if self.inverse:
    norm_pool = math_ops.sqrt(norm_pool)
  else:
    norm_pool = math_ops.rsqrt(norm_pool)
  outputs = inputs * norm_pool

  if not context.executing_eagerly():
    outputs.set_shape(self.compute_output_shape(inputs.shape))
  return outputs
Example 14: _get_la_step_accumulators
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def _get_la_step_accumulators(self):
  with ops.init_scope():
    if context.executing_eagerly():
      graph = None
    else:
      graph = ops.get_default_graph()
    return self._get_non_slot_variable("la_step", graph=graph)
Example 15: _get_beta_accumulators
# Required import: from tensorflow.python.eager import context [as alias]
# Or: from tensorflow.python.eager.context import executing_eagerly [as alias]
def _get_beta_accumulators(self):
  with ops.init_scope():
    if context.executing_eagerly():
      graph = None
    else:
      graph = ops.get_default_graph()
    return (self._get_non_slot_variable("beta1_power", graph=graph),
            self._get_non_slot_variable("beta2_power", graph=graph))