This article collects typical usage examples of the Python function tensorflow.python.ops.control_flow_ops.with_dependencies. If you are wondering what exactly with_dependencies does and how to use it, the curated code examples below should help.
Shown below are 15 code examples of with_dependencies, sorted by popularity by default.
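Before the examples, here is a minimal self-contained sketch of what with_dependencies does, assuming a TF 1.x graph-mode session: the ops listed in dependencies are guaranteed to run before the value of the returned tensor is produced.

import tensorflow as tf
from tensorflow.python.ops import control_flow_ops

v = tf.Variable(1.0)
# `out` has the same value as the constant, but fetching it also forces
# v's initializer to run first.
out = control_flow_ops.with_dependencies([v.initializer], tf.constant(2.0))

with tf.Session() as sess:
  print(sess.run(out))  # 2.0
  print(sess.run(v))    # 1.0 -- initialized as a side effect of fetching out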
Example 1: testIndexedSlices
def testIndexedSlices(self):
  for v1_first in [True, False]:
    with self.test_session():
      v1 = tf.Variable(
          np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
      v1_at_1 = tf.IndexedSlices(
          control_flow_ops.with_dependencies([v1.initializer], v1.ref()),
          tf.constant([1]))

      v2 = tf.Variable(
          np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(np.float32))
      v2_at_1 = tf.IndexedSlices(
          control_flow_ops.with_dependencies([v2.initializer], v2.ref()),
          tf.constant([1]))

      st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
      g1 = tf.gather(st1.values, st1.indices)
      g2 = tf.gather(st2.values, st2.indices)

      # v1 is not initialized.
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        v1.eval()

      # v2 is not initialized.
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        v2.eval()

      if v1_first:
        # Getting g1 initializes v2.
        self.assertAllClose([[10.0, 11.0]], g1.eval())
        self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
                            v2.eval())
      else:
        # Getting g2 initializes v1.
        self.assertAllClose([[10.1, 11.1]], g2.eval())
        self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
                            v1.eval())
Example 2: testWithTensorDependencies
def testWithTensorDependencies(self):
  with self.test_session():
    v = tf.Variable(0.0)
    c1 = tf.constant(10)
    c2 = tf.constant(20)

    # c1_with_init_v depends on the init op for v.
    c1_with_init_v = control_flow_ops.with_dependencies(
        name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
    # c2_with_c1_dep depends on the value of c1_with_init_v.
    c2_with_c1_dep = control_flow_ops.with_dependencies(
        name="c2_with_c1_dep", output_tensor=c2,
        dependencies=[c1_with_init_v])

    # Fetching v directly will result in an uninitialized error.
    with self.assertRaisesOpError("Attempting to use uninitialized value"):
      v.eval()

    # Get the value of 'c2_with_c1_dep', which should cause 'v'
    # to be initialized.
    self.assertAllEqual(20, c2_with_c1_dep.eval())

    # Ensure that 'v' is initialized.
    self.assertAllClose(0.0, v.eval())
Example 3: _check_shapes_dynamic
def _check_shapes_dynamic(self, operator, v, diag):
  """Return (v, diag) with Assert dependencies, which check shape."""
  checks = []
  with ops.op_scope([operator, v, diag], 'check_shapes'):
    s_v = array_ops.shape(v)
    r_op = operator.rank()
    r_v = array_ops.rank(v)
    if diag is not None:
      s_d = array_ops.shape(diag)
      r_d = array_ops.rank(diag)

    # Check tensor rank.
    checks.append(check_ops.assert_rank(v, r_op))
    if diag is not None:
      checks.append(check_ops.assert_rank(diag, r_op - 1))

    # Check batch shape.
    checks.append(check_ops.assert_equal(
        operator.batch_shape(), array_ops.slice(s_v, [0], [r_v - 2])))
    if diag is not None:
      checks.append(check_ops.assert_equal(
          operator.batch_shape(), array_ops.slice(s_d, [0], [r_d - 1])))

    # Check event shape.
    checks.append(check_ops.assert_equal(
        operator.vector_space_dimension(), array_ops.gather(s_v, r_v - 2)))
    if diag is not None:
      checks.append(check_ops.assert_equal(
          array_ops.gather(s_v, r_v - 1), array_ops.gather(s_d, r_d - 1)))

    v = control_flow_ops.with_dependencies(checks, v)
    if diag is not None:
      diag = control_flow_ops.with_dependencies(checks, diag)
    return v, diag
Example 4: testTensors
def testTensors(self):
  for v1_first in [True, False]:
    with self.test_session():
      v1 = tf.Variable([1.0])
      add1 = tf.add(
          control_flow_ops.with_dependencies([v1.initializer], v1.ref()),
          2.0)
      v2 = tf.Variable([10.0])
      add2 = tf.add(
          control_flow_ops.with_dependencies([v2.initializer], v2.ref()),
          20.0)
      t1, _, t2 = control_flow_ops.tuple([add1, None, add2])

      # v1 is not initialized.
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        v1.eval()

      # v2 is not initialized.
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        v2.eval()

      if v1_first:
        # Getting t1 initializes v2.
        self.assertAllClose([3.0], t1.eval())
        self.assertAllClose([10.0], v2.eval())
      else:
        # Getting t2 initializes v1.
        self.assertAllClose([30.0], t2.eval())
        self.assertAllClose([1.0], v1.eval())
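The point of this test (and of Example 1) is the interaction between with_dependencies and control_flow_ops.tuple: tuple returns tensors with the same values as its inputs, but each output waits until all inputs have been computed, so fetching t1 also triggers add2 and hence v2's initializer. A minimal sketch of that behavior, assuming the same pre-1.0 TF API these tests use (Variable.ref()):

a = tf.Variable(1.0)
b = tf.Variable(2.0)
a_dep = control_flow_ops.with_dependencies([a.initializer], a.ref())
b_dep = control_flow_ops.with_dependencies([b.initializer], b.ref())
# tuple() makes each output wait for *all* inputs, so fetching either
# output runs both initializers.
a_t, b_t = control_flow_ops.tuple([a_dep, b_dep])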
Example 5: kl_multivariate_normal
def kl_multivariate_normal(loc_one, scale_one, loc_two=0.0, scale_two=1.0):
  """Calculate the KL of multivariate normal distributions with
  diagonal covariances.

  Parameters
  ----------
  loc_one : tf.Tensor
      A 0-D tensor, 1-D tensor of length n, or 2-D tensor of shape
      M x n, where each row represents the mean of an n-dimensional
      Gaussian.
  scale_one : tf.Tensor
      A tensor of same shape as ``loc_one``, representing the
      standard deviation.
  loc_two : tf.Tensor, optional
      A tensor of same shape as ``loc_one``, representing the
      mean of another Gaussian.
  scale_two : tf.Tensor, optional
      A tensor of same shape as ``loc_one``, representing the
      standard deviation of another Gaussian.

  Returns
  -------
  tf.Tensor
      For 0-D or 1-D tensor inputs, outputs the 0-D tensor
      ``KL( N(z; loc_one, scale_one) || N(z; loc_two, scale_two) )``.
      For 2-D tensor inputs, outputs the 1-D tensor
      ``[KL( N(z; loc_one[m,:], scale_one[m,:]) || N(z; loc_two[m,:], scale_two[m,:]) )]_{m=1}^M``.

  Raises
  ------
  InvalidArgumentError
      If the location variables have Inf or NaN values, or if the scale
      variables are not positive.
  """
  dependencies = [tf.verify_tensor_all_finite(loc_one, msg=''),
                  tf.verify_tensor_all_finite(loc_two, msg=''),
                  tf.assert_positive(scale_one),
                  tf.assert_positive(scale_two)]
  loc_one = control_flow_ops.with_dependencies(dependencies, loc_one)
  scale_one = control_flow_ops.with_dependencies(dependencies, scale_one)
  loc_one = tf.cast(loc_one, tf.float32)
  scale_one = tf.cast(scale_one, tf.float32)

  if loc_two == 0.0 and scale_two == 1.0:
    # With default arguments, we can avoid some intermediate computation.
    out = tf.square(scale_one) + tf.square(loc_one) - \
        1.0 - 2.0 * tf.log(scale_one)
  else:
    loc_two = control_flow_ops.with_dependencies(dependencies, loc_two)
    scale_two = control_flow_ops.with_dependencies(dependencies, scale_two)
    loc_two = tf.cast(loc_two, tf.float32)
    scale_two = tf.cast(scale_two, tf.float32)
    out = tf.square(scale_one / scale_two) + \
        tf.square((loc_two - loc_one) / scale_two) - \
        1.0 + 2.0 * tf.log(scale_two) - 2.0 * tf.log(scale_one)

  if len(out.get_shape()) <= 1:  # scalar or vector
    return 0.5 * tf.reduce_sum(out)
  else:  # matrix
    return 0.5 * tf.reduce_sum(out, 1)
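A hypothetical usage sketch of the function above (the tensor values are illustrative): with the default second distribution N(0, I), the result reduces to the familiar closed form 0.5 * sum(scale^2 + loc^2 - 1 - 2*log(scale)).

loc = tf.constant([0.0, 1.0])
scale = tf.constant([1.0, 2.0])
# KL( N(loc, diag(scale^2)) || N(0, I) ); a 0-D tensor for 1-D inputs.
kl = kl_multivariate_normal(loc, scale)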
Example 6: _verify_input
def _verify_input(tensor_list, labels, probs_list):
  """Verify that batched inputs are well-formed."""
  checked_probs_list = []
  for probs in probs_list:
    # Since number of classes shouldn't change at runtime, probabilities shape
    # should be fully defined.
    probs.get_shape().assert_is_fully_defined()

    # Probabilities must be 1D.
    probs.get_shape().assert_has_rank(1)

    # Probabilities must be nonnegative and sum to one.
    tol = 1e-6
    prob_sum = math_ops.reduce_sum(probs)
    checked_probs = control_flow_ops.with_dependencies([
        check_ops.assert_non_negative(probs),
        check_ops.assert_less(prob_sum, 1.0 + tol),
        check_ops.assert_less(1.0 - tol, prob_sum)
    ], probs)
    checked_probs_list.append(checked_probs)

  # All probabilities should be the same length.
  prob_length = checked_probs_list[0].get_shape().num_elements()
  for checked_prob in checked_probs_list:
    if checked_prob.get_shape().num_elements() != prob_length:
      raise ValueError('Probability parameters must have the same length.')

  # Labels tensor should only have batch dimension.
  labels.get_shape().assert_has_rank(1)

  for tensor in tensor_list:
    # Data tensor should have a batch dimension.
    shape = tensor.get_shape().with_rank_at_least(1)

    # Data and label batch dimensions must be compatible.
    tensor_shape.dimension_at_index(shape, 0).assert_is_compatible_with(
        labels.get_shape()[0])

  # Data and labels must have the same, strictly positive batch size. Since we
  # can't assume we know the batch size at graph creation, add runtime checks.
  labels_batch_size = array_ops.shape(labels)[0]
  lbl_assert = check_ops.assert_positive(labels_batch_size)

  # Make each tensor depend on its own checks.
  labels = control_flow_ops.with_dependencies([lbl_assert], labels)
  tensor_list = [
      control_flow_ops.with_dependencies([
          lbl_assert,
          check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)
      ], x) for x in tensor_list
  ]

  # Label's classes must be integers 0 <= x < num_classes.
  labels = control_flow_ops.with_dependencies([
      check_ops.assert_integer(labels), check_ops.assert_non_negative(labels),
      check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))
  ], labels)

  return tensor_list, labels, checked_probs_list
Example 7: _maybe_attach_assertion
def _maybe_attach_assertion(x):
  if not validate_args:
    return x
  if assert_positive:
    return control_flow_ops.with_dependencies([
        tf.assert_positive(x, message="diagonal part must be positive"),
    ], x)
  return control_flow_ops.with_dependencies([
      tf.assert_none_equal(
          x, tf.zeros([], x.dtype), message="diagonal part must be non-zero")
  ], x)
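validate_args and assert_positive here are free variables captured from the enclosing scope; this helper is typically nested inside a factory that builds a scale tensor. A hypothetical sketch of that wiring (the factory name and signature are illustrative, not from the original source):

def make_diag_scale(diag, validate_args=False, assert_positive=False):
  """Hypothetical factory showing how the closure above is typically used."""
  diag = tf.convert_to_tensor(diag)

  def _maybe_attach_assertion(x):
    if not validate_args:
      return x
    if assert_positive:
      return control_flow_ops.with_dependencies(
          [tf.assert_positive(x, message="diagonal part must be positive")], x)
    return control_flow_ops.with_dependencies(
        [tf.assert_none_equal(x, tf.zeros([], x.dtype),
                              message="diagonal part must be non-zero")], x)

  return _maybe_attach_assertion(diag)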
Example 8: rbf
def rbf(X, X2=None, lengthscale=1.0, variance=1.0):
  """Radial basis function kernel, also known as the squared
  exponential or exponentiated quadratic. It is defined as

  $k(x, x') = \sigma^2 \exp\Big(
      -\frac{1}{2} \sum_{d=1}^D \frac{1}{\ell_d^2} (x_d - x'_d)^2 \Big)$

  for output variance $\sigma^2$ and lengthscale $\ell_d$.

  The kernel is evaluated over all pairs of rows, `k(X[i, ], X2[j, ])`.
  If `X2` is not specified, then it evaluates over all pairs
  of rows in `X`, `k(X[i, ], X[j, ])`. The output is a matrix
  where each entry (i, j) is the kernel over the ith and jth rows.

  Args:
    X: tf.Tensor.
      N x D matrix of N data points each with D features.
    X2: tf.Tensor.
      N x D matrix of N data points each with D features.
    lengthscale: tf.Tensor.
      Lengthscale parameter, a positive scalar or D-dimensional vector.
    variance: tf.Tensor.
      Output variance parameter, a positive scalar.

  #### Examples

  ```python
  X = tf.random_normal([100, 5])
  K = ed.rbf(X)
  assert K.shape == (100, 100)
  ```
  """
  lengthscale = tf.convert_to_tensor(lengthscale)
  variance = tf.convert_to_tensor(variance)
  dependencies = [tf.assert_positive(lengthscale),
                  tf.assert_positive(variance)]
  lengthscale = control_flow_ops.with_dependencies(dependencies, lengthscale)
  variance = control_flow_ops.with_dependencies(dependencies, variance)

  X = tf.convert_to_tensor(X)
  X = X / lengthscale
  Xs = tf.reduce_sum(tf.square(X), 1)
  if X2 is None:
    X2 = X
    X2s = Xs
  else:
    X2 = tf.convert_to_tensor(X2)
    X2 = X2 / lengthscale
    X2s = tf.reduce_sum(tf.square(X2), 1)

  square = tf.reshape(Xs, [-1, 1]) + tf.reshape(X2s, [1, -1]) - \
      2 * tf.matmul(X, X2, transpose_b=True)
  output = variance * tf.exp(-square / 2)
  return output
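A quick sanity check of the kernel above, assuming the rbf defined here: each point is at zero distance from itself, so the diagonal of k(X, X) equals the output variance.

X = tf.random_normal([10, 3])
K = rbf(X, variance=2.0)
# The diagonal should evaluate to 2.0 (up to floating-point error).
diag = tf.diag_part(K)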
Example 9: _check_domain_range_possibly_add_asserts
def _check_domain_range_possibly_add_asserts(self):
  """Static check of init args `num_rows` and `num_columns`; possibly add
  asserts."""
  # Possibly add asserts.
  if self._assert_proper_shapes:
    self._num_rows = control_flow_ops.with_dependencies([
        check_ops.assert_rank(
            self._num_rows,
            0,
            message="Argument num_rows must be a 0-D Tensor."),
        check_ops.assert_non_negative(
            self._num_rows,
            message="Argument num_rows must be non-negative."),
    ], self._num_rows)
    self._num_columns = control_flow_ops.with_dependencies([
        check_ops.assert_rank(
            self._num_columns,
            0,
            message="Argument num_columns must be a 0-D Tensor."),
        check_ops.assert_non_negative(
            self._num_columns,
            message="Argument num_columns must be non-negative."),
    ], self._num_columns)

  # Static checks.
  if not self._num_rows.dtype.is_integer:
    raise TypeError("Argument num_rows must be integer type. Found:"
                    " %s" % self._num_rows)
  if not self._num_columns.dtype.is_integer:
    raise TypeError("Argument num_columns must be integer type. Found:"
                    " %s" % self._num_columns)

  num_rows_static = self._num_rows_static
  num_columns_static = self._num_columns_static
  if num_rows_static is not None:
    if num_rows_static.ndim != 0:
      raise ValueError("Argument num_rows must be a 0-D Tensor. Found:"
                       " %s" % num_rows_static)
    if num_rows_static < 0:
      raise ValueError("Argument num_rows must be non-negative. Found:"
                       " %s" % num_rows_static)
  if num_columns_static is not None:
    if num_columns_static.ndim != 0:
      raise ValueError("Argument num_columns must be a 0-D Tensor. Found:"
                       " %s" % num_columns_static)
    if num_columns_static < 0:
      raise ValueError("Argument num_columns must be non-negative. Found:"
                       " %s" % num_columns_static)
Example 10: kl_multivariate_normal
def kl_multivariate_normal(loc_one, scale_one, loc_two=0.0, scale_two=1.0):
  """Calculate the KL of multivariate normal distributions with
  diagonal covariances.

  Parameters
  ----------
  loc_one : tf.Tensor
      n-dimensional vector, or M x n-dimensional matrix where each
      row represents the mean of an n-dimensional Gaussian.
  scale_one : tf.Tensor
      n-dimensional vector, or M x n-dimensional matrix where each
      row represents the standard deviation of an n-dimensional Gaussian.
  loc_two : tf.Tensor, optional
      n-dimensional vector, or M x n-dimensional matrix where each
      row represents the mean of an n-dimensional Gaussian.
  scale_two : tf.Tensor, optional
      n-dimensional vector, or M x n-dimensional matrix where each
      row represents the standard deviation of an n-dimensional Gaussian.

  Returns
  -------
  tf.Tensor
      For scalar or vector inputs, outputs the scalar
      ``KL( N(z; loc_one, scale_one) || N(z; loc_two, scale_two) )``.
      For matrix inputs, outputs the vector
      ``[KL( N(z; loc_one[m,:], scale_one[m,:]) || N(z; loc_two[m,:], scale_two[m,:]) )]_{m=1}^M``.

  Raises
  ------
  InvalidArgumentError
      If the location variables have Inf or NaN values, or if the scale
      variables are not positive.
  """
  dependencies = [tf.verify_tensor_all_finite(loc_one, msg=''),
                  tf.verify_tensor_all_finite(loc_two, msg=''),
                  tf.assert_positive(scale_one),
                  tf.assert_positive(scale_two)]
  loc_one = control_flow_ops.with_dependencies(dependencies, loc_one)
  scale_one = control_flow_ops.with_dependencies(dependencies, scale_one)
  # Check for the default arguments *before* wrapping loc_two and scale_two:
  # with_dependencies converts them to Tensors, and comparing a Tensor to a
  # Python float with == is always False, so the shortcut would never fire.
  if loc_two == 0.0 and scale_two == 1.0:
    return 0.5 * tf.reduce_sum(
        tf.square(scale_one) + tf.square(loc_one) -
        1.0 - 2.0 * tf.log(scale_one))
  else:
    loc_two = control_flow_ops.with_dependencies(dependencies, loc_two)
    scale_two = control_flow_ops.with_dependencies(dependencies, scale_two)
    return 0.5 * tf.reduce_sum(
        tf.square(scale_one / scale_two) +
        tf.square((loc_two - loc_one) / scale_two) -
        1.0 + 2.0 * tf.log(scale_two) - 2.0 * tf.log(scale_one), 1)
Example 11: setUpClass
@classmethod
def setUpClass(cls):
  cls._dump_root = tempfile.mkdtemp()

  cls._is_gpu_available = test.is_gpu_available()
  if cls._is_gpu_available:
    cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
  else:
    cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"

  with session.Session() as sess:
    x_init_val = np.array([5.0, 3.0])
    x_init = constant_op.constant(x_init_val, shape=[2])
    x = variables.Variable(x_init, name="control_deps/x")

    y = math_ops.add(x, x, name="control_deps/y")
    y = control_flow_ops.with_dependencies(
        [x], y, name="control_deps/ctrl_dep_y")

    z = math_ops.mul(x, y, name="control_deps/z")
    z = control_flow_ops.with_dependencies(
        [x, y], z, name="control_deps/ctrl_dep_z")

    x.initializer.run()

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls="file://%s" % cls._dump_root)

    # Invoke Session.run().
    run_metadata = config_pb2.RunMetadata()
    sess.run(z, options=run_options, run_metadata=run_metadata)

    debug_dump = debug_data.DebugDumpDir(
        cls._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Construct the analyzer.
    analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

    # Construct the handler registry.
    cls._registry = debugger_cli_common.CommandHandlerRegistry()

    # Register command handlers.
    cls._registry.register_command_handler(
        "node_info",
        analyzer.node_info,
        analyzer.get_help("node_info"),
        prefix_aliases=["ni"])
    cls._registry.register_command_handler(
        "list_inputs",
        analyzer.list_inputs,
        analyzer.get_help("list_inputs"),
        prefix_aliases=["li"])
    cls._registry.register_command_handler(
        "list_outputs",
        analyzer.list_outputs,
        analyzer.get_help("list_outputs"),
        prefix_aliases=["lo"])
Example 12: _initialize_variables
def _initialize_variables(self, data, initial_means=None):
  """Initializes variables.

  Args:
    data: a list of Tensors with data, each row is a new example.
    initial_means: a Tensor with a matrix of means.
  """
  first_shard = data[0]
  # Initialize means: num_classes X 1 X dimensions.
  if initial_means is not None:
    means = array_ops.expand_dims(initial_means, 1)
  else:
    # Sample data randomly.
    means = array_ops.expand_dims(
        _init_clusters_random(data, self._num_classes, self._random_seed), 1)

  # Initialize covariances.
  if self._covariance_type == FULL_COVARIANCE:
    cov = _covariance(first_shard, False) + self._min_var
    # A matrix per class, num_classes X dimensions X dimensions.
    covs = array_ops.tile(
        array_ops.expand_dims(cov, 0), [self._num_classes, 1, 1])
  elif self._covariance_type == DIAG_COVARIANCE:
    cov = _covariance(first_shard, True) + self._min_var
    # A diagonal per row, num_classes X dimensions.
    covs = array_ops.tile(
        array_ops.expand_dims(array_ops.diag_part(cov), 0),
        [self._num_classes, 1])

  with ops.colocate_with(self._cluster_centers_initialized):
    initialized = control_flow_ops.with_dependencies(
        [means, covs],
        array_ops.identity(self._cluster_centers_initialized))

  self._init_ops = []
  with ops.colocate_with(self._means):
    init_means = state_ops.assign(self._means, means, validate_shape=False)
    init_means = control_flow_ops.with_dependencies(
        [init_means],
        state_ops.assign(self._cluster_centers_initialized, True))
    self._init_ops.append(control_flow_ops.cond(initialized,
                                                control_flow_ops.no_op,
                                                lambda: init_means).op)
  with ops.colocate_with(self._covs):
    init_covs = state_ops.assign(self._covs, covs, validate_shape=False)
    init_covs = control_flow_ops.with_dependencies(
        [init_covs],
        state_ops.assign(self._cluster_centers_initialized, True))
    self._init_ops.append(control_flow_ops.cond(initialized,
                                                control_flow_ops.no_op,
                                                lambda: init_covs).op)
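Here with_dependencies serves two purposes: initialized only reads _cluster_centers_initialized after means and covs have been computed, and each assignment of _cluster_centers_initialized to True is gated on the corresponding assign op having run. Combined with cond(initialized, no_op, ...), this makes the initialization ops safe to run repeatedly.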
Example 13: _verify_input
def _verify_input(data, labels, probs_list):
  """Verify that batched inputs are well-formed."""
  checked_probs_list = []
  for probs in probs_list:
    # Probabilities must be able to be converted to non-object numpy array.
    np_probs = np.asarray(probs)
    if np_probs.dtype == np.dtype('object'):
      raise ValueError('Probabilities must be able to be converted to a numpy '
                       'array.')
    checked_probs_list.append(np_probs)

    # Probabilities must sum to one.
    # TODO(joelshor): Investigate whether logits should be passed instead of
    # probs.
    if not np.isclose(np.sum(probs), 1.0):
      raise ValueError('Probabilities must sum to one.')

  # All probabilities should be the same length.
  if not np.array_equiv([probs.shape for probs in checked_probs_list],
                        checked_probs_list[0].shape):
    raise ValueError('Probability parameters must have the same length.')

  # Labels tensor should only have batch dimension.
  labels.get_shape().assert_has_rank(1)

  # Data tensor should have a batch dimension.
  data_shape = data.get_shape().with_rank_at_least(1)

  # Data and label batch dimensions must be compatible.
  data_shape[0].assert_is_compatible_with(labels.get_shape()[0])

  # Data and labels must have the same, strictly positive batch size. Since we
  # can't assume we know the batch size at graph creation, add runtime checks.
  data_batch_size = array_ops.shape(data)[0]
  labels_batch_size = array_ops.shape(labels)[0]

  data = control_flow_ops.with_dependencies(
      [check_ops.assert_positive(data_batch_size),
       check_ops.assert_equal(data_batch_size, labels_batch_size)],
      data)

  # Label's classes must be integers 0 <= x < num_classes.
  labels = control_flow_ops.with_dependencies(
      [check_ops.assert_integer(labels),
       check_ops.assert_non_negative(labels),
       check_ops.assert_less(labels, math_ops.cast(len(probs), labels.dtype))],
      labels)

  return data, labels, checked_probs_list
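Compare this with Example 6, which accepts probabilities as tensors and validates them with runtime Assert ops. This variant requires probabilities that convert to a numpy array, so it can validate them once at graph-construction time and only needs with_dependencies for the batch-size and label-range checks that depend on runtime values.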
Example 14: _maybe_attach_assertion
def _maybe_attach_assertion(x):
  if not validate_args:
    return x
  if assert_positive:
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(
            array_ops.matrix_diag_part(x),
            message="diagonal part must be positive"),
    ], x)
  return control_flow_ops.with_dependencies([
      check_ops.assert_none_equal(
          array_ops.matrix_diag_part(x),
          array_ops.zeros([], x.dtype),
          message="diagonal part must be non-zero"),
  ], x)
Example 15: _maybe_attach_assertion
def _maybe_attach_assertion(x):
  if not validate_args:
    return x
  if assert_positive:
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(
            x, message="diagonal part must be positive"),
    ], x)
  # TODO(b/35157376): Use `assert_none_equal` once it exists.
  return control_flow_ops.with_dependencies([
      check_ops.assert_greater(
          math_ops.abs(x),
          array_ops.zeros([], x.dtype),
          message="diagonal part must be non-zero"),
  ], x)
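Examples 7, 14, and 15 appear to be snapshots of essentially the same helper at different points in time: Example 15 predates assert_none_equal (hence the TODO and the assert_greater(abs(x), 0) workaround), Example 14 applies the checks to matrix_diag_part(x) for a matrix-valued scale, and Example 7 uses tf.assert_none_equal directly on a diagonal vector.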