This article collects typical usage examples of the Python function tensorflow.python.ops.control_flow_ops.cond. If you have been wondering what cond does, how to call it, and what working code looks like, the curated examples here should help.
The following shows 15 code examples of the cond function, ordered by popularity.
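Before diving into the examples, here is a minimal self-contained sketch of the API itself, assuming TensorFlow 1.x graph mode (tf.cond is the public alias of control_flow_ops.cond). Both branches are passed as zero-argument callables that must return tensors with matching structure, and only the ops of the branch selected by the predicate are executed.

import tensorflow as tf

x = tf.constant(2)
y = tf.constant(5)

# Both branches must return tensors with the same structure and dtypes;
# only the ops of the selected branch run at session time.
result = tf.cond(tf.less(x, y),
                 lambda: tf.multiply(x, 17),  # taken here, since 2 < 5
                 lambda: tf.add(y, 23))

with tf.Session() as sess:
    print(sess.run(result))  # 34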
Example 1: testCondNested
def testCondNested(self):
  with context.graph_mode(), self.test_session():
    v = resource_variable_ops.ResourceVariable(1.0)
    variables.global_variables_initializer().run()
    p = array_ops.placeholder(dtype=dtypes.bool)
    q = array_ops.placeholder(dtype=dtypes.bool)
    with function.AutomaticControlDependencies() as c:

      def true_fn():
        v.assign(v + 1, name='true')
        return 1.0

      def false_fn():

        def inner_true_fn():
          v.assign(v * 2, name='false_true')
          return 2.0

        def inner_false_fn():
          v.assign(v * 3, name='false_false')
          return 3.0

        control_flow_ops.cond(q, inner_true_fn, inner_false_fn)
        return 1.0

      control_flow_ops.cond(p, true_fn, false_fn)
      with ops.name_scope('final'):
        val = v.read_value()
      val = c.mark_as_return(val)

    self.assertAllEqual(val.eval(feed_dict={p: False, q: False}), 3.0)
    self.assertAllEqual(val.eval(feed_dict={p: False, q: True}), 6.0)
    self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0)
    self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0)
Example 2: dense_make_stats_update
def dense_make_stats_update(is_active, are_buckets_ready, float_column,
                            quantile_buckets, example_partition_ids, gradients,
                            hessians, weights, empty_gradients, empty_hessians):
  """Updates the state for dense split handler."""
  empty_float = constant_op.constant_v1([], dtype=dtypes.float32)

  quantile_values, quantile_weights = control_flow_ops.cond(
      is_active[1],  # For the next layer, this handler is inactive.
      lambda: (float_column, weights),
      lambda: (empty_float, empty_float))

  def ready_inputs_fn():
    """Branch to execute when quantiles are ready."""
    quantized_feature = quantile_ops.quantiles([float_column], [],
                                               [quantile_buckets], [], [])
    quantized_feature = math_ops.cast(quantized_feature[0], dtypes.int64)
    quantized_feature = array_ops.squeeze(quantized_feature, axis=0)
    return (example_partition_ids, quantized_feature, gradients, hessians)

  def not_ready_inputs_fn():
    return (constant_op.constant_v1([], dtype=dtypes.int32),
            constant_op.constant_v1([[]], dtype=dtypes.int64, shape=[1, 2]),
            empty_gradients, empty_hessians)

  example_partition_ids, feature_ids, gradients, hessians = (
      control_flow_ops.cond(
          math_ops.logical_and(
              math_ops.logical_and(are_buckets_ready,
                                   array_ops.size(quantile_buckets) > 0),
              is_active[0]), ready_inputs_fn, not_ready_inputs_fn))
  return (quantile_values, quantile_weights, example_partition_ids, feature_ids,
          gradients, hessians)
Example 3: _rnn_step
def _rnn_step(time, sequence_length, min_sequence_length, max_sequence_length,
              zero_output, state, call_cell):
  """Calculate one step of a dynamic RNN minibatch.

  Returns an (output, state) pair conditioned on the sequence_lengths.
  The pseudocode is something like:

    if t >= max_sequence_length:
      return (zero_output, state)
    if t < min_sequence_length:
      return call_cell()

    # Selectively output zeros or output, old state or new state depending
    # on if we've finished calculating each row.
    new_output, new_state = call_cell()
    final_output = np.vstack([
        zero_output if time >= sequence_lengths[r] else new_output_r
        for r, new_output_r in enumerate(new_output)
    ])
    final_state = np.vstack([
        state[r] if time >= sequence_lengths[r] else new_state_r
        for r, new_state_r in enumerate(new_state)
    ])
    return (final_output, final_state)

  Arguments:
    time: Python int, the current time step
    sequence_length: int32 `Tensor` vector of size [batch_size]
    min_sequence_length: int32 `Tensor` scalar, min of sequence_length
    max_sequence_length: int32 `Tensor` scalar, max of sequence_length
    zero_output: `Tensor` vector of shape [output_size]
    state: `Tensor` matrix of shape [batch_size, state_size]
    call_cell: lambda returning tuple of (new_output, new_state) where
      new_output is a `Tensor` matrix of shape [batch_size, output_size]
      new_state is a `Tensor` matrix of shape [batch_size, state_size]

  Returns:
    A tuple of (final_output, final_state) as given by the pseudocode above:
      final_output is a `Tensor` matrix of shape [batch_size, output_size]
      final_state is a `Tensor` matrix of shape [batch_size, state_size]
  """
  # Step 1: determine whether we need to call_cell or not
  empty_update = lambda: (zero_output, state)
  state_shape = state.get_shape()
  output, new_state = control_flow_ops.cond(
      time < max_sequence_length, call_cell, empty_update)

  # Step 2: determine whether we need to copy through state and/or outputs
  existing_output_state = lambda: (output, new_state)

  def copy_through():
    # Use broadcasting select to determine which values should get
    # the previous state & zero output, and which values should get
    # a calculated state & output.
    copy_cond = (time >= sequence_length)
    return (math_ops.select(copy_cond, zero_output, output),
            math_ops.select(copy_cond, state, new_state))

  (output, state) = control_flow_ops.cond(
      time < min_sequence_length, existing_output_state, copy_through)

  output.set_shape(zero_output.get_shape())
  state.set_shape(state_shape)
  return (output, state)
Example 4: random_flip_left_right
def random_flip_left_right(image, bboxes, seed=None):
    """Randomly flip an image and its bounding boxes left-right."""
    def flip_bboxes(bboxes):
        """Flip bounding box coordinates."""
        bboxes = tf.stack([bboxes[:, 0], 1 - bboxes[:, 3],
                           bboxes[:, 2], 1 - bboxes[:, 1]], axis=-1)
        return bboxes

    # Random flip. TensorFlow implementation.
    with tf.name_scope('random_flip_left_right'):
        image = ops.convert_to_tensor(image, name='image')
        _Check3DImage(image, require_static=False)
        uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
        mirror_cond = math_ops.less(uniform_random, .5)
        # Flip image.
        result = control_flow_ops.cond(mirror_cond,
                                       lambda: array_ops.reverse_v2(image, [1]),
                                       lambda: image)
        # Flip bboxes.
        bboxes = control_flow_ops.cond(mirror_cond,
                                       lambda: flip_bboxes(bboxes),
                                       lambda: bboxes)
        return fix_image_flip_shape(image, result), bboxes
Example 5: remove_squeezable_dimensions
def remove_squeezable_dimensions(
    labels, predictions, expected_rank_diff=0, name=None):
  """Squeeze last dim if ranks differ from expected by exactly 1.

  In the common case where we expect shapes to match, `expected_rank_diff`
  defaults to 0, and we squeeze the last dimension of the larger rank if they
  differ by 1.

  But, for example, if `labels` contains class IDs and `predictions` contains 1
  probability per class, we expect `predictions` to have 1 more dimension than
  `labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze
  `labels` if `rank(predictions) - rank(labels) == 0`, and
  `predictions` if `rank(predictions) - rank(labels) == 2`.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
    name: Name of the op.

  Returns:
    Tuple of `labels` and `predictions`, possibly with last dim squeezed.
  """
  with ops.name_scope(name, 'remove_squeezable_dimensions',
                      [labels, predictions]):
    predictions = ops.convert_to_tensor(predictions)
    labels = ops.convert_to_tensor(labels)
    predictions_shape = predictions.get_shape()
    predictions_rank = predictions_shape.ndims
    labels_shape = labels.get_shape()
    labels_rank = labels_shape.ndims
    if (labels_rank is not None) and (predictions_rank is not None):
      # Use static rank.
      rank_diff = predictions_rank - labels_rank
      if rank_diff == expected_rank_diff + 1:
        predictions = array_ops.squeeze(predictions, [-1])
      elif rank_diff == expected_rank_diff - 1:
        labels = array_ops.squeeze(labels, [-1])
      return labels, predictions

    # Use dynamic rank.
    rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
    if (predictions_rank is None) or (
        predictions_shape.dims[-1].is_compatible_with(1)):
      predictions = control_flow_ops.cond(
          math_ops.equal(expected_rank_diff + 1, rank_diff),
          lambda: array_ops.squeeze(predictions, [-1]),
          lambda: predictions)
    if (labels_rank is None) or (
        labels_shape.dims[-1].is_compatible_with(1)):
      labels = control_flow_ops.cond(
          math_ops.equal(expected_rank_diff - 1, rank_diff),
          lambda: array_ops.squeeze(labels, [-1]),
          lambda: labels)
    return labels, predictions
Example 6: _get_chol_and_x_compatible_shape
def _get_chol_and_x_compatible_shape(self, x):
  """Return self.chol and x, (possibly) broadcast to compatible shape."""
  # x and chol are "compatible" if their shape matches except that the last two
  # dimensions of chol are [k, k], and the last two of x are [k, 1].
  # E.g. x.shape = [A, B, k, 1], and chol.shape = [A, B, k, k]
  # This is required for the batch_triangular_solve, which does not broadcast.

  # TODO(langmore) This broadcast replicates matrices unnecessarily! In the
  # case where
  #   x.shape = [M1,...,Mr, N1,...,Nb, k], and chol.shape = [N1,...,Nb, k, k]
  # (which is common if x was sampled), the front dimensions of x can be
  # "flipped" to the end, making
  #   x_flipped.shape = [N1,...,Nb, k, M1*...*Mr],
  # and this can be handled by the linear solvers. This is preferred, because
  # it does not replicate the matrix, or create any new data.

  # We assume x starts without the trailing singleton dimension, e.g.
  # x.shape = [B, k].
  chol = self._chol
  with ops.op_scope([x] + self.inputs, 'get_chol_and_x_compatible_shape'):
    # If we determine statically that shapes match, we're done.
    if x.get_shape() == chol.get_shape()[:-1]:
      x_expanded = array_ops.expand_dims(x, -1)
      return chol, x_expanded

    # Dynamic check if shapes match or not.
    vector_shape = self.vector_shape()  # Shape of chol minus last dim.
    are_same_rank = math_ops.equal(
        array_ops.rank(x), array_ops.rank(vector_shape))

    def shapes_match_if_same_rank():
      return math_ops.reduce_all(math_ops.equal(
          array_ops.shape(x), vector_shape))

    shapes_match = control_flow_ops.cond(are_same_rank,
                                         shapes_match_if_same_rank,
                                         lambda: ops.convert_to_tensor(False))

    # Make tensors (never instantiated) holding the broadcast shape.
    # matrix_bcast_dummy is the shape we will broadcast chol to.
    matrix_bcast_dummy = chol + array_ops.expand_dims(x, -1)
    # vector_bcast_dummy is the shape we will bcast x to, before we expand it.
    chol_minus_last_dim = math_ops.reduce_sum(chol, reduction_indices=[-1])
    vector_bcast_dummy = x + chol_minus_last_dim

    chol_bcast = chol + array_ops.zeros_like(matrix_bcast_dummy)
    x_bcast = x + array_ops.zeros_like(vector_bcast_dummy)

    chol_result = control_flow_ops.cond(shapes_match, lambda: chol,
                                        lambda: chol_bcast)
    chol_result.set_shape(matrix_bcast_dummy.get_shape())
    x_result = control_flow_ops.cond(shapes_match, lambda: x, lambda: x_bcast)
    x_result.set_shape(vector_bcast_dummy.get_shape())
    x_expanded = array_ops.expand_dims(x_result, -1)
    return chol_result, x_expanded
Example 7: test_none
def test_none(self):
  fn_none = lambda: None
  fn_tensor = lambda: constant_op.constant(1)

  with self.assertRaises(ValueError):
    control_flow_ops.cond(constant_op.constant(True), fn_none, fn_tensor)

  with self.assertRaises(ValueError):
    control_flow_ops.cond(constant_op.constant(True), fn_tensor, fn_none)
Example 8: testCondContext
def testCondContext(self):
  with self.test_session() as sess:
    x = constant_op.constant(2)
    y = constant_op.constant(5)
    control_flow_ops.cond(
        math_ops.less(x, y), lambda: math_ops.multiply(x, 17),
        lambda: math_ops.add(y, 23))
    for op in sess.graph.get_operations():
      c = op._get_control_flow_context()
      if c:
        compare.ProtoEq(
            c.to_proto(),
            control_flow_ops.CondContext.from_proto(c.to_proto()).to_proto())
Example 9: body
def body(i, prev_c, prev_h, actions, log_probs):
  # pylint: disable=g-long-lambda
  signal = control_flow_ops.cond(
      math_ops.equal(i, 0),
      lambda: array_ops.tile(device_go_embedding,
                             [self.hparams.num_children, 1]),
      lambda: embedding_ops.embedding_lookup(device_embeddings,
                                             actions.read(i - 1))
  )
  if self.hparams.keep_prob is not None:
    signal = nn_ops.dropout(signal, self.hparams.keep_prob)
  next_c, next_h = lstm(signal, prev_c, prev_h, w_lstm, forget_bias)
  query = math_ops.matmul(next_h, attn_w_2)
  query = array_ops.reshape(
      query, [self.hparams.num_children, 1, self.hparams.hidden_size])
  query = math_ops.tanh(query + attn_mem)
  query = array_ops.reshape(query, [
      self.hparams.num_children * self.num_groups, self.hparams.hidden_size
  ])
  query = math_ops.matmul(query, attn_v)
  query = array_ops.reshape(query,
                            [self.hparams.num_children, self.num_groups])
  query = nn_ops.softmax(query)
  query = array_ops.reshape(query,
                            [self.hparams.num_children, self.num_groups, 1])
  query = math_ops.reduce_sum(attn_mem * query, axis=1)
  query = array_ops.concat([next_h, query], axis=1)
  logits = math_ops.matmul(query, device_softmax)
  logits /= self.hparams.temperature
  if self.hparams.tanh_constant > 0:
    logits = math_ops.tanh(logits) * self.hparams.tanh_constant
  if self.hparams.logits_std_noise > 0:
    num_in_logits = math_ops.cast(
        array_ops.size(logits), dtype=dtypes.float32)
    avg_norm = math_ops.divide(
        linalg_ops.norm(logits), math_ops.sqrt(num_in_logits))
    logits_noise = random_ops.random_normal(
        array_ops.shape(logits),
        stddev=self.hparams.logits_std_noise * avg_norm)
    logits = control_flow_ops.cond(
        self.global_step > self.hparams.stop_noise_step, lambda: logits,
        lambda: logits + logits_noise)
  if mode == "sample":
    next_y = random_ops.multinomial(logits, 1, seed=self.hparams.seed)
  elif mode == "greedy":
    next_y = math_ops.argmax(logits, 1)
  elif mode == "target":
    next_y = array_ops.slice(y, [0, i], [-1, 1])
  else:
    raise NotImplementedError
  next_y = math_ops.to_int32(next_y)
  next_y = array_ops.reshape(next_y, [self.hparams.num_children])
  actions = actions.write(i, next_y)
  log_probs += nn_ops.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=next_y)
  return i + 1, next_c, next_h, actions, log_probs
Example 10: testCond_3
def testCond_3(self):
  with self.test_session():
    x = tf.constant(10)
    pred = tf.less(1, 2)
    fn1 = lambda: tf.add(x, 1)
    fn2 = lambda: tf.sub(x, 1)
    fn3 = lambda: tf.add(control_flow_ops.cond(pred, fn1, fn2), 1)
    r = control_flow_ops.cond(pred, fn3, fn2)

    result = r.eval()
    self.assertTrue(check_op_order(x.graph))
    self.assertAllEqual(12, result)
Example 11: false_fn
def false_fn():

  def inner_true_fn():
    v.assign(v * 2, name='false_true')
    return 2.0

  def inner_false_fn():
    v.assign(v * 3, name='false_false')
    return 3.0

  control_flow_ops.cond(q, inner_true_fn, inner_false_fn)
  return 1.0
Example 12: testRaisesOutputStructuresMismatch
def testRaisesOutputStructuresMismatch(self):
  x = constant_op.constant(1.0, name="x")
  y = constant_op.constant(3.0, name="y")

  def true_fn():
    return x * y, y

  def false_fn():
    return ((x,), y * 3.0)

  with self.assertRaisesRegexp(
      TypeError, "true_fn and false_fn arguments to tf.cond must have the "
      "same number, type, and overall structure of return values."):
    control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
Example 13: _initialize_variables
def _initialize_variables(self, data, initial_means=None):
  """Initializes variables.

  Args:
    data: a list of Tensors with data, each row is a new example.
    initial_means: a Tensor with a matrix of means.
  """
  first_shard = data[0]
  # Initialize means: num_classes X 1 X dimensions.
  if initial_means is not None:
    means = array_ops.expand_dims(initial_means, 1)
  else:
    # Sample data randomly
    means = array_ops.expand_dims(
        _init_clusters_random(data, self._num_classes, self._random_seed), 1)

  # Initialize covariances.
  if self._covariance_type == FULL_COVARIANCE:
    cov = _covariance(first_shard, False) + self._min_var
    # A matrix per class, num_classes X dimensions X dimensions
    covs = array_ops.tile(
        array_ops.expand_dims(cov, 0), [self._num_classes, 1, 1])
  elif self._covariance_type == DIAG_COVARIANCE:
    cov = _covariance(first_shard, True) + self._min_var
    # A diagonal per row, num_classes X dimensions.
    covs = array_ops.tile(
        array_ops.expand_dims(array_ops.diag_part(cov), 0),
        [self._num_classes, 1])

  with ops.colocate_with(self._cluster_centers_initialized):
    initialized = control_flow_ops.with_dependencies(
        [means, covs],
        array_ops.identity(self._cluster_centers_initialized))
  self._init_ops = []
  with ops.colocate_with(self._means):
    init_means = state_ops.assign(self._means, means, validate_shape=False)
    init_means = control_flow_ops.with_dependencies(
        [init_means],
        state_ops.assign(self._cluster_centers_initialized, True))
    self._init_ops.append(control_flow_ops.cond(initialized,
                                                control_flow_ops.no_op,
                                                lambda: init_means).op)
  with ops.colocate_with(self._covs):
    init_covs = state_ops.assign(self._covs, covs, validate_shape=False)
    init_covs = control_flow_ops.with_dependencies(
        [init_covs],
        state_ops.assign(self._cluster_centers_initialized, True))
    self._init_ops.append(control_flow_ops.cond(initialized,
                                                control_flow_ops.no_op,
                                                lambda: init_covs).op)
Example 14: testRaisesOutputStructuresMismatch
def testRaisesOutputStructuresMismatch(self):
  x = constant_op.constant(1.0, name="x")
  y = constant_op.constant(3.0, name="y")

  def true_fn():
    return x * y, y

  def false_fn():
    return ((x,), y * 3.0)

  with self.assertRaisesRegexp(
      ValueError, "Outputs of true_fn and false_fn must"
      " have the same structure"):
    control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
Example 15: batch_norm
def batch_norm(x, deterministic, alpha=0.9, shift=True, scope='bn'):
    with vs.variable_scope(scope):
        dtype = x.dtype
        input_shape = x.get_shape().as_list()
        feat_dim = input_shape[-1]
        # Normalize over all axes except the feature axis; list() keeps this
        # Python 3 safe, since moments expects a list of axes.
        axes = list(range(len(input_shape) - 1))

        if shift:
            beta = vs.get_variable(
                scope + "_beta", shape=[feat_dim],
                initializer=init_ops.zeros_initializer, dtype=dtype)
        else:
            beta = vs.get_variable(
                scope + "_beta", shape=[feat_dim],
                initializer=init_ops.zeros_initializer,
                dtype=dtype, trainable=False)

        gamma = vs.get_variable(
            scope + "_gamma", shape=[feat_dim],
            initializer=init_ops.constant_initializer(0.1), dtype=dtype)

        mean = vs.get_variable(scope + "_mean", shape=[feat_dim],
                               initializer=init_ops.zeros_initializer,
                               dtype=dtype, trainable=False)
        var = vs.get_variable(scope + "_var", shape=[feat_dim],
                              initializer=init_ops.ones_initializer,
                              dtype=dtype, trainable=False)
        counter = vs.get_variable(scope + "_counter", shape=[],
                                  initializer=init_ops.constant_initializer(0),
                                  dtype=tf.int64, trainable=False)
        zero_cnt = vs.get_variable(scope + "_zero_cnt", shape=[],
                                   initializer=init_ops.constant_initializer(0),
                                   dtype=tf.int64, trainable=False)

        batch_mean, batch_var = moments(x, axes, name=scope + '_moments')
        # On the very first batch (counter == 0), seed the statistics with the
        # batch statistics instead of the (still zero/one) running values.
        mean, var = cond(math_ops.equal(counter, zero_cnt),
                         lambda: (batch_mean, batch_var),
                         lambda: (mean, var))
        # In deterministic (inference) mode, use the running statistics as-is;
        # otherwise blend running and batch statistics with an exponential
        # moving average and advance the counter.
        mean, var, counter = cond(deterministic,
                                  lambda: (mean, var, counter),
                                  lambda: ((1 - alpha) * batch_mean + alpha * mean,
                                           (1 - alpha) * batch_var + alpha * var,
                                           counter + 1))
        normed = batch_normalization(x, mean, var, beta, gamma, 1e-8)
    return normed