This article collects typical usage examples of the tensorflow.cond method in Python. If you are wondering what tensorflow.cond does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of the tensorflow module in which the method lives.
A total of 15 code examples of tensorflow.cond are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
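Before the examples, a minimal sketch of tf.cond itself (graph-mode TF 1.x, which all of the examples below assume): both branches are passed as zero-argument callables, they must return tensors of matching structure and dtype, and only the ops created inside the selected branch are executed at run time. The placeholder and constant values here are purely illustrative.

import tensorflow as tf

pred = tf.placeholder(tf.bool, shape=[])   # scalar boolean predicate
x = tf.constant(2.0)
y = tf.constant(5.0)
result = tf.cond(pred, lambda: x + y, lambda: x * y)

with tf.Session() as sess:
    print(sess.run(result, feed_dict={pred: True}))   # 7.0
    print(sess.run(result, feed_dict={pred: False}))  # 10.0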
Example 1: _create_autosummary_var
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
# This snippet also relies on: import numpy as np
def _create_autosummary_var(name, value_expr):
    # Accumulate value_expr into a persistent [numerator, denominator] pair so
    # that autosummaries can later be reported as running averages.
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    if v.shape.ndims == 0:
        v = [v, np.float32(1.0)]
    elif v.shape.ndims == 1:
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2))  # [numerator, denominator]
    update_op = tf.cond(tf.is_variable_initialized(var),
                        lambda: tf.assign_add(var, v),
                        lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op
#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call.
Example 2: structure
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def structure(self, input_tensor):
    """Randomly resize the input and pad it back to 160x160, with probability 0.9.

    Args:
        input_tensor: NHWC
    """
    rnd = tf.random_uniform((), 135, 160, dtype=tf.int32)
    rescaled = tf.image.resize_images(
        input_tensor, [rnd, rnd], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    h_rem = 160 - rnd
    w_rem = 160 - rnd
    pad_left = tf.random_uniform((), 0, w_rem, dtype=tf.int32)
    pad_right = w_rem - pad_left
    pad_top = tf.random_uniform((), 0, h_rem, dtype=tf.int32)
    pad_bottom = h_rem - pad_top
    padded = tf.pad(rescaled, [[0, 0], [pad_top, pad_bottom],
                               [pad_left, pad_right], [0, 0]])
    padded.set_shape((input_tensor.shape[0], 160, 160, 3))
    output = tf.cond(tf.random_uniform(shape=[1])[0] < tf.constant(0.9),
                     lambda: padded, lambda: input_tensor)
    return output
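The tf.cond at the end of Example 2 is a common pattern: apply a random augmentation with a fixed probability, otherwise pass the input through unchanged. A minimal standalone sketch of just that gate (the additive-noise augmentation is a hypothetical stand-in):

import tensorflow as tf

image = tf.placeholder(tf.float32, shape=[None, 160, 160, 3])
augmented = image + tf.random_normal(tf.shape(image), stddev=0.1)  # stand-in augmentation
apply_aug = tf.random_uniform(shape=[]) < tf.constant(0.9)
output = tf.cond(apply_aug, lambda: augmented, lambda: image)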
Example 3: lstm_setup
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def lstm_setup(name, x, batch_size, is_single_step, lstm_dim, lstm_out,
               num_steps, state_input_op):
    # Returns state_name, state_init_op, updated_state_op, out_op.
    # lstm_online is a helper defined elsewhere in the source module.
    with tf.name_scope('reshape_' + name):
        sh = x.get_shape().as_list()
        x = tf.reshape(x, shape=[batch_size, -1, sh[-1]])

    with tf.variable_scope(name) as varscope:
        cell = tf.contrib.rnn.LSTMCell(
            num_units=lstm_dim, forget_bias=1.0, state_is_tuple=False,
            num_proj=lstm_out, use_peepholes=True,
            initializer=tf.random_uniform_initializer(-0.01, 0.01, seed=0),
            cell_clip=None, proj_clip=None)

    sh = [batch_size, 1, lstm_dim + lstm_out]
    state_init_op = tf.constant(0., dtype=tf.float32, shape=sh)

    fn = lambda ns: lstm_online(cell, ns, x, state_input_op, varscope)
    out_op, updated_state_op = tf.cond(is_single_step, lambda: fn(1),
                                       lambda: fn(num_steps))

    return name, state_init_op, updated_state_op, out_op
Example 4: _create_learning_rate
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def _create_learning_rate(hyperparams, step_var):
    """Creates the learning rate var, with decay and switching for CompositeOptimizer.

    Args:
        hyperparams: a GridPoint proto containing the optimizer spec, in particular
            learning_method to determine the optimizer class to use.
        step_var: tf.Variable, global training step.

    Returns:
        a scalar `Tensor`, the learning rate based on the current step and hyperparams.
    """
    if hyperparams.learning_method != 'composite':
        base_rate = hyperparams.learning_rate
    else:
        spec = hyperparams.composite_optimizer_spec
        switch = tf.less(step_var, spec.switch_after_steps)
        base_rate = tf.cond(switch,
                            lambda: tf.constant(spec.method1.learning_rate),
                            lambda: tf.constant(spec.method2.learning_rate))
    return tf.train.exponential_decay(
        base_rate,
        step_var,
        hyperparams.decay_steps,
        hyperparams.decay_base,
        staircase=hyperparams.decay_staircase)
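For the composite case above, the tf.cond simply chooses between two constant base rates depending on the global step, before the usual exponential decay is applied. A minimal sketch with hypothetical numbers standing in for the spec fields:

import tensorflow as tf

step_var = tf.train.get_or_create_global_step()
base_rate = tf.cond(tf.less(step_var, 10000),   # spec.switch_after_steps
                    lambda: tf.constant(0.1),   # spec.method1.learning_rate
                    lambda: tf.constant(0.01))  # spec.method2.learning_rate
learning_rate = tf.train.exponential_decay(
    base_rate, step_var, decay_steps=1000, decay_rate=0.96, staircase=True)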
Example 5: _PadLabels2d
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def _PadLabels2d(logits_size, labels):
    """Pads or slices the 2nd dimension of 2-d labels to match logits_size.

    Covers the case of 1-d softmax output, when labels is [batch, seq]
    and logits is [batch, seq, onehot].

    Args:
        logits_size: Tensor returned from tf.shape giving the target size.
        labels: 2-d, but not necessarily matching in size.

    Returns:
        labels: Resized by padding or clipping the last dimension to logits_size.
    """
    pad = logits_size - tf.shape(labels)[1]

    def _PadFn():
        return tf.pad(labels, [[0, 0], [0, pad]])

    def _SliceFn():
        return tf.slice(labels, [0, 0], [-1, logits_size])

    return tf.cond(tf.greater(pad, 0), _PadFn, _SliceFn)
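A hypothetical usage sketch of _PadLabels2d: when the logits sequence is longer, the labels are zero-padded; when it is shorter, they are sliced. The shapes below are illustrative only.

import tensorflow as tf

labels = tf.constant([[1, 2, 3], [4, 5, 6]])         # [batch=2, seq=3]
logits = tf.zeros([2, 5, 10])                        # [batch=2, seq=5, onehot=10]
aligned = _PadLabels2d(tf.shape(logits)[1], labels)  # -> [[1, 2, 3, 0, 0], [4, 5, 6, 0, 0]]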
Example 6: pad_tensor
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def pad_tensor(t, length):
    """Pads the input tensor with 0s along the first dimension up to the length.

    Args:
        t: the input tensor, assuming the rank is at least 1.
        length: a tensor of shape [1] or an integer, indicating the first dimension
            of the input tensor t after padding, assuming length >= t.shape[0].

    Returns:
        padded_t: the padded tensor, whose first dimension is length. If the length
            is an integer, the first dimension of padded_t is set to length
            statically.
    """
    t_rank = tf.rank(t)
    t_shape = tf.shape(t)
    t_d0 = t_shape[0]
    pad_d0 = tf.expand_dims(length - t_d0, 0)
    pad_shape = tf.cond(
        tf.greater(t_rank, 1),
        lambda: tf.concat([pad_d0, t_shape[1:]], 0),
        lambda: tf.expand_dims(length - t_d0, 0))
    padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
    if not _is_tensor(length):
        padded_t = _set_dim_0(padded_t, length)
    return padded_t
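The tf.cond in pad_tensor only decides the shape of the zero padding: for rank > 1 the trailing dimensions of t must be carried over, while for rank 1 the padding is just a vector of length length - t_d0 (_is_tensor and _set_dim_0 are helpers from the surrounding module and are not reproduced here). A standalone sketch of that branch with hypothetical shapes:

import tensorflow as tf

t = tf.placeholder(tf.float32, shape=[None, 4])   # rank-2 example input
length = tf.constant(10)
pad_d0 = tf.expand_dims(length - tf.shape(t)[0], 0)
pad_shape = tf.cond(
    tf.greater(tf.rank(t), 1),
    lambda: tf.concat([pad_d0, tf.shape(t)[1:]], 0),  # rank > 1: e.g. [10 - n, 4]
    lambda: pad_d0)                                   # rank 1: just [10 - n]
padded = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)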
Example 7: pad_or_clip_tensor
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def pad_or_clip_tensor(t, length):
    """Pad or clip the input tensor along the first dimension.

    Args:
        t: the input tensor, assuming the rank is at least 1.
        length: a tensor of shape [1] or an integer, indicating the first dimension
            of the input tensor t after processing.

    Returns:
        processed_t: the processed tensor, whose first dimension is length. If the
            length is an integer, the first dimension of the processed tensor is
            set to length statically.
    """
    # clip_tensor, _is_tensor and _set_dim_0 are helpers from the same module.
    processed_t = tf.cond(
        tf.greater(tf.shape(t)[0], length),
        lambda: clip_tensor(t, length),
        lambda: pad_tensor(t, length))
    if not _is_tensor(length):
        processed_t = _set_dim_0(processed_t, length)
    return processed_t
Example 8: memory_run
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def memory_run(step, nmaps, mem_size, batch_size, vocab_size,
               global_step, do_training, update_mem, decay_factor, num_gpus,
               target_emb_weights, output_w, gpu_targets_tn, it):
    """Run memory."""
    # memory_call is a helper defined elsewhere in the source module.
    q = step[:, 0, it, :]
    mlabels = gpu_targets_tn[:, it, 0]
    res, mask, mem_loss = memory_call(
        q, mlabels, nmaps, mem_size, vocab_size, num_gpus, update_mem)
    res = tf.gather(target_emb_weights, res) * tf.expand_dims(mask[:, 0], 1)

    # Mix gold and original in the first steps, 20% later.
    gold = tf.nn.dropout(tf.gather(target_emb_weights, mlabels), 0.7)
    use_gold = 1.0 - tf.cast(global_step, tf.float32) / (1000. * decay_factor)
    use_gold = tf.maximum(use_gold, 0.2) * do_training
    mem = tf.cond(tf.less(tf.random_uniform([]), use_gold),
                  lambda: use_gold * gold + (1.0 - use_gold) * res,
                  lambda: res)
    mem = tf.reshape(mem, [-1, 1, 1, nmaps])
    return mem, mem_loss, update_mem
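The tf.cond in memory_run implements a scheduled-sampling style mix: with probability use_gold (decaying from 1.0 toward 0.2 over training) a blend of gold and predicted embeddings is fed, otherwise only the prediction. A standalone sketch with hypothetical shapes and decay constants:

import tensorflow as tf

gold = tf.placeholder(tf.float32, shape=[8, 64])   # gold-label embeddings
pred = tf.placeholder(tf.float32, shape=[8, 64])   # model-predicted embeddings
global_step = tf.train.get_or_create_global_step()
use_gold = tf.maximum(1.0 - tf.cast(global_step, tf.float32) / 10000.0, 0.2)
mixed = tf.cond(tf.less(tf.random_uniform([]), use_gold),
                lambda: use_gold * gold + (1.0 - use_gold) * pred,
                lambda: pred)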
Example 9: experience
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def experience(self, observ, action, reward, unused_done, unused_nextob):
    """Process the transition tuple of the current step.

    When training, add the current transition tuple to the memory and update
    the streaming statistics for observations and rewards. A summary string is
    returned if requested at this step.

    Args:
        observ: Batch tensor of observations.
        action: Batch tensor of actions.
        reward: Batch tensor of rewards.
        unused_done: Batch tensor of done flags.
        unused_nextob: Batch tensor of successor observations.

    Returns:
        Summary tensor.
    """
    with tf.name_scope('experience/'):
        return tf.cond(
            self._is_training,
            lambda: self._define_experience(observ, action, reward), str)
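The bare str passed as the false branch is a small trick used throughout these agent examples: tf.cond calls it with no arguments, it returns '', and TensorFlow converts that into an empty string tensor matching the dtype of the summary produced in the training branch. A minimal sketch of the same pattern:

import tensorflow as tf

is_training = tf.placeholder(tf.bool, shape=[])
summary = tf.cond(is_training,
                  lambda: tf.summary.scalar('loss', tf.constant(1.0)),
                  str)   # false branch: an empty summary string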
Example 10: _define_experience
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def _define_experience(self, observ, action, reward):
    """Implement the branch of experience() entered during training."""
    update_filters = tf.summary.merge([
        self._observ_filter.update(observ),
        self._reward_filter.update(reward)])
    with tf.control_dependencies([update_filters]):
        if self._config.train_on_agent_action:
            # NOTE: Doesn't seem to change much.
            action = self._last_action
        batch = observ, action, self._last_mean, self._last_logstd, reward
        append = self._episodes.append(batch, tf.range(len(self._batch_env)))
    with tf.control_dependencies([append]):
        norm_observ = self._observ_filter.transform(observ)
        norm_reward = tf.reduce_mean(self._reward_filter.transform(reward))
        # pylint: disable=g-long-lambda
        summary = tf.cond(self._should_log, lambda: tf.summary.merge([
            update_filters,
            self._observ_filter.summary(),
            self._reward_filter.summary(),
            tf.summary.scalar('memory_size', self._memory_index),
            tf.summary.histogram('normalized_observ', norm_observ),
            tf.summary.histogram('action', self._last_action),
            tf.summary.scalar('normalized_reward', norm_reward)]), str)
        return summary
Example 11: end_episode
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def end_episode(self, agent_indices):
    """Add episodes to the memory and perform update steps if memory is full.

    During training, add the collected episodes of the batch indices that
    finished their episode to the memory. If the memory is full, train on it,
    and then clear the memory. A summary string is returned if requested at
    this step.

    Args:
        agent_indices: 1D tensor of batch indices for agents starting an episode.

    Returns:
        Summary tensor.
    """
    with tf.name_scope('end_episode/'):
        return tf.cond(
            self._is_training,
            lambda: self._define_end_episode(agent_indices), str)
Example 12: _std
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def _std(self):
    """Computes the current estimate of the standard deviation.

    Note that the standard deviation is not defined until at least two samples
    were seen.

    Returns:
        Tensor of the current standard deviation.
    """
    variance = tf.cond(
        self._count > 1,
        lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
        lambda: tf.ones_like(self._var_sum) * float('nan'))
    # The epsilon corrects for small negative variance values caused by
    # the algorithm. It was empirically chosen to work with all environments
    # tested.
    return tf.sqrt(variance + 1e-4)
Example 13: experience
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def experience(
        self, agent_indices, observ, action, reward, unused_done, unused_nextob):
    """Process the transition tuple of the current step.

    When training, add the current transition tuple to the memory and update
    the streaming statistics for observations and rewards. A summary string is
    returned if requested at this step.

    Args:
        agent_indices: Tensor containing current batch indices.
        observ: Batch tensor of observations.
        action: Batch tensor of actions.
        reward: Batch tensor of rewards.
        unused_done: Batch tensor of done flags.
        unused_nextob: Batch tensor of successor observations.

    Returns:
        Summary tensor.
    """
    with tf.name_scope('experience/'):
        return tf.cond(
            self._is_training,
            # pylint: disable=g-long-lambda
            lambda: self._define_experience(
                agent_indices, observ, action, reward), str)
Example 14: end_episode
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def end_episode(self, agent_indices):
    """Add episodes to the memory and perform update steps if memory is full.

    During training, add the collected episodes of the batch indices that
    finished their episode to the memory. If the memory is full, train on it,
    and then clear the memory. A summary string is returned if requested at
    this step.

    Args:
        agent_indices: Tensor containing current batch indices.

    Returns:
        Summary tensor.
    """
    with tf.name_scope('end_episode/'):
        return tf.cond(
            self._is_training,
            lambda: self._define_end_episode(agent_indices), str)
Example 15: _apply_cond
# Required module: import tensorflow [as alias]
# Alternatively: from tensorflow import cond [as alias]
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
        total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
        adam_op = apply_fn(total_grad, var, *args, **kwargs)
        with tf.control_dependencies([adam_op]):
            grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),
                                                  use_locking=self._use_locking)
        return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
        assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
        return tf.group(assign_op)  # Strip return value.

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad))
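Example 15 is the core of a multi-step (gradient accumulation) optimizer: every n-th step the averaged gradient is applied and the accumulator is cleared, on all other steps the gradient is only accumulated. A standalone sketch of that pattern with hypothetical names and a plain SGD update standing in for apply_fn:

import tensorflow as tf

n = 4                                              # apply once every 4 steps
var = tf.Variable(tf.zeros([3]))
grad = tf.placeholder(tf.float32, shape=[3])
grad_acc = tf.Variable(tf.zeros([3]), trainable=False)
step = tf.Variable(0, trainable=False)

def _apply():
    # Average the accumulated gradients, apply them, then clear the accumulator.
    avg_grad = (grad_acc + grad) / float(n)
    apply_op = tf.assign_sub(var, 0.1 * avg_grad)  # stand-in for apply_fn
    with tf.control_dependencies([apply_op]):
        zero_op = tf.assign(grad_acc, tf.zeros_like(grad_acc))
    return tf.group(apply_op, zero_op)

def _accumulate():
    # Off-step: just add the new gradient to the accumulator.
    return tf.group(tf.assign_add(grad_acc, grad))

cond_op = tf.cond(tf.equal(tf.mod(step, n), 0), _apply, _accumulate)
with tf.control_dependencies([cond_op]):
    update_op = tf.assign_add(step, 1)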