This page collects typical usage examples of the Python method tensorflow.while_loop. If you have been wondering what exactly tensorflow.while_loop does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the enclosing tensorflow module.
The 15 code examples of tensorflow.while_loop shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
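Before the examples, here is a minimal, self-contained sketch of the tf.while_loop contract, assuming the TF 1.x graph-mode API that all of the examples below use: cond and body each receive the loop variables, and body must return updated values with matching dtypes (and, by default, matching shapes).

import tensorflow as tf

i0 = tf.constant(0)
acc0 = tf.constant(1)

def cond(i, acc):
    # Keep looping while the counter is below 5.
    return tf.less(i, 5)

def body(i, acc):
    # Return every loop variable, updated, in the same order.
    return i + 1, acc * 2

_, result = tf.while_loop(cond, body, [i0, acc0])

with tf.Session() as sess:
    print(sess.run(result))  # 32, i.e. 2 ** 5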
Example 1: _inv_preemphasis
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def _inv_preemphasis(x):
    N = tf.shape(x)[0]
    i = tf.constant(0)
    W = tf.zeros(shape=tf.shape(x), dtype=tf.float32)

    def condition(i, y):
        return tf.less(i, N)

    def body(i, y):
        tmp = tf.slice(x, [0], [i + 1])
        tmp = tf.concat([tf.zeros([N - i - 1]), tmp], -1)
        y = hparams.preemphasis * y + tmp
        i = tf.add(i, 1)
        return [i, y]

    final = tf.while_loop(condition, body, [i, W])
    y = final[1]
    return y
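The loop above appears to unroll the inverse pre-emphasis recurrence y[n] = x[n] + preemphasis * y[n-1], doing a slice-and-concat per sample (O(N^2) overall). As a hedged aside, the same recurrence can be expressed in O(N) with tf.scan; coef below is a stand-in for hparams.preemphasis:

def _inv_preemphasis_scan(x, coef=0.97):
    # y[n] = x[n] + coef * y[n-1], scanned left to right over the signal.
    return tf.scan(lambda y_prev, x_t: x_t + coef * y_prev,
                   x, initializer=tf.constant(0.0))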
Example 2: _leapfrog
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def _leapfrog(self, q, p, step_size, get_gradient, mass):
    def loop_cond(i, q, p):
        return i < self.n_leapfrogs + 1

    def loop_body(i, q, p):
        step_size1 = tf.cond(i > 0,
                             lambda: step_size,
                             lambda: tf.constant(0.0, dtype=tf.float32))
        step_size2 = tf.cond(tf.logical_and(tf.less(i, self.n_leapfrogs),
                                            tf.less(0, i)),
                             lambda: step_size,
                             lambda: step_size / 2)
        q, p = leapfrog_integrator(q, p, step_size1, step_size2,
                                   lambda q: get_gradient(q), mass)
        return [i + 1, q, p]

    i = tf.constant(0)
    _, q, p = tf.while_loop(loop_cond,
                            loop_body,
                            [i, q, p],
                            back_prop=False,
                            parallel_iterations=1)
    return q, p
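leapfrog_integrator is defined elsewhere in the library; the tf.cond calls above select zero, half, or full step sizes so that adjacent half-steps of the standard leapfrog scheme can be fused at the trajectory boundaries. A hypothetical stand-in, not the library's actual code, might look like:

def leapfrog_integrator(q, p, step_size1, step_size2, grad, mass):
    # One fused leapfrog update: a position step followed by a momentum
    # step; signs assume grad returns the gradient of the log density.
    q = q + step_size1 * p / mass
    p = p + step_size2 * grad(q)
    return q, p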
Example 3: ngctc_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def ngctc_loss(term_probs, targets, seq_len, tar_len):
    bs = tf.to_int32(tf.shape(term_probs)[0])
    cond = lambda j, loss: tf.less(j, bs)
    j = tf.constant(0, dtype=tf.int32)
    loss = tf.constant(0, dtype=tf.float64)

    def body(j, loss):
        idx = tf.expand_dims(targets[j, :tar_len[j]], 1)
        st = tf.transpose(term_probs[j], (1, 0))
        st = tf.transpose(tf.gather_nd(st, idx), (1, 0))
        length = seq_len[j]
        # Negative log likelihood, averaged over the whole batch.
        loss += -tf.reduce_sum(tf.log(forward_ngctc(st, length)) / tf.to_double(bs))
        return tf.add(j, 1), loss

    out = tf.while_loop(cond, body, loop_vars=[j, loss])
    return out[1]
Example 4: ngctc_decode
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def ngctc_decode(term_probs, targets, seq_len, tar_len):
    max_seq_len = tf.to_int32(tf.reduce_max(seq_len))
    bs = tf.to_int32(tf.shape(term_probs)[0])
    cond = lambda j, decoded: tf.less(j, bs)
    j = tf.constant(0, dtype=tf.int32)
    decoded = tf.zeros([1, max_seq_len], dtype=tf.int32)

    def body(j, decoded):
        idx = tf.expand_dims(targets[j, :tar_len[j]], 1)
        st = tf.transpose(term_probs[j], (1, 0))
        st = tf.transpose(tf.gather_nd(st, idx), (1, 0))
        length = tf.to_int32(seq_len[j])
        # Essentially the probability of being at each node.
        alphas = forward_ngctc(st, length, inference=True)
        # Decode by taking the argmax over each row of alphas.
        dec = tf.to_int32(tf.argmax(alphas, axis=1))
        dec = tf.concat([dec, tf.zeros([max_seq_len - length], dtype=tf.int32)], axis=0)
        decoded = tf.concat([decoded, [dec]], axis=0)
        return tf.add(j, 1), decoded

    out = tf.while_loop(cond, body, loop_vars=[j, decoded],
                        shape_invariants=[tf.TensorShape(None),
                                          tf.TensorShape([None, None])])
    return out[1]
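Example 4 relies on shape_invariants because decoded grows by one row per iteration. A minimal, self-contained sketch of that mechanism:

import tensorflow as tf

i0 = tf.constant(0)
rows0 = tf.zeros([1, 4])

_, rows = tf.while_loop(
    lambda i, rows: i < 3,
    lambda i, rows: (i + 1, tf.concat([rows, tf.zeros([1, 4])], axis=0)),
    [i0, rows0],
    shape_invariants=[i0.get_shape(), tf.TensorShape([None, 4])])
# rows ends up with shape [4, 4]; without the invariant, while_loop
# rejects a loop variable whose shape changes across iterations.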
Example 5: broadcast_against
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def broadcast_against(tensor, against_expr):
    """Adds trailing dimensions to `tensor` to enable broadcasting against data.

    :param tensor: tensor to be broadcast
    :param against_expr: tensor that `tensor` will be broadcast against
    :return: `tensor` expanded so that tf.rank(tensor) == tf.rank(against_expr)
    """
    def cond(data, tensor):
        return tf.less(tf.rank(tensor), tf.rank(data))

    def body(data, tensor):
        return data, tf.expand_dims(tensor, -1)

    shape_invariants = [against_expr.get_shape(), tf.TensorShape(None)]
    _, tensor = tf.while_loop(cond, body, [against_expr, tensor], shape_invariants)
    return tensor
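A usage sketch with hypothetical shapes: broadcasting a per-example [B] mask against [B, T, D] data.

data = tf.zeros([2, 3, 4])            # [B, T, D]
mask = tf.constant([1.0, 0.0])        # [B]
mask = broadcast_against(mask, data)  # trailing dims added: [B, 1, 1]
masked = data * mask                  # standard broadcasting now applies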
Example 6: search
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def search(self, initial_ids, initial_cache):
    """Beam search for sequences with highest scores."""
    state, state_shapes = self._create_initial_state(initial_ids, initial_cache)

    finished_state = tf.while_loop(
        cond=self._continue_search, body=self._search_step, loop_vars=[state],
        shape_invariants=[state_shapes], parallel_iterations=1, back_prop=False)
    finished_state = finished_state[0]

    alive_seq = finished_state[_StateKeys.ALIVE_SEQ]
    alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]
    finished_seq = finished_state[_StateKeys.FINISHED_SEQ]
    finished_scores = finished_state[_StateKeys.FINISHED_SCORES]
    finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]

    # Account for corner case where there are no finished sequences for a
    # particular batch item. In that case, return alive sequences for that
    # batch item.
    finished_seq = tf.compat.v1.where(
        tf.reduce_any(input_tensor=finished_flags, axis=1), finished_seq,
        alive_seq)
    finished_scores = tf.compat.v1.where(
        tf.reduce_any(input_tensor=finished_flags, axis=1), finished_scores,
        alive_log_probs)
    return finished_seq, finished_scores
Example 7: double_factorial
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def double_factorial(n):
    """Computes the double factorial of `n`.

    Note:
      In the following, A1 to An are optional batch dimensions.

    Args:
      n: A tensor of shape `[A1, ..., An]` containing positive integer values.

    Returns:
      A tensor of shape `[A1, ..., An]` containing the double factorial of `n`.
    """
    n = tf.convert_to_tensor(value=n)
    two = tf.ones_like(n) * 2
    result = tf.ones_like(n)
    _, result, _ = tf.while_loop(
        cond=_double_factorial_loop_condition,
        body=_double_factorial_loop_body,
        loop_vars=[n, result, two])
    return result
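The loop helpers are not shown in this excerpt. A plausible implementation consistent with the call above (element-wise: multiply the current factor into the result, then decrement n by two, until every entry drops below 2) would be:

def _double_factorial_loop_condition(n, result, two):
    # Continue while any element still has a factor of at least 2 left.
    return tf.cast(tf.math.count_nonzero(tf.greater_equal(n, two)), tf.bool)

def _double_factorial_loop_body(n, result, two):
    # Multiply in the current factor where applicable, then step n down by 2.
    result = tf.where(tf.greater_equal(n, two), result * n, result)
    return n - two, result, two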
Example 8: _reshape_decoder_outputs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def _reshape_decoder_outputs(self):
    """Reshape decoder_outputs into shape [?, _NUM_UNITS]."""
    def concat_output_slices(idx, val):
        output_slice = tf.slice(
            input_=self.decoder_outputs,
            begin=[idx, 0, 0],
            size=[1, self.decoder_input_length[idx], _NUM_UNITS])
        return tf.add(idx, 1), \
            tf.concat([val, tf.squeeze(output_slice, axis=0)], axis=0)

    tf_i = tf.constant(0)
    tf_v = tf.zeros(shape=[0, _NUM_UNITS], dtype=tf.float32)
    _, reshaped_outputs = tf.while_loop(
        cond=lambda i, v: i < _BATCH_SIZE,
        body=concat_output_slices,
        loop_vars=[tf_i, tf_v],
        shape_invariants=[tf.TensorShape([]),
                          tf.TensorShape([None, _NUM_UNITS])])
    tf.TensorShape([None, _NUM_UNITS]).assert_same_rank(reshaped_outputs.shape)
    return reshaped_outputs
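As a hedged aside: when per-example lengths are available, as they are here, the same ragged concatenation can usually be written without a loop via a sequence mask (a sketch, assuming decoder_outputs has shape [_BATCH_SIZE, max_time, _NUM_UNITS]):

mask = tf.sequence_mask(self.decoder_input_length,
                        maxlen=tf.shape(self.decoder_outputs)[1])
reshaped = tf.boolean_mask(self.decoder_outputs, mask)  # [?, _NUM_UNITS]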
Example 9: test_import_tf_comp_with_while_loop
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def test_import_tf_comp_with_while_loop(self):

    @computations.tf_computation(tf.float32)
    def comp(x):
        # An example of a loop with variables that computes 2^x by counting
        # from x down to 0, and doubling the result in each iteration.
        a = tf.Variable(0.0)
        b = tf.Variable(1.0)
        with tf.control_dependencies([a.initializer, b.initializer]):
            with tf.control_dependencies([a.assign(x)]):
                cond_fn = lambda a, b: a > 0
                body_fn = lambda a, b: (a - 1.0, b * 2.0)
                return tf.while_loop(cond_fn, body_fn, (a, b))[1]

    module, mlir = self._import_compile_and_return_module_and_mlir(comp)

    # Not checking the full MLIR in the long generated body, just that we can
    # successfully ingest TF code containing a while loop here, end-to-end.
    # We need some form of looping support in lieu of
    # `tf.data.Dataset.reduce()`.
    self._assert_mlir_contains_pattern(
        mlir, ['func @fn(%arg0: tensor<f32>) -> tensor<f32>'])

    result = runtime.compile_and_run_on_args(module, backend_info.VULKAN_SPIRV,
                                             np.float32(5.0))
    self.assertEqual(result, 32.0)
Example 10: build_pgd_attack
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def build_pgd_attack(self, eps):
    victim_embeddings = tf.constant(self.victim_embeddings, dtype=tf.float32)

    def one_step_attack(image, grad):
        """
        Core components of this attack are:
        (a) PGD adversarial attack (https://arxiv.org/pdf/1706.06083.pdf)
        (b) momentum (https://arxiv.org/pdf/1710.06081.pdf)
        (c) input diversity (https://arxiv.org/pdf/1803.06978.pdf)
        """
        orig_image = image
        image = self.structure(image)
        image = (image - 127.5) / 128.0
        image = image + tf.random_uniform(tf.shape(image), minval=-1e-2, maxval=1e-2)
        prelogits, _ = self.network.inference(image, 1.0, False,
                                              bottleneck_layer_size=512)
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        embeddings = tf.reshape(embeddings[0], [512, 1])
        # Objective to be maximized.
        objective = tf.reduce_mean(tf.matmul(victim_embeddings, embeddings))
        noise, = tf.gradients(objective, orig_image)
        noise = noise / tf.reduce_mean(tf.abs(noise), [1, 2, 3], keep_dims=True)
        noise = 0.9 * grad + noise
        adv = tf.clip_by_value(orig_image + tf.sign(noise) * 1.0,
                               lower_bound, upper_bound)
        return adv, noise

    input = tf.to_float(self.image_batch)
    lower_bound = tf.clip_by_value(input - eps, 0, 255.)
    upper_bound = tf.clip_by_value(input + eps, 0, 255.)

    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        adv, _ = tf.while_loop(
            lambda _, __: True, one_step_attack,
            (input, tf.zeros_like(input)),
            back_prop=False,
            maximum_iterations=100,
            parallel_iterations=1)
    self.adv_image = adv
    return adv
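Example 10 pairs a condition that always returns True with maximum_iterations, which bounds the loop when the condition never will. A minimal sketch of that pattern:

import tensorflow as tf

x = tf.while_loop(
    lambda x: True,         # never terminates on its own ...
    lambda x: x * 2.0,
    [tf.constant(1.0)],
    maximum_iterations=10)  # ... so this caps the loop at 10 iterations
# x evaluates to 1024.0 (2 ** 10)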
Example 11: attack
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def attack(self, x, y):
    """
    This method creates a symbolic graph that, given an input image,
    first randomly perturbs the image. The perturbation is bounded to an
    epsilon ball. Then multiple steps of gradient descent are performed
    to increase the probability of a target label or decrease the
    probability of the ground-truth label.

    :param x: A tensor with the input image.
    """
    import tensorflow as tf
    from cleverhans.utils_tf import clip_eta

    if self.rand_init:
        eta = tf.random_uniform(
            tf.shape(x), -self.eps, self.eps, dtype=self.tf_dtype)
        eta = clip_eta(eta, self.ord, self.eps)
    else:
        eta = tf.zeros_like(x)

    def cond(i, _):
        return tf.less(i, self.nb_iter)

    def body(i, e):
        new_eta = self.attack_single_step(x, e, y)
        return i + 1, new_eta

    _, eta = tf.while_loop(cond, body, [tf.zeros([]), eta], back_prop=True)

    adv_x = x + eta
    if self.clip_min is not None and self.clip_max is not None:
        adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
    return adv_x
Example 12: _compute_gradients
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def _compute_gradients(self, loss_fn, x, unused_optim_state):
    """Compute gradient estimates using SPSA."""
    # Assumes `x` is a list containing a [1, H, W, C] image.
    assert len(x) == 1 and x[0].get_shape().as_list()[0] == 1
    x = x[0]
    x_shape = x.get_shape().as_list()

    def body(i, grad_array):
        delta = self._delta
        delta_x = self._get_delta(x, delta)
        delta_x = tf.concat([delta_x, -delta_x], axis=0)
        loss_vals = tf.reshape(
            loss_fn(x + delta_x),
            [2 * self._num_samples] + [1] * (len(x_shape) - 1))
        avg_grad = reduce_mean(loss_vals * delta_x, axis=0) / delta
        avg_grad = tf.expand_dims(avg_grad, axis=0)
        new_grad_array = grad_array.write(i, avg_grad)
        return i + 1, new_grad_array

    def cond(i, _):
        return i < self._num_iters

    _, all_grads = tf.while_loop(
        cond,
        body,
        loop_vars=[0, tf.TensorArray(size=self._num_iters, dtype=tf_dtype)],
        back_prop=False,
        parallel_iterations=1)
    avg_grad = reduce_sum(all_grads.stack(), axis=0)
    return [avg_grad]
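A minimal sketch of the TensorArray accumulation pattern Example 12 uses to collect one tensor per iteration:

import tensorflow as tf

ta0 = tf.TensorArray(dtype=tf.float32, size=4)

def body(i, ta):
    # TensorArray ops are functional: write returns a new handle that
    # must be threaded back through the loop variables.
    return i + 1, ta.write(i, tf.cast(i, tf.float32) ** 2)

_, ta = tf.while_loop(lambda i, ta: i < 4, body, [tf.constant(0), ta0])
squares = ta.stack()  # [0., 1., 4., 9.]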
Example 13: attack
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def attack(self, x, y):
    """
    This method creates a symbolic graph that, given an input image,
    first randomly perturbs the image. The perturbation is bounded to an
    epsilon ball. Then multiple steps of gradient descent are performed
    to increase the probability of a target label or decrease the
    probability of the ground-truth label.

    :param x: A tensor with the input image.
    """
    import tensorflow as tf
    from cleverhans.utils_tf import clip_eta

    if self.rand_init:
        eta = tf.random_uniform(
            tf.shape(x), -self.eps, self.eps, dtype=self.tf_dtype)
        eta = clip_eta(eta, self.ord, self.eps)
        temp_logits = self.model.get_logits(x)
        from cleverhans.loss import attack_softmax_cross_entropy
        temp_loss = attack_softmax_cross_entropy(y, temp_logits)
        adv_loss_fp = temp_loss
    else:
        eta = tf.zeros_like(x)
        # NOTE: adv_loss_fp is referenced below but only assigned in the
        # rand_init branch; as written, rand_init=False would raise a
        # NameError when the loop is built.

    def cond(i, _, alf):
        return tf.less(i, self.nb_iter)

    def body(i, e, alf):
        new_eta, adv_loss_fp = self.attack_single_step(x, e, y)
        return i + 1, new_eta, adv_loss_fp

    _, eta, adv_loss_fp = tf.while_loop(
        cond, body, [tf.zeros([]), eta, adv_loss_fp], back_prop=True)

    adv_x = x + eta
    if self.clip_min is not None and self.clip_max is not None:
        adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
    return adv_x, adv_loss_fp
Example 14: testParsingReaderOpWhileLoop
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def testParsingReaderOpWhileLoop(self):
    feature_size = 3
    batch_size = 5

    def ParserEndpoints():
        return gen_parser_ops.gold_parse_reader(self._task_context,
                                                feature_size,
                                                batch_size,
                                                corpus_name='training-corpus')

    with self.test_session() as sess:
        # The 'condition' and 'body' functions expect as many arguments as
        # there are loop variables. 'condition' depends on the 'epoch' loop
        # variable only, so we disregard the remaining unused function
        # arguments. 'body' returns a list of updated loop variables.
        def Condition(epoch, *unused_args):
            return tf.less(epoch, 2)

        def Body(epoch, num_actions, *feature_args):
            # By adding one of the outputs of the reader op ('epoch') as a
            # control dependency to the reader op we force the repeated
            # evaluation of the reader op.
            with epoch.graph.control_dependencies([epoch]):
                features, epoch, gold_actions = ParserEndpoints()
            num_actions = tf.maximum(num_actions,
                                     tf.reduce_max(gold_actions, [0], False) + 1)
            feature_ids = []
            for i in range(len(feature_args)):
                feature_ids.append(features[i])
            return [epoch, num_actions] + feature_ids

        epoch = ParserEndpoints()[-2]
        num_actions = tf.constant(0)
        loop_vars = [epoch, num_actions]
        res = sess.run(
            tf.while_loop(Condition, Body, loop_vars,
                          shape_invariants=[tf.TensorShape(None)] * 2,
                          parallel_iterations=1))
        logging.info('Result: %s', res)
        self.assertEqual(res[0], 2)
Example 15: _BuildSequence
# Required import: import tensorflow [as alias]
# Or: from tensorflow import while_loop [as alias]
def _BuildSequence(self,
                   batch_size,
                   max_steps,
                   features,
                   state,
                   use_average=False):
    """Adds a sequence of beam parsing steps."""
    def Advance(state, step, scores_array, alive, alive_steps, *features):
        scores = self._BuildNetwork(features,
                                    return_average=use_average)['logits']
        scores_array = scores_array.write(step, scores)
        features, state, alive = (
            gen_parser_ops.beam_parser(state, scores, self._feature_size))
        return [state, step + 1, scores_array, alive,
                alive_steps + tf.cast(alive, tf.int32)] + list(features)

    # args: (state, step, scores_array, alive, alive_steps, *features)
    def KeepGoing(*args):
        return tf.logical_and(args[1] < max_steps, tf.reduce_any(args[3]))

    step = tf.constant(0, tf.int32, [])
    scores_array = tensor_array_ops.TensorArray(dtype=tf.float32,
                                                size=0,
                                                dynamic_size=True)
    alive = tf.constant(True, tf.bool, [batch_size])
    alive_steps = tf.constant(0, tf.int32, [batch_size])
    t = tf.while_loop(
        KeepGoing,
        Advance,
        [state, step, scores_array, alive, alive_steps] + list(features),
        shape_invariants=[tf.TensorShape(None)] * (len(features) + 5),
        parallel_iterations=100)

    # Link to the final nodes/values of ops that have passed through While:
    return {'state': t[0],
            'concat_scores': t[2].concat(),
            'alive': t[3],
            'alive_steps': t[4]}
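A closing note on the shape_invariants=[tf.TensorShape(None)] * ... idiom used in Examples 14 and 15: tf.TensorShape(None) declares a shape of unknown rank, so loop variables whose shapes change or are not statically known still pass while_loop's shape check.

import tensorflow as tf

print(tf.TensorShape(None).ndims is None)  # True: even the rank is unknown
print(tf.TensorShape([None, 4]))           # known rank, unknown first dim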