This page collects typical usage examples of the tensorflow.Print method in Python. If you are unsure what tensorflow.Print does, how to call it, or where to find working examples, the curated code samples below should help; you can also explore further usage examples from the tensorflow module itself.
Fifteen code examples of tensorflow.Print are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
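Before the examples, here is a minimal, self-contained sketch of how tf.Print is normally wired into a TensorFlow 1.x graph; it is illustrative and not taken from any of the projects below. tf.Print is an identity op: it returns its first argument unchanged and, as a side effect, prints the tensors in its second argument every time the op is evaluated. (In TensorFlow 2.x the op is only reachable as tf.compat.v1.Print; tf.print is its replacement.)

import tensorflow as tf  # assumes TensorFlow 1.x, where tf.Print is still available

x = tf.placeholder(tf.float32, shape=[None, 4])
mean_x = tf.reduce_mean(x)
# Returns mean_x unchanged; the message and tensor values are written to stderr
# each time mean_x is evaluated (first_n and summarize limit the output volume).
mean_x = tf.Print(mean_x, [tf.shape(x), mean_x],
                  message='mean of x: ', first_n=10, summarize=4)

with tf.Session() as sess:
    print(sess.run(mean_x, feed_dict={x: [[1., 2., 3., 4.]]}))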
Example 1: log_quaternion_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def log_quaternion_loss(predictions, labels, params):
    """A helper function to compute the mean error between batches of quaternions.

    The caller is expected to add the loss to the graph.

    Args:
      predictions: A Tensor of size [batch_size, 4].
      labels: A Tensor of size [batch_size, 4].
      params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'.

    Returns:
      A Tensor of size 1, denoting the mean error between batches of quaternions.
    """
    use_logging = params['use_logging']
    logcost = log_quaternion_loss_batch(predictions, labels, params)
    logcost = tf.reduce_sum(logcost, [0])
    batch_size = params['batch_size']
    logcost = tf.multiply(logcost, 1.0 / batch_size, name='log_quaternion_loss')
    if use_logging:
        logcost = tf.Print(
            logcost, [logcost], '[logcost]', name='log_quaternion_loss_print')
    return logcost
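For context, a hypothetical call site for this helper might look like the following; the tensor shapes and parameter values are illustrative assumptions, and log_quaternion_loss_batch is the companion function shown in Example 15.

params = {'use_logging': True, 'batch_size': 32}        # illustrative values
quat_pred = tf.placeholder(tf.float32, shape=[32, 4])   # assumed unit quaternions
quat_true = tf.placeholder(tf.float32, shape=[32, 4])
loss = log_quaternion_loss(quat_pred, quat_true, params)
tf.losses.add_loss(loss)  # the caller is expected to add the loss to the graph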
Example 2: _update_value
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def _update_value(self, observ, reward, length):
    """Perform multiple update steps of the value baseline.

    We need to decide for the summary of one iteration, and thus choose the one
    after half of the iterations.

    Args:
      observ: Sequences of observations.
      reward: Sequences of reward.
      length: Batch of sequence lengths.

    Returns:
      Summary tensor.
    """
    with tf.name_scope('update_value'):
        loss, summary = tf.scan(
            lambda _1, _2: self._update_value_step(observ, reward, length),
            tf.range(self._config.update_epochs_value),
            [0., ''], parallel_iterations=1)
        print_loss = tf.Print(0, [tf.reduce_mean(loss)], 'value loss: ')
        with tf.control_dependencies([loss, print_loss]):
            return summary[self._config.update_epochs_value // 2]
Example 3: omniglot
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def omniglot():
    sess = tf.InteractiveSession()

    """ def wrapper(v):
            return tf.Print(v, [v], message="Printing v")
        v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
        temp = wrapper(v)
        #with tf.control_dependencies([temp]):
        temp.eval()
        print('Hello')"""

    def update_tensor(V, dim2, val):  # Update tensor V, with index (:, dim2[:]) by val[:]
        val = tf.cast(val, V.dtype)

        def body(_, elems):
            # Tuple parameters are Python 2 only; unpack inside the body instead.
            v, d2, chg = elems
            d2_int = tf.cast(d2, tf.int32)
            # tf.concat_v2 no longer exists; tf.concat is the current API.
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
                            [0], [v.get_shape().as_list()[0]])

        Z = tf.scan(body, elems=(V, dim2, val),
                    initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32),
                    name="Scan_Update")
        return Z
Example 4: pil_image_to_tf_summary
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def pil_image_to_tf_summary(img, tag="debug_img"):
    # serialise png bytes
    sio = io.BytesIO()
    img.save(sio, format="png")
    png_bytes = sio.getvalue()

    # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto
    # Note: PIL's img.size is (width, height), so index 1 is the height and index 0 the width.
    return tf.Summary(value=[tf.Summary.Value(tag=tag,
                                              image=tf.Summary.Image(height=img.size[1],
                                                                     width=img.size[0],
                                                                     colorspace=3,  # RGB
                                                                     encoded_image_string=png_bytes))])

# def dice_loss(y, y_hat, batch_size, smoothing=0):
#     y = tf.reshape(y, (batch_size, -1))
#     y_hat = tf.reshape(y_hat, (batch_size, -1))
#     intersection = y * y_hat
#     intersection_rs = tf.reduce_sum(intersection, axis=1)
#     nom = intersection_rs + smoothing
#     denom = tf.reduce_sum(y, axis=1) + tf.reduce_sum(y_hat, axis=1) + smoothing
#     score = 2.0 * (nom / denom)
#     loss = 1.0 - score
#     loss = tf.Print(loss, [intersection, intersection_rs, nom, denom], first_n=100, summarize=10000)
#     return loss
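A hypothetical usage of this helper might look like the following; the writer path, PIL image img, and step counter are assumptions and not part of the original snippet.

writer = tf.summary.FileWriter("/tmp/debug_logs")                  # assumed log directory
writer.add_summary(pil_image_to_tf_summary(img, tag="debug_img"),  # img: a PIL.Image instance
                   global_step=step)
writer.flush()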
Example 5: print_act_stats
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def print_act_stats(x, _str=""):
    if not do_print_act_stats:
        return x
    if hvd.rank() != 0:
        return x
    if len(x.get_shape()) == 1:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 2:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 4:
        x_mean, x_var = tf.nn.moments(x, [0, 1, 2], keep_dims=True)
    stats = [tf.reduce_min(x_mean), tf.reduce_mean(x_mean), tf.reduce_max(x_mean),
             tf.reduce_min(tf.sqrt(x_var)), tf.reduce_mean(tf.sqrt(x_var)), tf.reduce_max(tf.sqrt(x_var))]
    return tf.Print(x, stats, "[" + _str + "] " + x.name)

# Allreduce methods
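A hypothetical way to drop this helper into a model, assuming do_print_act_stats is True and Horovod (hvd) has been initialized as in the surrounding code, is to wrap any activation tensor; the call is a pass-through identity whose only effect is the rank-0 print.

h = tf.layers.dense(inputs, 128, activation=tf.nn.relu)  # any activation tensor (inputs assumed)
h = print_act_stats(h, "dense_1")  # returns h unchanged; prints min/mean/max of mean and std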
Example 6: detect_min_val
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def detect_min_val(input_mat, var, threshold=1e-6, name='', debug=False):
    """
    If debug is not set, will run clipout_neg. Else, will clip and print out odd eigen values

    :param input_mat: (TensorFlow Tensor)
    :param var: (TensorFlow Tensor) variable
    :param threshold: (float) the cutoff threshold
    :param name: (str) the name of the variable
    :param debug: (bool) debug function
    :return: (TensorFlow Tensor) clipped tensor
    """
    eigen_min = tf.reduce_min(input_mat)
    eigen_max = tf.reduce_max(input_mat)
    eigen_ratio = eigen_max / eigen_min
    input_mat_clipped = clipout_neg(input_mat, threshold)

    if debug:
        input_mat_clipped = tf.cond(tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)),
                                    lambda: input_mat_clipped, lambda: tf.Print(
                                        input_mat_clipped,
                                        [tf.convert_to_tensor('odd ratio ' + name + ' eigen values!!!'),
                                         tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))

    return input_mat_clipped
Example 7: apply_stats_eigen
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def apply_stats_eigen(self, eigen_list):
    """
    apply the update using the eigen values of the stats

    :param eigen_list: ([TensorFlow Tensor]) The list of eigen values of the stats
    :return: ([TensorFlow Tensor]) update operations
    """
    update_ops = []
    if self.verbose > 1:
        print(('updating %d eigenvalue/vectors' % len(eigen_list)))
    for _, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)):
        stats_eigen_var = self.eigen_reverse_lookup[mark]
        update_ops.append(
            tf.assign(stats_eigen_var, tensor, use_locking=True))

    with tf.control_dependencies(update_ops):
        factor_step_op = tf.assign_add(self.factor_step, 1)
        update_ops.append(factor_step_op)
        if KFAC_DEBUG:
            update_ops.append(tf.Print(tf.constant(
                0.), [tf.convert_to_tensor('updated kfac factors')]))
    return update_ops
Example 8: decode_jpeg
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
      image_buffer: scalar string Tensor.
      scope: Optional scope for op_scope.
    Returns:
      3-D float Tensor with values ranging from [0, 1).
    """
    # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
    # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
    with tf.compat.v1.name_scope(scope or 'decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3)  # ,
        #                                          fancy_upscaling=False,
        #                                          dct_method='INTEGER_FAST')

        # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        return image
Example 9: decode_jpeg
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
      image_buffer: scalar string Tensor.
      scope: Optional scope for op_scope.
    Returns:
      3-D float Tensor with values ranging from [0, 1).
    """
    # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
    # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
    with tf.compat.v1.name_scope(scope or 'decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3,
                                     fancy_upscaling=False,
                                     dct_method='INTEGER_FAST')

        # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
        return image
Example 10: decode_jpeg
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
      image_buffer: scalar string Tensor.
      scope: Optional scope for op_scope.
    Returns:
      3-D float Tensor with values ranging from [0, 1).
    """
    # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
    # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
    with tf.compat.v1.name_scope(scope or 'decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3)  # ,
        #                                          fancy_upscaling=False,
        #                                          dct_method='INTEGER_FAST')

        # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        return image
Example 11: get_logits
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def get_logits(new_input, length, first=[]):
    """
    Compute the logits for a given waveform.

    First, preprocess with the TF version of MFC above,
    and then call DeepSpeech on the features.
    """
    # new_input = tf.Print(new_input, [tf.shape(new_input)])

    # We need to init DeepSpeech the first time we're called
    if first == []:
        first.append(False)
        # Okay, so this is ugly again.
        # We just want it to not crash.
        tf.app.flags.FLAGS.alphabet_config_path = "DeepSpeech/data/alphabet.txt"
        DeepSpeech.initialize_globals()
        print('initialized deepspeech globals')

    batch_size = new_input.get_shape()[0]

    # 1. Compute the MFCCs for the input audio
    # (this is differentiable with our implementation above)
    empty_context = np.zeros((batch_size, 9, 26), dtype=np.float32)
    new_input_to_mfcc = compute_mfcc(new_input)[:, ::2]
    features = tf.concat((empty_context, new_input_to_mfcc, empty_context), 1)

    # 2. We get to see 9 frames at a time to make our decision,
    # so concatenate them together.
    features = tf.reshape(features, [new_input.get_shape()[0], -1])
    features = tf.stack([features[:, i:i + 19 * 26] for i in range(0, features.shape[1] - 19 * 26 + 1, 26)], 1)
    features = tf.reshape(features, [batch_size, -1, 19 * 26])

    # 3. Whiten the data
    mean, var = tf.nn.moments(features, axes=[0, 1, 2])
    features = (features - mean) / (var ** .5)

    # 4. Finally we process it with DeepSpeech
    logits = DeepSpeech.BiRNN(features, length, [0] * 10)

    return logits
Example 12: fprop
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def fprop(self, x, **kwargs):
    mean = tf.reduce_mean(x)
    std = tf.sqrt(tf.reduce_mean(tf.square(x - mean)))
    return tf.Print(x,
                    [tf.reduce_min(x), mean, tf.reduce_max(x), std],
                    "Print layer")
Example 13: add_scalar_summary_op
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def add_scalar_summary_op(tensor, name=None,
                          summary_key='summaries', print_summary_key='print_summaries', prefix=''):
    collections = []
    op = tf.summary.scalar(name, tensor, collections=collections)
    if summary_key != print_summary_key:
        tf.add_to_collection(summary_key, op)
    op = tf.Print(op, [tensor], ' {:-<25s}: '.format(name) + prefix)
    tf.add_to_collection(print_summary_key, op)
    return op
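In a hypothetical training loop, the summaries collected under print_summary_key would be fetched explicitly so the tf.Print side effect fires; the tensor name and session below are assumptions.

add_scalar_summary_op(total_loss, name='total_loss')   # total_loss: an assumed scalar tensor
print_ops = tf.get_collection('print_summaries')
summary_strs = sess.run(print_ops)  # evaluating the ops triggers the tf.Print output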
Example 14: reorder_beam
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def reorder_beam(beam_size, batch_size, beam_val, output, is_first,
                 tensors_to_reorder):
    """Reorder to minimize beam costs."""
    # beam_val is [batch_size x beam_size]; let b = batch_size * beam_size
    # decided is len x b x a x b
    # output is b x out_size; step is b x len x a x b;
    outputs = tf.split(axis=0, num_or_size_splits=beam_size, value=tf.nn.log_softmax(output))
    all_beam_vals, all_beam_idx = [], []
    beam_range = 1 if is_first else beam_size
    for i in xrange(beam_range):
        top_out, top_out_idx = tf.nn.top_k(outputs[i], k=beam_size)
        cur_beam_val = beam_val[:, i]
        top_out = tf.Print(top_out, [top_out, top_out_idx, beam_val, i,
                                     cur_beam_val], "GREPO", summarize=8)
        all_beam_vals.append(top_out + tf.expand_dims(cur_beam_val, 1))
        all_beam_idx.append(top_out_idx)
    all_beam_idx = tf.reshape(tf.transpose(tf.concat(axis=1, values=all_beam_idx), [1, 0]),
                              [-1])
    top_beam, top_beam_idx = tf.nn.top_k(tf.concat(axis=1, values=all_beam_vals), k=beam_size)
    top_beam_idx = tf.Print(top_beam_idx, [top_beam, top_beam_idx],
                            "GREP", summarize=8)
    reordered = [[] for _ in xrange(len(tensors_to_reorder) + 1)]
    top_out_idx = []
    for i in xrange(beam_size):
        which_idx = top_beam_idx[:, i] * batch_size + tf.range(batch_size)
        top_out_idx.append(tf.gather(all_beam_idx, which_idx))
        which_beam = top_beam_idx[:, i] / beam_size  # [batch]
        which_beam = which_beam * batch_size + tf.range(batch_size)
        reordered[0].append(tf.gather(output, which_beam))
        for i, t in enumerate(tensors_to_reorder):
            reordered[i + 1].append(tf.gather(t, which_beam))
    new_tensors = [tf.concat(axis=0, values=t) for t in reordered]
    top_out_idx = tf.concat(axis=0, values=top_out_idx)
    return (top_beam, new_tensors[0], top_out_idx, new_tensors[1:])
Example 15: log_quaternion_loss_batch
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Print [as alias]
def log_quaternion_loss_batch(predictions, labels, params):
    """A helper function to compute the error between quaternions.

    Args:
      predictions: A Tensor of size [batch_size, 4].
      labels: A Tensor of size [batch_size, 4].
      params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'.

    Returns:
      A Tensor of size [batch_size], denoting the error between the quaternions.
    """
    use_logging = params['use_logging']
    assertions = []
    if use_logging:
        assertions.append(
            tf.Assert(
                tf.reduce_all(
                    tf.less(
                        tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1),
                        1e-4)),
                ['The l2 norm of each prediction quaternion vector should be 1.']))
        assertions.append(
            tf.Assert(
                tf.reduce_all(
                    tf.less(
                        tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)),
                ['The l2 norm of each label quaternion vector should be 1.']))
    with tf.control_dependencies(assertions):
        product = tf.multiply(predictions, labels)
    internal_dot_products = tf.reduce_sum(product, [1])
    if use_logging:
        internal_dot_products = tf.Print(
            internal_dot_products,
            [internal_dot_products, tf.shape(internal_dot_products)],
            'internal_dot_products:')
    logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products))
    return logcost