This article collects typical usage examples of the tensorflow.print method in Python. If you have been wondering how exactly tensorflow.print is used, or what it looks like in real code, the curated examples below may help. You can also explore other usage examples of the tensorflow module it belongs to.
The 15 code examples of tensorflow.print shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
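Before diving into the examples, here is a minimal sketch of the core pattern most of them rely on, assuming TensorFlow 1.x graph mode (the tensor names are illustrative): tf.print builds an operation that writes its arguments to stderr when executed, so in a graph it must either be run explicitly or attached to another op via tf.control_dependencies.

import tensorflow as tf  # assumes TensorFlow 1.x (graph mode)

x = tf.constant([1.0, 2.0, 3.0])
print_op = tf.print("x =", x, summarize=3)  # an op that logs when executed

# Attach the print to a computation so it fires whenever y is evaluated.
with tf.control_dependencies([print_op]):
    y = tf.identity(x * 2)

with tf.Session() as sess:
    sess.run(y)  # evaluating y also triggers the print

Under TensorFlow 2.x eager execution, tf.print simply prints immediately when called.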
Example 1: testLoss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def testLoss(self):
    batch_size = 2
    key_depth = 5
    val_depth = 5
    memory_size = 4
    window_size = 3
    x_depth = 5
    memory = transformer_memory.TransformerMemory(
        batch_size, key_depth, val_depth, memory_size)
    x = tf.random_uniform([batch_size, window_size, x_depth], minval=0.0)
    memory_results, _, _, _ = (
        memory.pre_attention(
            tf.random_uniform([batch_size], minval=0, maxval=1, dtype=tf.int32),
            x, None, None))
    x = memory.post_attention(memory_results, x)
    # Route tf.print through a control dependency so it runs whenever
    # is_nan is evaluated.
    with tf.control_dependencies([tf.print("x", x)]):
        is_nan = tf.reduce_any(tf.math.is_nan(x))
    with self.test_session() as session:
        session.run(tf.global_variables_initializer())
        for _ in range(100):
            is_nan_value, _ = session.run([is_nan, x])
            self.assertEqual(is_nan_value, False)
Example 2: multiline_print
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def multiline_print(lists):
    """Prints multiple lines of output using tf.print."""
    combined_list = []
    combined_list += lists[0]
    # We prepend newline characters to strings at the start of lines to avoid
    # the ugly space indentation that tf.print's behavior of separating
    # everything with a space would otherwise cause.
    for item in lists[1:]:
        if isinstance(item[0], str):
            combined_list += (("\n" + item[0],) + item[1:])
        else:
            combined_list += (("\n",) + item)
    return tf.print(*combined_list)
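A hypothetical call, assuming each entry of lists is a tuple whose first element may be a label string (step_t, loss_t, and acc_t are illustrative tensors):

op = multiline_print([
    ("step:", step_t),  # the first entry is used as-is
    ("loss:", loss_t),  # string label: "\n" is prepended to the label
    (acc_t,),           # no label: a bare "\n" element starts the line
])
# In graph mode, run `op` or add it as a control dependency to emit the text.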
Example 3: run_training_iteration
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def run_training_iteration(self, sess, summary_writer, iteration_number):
    feeds = sess.run(self.training_feeds)
    feed_dict = {
        self._placeholder_vertex_features: feeds[0],
        self._placeholder_image: feeds[1],
        self._placeholder_global_features: feeds[2],
        self._placeholder_cell_adj_matrix: feeds[3],
        self._placeholder_row_adj_matrix: feeds[4],
        self._placeholder_col_adj_matrix: feeds[5],
    }
    print("Training Iteration %d:" % iteration_number)
    ops_to_run = (self.graph_predicted_sampled_adj_matrices
                  + self.graph_gt_sampled_adj_matrices
                  + self.graph_sampled_indices
                  + [self.graph_optimizer, self.graph_prints,
                     self.graph_summaries_training])
    ops_result = sess.run(ops_to_run, feed_dict=feed_dict)
    # The training summary is the last op in the list.
    summary_writer.add_summary(ops_result[-1], iteration_number)
Example 4: print_metrics
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def print_metrics(metrics, step, every, name='metrics'):
    means, updates = [], []
    for key, value in metrics.items():
        key = 'metrics_{}_{}'.format(name, key)
        mean = tools.StreamingMean((), tf.float32, key)
        means.append(mean)
        updates.append(mean.submit(value))
    with tf.control_dependencies(updates):
        # message = 'step/' + '/'.join(metrics.keys()) + ' = '
        message = '{}: step/{} ='.format(name, '/'.join(metrics.keys()))
        gs = tf.train.get_or_create_global_step()
        print_metrics = tf.cond(
            tf.equal(step % every, 0),
            lambda: tf.print(message, [gs] + [mean.clear() for mean in means]),
            tf.no_op)
    return print_metrics
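A hypothetical wiring of this helper, assuming graph mode and scalar metric tensors (loss, score, and train_op are illustrative names; tools.StreamingMean is the project's own helper, not a TensorFlow API):

step = tf.train.get_or_create_global_step()
log_op = print_metrics({'loss': loss, 'score': score}, step, every=100)
with tf.control_dependencies([log_op]):
    train_op = tf.identity(train_op)  # metrics now print every 100 steps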
Example 5: collect_initial_episodes
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def collect_initial_episodes(config):
    items = config.random_collects.items()
    items = sorted(items, key=lambda x: x[0])
    existing = {}
    for name, params in items:
        outdir = params.save_episode_dir
        tf.gfile.MakeDirs(outdir)
        if outdir not in existing:
            existing[outdir] = len(tf.gfile.Glob(os.path.join(outdir, '*.npz')))
        # Reuse episodes already on disk before collecting new ones.
        if params.num_episodes <= existing[outdir]:
            existing[outdir] -= params.num_episodes
        else:
            remaining = params.num_episodes - existing[outdir]
            existing[outdir] = 0
            env_ctor = params.task.env_ctor
            print('Collecting {} initial episodes ({}).'.format(remaining, name))
            control.random_episodes(env_ctor, remaining, outdir)
Example 6: maybe_minimize
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def maybe_minimize(self, condition, loss):
    # loss = tf.cond(condition, lambda: loss, float)
    update_op, grad_norm = tf.cond(
        condition,
        lambda: self.minimize(loss),
        lambda: (tf.no_op(), 0.0))
    with tf.control_dependencies([update_op]):
        summary = tf.cond(
            tf.logical_and(condition, self._log),
            lambda: self.summarize(grad_norm), str)
    if self._debug:
        # print_op = tf.print('{}_grad_norm='.format(self._name), grad_norm)
        message = 'Zero gradient norm in {} optimizer.'.format(self._name)
        assertion = lambda: tf.assert_greater(grad_norm, 0.0, message=message)
        assert_op = tf.cond(condition, assertion, tf.no_op)
        with tf.control_dependencies([assert_op]):
            summary = tf.identity(summary)
    return summary, grad_norm
Example 7: gen_training_input
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def gen_training_input(total_size, nb_feats, batch_size):
    """Generate random data for training."""
    x_np = np.random.uniform(-0.5, 0.5, size=[total_size, nb_feats])
    y_np = np.array(x_np.mean(axis=1) > 0, np.float32)
    train_set = (
        tf.data.Dataset.from_tensor_slices((x_np, y_np))
        .map(norm)
        .shuffle(buffer_size=100)
        .repeat()
        .batch(batch_size)
    )
    train_set_iterator = train_set.make_one_shot_iterator()
    x, y = train_set_iterator.get_next()
    x = tf.reshape(x, [batch_size, nb_feats])
    y = tf.reshape(y, [batch_size, 1])
    # tf.print(x, data=[x], message="x: ", summarize=6)
    return x, y
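The commented-out line above still uses the keyword style of the deprecated tf.Print (data=, message=); tf.print takes its inputs positionally, so the working equivalent would be:

print_op = tf.print("x: ", x, summarize=6)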
Example 8: evaluate
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def evaluate(self, sess, x, y, data_owner):
    """Return the accuracy."""
    def print_accuracy(y_hat, y) -> tf.Operation:
        with tf.name_scope("print-accuracy"):
            correct_prediction = tf.equal(tf.round(y_hat), y)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            print_op = tf.print(
                "Accuracy on {}:".format(data_owner.player_name), accuracy
            )
            return print_op

    with tf.name_scope("evaluate"):
        y_hat = self.forward(x)
        print_accuracy_op = tfe.define_output(
            data_owner.player_name, [y_hat, y], print_accuracy
        )
    sess.run(print_accuracy_op, tag="evaluate")
Example 9: cond
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def cond(
    self,
    i: tf.Tensor,
    max_iter: tf.Tensor,
    nb_epochs: tf.Tensor,
    avg_loss: tf.Tensor,
):
    """Check if training termination condition has been met."""
    is_end_epoch = tf.equal(i % max_iter, 0)
    to_continue = tf.cast(i < max_iter * nb_epochs, tf.bool)

    def true_fn() -> tf.Tensor:
        # Log the average loss at the end of each epoch; the print is attached
        # as a control dependency so both branches return the same bool tensor.
        with tf.control_dependencies([tf.print("avg_loss: ", avg_loss)]):
            return tf.identity(to_continue)

    def false_fn() -> tf.Tensor:
        return to_continue

    return tf.cond(is_end_epoch, true_fn, false_fn)
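Both branches of tf.cond must return values of matching type and structure, which is why the log rides along as a control dependency on the returned flag rather than being returned itself. The same pattern in isolation (flag and value are illustrative names):

import tensorflow as tf  # assumes TF 1.x graph mode

flag = tf.constant(True)
value = tf.constant(1.0)

def logged_branch():
    with tf.control_dependencies([tf.print("true branch, value =", value)]):
        return tf.identity(value)

result = tf.cond(flag, logged_branch, lambda: value)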
Example 10: debug
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def debug(x: PondTensor, summarize=None, message=""):
    """Print contents of a PondTensor for debugging purposes."""
    # Note: unlike the deprecated tf.Print, tf.print takes its inputs
    # positionally and has no `message` keyword.
    if isinstance(x, PondPublicTensor):
        tf.print(message, x.value_on_0.value, summarize=summarize)
    elif isinstance(x, PondPrivateTensor):
        tf.print(message, x.reveal().value_on_0.value, summarize=summarize)
    else:
        raise TypeError("Don't know how to debug {}".format(type(x)))
Example 11: train
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def train(self, train_dataset):
    """Main training call for the CVAE."""
    num_samples = int(train_dataset.shape[0] / self.batch_size)
    train_dataset = tf.data.Dataset.from_tensor_slices(train_dataset).shuffle(
        train_dataset.shape[0]).batch(self.batch_size)
    for i in range(self.epochs):
        j = 1
        norm = 0
        Loss = 0
        print("Epoch: %s" % str(i + 1))
        for train_x in train_dataset:
            gradients, loss = self.compute_gradients(train_x)
            Loss += loss
            norm += tf.reduce_mean([tf.norm(g) for g in gradients])
            self.apply_gradients(gradients)
            if j != 1 and j % 20 == 0:
                # Good to print out the euclidean norm of the gradients.
                tf.print("Epoch: %s, Batch: %s/%s" % (i + 1, j, num_samples))
                tf.print("Mean-Loss: ", Loss / j, ", Mean gradient-norm: ", norm / j)
            j += 1
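Unlike the graph-mode snippets above, this training loop runs eagerly (TensorFlow 2.x style), where tf.print executes immediately rather than returning an op to run later:

import tensorflow as tf  # assumes TF 2.x with eager execution

loss = tf.constant(0.5)
tf.print("Mean-Loss:", loss)  # prints right away, no session needed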
Example 12: _distributed_epoch
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def _distributed_epoch(self, dataset, step):
    total_loss = 0.0
    num_batches = 0.0
    for batch in dataset:
        if self.writer is not None:
            with self.writer.as_default():
                tf.summary.image(
                    "Training data",
                    tf.cast(batch[0] * 255, tf.uint8),
                    max_outputs=8)
        per_replica_loss = self._distribution_strategy.experimental_run_v2(
            self._train_step if step else self._val_step, args=(batch,))
        total_loss += self._distribution_strategy.reduce(
            tf.distribute.ReduceOp.SUM, per_replica_loss,
            axis=None)
        num_batches += 1.0
        tf.print(num_batches, ':', total_loss / num_batches, sep='')
    total_loss = total_loss / num_batches
    return total_loss
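The sep='' keyword above joins the printed pieces without the default single-space separator. tf.print also accepts end and output_stream keywords, for example:

import sys
import tensorflow as tf

# Prints "batch3:0.25" to stdout instead of the default stderr.
tf.print("batch", 3, ":", 0.25, sep='', end='\n', output_stream=sys.stdout)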
Example 13: _info
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def _info(self):
    parent_info = tfds.object_detection.voc.Voc().info
    print(parent_info)
    return tfds.core.DatasetInfo(
        builder=self,
        description=parent_info.description,
        features=tfds.features.FeaturesDict(
            {
                "image": tfds.features.Image(shape=(None, None, 3)),
                "image/filename": tfds.features.Text(),
                "label": tfds.features.Image(shape=(None, None, 1)),
            }
        ),
        homepage=parent_info.homepage,
        citation=parent_info.citation,
    )
Example 14: save_config
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def save_config(config, logdir=None):
    if logdir:
        with config.unlocked:
            config.logdir = logdir
        message = 'Start a new run and write summaries and checkpoints to {}.'
        print(message.format(config.logdir))
        tf.gfile.MakeDirs(config.logdir)
        config_path = os.path.join(config.logdir, 'config.yaml')
        with tf.gfile.GFile(config_path, 'w') as file_:
            yaml.dump(
                config, file_, yaml.Dumper,
                allow_unicode=True,
                default_flow_style=False)
    else:
        message = (
            'Start a new run without storing summaries and checkpoints since no '
            'logging directory was specified.')
        print(message)
    return config
Example 15: print_metrics
# Required import: import tensorflow [as alias]
# Or: from tensorflow import print [as alias]
def print_metrics(metrics, step, every, decimals=2, name='metrics'):
    factor = 10 ** decimals
    means, updates = [], []
    for key, value in metrics.items():
        key = 'metrics_{}_{}'.format(name, key)
        mean = tools.StreamingMean((), tf.float32, key)
        means.append(mean)
        updates.append(mean.submit(value))
    with tf.control_dependencies(updates):
        message = '{}: step/{} ='.format(name, '/'.join(metrics.keys()))
        print_metrics = tf.cond(
            tf.equal(step % every, 0),
            lambda: tf.print(message, [step] + [
                tf.round(mean.clear() * factor) / factor for mean in means]),
            tf.no_op)
    return print_metrics
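The multiply-round-divide trick truncates the printed values to decimals places: with decimals=2, factor is 100, so a mean of 0.12345 prints as tf.round(12.345) / 100 = 0.12. In isolation:

import tensorflow as tf

factor = 10 ** 2
value = tf.constant(0.12345)
tf.print(tf.round(value * factor) / factor)  # prints 0.12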