This article collects typical usage examples of the Python method tensorflow.python.platform.tf_logging.info. If you are wondering what tf_logging.info does, how to call it, or what real uses of it look like, the curated examples below may help. You can also explore further usage examples from the containing module, tensorflow.python.platform.tf_logging.
The following presents 15 code examples of the tf_logging.info method, sorted by popularity by default.
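Before the examples, here is a minimal, self-contained sketch of the method itself (assuming TensorFlow 1.x, where tf_logging thinly wraps Python's standard logging module with printf-style argument formatting):

# Minimal usage sketch of tf_logging.info (TensorFlow 1.x assumed).
from tensorflow.python.platform import tf_logging as logging

logging.set_verbosity(logging.INFO)  # INFO messages are suppressed by default
logging.info('Processed %d of %d records', 10, 100)  # args formatted lazily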
Example 1: _clip_gradients
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def _clip_gradients(self, grad):
  """Clips gradients if the hyperparameter `gradient_clip_norm` requires it.

  Sparse tensors, in the form of IndexedSlices returned for the
  gradients of embeddings, require special handling.

  Args:
    grad: Gradient Tensor, IndexedSlices, or None.

  Returns:
    Optionally clipped gradient.
  """
  if grad is not None and self.hyperparams.gradient_clip_norm > 0:
    logging.info('Clipping gradient %s', grad)
    if isinstance(grad, tf.IndexedSlices):
      tmp = tf.clip_by_norm(grad.values, self.hyperparams.gradient_clip_norm)
      return tf.IndexedSlices(tmp, grad.indices, grad.dense_shape)
    else:
      return tf.clip_by_norm(grad, self.hyperparams.gradient_clip_norm)
  else:
    return grad
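A hedged, standalone sketch of the clipping logic above (TensorFlow 1.x assumed; the tensors are made up): tf.clip_by_norm rescales a dense tensor, while for an IndexedSlices only the .values are clipped and the indices are reused.

# Hypothetical demonstration of the dense vs. IndexedSlices branches.
import tensorflow as tf

dense = tf.constant([3.0, 4.0])                          # L2 norm 5.0
sparse = tf.IndexedSlices(values=tf.constant([[6.0, 8.0]]),
                          indices=tf.constant([0]))
clipped_dense = tf.clip_by_norm(dense, 1.0)              # rescaled to norm 1.0
clipped_sparse = tf.IndexedSlices(
    tf.clip_by_norm(sparse.values, 1.0), sparse.indices)
with tf.Session() as sess:
  print(sess.run(clipped_dense))          # [0.6 0.8]
  print(sess.run(clipped_sparse.values))  # [[0.6 0.8]]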
Example 2: RunTraining
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def RunTraining(self, hyperparam_config):
  master_spec = self.LoadSpec('master_spec_link.textproto')

  self.assertTrue(isinstance(hyperparam_config, spec_pb2.GridPoint))
  gold_doc = sentence_pb2.Sentence()
  text_format.Parse(_DUMMY_GOLD_SENTENCE, gold_doc)
  gold_doc_2 = sentence_pb2.Sentence()
  text_format.Parse(_DUMMY_GOLD_SENTENCE_2, gold_doc_2)
  reader_strings = [
      gold_doc.SerializeToString(), gold_doc_2.SerializeToString()
  ]
  tf.logging.info('Generating graph with config: %s', hyperparam_config)

  with tf.Graph().as_default():
    builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)

    target = spec_pb2.TrainTarget()
    target.name = 'testTraining-all'
    train = builder.add_training_from_config(target)
    with self.test_session() as sess:
      logging.info('Initializing')
      sess.run(tf.global_variables_initializer())

      # Run one iteration of training and verify nothing crashes.
      logging.info('Training')
      sess.run(train['run'], feed_dict={train['input_batch']: reader_strings})
Example 3: checkOpOrder
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def checkOpOrder(self, name, endpoint, expected_op_order):
  """Checks that ops ending up at root are called in the expected order.

  To check the order, we find a path along the directed graph formed by
  the inputs of each op. If op X has a chain of inputs to op Y, then X
  cannot be executed before Y. There may be multiple paths between any two
  ops, but the ops along any path are executed in that order. Therefore, we
  look up the expected ops in reverse order.

  Args:
    name: string name of the endpoint, for logging.
    endpoint: node whose execution we want to check.
    expected_op_order: string list of op types, in the order we expect them
      to be executed leading up to `endpoint`.
  """
  for target in reversed(expected_op_order):
    path = _find_input_path_to_type(endpoint, target)
    self.assertNotEmpty(path)
    logging.info('path[%d] from %s to %s: %s', len(path), name, target,
                 [_as_op(x).type for x in path])
    endpoint = path[-1]
Example 4: add_regularizer
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def add_regularizer(self, cost):
  """Adds L2 regularization for parameters which have it turned on.

  Args:
    cost: float cost before regularization.

  Returns:
    Updated cost optionally including regularization.
  """
  if self.network is None:
    return cost
  regularized_weights = self.network.get_l2_regularized_weights()
  if not regularized_weights:
    return cost
  l2_coeff = self.master.hyperparams.l2_regularization_coefficient
  if l2_coeff == 0.0:
    return cost
  tf.logging.info('[%s] Regularizing parameters: %s', self.name,
                  [w.name for w in regularized_weights])
  l2_costs = [tf.nn.l2_loss(p) for p in regularized_weights]
  return tf.add(cost, l2_coeff * tf.add_n(l2_costs), name='regularizer')
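The regularization arithmetic is easy to check in isolation. A small sketch (TensorFlow 1.x assumed; the weights, cost, and coefficient are made up):

# tf.nn.l2_loss(w) computes sum(w ** 2) / 2 per tensor.
import tensorflow as tf

weights = [tf.constant([3.0, 4.0]), tf.constant([1.0])]  # l2_loss: 12.5 and 0.5
cost = tf.constant(2.0)
l2_coeff = 0.01
l2_costs = [tf.nn.l2_loss(w) for w in weights]
total = tf.add(cost, l2_coeff * tf.add_n(l2_costs), name='regularizer')
with tf.Session() as sess:
  print(sess.run(total))  # 2.0 + 0.01 * 13.0 = 2.13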
Example 5: ValidateDocuments
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def ValidateDocuments(self):
  doc_source = gen_parser_ops.document_source(
      task_context=self.context_file, batch_size=1)
  with self.test_session() as sess:
    logging.info('Reading document1')
    doc, last = self.ReadNextDocument(sess, doc_source)
    self.assertEqual(len(doc.token), 12)
    self.assertEqual(u'लाजमी', doc.token[9].word)
    self.assertFalse(last)
    logging.info('Reading document2')
    doc, last = self.ReadNextDocument(sess, doc_source)
    self.assertEqual(len(doc.token), 13)
    self.assertEqual(u'भंग', doc.token[9].word)
    self.assertFalse(last)
    logging.info('Hitting end of the dataset')
    doc, last = self.ReadNextDocument(sess, doc_source)
    self.assertTrue(doc is None)
    self.assertTrue(last)
Example 6: CheckUntokenizedDoc
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def CheckUntokenizedDoc(self, sentence, words, starts, ends):
  self.WriteContext('untokenized-text')
  logging.info('Writing text file to: %s', self.corpus_file)
  with open(self.corpus_file, 'w') as f:
    f.write(sentence)
  sentence, _ = gen_parser_ops.document_source(
      task_context=self.context_file, batch_size=1)
  with self.test_session() as sess:
    sentence_doc = self.ReadNextDocument(sess, sentence)
    self.assertEqual(len(sentence_doc.token), len(words))
    self.assertEqual(len(sentence_doc.token), len(starts))
    self.assertEqual(len(sentence_doc.token), len(ends))
    for i, token in enumerate(sentence_doc.token):
      self.assertEqual(token.word.encode('utf-8'), words[i])
      self.assertEqual(token.start, starts[i])
      self.assertEqual(token.end, ends[i])
Example 7: PathScores
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def PathScores(self, iterations, beam_size, max_steps, batch_size):
  with self.test_session(graph=tf.Graph()) as sess:
    t = self.MakeGraph(beam_size=beam_size, max_steps=max_steps,
                       batch_size=batch_size).training
    sess.run(t['inits'])
    all_path_scores = []
    beam_path_scores = []
    for i in range(iterations):
      logging.info('run %d', i)
      tensors = sess.run(
          [t['alive_steps'], t['concat_scores'],
           t['all_path_scores'], t['beam_path_scores'],
           t['indices'], t['path_ids']])
      logging.info('alive for %s, all_path_scores and beam_path_scores, '
                   'indices and path_ids:'
                   '\n%s\n%s\n%s\n%s',
                   tensors[0], tensors[2], tensors[3], tensors[4], tensors[5])
      logging.info('diff:\n%s', tensors[2] - tensors[3])
      all_path_scores.append(tensors[2])
      beam_path_scores.append(tensors[3])
    return all_path_scores, beam_path_scores
Example 8: main
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def main(unused_argv):
  logging.set_verbosity(logging.INFO)
  with tf.Session() as sess:
    src = gen_parser_ops.document_source(batch_size=32,
                                         corpus_name=FLAGS.corpus_name,
                                         task_context=FLAGS.task_context)
    sentence = sentence_pb2.Sentence()
    while True:
      documents, finished = sess.run(src)
      logging.info('Read %d documents', len(documents))
      for d in documents:
        sentence.ParseFromString(d)
        tr = asciitree.LeftAligned()
        d = to_dict(sentence)
        print('Input: %s' % sentence.text)
        print('Parse:')
        tr_str = tr(d)
        pat = re.compile(r'\s*@\d+$')
        for tr_ln in tr_str.splitlines():
          print(pat.sub('', tr_ln))
      if finished:
        break
Example 9: _AddOutputs
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def _AddOutputs(self, prev_layer, out_dims, out_func, num_classes):
  """Adds the output layer and loss function.

  Args:
    prev_layer: Output of last layer of main network.
    out_dims: Number of output dimensions, 0, 1 or 2.
    out_func: Output non-linearity. 's' or 'c'=softmax, 'l'=logistic.
    num_classes: Number of outputs/size of last output dimension.
  """
  height_in = shapes.tensor_dim(prev_layer, dim=1)
  logits, outputs = self._AddOutputLayer(prev_layer, out_dims, out_func,
                                         num_classes)
  if self.mode == 'train':
    # Setup loss for training.
    self.loss = self._AddLossFunction(logits, height_in, out_dims, out_func)
    tf.summary.scalar('loss', self.loss)
  elif out_dims == 0:
    # Be sure the labels match the output, even in eval mode.
    self.labels = tf.slice(self.labels, [0, 0], [-1, 1])
    self.labels = tf.reshape(self.labels, [-1])
  logging.info('Final output=%s', outputs)
  logging.info('Labels tensor=%s', self.labels)
  self.output = outputs
Example 10: add_gradients_summaries
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '/gradient', grad_values))
      summaries.append(
          summary.scalar(var.op.name + '/gradient_norm',
                         clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)
  return summaries
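A hedged sketch of wiring this helper into a training graph (TensorFlow 1.x assumed; the variable, loss, and optimizer are made up, and the snippet assumes the ops/summary/clip_ops/logging imports that the function above already relies on):

# Hypothetical usage of add_gradients_summaries as defined above.
import tensorflow as tf

x = tf.Variable([1.0, 2.0], name='x')
loss = tf.reduce_sum(tf.square(x))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
grads_and_vars = optimizer.compute_gradients(loss)
summaries = add_gradients_summaries(grads_and_vars)
merged = tf.summary.merge(summaries)  # one histogram and one norm scalar per var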
Example 11: _get_first_op_from_collection
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def _get_first_op_from_collection(self, key):
  """Returns the first `Operation` from a collection.

  Args:
    key: A string collection key.

  Returns:
    The first Op found in a collection, or `None` if the collection is empty.
  """
  try:
    op_list = ops.get_collection(key)
    if len(op_list) > 1:
      logging.info("Found %d %s operations. Returning the first one.",
                   len(op_list), key)
    if op_list:
      return op_list[0]
  except LookupError:
    pass
  return None
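A minimal sketch of the collection lookup this helper wraps (TensorFlow 1.x assumed; the collection key and ops are made up):

import tensorflow as tf

tf.add_to_collection('my_ops', tf.no_op(name='first'))
tf.add_to_collection('my_ops', tf.no_op(name='second'))
op_list = tf.get_collection('my_ops')
print(op_list[0].name)  # 'first' -- the element the helper above would return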
Example 12: report_benchmark
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def report_benchmark(
    self,
    iters=None,
    cpu_time=None,
    wall_time=None,
    throughput=None,
    extras=None,
    name=None):
  """Report a benchmark.

  Args:
    iters: (optional) How many iterations were run.
    cpu_time: (optional) Total cpu time in seconds.
    wall_time: (optional) Total wall time in seconds.
    throughput: (optional) Throughput (in MB/s).
    extras: (optional) Dict mapping string keys to additional benchmark info.
      Values may be either floats or values that are convertible to strings.
    name: (optional) Override the BenchmarkEntry name with `name`.
      Otherwise it is inferred from the top-level method name.
  """
  name = self._get_name(overwrite_name=name)
  _global_report_benchmark(
      name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
      throughput=throughput, extras=extras)
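For context, a hedged sketch of how report_benchmark is typically invoked from a tf.test.Benchmark subclass (the class name, method name, and numbers are made up; real benchmarks measure the values they report):

import tensorflow as tf

class ExampleBenchmark(tf.test.Benchmark):

  def benchmarkNoop(self):
    # Placeholder values; report_benchmark logs and records them.
    self.report_benchmark(iters=100, wall_time=0.5, name='noop')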
Example 13: run
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
  while True:
    try:
      if not self._sess:
        self._sess = self._create_session()
      return self._sess.run(fetches,
                            feed_dict=feed_dict,
                            options=options,
                            run_metadata=run_metadata)
    except _PREEMPTION_ERRORS as e:
      logging.info('An error was raised. This may be due to a preemption in '
                   'a connected worker or parameter server. The current '
                   'session will be closed and a new session will be '
                   'created. Error: %s', e)
      self.close()
      self._sess = None
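The retry loop depends on a _PREEMPTION_ERRORS tuple defined elsewhere in the module. A sketch of what it plausibly contains (an assumption based on how distributed TensorFlow signals preemption; AbortedError and UnavailableError are raised when a worker or parameter server goes away):

from tensorflow.python.framework import errors

_PREEMPTION_ERRORS = (errors.AbortedError, errors.UnavailableError)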
Example 14: after_run
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def after_run(self, run_context, run_values):
  _ = run_context
  if self._should_trigger:
    original = np.get_printoptions()
    np.set_printoptions(suppress=True)
    elapsed_secs, _ = self._timer.update_last_triggered_step(self._iter_count)
    if self._formatter:
      logging.info(self._formatter(run_values.results))
    else:
      stats = []
      for tag in self._tag_order:
        stats.append("%s = %s" % (tag, run_values.results[tag]))
      if elapsed_secs is not None:
        logging.info("%s (%.3f sec)", ", ".join(stats), elapsed_secs)
      else:
        logging.info("%s", ", ".join(stats))
    np.set_printoptions(**original)
  self._iter_count += 1
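This after_run resembles the hook behind tf.train.LoggingTensorHook, which logs fetched tensor values through tf_logging.info. A hedged construction sketch (the tensor and interval are made up):

import tensorflow as tf

x = tf.constant(3.0, name='x')
# Logs 'x = 3.0' every iteration when attached to a MonitoredSession.
hook = tf.train.LoggingTensorHook({'x': x}, every_n_iter=1)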
Example 15: before_run
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Or: from tensorflow.python.platform.tf_logging import info [as alias]
def before_run(self, run_context):
  if self._worker_is_started:
    return None

  if self._wait_until_step <= 0:
    self._worker_is_started = True
    return None

  logging.info("Waiting for global step %d before starting training.",
               self._wait_until_step)
  last_logged_step = 0
  while True:
    current_step = run_context.session.run(self._global_step_tensor)
    if current_step >= self._wait_until_step:
      self._worker_is_started = True
      return None
    if current_step - last_logged_step > 1000:
      logging.info("Waiting for global step %d before starting training. "
                   "Current step is %d.", self._wait_until_step, current_step)
      last_logged_step = current_step
    time.sleep(0.5)
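This before_run resembles tf.train.GlobalStepWaiterHook, which delays a worker until the chief has advanced the global step. A hedged attachment sketch (the step threshold is made up; wait_until_step=0 starts immediately so the snippet terminates):

import tensorflow as tf

tf.train.create_global_step()
hooks = [tf.train.GlobalStepWaiterHook(wait_until_step=0)]
with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
  pass  # training loop would go here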