This article collects typical usage examples of the Python method tensorflow.python.ops.logging_ops.scalar_summary. If you have been wondering what logging_ops.scalar_summary does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples from the module tensorflow.python.ops.logging_ops that this method belongs to.
Six code examples of the logging_ops.scalar_summary method are shown below, listed roughly in order of popularity.
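Before diving into the examples, here is a minimal, hedged sketch of how scalar_summary is typically invoked in TF 1.x-era graph code (the tag string and the loss tensor below are illustrative and not taken from any of the examples; newer TensorFlow releases replace this op with tf.summary.scalar):

import tensorflow as tf
from tensorflow.python.ops import logging_ops

# Any scalar tensor can be summarized; this one is just a placeholder value.
loss = tf.reduce_mean(tf.square(tf.random_normal([8])))
# Returns a string tensor holding a serialized Summary proto and, by default,
# adds the op to the GraphKeys.SUMMARIES collection.
loss_summary = logging_ops.scalar_summary("train/loss", loss)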
Example 1: create_queue
# Required import: from tensorflow.python.ops import logging_ops [as alias]
# Or: from tensorflow.python.ops.logging_ops import scalar_summary [as alias]
def create_queue(self, shared_name=None, name=None):
    from tensorflow.python.ops import data_flow_ops, logging_ops, math_ops
    from tensorflow.python.framework import dtypes
    assert self.dtypes is not None and self.shapes is not None
    assert len(self.dtypes) == len(self.shapes)
    capacity = self.queue_size
    self._queue = data_flow_ops.FIFOQueue(
        capacity=capacity,
        dtypes=self.dtypes,
        shapes=self.shapes,
        shared_name=shared_name,
        name=name)
    enq = self._queue.enqueue_many(self.batch_phs)
    # create a queue runner
    queue_runner.add_queue_runner(queue_runner.QueueRunner(
        self._queue, [enq] * self.nthreads,
        feed_dict_op=[lambda: self.next_batch()],
        feed_dict_key=self.batch_phs))
    # summary_name = 'fraction_of_%d_full' % capacity
    # logging_ops.scalar_summary("queue/%s/%s" % (
    #     self._queue.name, summary_name), math_ops.cast(
    #     self._queue.size(), dtypes.float32) * (1. / capacity))
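The commented-out lines at the end show the conventional queue-health summary: casting self._queue.size() to float and multiplying by 1/capacity yields the fill fraction of the FIFOQueue, which scalar_summary would publish under a "queue/<queue name>/fraction_of_N_full" tag for monitoring the input pipeline in TensorBoard. Note also that the queue_runner used here accepts feed_dict_op and feed_dict_key arguments, which suggests a project-specific runner rather than the stock tf.train.QueueRunner.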
Example 2: _add_hidden_layer_summary
# Required import: from tensorflow.python.ops import logging_ops [as alias]
# Or: from tensorflow.python.ops.logging_ops import scalar_summary [as alias]
def _add_hidden_layer_summary(value, tag):
  # Fraction of zero-valued activations (handy for spotting dead ReLU units).
  logging_ops.scalar_summary("%s/fraction_of_zero_values" % tag,
                             nn.zero_fraction(value))
  logging_ops.histogram_summary("%s/activation" % tag, value)
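A hedged usage sketch of this helper, assuming the module-level logging_ops and nn imports it relies on and an illustrative dense layer (the layer construction and tag name below are not part of the original snippet):

import tensorflow as tf

inputs = tf.random_normal([32, 64])
hidden = tf.layers.dense(inputs, 128, activation=tf.nn.relu)
# Records the share of inactive ReLU units plus the full activation histogram.
_add_hidden_layer_summary(hidden, "hiddenlayer_0")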
Example 3: _training_loss
# Required import: from tensorflow.python.ops import logging_ops [as alias]
# Or: from tensorflow.python.ops.logging_ops import scalar_summary [as alias]
def _training_loss(features,
                   labels,
                   logits,
                   loss_fn,
                   weight_column_name=None,
                   head_name=None):
  """Returns training loss tensor.

  Training loss is different from the loss reported on TensorBoard because we
  must respect the example weights when computing the gradient:

    L = sum_{i} w_{i} * l_{i} / B

  where B is the number of examples in the batch, and l_{i} and w_{i} are the
  loss and example weight of the i-th example.

  Args:
    features: Features `dict`.
    labels: Either a `Tensor` for labels or, in the multihead case, a `dict` of
      string to `Tensor`.
    logits: logits, a float `Tensor`. Shape is `(batch_size, logits_dimension)`.
    loss_fn: Function taking `logits` and `labels`, and returning the raw
      unweighted loss.
    weight_column_name: Key for weights `Tensor` in `features`, if applicable.
    head_name: Head name, used for summary.

  Returns:
    A loss `Tensor`.
  """
  with ops.name_scope(None, "training_loss",
                      tuple(six.itervalues(features)) +
                      (labels, logits)) as name:
    loss, weighted_average_loss = _loss(
        loss_fn(logits, labels),
        _weight_tensor(features, weight_column_name),
        name=name)
    # The tag must be the same as the tag for the eval loss, so the losses show
    # up in the same graph in TensorBoard.
    logging_ops.scalar_summary(
        _summary_key(head_name, "loss"), weighted_average_loss)
    return loss
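To make the docstring's distinction concrete, here is a small numeric sketch. It assumes, based only on the docstring and the variable names (the _loss helper itself is not shown), that the training loss divides the weighted per-example losses by the batch size B, while the weighted_average_loss that gets summarized divides by the total weight instead:

# Illustrative numbers only; not tied to any real model.
per_example_losses = [1.0, 2.0, 4.0]   # l_i
weights = [1.0, 0.0, 3.0]              # w_i
batch_size = len(per_example_losses)   # B

weighted = [w * l for w, l in zip(weights, per_example_losses)]  # [1.0, 0.0, 12.0]
training_loss = sum(weighted) / batch_size      # 13/3 ~= 4.33, drives the gradient
reported_loss = sum(weighted) / sum(weights)    # 13/4 = 3.25, assumed TensorBoard value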
Example 4: _get_model_function
# Required import: from tensorflow.python.ops import logging_ops [as alias]
# Or: from tensorflow.python.ops.logging_ops import scalar_summary [as alias]
def _get_model_function(self):
  """Creates a model function."""
  def _model_fn(features, labels, mode):
    """Model function."""
    assert labels is None, labels
    (all_scores, model_predictions, losses,
     training_op) = clustering_ops.KMeans(
         self._parse_tensor_or_dict(features),
         self._num_clusters,
         self._training_initial_clusters,
         self._distance_metric,
         self._use_mini_batch,
         random_seed=self._random_seed,
         kmeans_plus_plus_num_retries=self._kmeans_plus_plus_num_retries
     ).training_graph()
    incr_step = state_ops.assign_add(variables.get_global_step(), 1)
    loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)
    logging_ops.scalar_summary('loss/raw', loss)
    training_op = with_dependencies([training_op, incr_step], loss)
    predictions = {
        KMeansClustering.ALL_SCORES: all_scores[0],
        KMeansClustering.CLUSTER_IDX: model_predictions[0],
    }
    eval_metric_ops = {KMeansClustering.SCORES: loss}
    if self._relative_tolerance is not None:
      training_hooks = [self.LossRelativeChangeHook(self._relative_tolerance)]
    else:
      training_hooks = None
    return ModelFnOps(
        mode=mode,
        predictions=predictions,
        eval_metric_ops=eval_metric_ops,
        loss=loss,
        train_op=training_op,
        training_hooks=training_hooks)
  return _model_fn
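Two details are worth noting in this model function: the summary is tagged 'loss/raw' because the value handed to scalar_summary is the unaveraged sum of the losses tensor returned by the k-means training graph, and with_dependencies chains the global-step increment onto the clustering training op, so each training step both updates the clusters and advances the step counter while still yielding the loss value.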
Example 5: _add_hidden_layer_summary
# Required import: from tensorflow.python.ops import logging_ops [as alias]
# Or: from tensorflow.python.ops.logging_ops import scalar_summary [as alias]
def _add_hidden_layer_summary(value, tag):
  logging_ops.scalar_summary("%s:fraction_of_zero_values" % tag,
                             nn.zero_fraction(value))
  logging_ops.histogram_summary("%s:activation" % tag, value)
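This variant is functionally identical to Example 2 except for the tag separator: it uses ':' instead of '/' between the layer tag and the metric name. TensorBoard groups scalars by the '/'-separated prefix, so the choice of separator changes how these summaries are grouped in the dashboard.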
Example 6: _create_model_fn_ops
# Required import: from tensorflow.python.ops import logging_ops [as alias]
# Or: from tensorflow.python.ops.logging_ops import scalar_summary [as alias]
def _create_model_fn_ops(features,
                         mode,
                         loss_fn,
                         logits_to_predictions_fn,
                         metrics_fn,
                         create_output_alternatives_fn,
                         labels=None,
                         train_op_fn=None,
                         logits=None,
                         logits_dimension=None,
                         head_name=None,
                         weight_column_name=None,
                         enable_centered_bias=False):
  """Returns a `ModelFnOps` object."""
  _check_mode_valid(mode)
  centered_bias = None
  if enable_centered_bias:
    centered_bias = _centered_bias(logits_dimension, head_name)
    logits = nn.bias_add(logits, centered_bias)
  predictions = logits_to_predictions_fn(logits)
  loss = None
  train_op = None
  eval_metric_ops = None
  if (mode != model_fn.ModeKeys.INFER) and (labels is not None):
    weight_tensor = _weight_tensor(features, weight_column_name)
    loss, weighted_average_loss = loss_fn(labels, logits, weight_tensor)
    # Uses the deprecated API to set the tag explicitly.
    # Without it, training and eval losses will show up in different graphs.
    logging_ops.scalar_summary(
        _summary_key(head_name, mkey.LOSS), weighted_average_loss)
    if mode == model_fn.ModeKeys.TRAIN:
      if train_op_fn is None:
        raise ValueError("train_op_fn can not be None in TRAIN mode")
      batch_size = array_ops.shape(logits)[0]
      train_op = _train_op(loss, labels, train_op_fn, centered_bias,
                           batch_size, loss_fn, weight_tensor)
    eval_metric_ops = metrics_fn(
        weighted_average_loss, predictions, labels, weight_tensor)
  return model_fn.ModelFnOps(
      mode=mode,
      predictions=predictions,
      loss=loss,
      train_op=train_op,
      eval_metric_ops=eval_metric_ops,
      output_alternatives=create_output_alternatives_fn(predictions))
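Both this example and Example 3 route the tag through a _summary_key helper so that training and evaluation write the loss to the same TensorBoard tag. The helper itself is not shown anywhere on this page; the following is only a hypothetical sketch of what such a helper typically does, not the verified tf.contrib.learn implementation:

def _summary_key(head_name, key):
    # Hypothetical: prefix the key with the head name so multi-head models keep
    # their loss/metric summaries separate; single-head models use the bare key.
    return "%s/%s" % (head_name, key) if head_name else key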