This article collects typical usage examples of the tensorflow.histogram_summary method in Python. If you have been wondering what exactly tensorflow.histogram_summary does, how to call it, or what it looks like in real code, the curated samples below may help. You can also browse further usage examples from the tensorflow module that this method belongs to.
Below, 15 code examples of tensorflow.histogram_summary are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
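Note: tf.histogram_summary is a pre-1.0 TensorFlow API; TensorFlow 1.0 renamed it to tf.summary.histogram (Example 2 below shows code migrated to the new name). Before the examples, here is a minimal end-to-end sketch of the old API, assuming TensorFlow 0.x; the variable, tag, and log directory are illustrative:

import tensorflow as tf

# A tensor whose value distribution we want to track in TensorBoard.
weights = tf.Variable(tf.truncated_normal([784, 10], stddev=0.1))

# Attach a histogram summary; the tag names the chart in TensorBoard.
hist_op = tf.histogram_summary('weights', weights)
merged = tf.merge_all_summaries()

with tf.Session() as sess:
    writer = tf.train.SummaryWriter('/tmp/logs', sess.graph)
    sess.run(tf.initialize_all_variables())
    summary_str = sess.run(merged)
    writer.add_summary(summary_str, global_step=0)
    writer.close()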
Example 1: define_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def define_summaries(self):
    '''Helper function for init_opt'''
    all_sum = {'g': [], 'd': [], 'hr_g': [], 'hr_d': [], 'hist': []}
    for k, v in self.log_vars:
        if k.startswith('g'):
            all_sum['g'].append(tf.scalar_summary(k, v))
        elif k.startswith('d'):
            all_sum['d'].append(tf.scalar_summary(k, v))
        elif k.startswith('hr_g'):
            all_sum['hr_g'].append(tf.scalar_summary(k, v))
        elif k.startswith('hr_d'):
            all_sum['hr_d'].append(tf.scalar_summary(k, v))
        elif k.startswith('hist'):
            all_sum['hist'].append(tf.histogram_summary(k, v))
    self.g_sum = tf.merge_summary(all_sum['g'])
    self.d_sum = tf.merge_summary(all_sum['d'])
    self.hr_g_sum = tf.merge_summary(all_sum['hr_g'])
    self.hr_d_sum = tf.merge_summary(all_sum['hr_d'])
    self.hist_sum = tf.merge_summary(all_sum['hist'])
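A hedged sketch of how merged summaries such as self.g_sum are typically consumed in a training loop; the session, writer, feed dict, and step counter are assumptions, not part of the original project:

# Assumes define_summaries() has run, plus an active `sess`,
# a tf.train.SummaryWriter `writer`, and an integer `step`.
g_summary_str = sess.run(model.g_sum, feed_dict=feed)
writer.add_summary(g_summary_str, step)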
Example 2: _activation_summary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def _activation_summary(x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    # tf.histogram_summary(tensor_name + '/activations', x)  # pre-1.0 API
    tf.summary.histogram(tensor_name + '/activations', x)
    # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))  # pre-1.0 API
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
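For context, a hedged sketch of how this helper is usually invoked right after a layer is built. TOWER_NAME and the activation tensor are assumptions modeled on the CIFAR-10 multi-GPU tutorial this pattern comes from:

import re
import tensorflow as tf

TOWER_NAME = 'tower'  # assumed module-level constant

# Illustrative activation tensor, named as it would be inside a tower scope.
conv1 = tf.nn.relu(tf.zeros([1, 4, 4, 8]), name='tower_0/conv1')
_activation_summary(conv1)  # records 'conv1/activations' and 'conv1/sparsity'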
Example 3: _setup_training

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def _setup_training(self):
    """
    Set up a data flow graph for fine tuning
    """
    layer_num = self.layer_num
    act_func = ACTIVATE_FUNC[self.activate_func]
    sigma = self.sigma
    lr = self.learning_rate
    weights = self.weights
    biases = self.biases
    data1, data2 = self.data1, self.data2
    batch_size = self.batch_size
    optimizer = OPTIMIZER[self.optimizer]
    with tf.name_scope("training"):
        s1 = self._obtain_score(data1, weights, biases, act_func, "1")
        s2 = self._obtain_score(data2, weights, biases, act_func, "2")
        with tf.name_scope("cost"):
            # Pairwise ranking loss: log(1 + exp(-sigma * (s1 - s2)))
            sum_cost = tf.reduce_sum(tf.log(1 + tf.exp(-sigma * (s1 - s2))))
            self.cost = cost = sum_cost / batch_size
        self.optimize = optimizer(lr).minimize(cost)
    for n in range(layer_num - 1):
        tf.histogram_summary("weight" + str(n), weights[n])
        tf.histogram_summary("bias" + str(n), biases[n])
    tf.scalar_summary("cost", cost)
Example 4: nn_layer

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    """Reusable code for making a simple neural net layer.
    It does a matrix multiply, bias add, and then uses relu to nonlinearize.
    It also sets up name scoping so that the resultant graph is easy to read,
    and adds a number of summary ops.
    """
    # Adding a name scope ensures logical grouping of the layers in the graph.
    with tf.name_scope(layer_name):
        # This Variable will hold the state of the weights for the layer
        with tf.name_scope('weights'):
            weights = weight_variable([input_dim, output_dim])
            variable_summaries(weights, layer_name + '/weights')
        with tf.name_scope('biases'):
            biases = bias_variable([output_dim])
            variable_summaries(biases, layer_name + '/biases')
        with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            tf.histogram_summary(layer_name + '/pre_activations', preactivate)
        activations = act(preactivate, 'activation')
        tf.histogram_summary(layer_name + '/activations', activations)
        return activations
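A minimal usage sketch: stacking two such layers into a softmax-ready network. It assumes the weight_variable, bias_variable, and variable_summaries helpers from the same MNIST tutorial are in scope:

x = tf.placeholder(tf.float32, [None, 784])
hidden1 = nn_layer(x, 784, 500, 'layer1')
logits = nn_layer(hidden1, 500, 10, 'layer2', act=tf.identity)
merged = tf.merge_all_summaries()  # gathers the histogram summaries created above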
Example 5: _activation_summary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def _activation_summary(x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % LSPGlobals.TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
Example 6: init_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def init_summaries(self):
    """
    Initialize summaries for TensorBoard.
    """
    # train
    # for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
    #     tf.histogram_summary(v.name, v, collections=['train'], name='variables')
    tf.summary.scalar('LOSS/batch_train_loss', self.loss, collections=['train'])
    if hasattr(self, 'learning_rate'):
        tf.summary.scalar('learning_rate', self.learning_rate, collections=['train'])
    # test
    for v in tf.get_collection('moving_avgs'):
        tf.summary.histogram(v.name, v, collections=['test'], name='moving_avgs')
    # images
    nb_imgs = 3
    tf.summary.image('data', self.data, max_outputs=nb_imgs, collections=['images'])
    tf.summary.image('output', self.output_clipped, max_outputs=nb_imgs, collections=['images'])
    tf.summary.image('label', self.labels, max_outputs=nb_imgs, collections=['images'])
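Because these summaries are filed into named collections ('train', 'test', 'images') rather than the default one, they are typically merged per collection. A short sketch; the writer, session, and feed dict are assumptions:

train_summaries = tf.summary.merge_all(key='train')
image_summaries = tf.summary.merge_all(key='images')
# e.g. writer.add_summary(sess.run(train_summaries, feed_dict=feed), step)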
Example 7: variable_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def variable_summaries(var, name):
    """
    Attach a lot of summaries to a Tensor for Tensorboard visualization.
    Ref: https://www.tensorflow.org/versions/r0.11/how_tos/summaries_and_tensorboard/index.html
    :param var: Variable to summarize
    :param name: Summary name
    """
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.scalar_summary('stddev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)
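Usage is one call per variable, with a distinct name so the TensorBoard charts don't collide; the variable below is illustrative:

W = tf.Variable(tf.zeros([784, 10]), name='W')
variable_summaries(W, 'layer1/weights')  # mean, stddev, max, min + histogram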
Example 8: _activation_summary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def _activation_summary(x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
Example 9: _activation_summary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def _activation_summary(self, x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    # Error: these summaries cause high classifier error!!!
    # "All inputs to node MergeSummary/MergeSummary must be from the same frame."
    # tensor_name = re.sub('%s_[0-9]*/' % "tower", '', x.op.name)
    # tf.histogram_summary(tensor_name + '/activations', x)
    # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
Example 10: _activation_summary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def _activation_summary(x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
Example 11: _add_gradients_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def _add_gradients_summaries(grads_and_vars):
    """Add histogram summaries to gradients.
    Note: The summaries are also added to the SUMMARIES collection.
    Args:
      grads_and_vars: A list of gradient to variable pairs (tuples).
    Returns:
      The _list_ of the added summaries for grads_and_vars.
    """
    summaries = []
    for grad, var in grads_and_vars:
        if grad is not None:
            if isinstance(grad, tf.IndexedSlices):
                grad_values = grad.values
            else:
                grad_values = grad
            summaries.append(tf.histogram_summary(var.op.name + ':gradient',
                                                  grad_values))
            summaries.append(tf.histogram_summary(var.op.name + ':gradient_norm',
                                                  tf.global_norm([grad_values])))
        else:
            tf.logging.info('Var %s has no gradient', var.op.name)
    return summaries
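A hedged sketch of where this helper slots into a training graph: gradients come from an optimizer, get summarized, then applied. The optimizer, loss, and learning rate are placeholders:

optimizer = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = optimizer.compute_gradients(loss)
_add_gradients_summaries(grads_and_vars)
train_op = optimizer.apply_gradients(grads_and_vars)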
Example 12: nn_layer_

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def nn_layer_(self, input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    """Reusable code for making a simple neural net layer.
    It does a matrix multiply, bias add, and then uses relu to nonlinearize.
    It also sets up name scoping so that the resultant graph is easy to read,
    and adds a number of summary ops.
    """
    # Adding a name scope ensures logical grouping of the layers in the graph.
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            weights = self.weight_variable([input_dim, output_dim])
            self.variable_summaries(weights, layer_name + '/weights')
        with tf.name_scope('biases'):
            biases = self.bias_variable([output_dim])
            self.variable_summaries(biases, layer_name + '/biases')
        with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            tf.histogram_summary(layer_name + '/pre_activations', preactivate)
        activations = act(preactivate, 'activation')
        tf.histogram_summary(layer_name + '/activations', activations)
        return activations
Example 13: fc_layer

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def fc_layer(self, bottom, name):
    with tf.variable_scope(name) as scope:
        # Flatten all non-batch dimensions into one.
        shape = bottom.get_shape().as_list()
        dim = 1
        for d in shape[1:]:
            dim *= d
        x = tf.reshape(bottom, [-1, dim])
        with tf.device('/cpu:0'):
            weights = self.get_fc_weight(name)
            biases = self.get_fc_bias(name)
        # Fully connected layer. Note that the '+' operation automatically
        # broadcasts the biases.
        fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
        # tf.histogram_summary('adascan/'+name+'_activations', fc)
        # tf.histogram_summary('adascan/'+name+'_weights', weights)
        scope.reuse_variables()
        return fc
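The loop above collapses every non-batch dimension before the matmul; a quick standalone sketch of that flattening, with illustrative shapes:

import tensorflow as tf

bottom = tf.zeros([2, 7, 7, 512])      # illustrative conv output
shape = bottom.get_shape().as_list()   # [2, 7, 7, 512]
dim = 1
for d in shape[1:]:
    dim *= d                           # 7 * 7 * 512 = 25088
x = tf.reshape(bottom, [-1, dim])      # shape becomes [2, 25088]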
Example 14: setupSummary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def setupSummary(self):
    self.WHist = tf.histogram_summary("%s/weights" % self.name, self.W)
    self.BHist = tf.histogram_summary("%s/biases" % self.name, self.b)
    self.outputHist = tf.histogram_summary("%s/output" % self.name, self.output)
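Storing the summary ops as attributes lets a caller merge just this layer's summaries instead of everything in the graph; a sketch, where `layer` is an assumed instance of this class:

layer_summaries = tf.merge_summary(
    [layer.WHist, layer.BHist, layer.outputHist])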
Example 15: _backward

# Required module: import tensorflow [as alias]
# Or: from tensorflow import histogram_summary [as alias]
def _backward(self, loss, summaries=False):
    hps = self.hps
    loss = loss * hps.num_steps
    emb_vars = find_trainable_variables("emb")
    lstm_vars = find_trainable_variables("LSTM")
    softmax_vars = find_trainable_variables("softmax")
    all_vars = emb_vars + lstm_vars + softmax_vars
    grads = tf.gradients(loss, all_vars)
    orig_grads = grads[:]
    emb_grads = grads[:len(emb_vars)]
    grads = grads[len(emb_vars):]
    for i in range(len(emb_grads)):
        assert isinstance(emb_grads[i], tf.IndexedSlices)
        emb_grads[i] = tf.IndexedSlices(emb_grads[i].values * hps.batch_size,
                                        emb_grads[i].indices,
                                        emb_grads[i].dense_shape)
    lstm_grads = grads[:len(lstm_vars)]
    softmax_grads = grads[len(lstm_vars):]
    lstm_grads, lstm_norm = tf.clip_by_global_norm(lstm_grads, hps.max_grad_norm)
    clipped_grads = emb_grads + lstm_grads + softmax_grads
    assert len(clipped_grads) == len(orig_grads)
    if summaries:
        tf.scalar_summary("model/lstm_grad_norm", lstm_norm)
        tf.scalar_summary("model/lstm_grad_scale",
                          tf.minimum(hps.max_grad_norm / lstm_norm, 1.0))
        tf.scalar_summary("model/lstm_weight_norm", tf.global_norm(lstm_vars))
        # for v, g, cg in zip(all_vars, orig_grads, clipped_grads):
        #     name = v.name.lstrip("model/")
        #     tf.histogram_summary(name + "/var", v)
        #     tf.histogram_summary(name + "/grad", g)
        #     tf.histogram_summary(name + "/clipped_grad", cg)
    return list(zip(clipped_grads, all_vars))
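A hedged usage sketch: the returned (gradient, variable) pairs feed apply_gradients, and the scalar summaries emitted when summaries=True are collected by a later merge. The optimizer and global_step are assumptions, not part of the original project:

grads_and_vars = model._backward(loss, summaries=True)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
summary_op = tf.merge_all_summaries()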