This article collects typical usage examples of the Python function tensorpack.tfutils.summary.add_moving_summary. If you are wondering what add_moving_summary does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The following shows 15 code examples of the add_moving_summary function, sorted by popularity by default.
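Before the examples, here is a minimal, self-contained sketch of the basic pattern (it is not taken from any of the examples below; the function name build_loss and the tensor names are illustrative only): compute scalar tensors while building the graph and pass them to add_moving_summary, so that tensorpack maintains an exponential moving average of each value and reports it to TensorBoard and the training logs.

import tensorflow as tf
from tensorpack.tfutils.summary import add_moving_summary

def build_loss(logits, label):
    # any scalar tensors can be registered; the names here are only illustrative
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    cost = tf.reduce_mean(xent, name='cross_entropy_loss')
    accuracy = tf.reduce_mean(
        tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32), name='accuracy')
    # register the scalars; their moving averages are summarized during training
    add_moving_summary(cost, accuracy)
    return cost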
Example 1: _build_graph
def _build_graph(self, inputs):
    input, output = inputs
    input, output = input / 128.0 - 1, output / 128.0 - 1

    with argscope([Conv2D, Deconv2D],
                  W_init=tf.truncated_normal_initializer(stddev=0.02)), \
            argscope(LeakyReLU, alpha=0.2):
        with tf.variable_scope('gen'):
            fake_output = self.generator(input)
        with tf.variable_scope('discrim'):
            real_pred = self.discriminator(input, output)
        with tf.variable_scope('discrim', reuse=True):
            fake_pred = self.discriminator(input, fake_output)

    self.build_losses(real_pred, fake_pred)
    errL1 = tf.reduce_mean(tf.abs(fake_output - output), name='L1_loss')
    self.g_loss = tf.add(self.g_loss, LAMBDA * errL1, name='total_g_loss')
    add_moving_summary(errL1, self.g_loss)

    # tensorboard visualization
    if IN_CH == 1:
        input = tf.image.grayscale_to_rgb(input)
    if OUT_CH == 1:
        output = tf.image.grayscale_to_rgb(output)
        fake_output = tf.image.grayscale_to_rgb(fake_output)
    viz = (tf.concat([input, output, fake_output], 2) + 1.0) * 128.0
    viz = tf.cast(tf.clip_by_value(viz, 0, 255), tf.uint8, name='viz')
    tf.summary.image('input,output,fake', viz, max_outputs=max(30, BATCH))

    self.collect_variables()
Example 2: build_graph
def build_graph(self, input, output):
    input, output = input / 128.0 - 1, output / 128.0 - 1

    with argscope([Conv2D, Conv2DTranspose],
                  kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):
        with tf.variable_scope('gen'):
            fake_output = self.generator(input)
        with tf.variable_scope('discrim'):
            real_pred = self.discriminator(input, output)
            fake_pred = self.discriminator(input, fake_output)

    self.build_losses(real_pred, fake_pred)
    errL1 = tf.reduce_mean(tf.abs(fake_output - output), name='L1_loss')
    self.g_loss = tf.add(self.g_loss, LAMBDA * errL1, name='total_g_loss')
    add_moving_summary(errL1, self.g_loss)

    # tensorboard visualization
    if IN_CH == 1:
        input = tf.image.grayscale_to_rgb(input)
    if OUT_CH == 1:
        output = tf.image.grayscale_to_rgb(output)
        fake_output = tf.image.grayscale_to_rgb(fake_output)
    visualize_tensors('input,output,fake', [input, output, fake_output],
                      max_outputs=max(30, BATCH))

    self.collect_variables()
Example 3: _build_graph
def _build_graph(self, inputs):
    input, nextinput = inputs
    cell = rnn.MultiRNNCell([rnn.LSTMBlockCell(num_units=param.rnn_size)
                             for _ in range(param.num_rnn_layer)])

    def get_v(n):
        ret = tf.get_variable(n + '_unused', [param.batch_size, param.rnn_size],
                              trainable=False,
                              initializer=tf.constant_initializer())
        ret = tf.placeholder_with_default(ret, shape=[None, param.rnn_size], name=n)
        return ret
    initial = (rnn.LSTMStateTuple(get_v('c0'), get_v('h0')),
               rnn.LSTMStateTuple(get_v('c1'), get_v('h1')))

    embeddingW = tf.get_variable('embedding', [param.vocab_size, param.rnn_size])
    input_feature = tf.nn.embedding_lookup(embeddingW, input)  # B x seqlen x rnnsize
    input_list = tf.unstack(input_feature, axis=1)  # seqlen x (Bxrnnsize)

    outputs, last_state = rnn.static_rnn(cell, input_list, initial, scope='rnnlm')
    last_state = tf.identity(last_state, 'last_state')

    # seqlen x (Bxrnnsize)
    output = tf.reshape(tf.concat(outputs, 1), [-1, param.rnn_size])  # (Bxseqlen) x rnnsize
    logits = FullyConnected('fc', output, param.vocab_size, nl=tf.identity)
    tf.nn.softmax(logits / param.softmax_temprature, name='prob')

    xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=tf.reshape(nextinput, [-1]))
    self.cost = tf.reduce_mean(xent_loss, name='cost')
    summary.add_param_summary(('.*/W', ['histogram']))  # monitor histogram of all W
    summary.add_moving_summary(self.cost)
Example 4: _build_graph
def _build_graph(self, inputs, is_training):
    state, action, reward, next_state, isOver = inputs
    self.predict_value = self._get_DQN_prediction(state, is_training)
    action_onehot = tf.one_hot(action, NUM_ACTIONS)
    pred_action_value = tf.reduce_sum(self.predict_value * action_onehot, 1)  # N,
    max_pred_reward = tf.reduce_mean(tf.reduce_max(
        self.predict_value, 1), name='predict_reward')
    add_moving_summary(max_pred_reward)
    self.greedy_choice = tf.argmax(self.predict_value, 1)  # N,

    with tf.variable_scope('target'):
        targetQ_predict_value = self._get_DQN_prediction(next_state, False)  # NxA

        # DQN
        # best_v = tf.reduce_max(targetQ_predict_value, 1)  # N,

        # Double-DQN
        predict_onehot = tf.one_hot(self.greedy_choice, NUM_ACTIONS, 1.0, 0.0)
        best_v = tf.reduce_sum(targetQ_predict_value * predict_onehot, 1)

        target = reward + (1.0 - tf.cast(isOver, tf.float32)) * GAMMA * tf.stop_gradient(best_v)

    sqrcost = tf.square(target - pred_action_value)
    abscost = tf.abs(target - pred_action_value)  # robust error func
    cost = tf.select(abscost < 1, sqrcost, abscost)
    summary.add_param_summary([('conv.*/W', ['histogram', 'rms']),
                               ('fc.*/W', ['histogram', 'rms'])])  # monitor all W
    self.cost = tf.reduce_mean(cost, name='cost')
Example 5: multilevel_rpn_losses
def multilevel_rpn_losses(
        multilevel_anchors, multilevel_label_logits, multilevel_box_logits):
    """
    Args:
        multilevel_anchors: #lvl RPNAnchors
        multilevel_label_logits: #lvl tensors of shape HxWxA
        multilevel_box_logits: #lvl tensors of shape HxWxAx4

    Returns:
        label_loss, box_loss
    """
    num_lvl = len(cfg.FPN.ANCHOR_STRIDES)
    assert len(multilevel_anchors) == num_lvl
    assert len(multilevel_label_logits) == num_lvl
    assert len(multilevel_box_logits) == num_lvl

    losses = []
    with tf.name_scope('rpn_losses'):
        for lvl in range(num_lvl):
            anchors = multilevel_anchors[lvl]
            label_loss, box_loss = rpn_losses(
                anchors.gt_labels, anchors.encoded_gt_boxes(),
                multilevel_label_logits[lvl], multilevel_box_logits[lvl],
                name_scope='level{}'.format(lvl + 2))
            losses.extend([label_loss, box_loss])

        total_label_loss = tf.add_n(losses[::2], name='label_loss')
        total_box_loss = tf.add_n(losses[1::2], name='box_loss')
        add_moving_summary(total_label_loss, total_box_loss)
    return total_label_loss, total_box_loss
Example 6: fpn_map_rois_to_levels
def fpn_map_rois_to_levels(boxes):
    """
    Assign boxes to level 2~5.

    Args:
        boxes (nx4):

    Returns:
        [tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.
        [tf.Tensor]: 4 tensors, the gathered boxes in each level.

    Be careful that the returned tensors could be empty.
    """
    sqrtarea = tf.sqrt(tf_area(boxes))
    level = tf.to_int32(tf.floor(
        4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))))

    # RoI levels range from 2~5 (not 6)
    level_ids = [
        tf.where(level <= 2),
        tf.where(tf.equal(level, 3)),  # == is not supported
        tf.where(tf.equal(level, 4)),
        tf.where(level >= 5)]
    level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))
                 for i, x in enumerate(level_ids)]
    num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2))
                     for i, x in enumerate(level_ids)]
    add_moving_summary(*num_in_levels)

    level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
    return level_ids, level_boxes
Example 7: build_graph
def build_graph(self, image, label):
    xys = np.array([(y, x, 1) for y in range(WARP_TARGET_SIZE)
                    for x in range(WARP_TARGET_SIZE)], dtype='float32')
    xys = tf.constant(xys, dtype=tf.float32, name='xys')  # p x 3

    image = image / 255.0 - 0.5  # bhw2

    def get_stn(image):
        stn = (LinearWrap(image)
               .AvgPooling('downsample', 2)
               .Conv2D('conv0', 20, 5, padding='VALID')
               .MaxPooling('pool0', 2)
               .Conv2D('conv1', 20, 5, padding='VALID')
               .FullyConnected('fc1', 32)
               .FullyConnected('fct', 6, activation=tf.identity,
                               kernel_initializer=tf.constant_initializer(),
                               bias_initializer=tf.constant_initializer([1, 0, HALF_DIFF, 0, 1, HALF_DIFF]))())
        # output 6 parameters for affine transformation
        stn = tf.reshape(stn, [-1, 2, 3], name='affine')  # bx2x3
        stn = tf.reshape(tf.transpose(stn, [2, 0, 1]), [3, -1])  # 3 x (bx2)
        coor = tf.reshape(tf.matmul(xys, stn),
                          [WARP_TARGET_SIZE, WARP_TARGET_SIZE, -1, 2])
        coor = tf.transpose(coor, [2, 0, 1, 3], 'sampled_coords')  # b h w 2
        sampled = BilinearSample('warp', [image, coor], borderMode='constant')
        return sampled

    with argscope([Conv2D, FullyConnected], activation=tf.nn.relu):
        with tf.variable_scope('STN1'):
            sampled1 = get_stn(image)
        with tf.variable_scope('STN2'):
            sampled2 = get_stn(image)

    # For visualization in tensorboard
    with tf.name_scope('visualization'):
        padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
        padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
        img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1)  # b x 2h x w
        transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1)
        transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1)
        stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz')
        tf.summary.image('visualize',
                         tf.expand_dims(stacked, -1), max_outputs=30)

    sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat')
    logits = (LinearWrap(sampled)
              .FullyConnected('fc1', 256, activation=tf.nn.relu)
              .FullyConnected('fc2', 128, activation=tf.nn.relu)
              .FullyConnected('fct', 19, activation=tf.identity)())
    tf.nn.softmax(logits, name='prob')

    cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    cost = tf.reduce_mean(cost, name='cross_entropy_loss')

    wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), name='incorrect_vector')
    summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error'))

    wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),
                          name='regularize_loss')
    summary.add_moving_summary(cost, wd_cost)
    return tf.add_n([wd_cost, cost], name='cost')
Example 8: build_losses
def build_losses(self, logits_real, logits_fake):
    """D and G play a two-player minimax game with value function V(G, D):

        min_G max_D V(D, G) = E_{x ~ p_data}[log D(x)] + E_{z ~ p_fake}[log(1 - D(G(z)))]

    Args:
        logits_real (tf.Tensor): discrim logits from real samples
        logits_fake (tf.Tensor): discrim logits from fake samples produced by generator
    """
    with tf.name_scope("GAN_loss"):
        score_real = tf.sigmoid(logits_real)
        score_fake = tf.sigmoid(logits_fake)
        tf.summary.histogram('score-real', score_real)
        tf.summary.histogram('score-fake', score_fake)

        with tf.name_scope("discrim"):
            d_loss_pos = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_real, labels=tf.ones_like(logits_real)), name='loss_real')
            d_loss_neg = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake, labels=tf.zeros_like(logits_fake)), name='loss_fake')

            d_pos_acc = tf.reduce_mean(tf.cast(score_real > 0.5, tf.float32), name='accuracy_real')
            d_neg_acc = tf.reduce_mean(tf.cast(score_fake < 0.5, tf.float32), name='accuracy_fake')

            d_accuracy = tf.add(.5 * d_pos_acc, .5 * d_neg_acc, name='accuracy')
            self.d_loss = tf.add(.5 * d_loss_pos, .5 * d_loss_neg, name='loss')

        with tf.name_scope("gen"):
            self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake, labels=tf.ones_like(logits_fake)), name='loss')
            g_accuracy = tf.reduce_mean(tf.cast(score_fake > 0.5, tf.float32), name='accuracy')

        add_moving_summary(self.g_loss, self.d_loss, d_accuracy, g_accuracy)
Example 9: build_graph
def build_graph(self, image_pos):
    image_pos = image_pos / 128.0 - 1

    z = tf.random_normal([self.batch, self.zdim], name='z_train')
    z = tf.placeholder_with_default(z, [None, self.zdim], name='z')

    with argscope([Conv2D, Conv2DTranspose, FullyConnected],
                  kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):
        with tf.variable_scope('gen'):
            image_gen = self.generator(z)
        tf.summary.image('generated-samples', image_gen, max_outputs=30)

        alpha = tf.random_uniform(shape=[self.batch, 1, 1, 1],
                                  minval=0., maxval=1., name='alpha')
        interp = image_pos + alpha * (image_gen - image_pos)

        with tf.variable_scope('discrim'):
            vecpos = self.discriminator(image_pos)
            vecneg = self.discriminator(image_gen)
            vec_interp = self.discriminator(interp)

    # the Wasserstein-GAN losses
    self.d_loss = tf.reduce_mean(vecneg - vecpos, name='d_loss')
    self.g_loss = tf.negative(tf.reduce_mean(vecneg), name='g_loss')

    # the gradient penalty loss
    gradients = tf.gradients(vec_interp, [interp])[0]
    gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
    gradients_rms = symbolic_functions.rms(gradients, 'gradient_rms')
    gradient_penalty = tf.reduce_mean(tf.square(gradients - 1), name='gradient_penalty')
    add_moving_summary(self.d_loss, self.g_loss, gradient_penalty, gradients_rms)

    self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)
    self.collect_variables()
Example 10: LSGAN_losses
def LSGAN_losses(real, fake):
    d_real = tf.reduce_mean(tf.squared_difference(real, 1), name='d_real')
    d_fake = tf.reduce_mean(tf.square(fake), name='d_fake')
    d_loss = tf.multiply(d_real + d_fake, 0.5, name='d_loss')

    g_loss = tf.reduce_mean(tf.squared_difference(fake, 1), name='g_loss')
    add_moving_summary(g_loss, d_loss)
    return g_loss, d_loss
Example 11: rpn_losses
def rpn_losses(anchor_labels, anchor_boxes, label_logits, box_logits):
    """
    Args:
        anchor_labels: fHxfWxNA
        anchor_boxes: fHxfWxNAx4, encoded
        label_logits: fHxfWxNA
        box_logits: fHxfWxNAx4

    Returns:
        label_loss, box_loss
    """
    with tf.device('/cpu:0'):
        valid_mask = tf.stop_gradient(tf.not_equal(anchor_labels, -1))
        pos_mask = tf.stop_gradient(tf.equal(anchor_labels, 1))
        nr_valid = tf.stop_gradient(tf.count_nonzero(valid_mask, dtype=tf.int32), name='num_valid_anchor')
        nr_pos = tf.count_nonzero(pos_mask, dtype=tf.int32, name='num_pos_anchor')

        valid_anchor_labels = tf.boolean_mask(anchor_labels, valid_mask)
    valid_label_logits = tf.boolean_mask(label_logits, valid_mask)

    with tf.name_scope('label_metrics'):
        valid_label_prob = tf.nn.sigmoid(valid_label_logits)
        summaries = []
        with tf.device('/cpu:0'):
            for th in [0.5, 0.2, 0.1]:
                valid_prediction = tf.cast(valid_label_prob > th, tf.int32)
                nr_pos_prediction = tf.reduce_sum(valid_prediction, name='num_pos_prediction')
                pos_prediction_corr = tf.count_nonzero(
                    tf.logical_and(
                        valid_label_prob > th,
                        tf.equal(valid_prediction, valid_anchor_labels)),
                    dtype=tf.int32)
                summaries.append(tf.truediv(
                    pos_prediction_corr,
                    nr_pos, name='recall_th{}'.format(th)))
                precision = tf.to_float(tf.truediv(pos_prediction_corr, nr_pos_prediction))
                precision = tf.where(tf.equal(nr_pos_prediction, 0),
                                     0.0, precision, name='precision_th{}'.format(th))
                summaries.append(precision)
        add_moving_summary(*summaries)

    label_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.to_float(valid_anchor_labels), logits=valid_label_logits)
    label_loss = tf.reduce_mean(label_loss, name='label_loss')

    pos_anchor_boxes = tf.boolean_mask(anchor_boxes, pos_mask)
    pos_box_logits = tf.boolean_mask(box_logits, pos_mask)
    delta = 1.0 / 9
    box_loss = tf.losses.huber_loss(
        pos_anchor_boxes, pos_box_logits, delta=delta,
        reduction=tf.losses.Reduction.SUM) / delta
    box_loss = tf.div(
        box_loss,
        tf.cast(nr_valid, tf.float32), name='box_loss')

    add_moving_summary(label_loss, box_loss, nr_valid, nr_pos)
    return label_loss, box_loss
Example 12: _build_graph
def _build_graph(self, inputs):
    x, y, label = inputs
    x, y = self.embed([x, y])

    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        tf.identity(self.embed(inputs[0]), name="emb")

    cost = symbf.siamese_cosine_loss(x, y, label, scope="loss")
    self.cost = tf.identity(cost, name="cost")
    add_moving_summary(self.cost)
Example 13: get_feature_match_loss
def get_feature_match_loss(self, feats_real, feats_fake):
    losses = []
    for real, fake in zip(feats_real, feats_fake):
        loss = tf.reduce_mean(tf.squared_difference(
            tf.reduce_mean(real, 0),
            tf.reduce_mean(fake, 0)),
            name='mse_feat_' + real.op.name)
        losses.append(loss)

    ret = tf.add_n(losses, name='feature_match_loss')
    add_moving_summary(ret)
    return ret
Example 14: build_graph
def build_graph(self, x, y, label):
    single_input = x
    x, y = self.embed([x, y])

    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        tf.identity(self.embed(single_input), name="emb")

    cost = siamese_cosine_loss(x, y, label, scope="loss")
    cost = tf.identity(cost, name="cost")
    add_moving_summary(cost)
    return cost
Example 15: _build_graph
def _build_graph(self, inputs):
    """This function should build the model which takes the input variables
    and define self.cost at the end"""

    # inputs contains a list of input variables defined above
    image, label = inputs

    # In tensorflow, inputs to convolution function are assumed to be
    # NHWC. Add a single channel here.
    image = tf.expand_dims(image, 3)

    image = image * 2 - 1  # center the pixel values at zero

    # The context manager `argscope` sets the default option for all the layers under
    # this context. Here we use 32-channel convolutions with 3x3 kernels.
    with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu, out_channel=32):
        logits = (LinearWrap(image)
                  .Conv2D('conv0')
                  .MaxPooling('pool0', 2)
                  .Conv2D('conv1')
                  .Conv2D('conv2')
                  .MaxPooling('pool1', 2)
                  .Conv2D('conv3')
                  .FullyConnected('fc0', 512, nl=tf.nn.relu)
                  .Dropout('dropout', 0.5)
                  .FullyConnected('fc1', out_dim=10, nl=tf.identity)())

    tf.nn.softmax(logits, name='prob')  # a Bx10 tensor of probabilities

    # a vector of length B with the loss of each sample
    cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    cost = tf.reduce_mean(cost, name='cross_entropy_loss')  # the average cross-entropy loss

    correct = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32, name='correct')
    accuracy = tf.reduce_mean(correct, name='accuracy')

    # This will monitor training error (in a moving-average fashion):
    # 1. write the value to tensorboard
    # 2. write the value to stat.json
    # 3. print the value after each epoch
    train_error = tf.reduce_mean(1 - correct, name='train_error')
    summary.add_moving_summary(train_error, accuracy)

    # Use a regex to find parameters to apply weight decay.
    # Here we apply a weight decay on all W (weight matrices) of all fc layers
    wd_cost = tf.multiply(1e-5,
                          regularize_cost('fc.*/W', tf.nn.l2_loss),
                          name='regularize_loss')
    self.cost = tf.add_n([wd_cost, cost], name='total_cost')
    summary.add_moving_summary(cost, wd_cost, self.cost)

    # monitor histogram of all weights (of conv and fc layers) in tensorboard
    summary.add_param_summary(('.*/W', ['histogram', 'rms']))
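The examples above only register the tensors; the averaging and logging happen on the callback side. As a hedged sketch of that side (MyModel and my_dataflow are placeholder names, not defined in this article): values passed to add_moving_summary are maintained as exponential moving averages by the MovingAverageSummary callback and written out by MergeAllSummaries, both of which tensorpack normally includes among its default extra_callbacks, so they are listed explicitly here only for clarity.

from tensorpack import TrainConfig, SimpleTrainer, launch_train_with_config
from tensorpack.callbacks import MovingAverageSummary, MergeAllSummaries

config = TrainConfig(
    model=MyModel(),              # a ModelDesc like the ones above (placeholder name)
    dataflow=my_dataflow,         # any DataFlow yielding the declared inputs (placeholder)
    extra_callbacks=[             # normally part of tensorpack's defaults
        MovingAverageSummary(),   # maintains EMAs of tensors from add_moving_summary
        MergeAllSummaries(),      # writes the merged summaries to the monitors
    ],
    max_epoch=100,
)
launch_train_with_config(config, SimpleTrainer())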