This article collects typical usage examples of the tensorflow.pad function in Python. If you have been wondering how exactly the Python pad function is used, or are looking for examples of calling pad, the hand-picked code samples below may help.
Below are 15 code examples of the pad function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
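Before the examples, here is a minimal sketch of the tf.pad call itself (TensorFlow 1.x style, assuming the usual import numpy as np / import tensorflow as tf): the second argument is an [n, 2] list of (before, after) padding amounts, one row per dimension, and mode selects constant, reflect, or symmetric padding.

import numpy as np
import tensorflow as tf

x = tf.constant(np.arange(6, dtype=np.float32).reshape(2, 3))
paddings = [[1, 1], [1, 1]]  # one row before/after, one column before/after
y_const = tf.pad(x, paddings, mode='CONSTANT')   # pads with zeros -> shape (4, 5)
y_refl = tf.pad(x, paddings, mode='REFLECT')     # mirrors without repeating the edge
y_symm = tf.pad(x, paddings, mode='SYMMETRIC')   # mirrors including the edge

with tf.Session() as sess:
    print(sess.run(y_const).shape)  # (4, 5)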
Example 1: _tf_pad
def _tf_pad(x, szs, padding='SYMMETRIC'):
    """
    Tensorflow can't handle padding by more than the dimension of the image.
    This wrapper allows us to build padding up successively.
    """
    def get_size(x):
        # Often the batch will be None. Convert these to 0s
        x_szs = x.get_shape().as_list()
        x_szs = [0 if val is None else val for val in x_szs]
        return x_szs

    x_szs = get_size(x)
    gt = [[sz[0] > x_sz, sz[1] > x_sz] for sz, x_sz in zip(szs, x_szs)]
    while np.any(gt):
        # This creates an intermediate padding amount that will bring in
        # dimensions that are too big by the size of x.
        szs_step = np.int32(gt) * np.stack([x_szs, x_szs], axis=-1)
        x = tf.pad(x, szs_step, padding)
        szs = szs - szs_step
        x_szs = get_size(x)
        gt = [[sz[0] > x_sz, sz[1] > x_sz] for sz, x_sz in zip(szs, x_szs)]

    # Pad by the remaining amount
    x = tf.pad(x, szs, 'SYMMETRIC')
    return x
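A hypothetical usage sketch for the wrapper above (the tensor shape and padding amounts are made up for illustration, and the usual numpy/tensorflow imports are assumed): SYMMETRIC padding larger than the spatial size, which a single tf.pad call would reject, is built up over two passes.

# Assumes the _tf_pad defined above.
img = tf.placeholder(tf.float32, shape=[None, 4, 4, 1])
szs = np.array([[0, 0], [6, 6], [6, 6], [0, 0]])  # 6 > 4, so one SYMMETRIC pad would fail
padded = _tf_pad(img, szs, padding='SYMMETRIC')   # first pads by 4, then by the remaining 2
print(padded.get_shape())  # (?, 16, 16, 1)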
Example 2: res_block
def res_block(x, a=None, filter_size=16, nonlinearity=concat_elu, keep_p=1.0, stride=1, gated=False, name="resnet"):
    orig_x = x
    print(orig_x.get_shape())
    x_1 = conv_layer(nonlinearity(x), 3, stride, filter_size, name + '_conv_1')
    if a is not None:
        shape_a = int_shape(a)
        shape_x_1 = int_shape(x_1)
        a = tf.pad(
            a, [[0, 0], [0, shape_x_1[1]-shape_a[1]], [0, shape_x_1[2]-shape_a[2]],
                [0, 0]])
        x_1 += nin(nonlinearity(a), filter_size, name + '_nin')
    x_1 = nonlinearity(x_1)
    if keep_p < 1.0:
        x_1 = tf.nn.dropout(x_1, keep_prob=keep_p)
    if not gated:
        x_2 = conv_layer(x_1, 3, 1, filter_size, name + '_conv_2')
    else:
        x_2 = conv_layer(x_1, 3, 1, filter_size*2, name + '_conv_2')
        x_2_1, x_2_2 = tf.split(3, 2, x_2)
        x_2 = x_2_1 * tf.nn.sigmoid(x_2_2)
    if int(orig_x.get_shape()[2]) > int(x_2.get_shape()[2]):
        assert int(orig_x.get_shape()[2]) == 2*int(x_2.get_shape()[2]), "res net block only supports stride 2"
        orig_x = tf.nn.avg_pool(orig_x, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    # pad it
    out_filter = filter_size
    in_filter = int(orig_x.get_shape()[3])
    if out_filter != in_filter:
        orig_x = tf.pad(
            orig_x, [[0, 0], [0, 0], [0, 0],
                     [(out_filter-in_filter), 0]])
    return orig_x + x_2
Author: loliverhennigh, Project: ultrasound-nerve-segmentation-in-tensorflow, Lines of code: 34, Source: nerve_architecture.py
Example 3: Rk
def Rk(input, k, reuse=False, norm='instance', is_training=True, name=None):
    """ A residual block that contains two 3x3 convolutional layers
    with the same number of filters on both layers
    Args:
      input: 4D Tensor
      k: integer, number of filters (output depth)
      reuse: boolean
      name: string
    Returns:
      4D tensor (same shape as input)
    """
    with tf.variable_scope(name, reuse=reuse):
        with tf.variable_scope('layer1', reuse=reuse):
            weights1 = _weights("weights1",
                shape=[3, 3, input.get_shape()[3], k])

            padded1 = tf.pad(input, [[0, 0], [1, 1], [1, 1], [0, 0]], 'REFLECT')
            conv1 = tf.nn.conv2d(padded1, weights1,
                strides=[1, 1, 1, 1], padding='VALID')
            normalized1 = _norm(conv1, is_training, norm)
            relu1 = tf.nn.relu(normalized1)

        with tf.variable_scope('layer2', reuse=reuse):
            weights2 = _weights("weights2",
                shape=[3, 3, relu1.get_shape()[3], k])

            padded2 = tf.pad(relu1, [[0, 0], [1, 1], [1, 1], [0, 0]], 'REFLECT')
            conv2 = tf.nn.conv2d(padded2, weights2,
                strides=[1, 1, 1, 1], padding='VALID')
            normalized2 = _norm(conv2, is_training, norm)

        output = input + normalized2
        return output
Example 4: add_edge_padding
def add_edge_padding(x, filter_size):
    assert filter_size[0] % 2 == 1
    if filter_size[0] == 1 and filter_size[1] == 1:
        return x
    a = (filter_size[0] - 1) // 2  # vertical padding size
    b = (filter_size[1] - 1) // 2  # horizontal padding size
    if True:
        x = tf.pad(x, [[0, 0], [a, a], [b, b], [0, 0]])
        name = "_".join([str(dim) for dim in [a, b, *int_shape(x)[1:3]]])
        pads = tf.get_collection(name)
        if not pads:
            if hvd.rank() == 0:
                print("Creating pad", name)
            pad = np.zeros([1] + int_shape(x)[1:3] + [1], dtype='float32')
            pad[:, :a, :, 0] = 1.
            pad[:, -a:, :, 0] = 1.
            pad[:, :, :b, 0] = 1.
            pad[:, :, -b:, 0] = 1.
            pad = tf.convert_to_tensor(pad)
            tf.add_to_collection(name, pad)
        else:
            pad = pads[0]
        pad = tf.tile(pad, [tf.shape(x)[0], 1, 1, 1])
        x = tf.concat([x, pad], axis=3)
    else:
        pad = tf.pad(tf.zeros_like(x[:, :, :, :1]) - 1,
                     [[0, 0], [a, a], [b, b], [0, 0]]) + 1
        x = tf.pad(x, [[0, 0], [a, a], [b, b], [0, 0]])
        x = tf.concat([x, pad], axis=3)
    return x
Example 5: __build
def __build(self):
    self.__init_global_epoch()
    self.__init_global_step()
    self.__init_input()

    with tf.name_scope('Preprocessing'):
        red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
        preprocessed_input = tf.concat([
            tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
            tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
            tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
        ], 3)

    x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
    conv1 = conv2d('conv1', x=x_padded, w=None, num_filters=self.output_channels['conv1'], kernel_size=(3, 3),
                   stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias,
                   batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training,
                   activation=tf.nn.relu, padding='VALID')
    padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
    max_pool = max_pool_2d(padded, size=(3, 3), stride=(2, 2), name='max_pool')
    stage2 = self.__stage(max_pool, stage=2, repeat=3)
    stage3 = self.__stage(stage2, stage=3, repeat=7)
    stage4 = self.__stage(stage3, stage=4, repeat=3)
    global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool', padding='VALID')

    logits_unflattened = conv2d('fc', global_pool, w=None, num_filters=self.args.num_classes,
                                kernel_size=(1, 1),
                                l2_strength=self.args.l2_strength,
                                bias=self.args.bias,
                                is_training=self.is_training)
    self.logits = flatten(logits_unflattened)

    self.__init_output()
Example 6: resnet_fpn_backbone
def resnet_fpn_backbone(image, num_blocks, freeze_c2=True):
    shape2d = tf.shape(image)[2:]
    mult = float(cfg.FPN.RESOLUTION_REQUIREMENT)
    new_shape2d = tf.to_int32(tf.ceil(tf.to_float(shape2d) / mult) * mult)
    pad_shape2d = new_shape2d - shape2d
    assert len(num_blocks) == 4, num_blocks
    with resnet_argscope():
        chan = image.shape[1]
        pad_base = maybe_reverse_pad(2, 3)
        l = tf.pad(image, tf.stack(
            [[0, 0], [0, 0],
             [pad_base[0], pad_base[1] + pad_shape2d[0]],
             [pad_base[0], pad_base[1] + pad_shape2d[1]]]))
        l.set_shape([None, chan, None, None])
        l = Conv2D('conv0', l, 64, 7, strides=2, activation=BNReLU, padding='VALID')
        l = tf.pad(l, [[0, 0], [0, 0], maybe_reverse_pad(0, 1), maybe_reverse_pad(0, 1)])
        l = MaxPooling('pool0', l, 3, strides=2, padding='VALID')
        c2 = resnet_group('group0', l, resnet_bottleneck, 64, num_blocks[0], 1)
        if freeze_c2:
            c2 = tf.stop_gradient(c2)
        c3 = resnet_group('group1', c2, resnet_bottleneck, 128, num_blocks[1], 2)
        c4 = resnet_group('group2', c3, resnet_bottleneck, 256, num_blocks[2], 2)
        c5 = resnet_group('group3', c4, resnet_bottleneck, 512, num_blocks[3], 2)
    # 32x downsampling up to now
    # size of c5: ceil(input/32)
    return c2, c3, c4, c5
Example 7: build_generator_resnet_6blocks
def build_generator_resnet_6blocks(inputgen, name="generator"):
    with tf.variable_scope(name):
        f = 7
        ks = 3

        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]], "REFLECT")
        o_c1 = general_conv2d(pad_input, ngf, f, f, 1, 1, 0.02, name="c1")
        o_c2 = general_conv2d(o_c1, ngf*2, ks, ks, 2, 2, 0.02, "SAME", "c2")
        o_c3 = general_conv2d(o_c2, ngf*4, ks, ks, 2, 2, 0.02, "SAME", "c3")

        o_r1 = build_resnet_block(o_c3, ngf*4, "r1")
        o_r2 = build_resnet_block(o_r1, ngf*4, "r2")
        o_r3 = build_resnet_block(o_r2, ngf*4, "r3")
        o_r4 = build_resnet_block(o_r3, ngf*4, "r4")
        o_r5 = build_resnet_block(o_r4, ngf*4, "r5")
        o_r6 = build_resnet_block(o_r5, ngf*4, "r6")

        o_c4 = general_deconv2d(o_r6, [batch_size, 64, 64, ngf*2], ngf*2, ks, ks, 2, 2, 0.02, "SAME", "c4")
        o_c5 = general_deconv2d(o_c4, [batch_size, 128, 128, ngf], ngf, ks, ks, 2, 2, 0.02, "SAME", "c5")
        o_c5_pad = tf.pad(o_c5, [[0, 0], [ks, ks], [ks, ks], [0, 0]], "REFLECT")
        o_c6 = general_conv2d(o_c5_pad, img_layer, f, f, 1, 1, 0.02, "VALID", "c6", do_relu=False)

        # Adding the tanh layer
        out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen
Example 8: fixed_padding
def fixed_padding(inputs, kernel_size, data_format="channels_first"):
    """Pads the input along the spatial dimensions independently of input size.

    Args:
      inputs: `Tensor` of size `[batch, channels, height, width]` or
        `[batch, height, width, channels]` depending on `data_format`.
      kernel_size: `int` kernel size to be used for `conv2d` or `max_pool2d`
        operations. Should be a positive integer.
      data_format: `str` either "channels_first" for `[batch, channels, height,
        width]` or "channels_last" for `[batch, height, width, channels]`.

    Returns:
      A padded `Tensor` of the same `data_format` with size either intact
      (if `kernel_size == 1`) or padded (if `kernel_size > 1`).
    """
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    if data_format == "channels_first":
        padded_inputs = tf.pad(
            inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
    else:
        padded_inputs = tf.pad(
            inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
    return padded_inputs
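A hypothetical usage sketch for the helper above (input shape assumed): explicit padding followed by a VALID strided convolution, so the output size depends only on the kernel size and stride rather than on the runtime input size.

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
padded = fixed_padding(inputs, kernel_size=7, data_format="channels_last")  # 224 -> 230
outputs = tf.layers.conv2d(padded, filters=64, kernel_size=7, strides=2,
                           padding='VALID', use_bias=False)
print(outputs.get_shape())  # (?, 112, 112, 64), since (230 - 7) // 2 + 1 = 112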
Example 9: _residual_v1
def _residual_v1(self,
                 x,
                 kernel_size,
                 in_filter,
                 out_filter,
                 stride,
                 activate_before_residual=False):
    """Residual unit with 2 sub layers, using Plan A for shortcut connection."""
    del activate_before_residual
    with tf.name_scope('residual_v1') as name_scope:
        orig_x = x

        x = self._conv(x, kernel_size, out_filter, stride)
        x = self._batch_norm(x)
        x = self._relu(x)

        x = self._conv(x, kernel_size, out_filter, 1)
        x = self._batch_norm(x)

        if in_filter != out_filter:
            orig_x = self._avg_pool(orig_x, stride, stride)
            pad = (out_filter - in_filter) // 2
            if self._data_format == 'channels_first':
                orig_x = tf.pad(orig_x, [[0, 0], [pad, pad], [0, 0], [0, 0]])
            else:
                orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0], [pad, pad]])

        x = self._relu(tf.add(x, orig_x))

        tf.logging.info('image after unit %s: %s', name_scope, x.get_shape())
        return x
Example 10: _conv_block
def _conv_block(self, inputs, numOut, name='conv_block'):
    """ Convolutional Block
    Args:
        inputs : Input Tensor
        numOut : Desired output number of channel
        name   : Name of the block
    Returns:
        conv_3 : Output Tensor
    """
    if self.tiny:
        with tf.name_scope(name):
            norm = tf.contrib.layers.batch_norm(inputs, 0.9, epsilon=1e-5, activation_fn=tf.nn.relu, is_training=self.training)
            pad = tf.pad(norm, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]), name='pad')
            conv = self._conv(pad, int(numOut), kernel_size=3, strides=1, pad='VALID', name='conv')
            return conv
    else:
        with tf.name_scope(name):
            with tf.name_scope('norm_1'):
                norm_1 = tf.contrib.layers.batch_norm(inputs, 0.9, epsilon=1e-5, activation_fn=tf.nn.relu, is_training=self.training)
                conv_1 = self._conv(norm_1, int(numOut/2), kernel_size=1, strides=1, pad='VALID', name='conv')
            with tf.name_scope('norm_2'):
                norm_2 = tf.contrib.layers.batch_norm(conv_1, 0.9, epsilon=1e-5, activation_fn=tf.nn.relu, is_training=self.training)
                pad = tf.pad(norm_2, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]), name='pad')
                conv_2 = self._conv(pad, int(numOut/2), kernel_size=3, strides=1, pad='VALID', name='conv')
            with tf.name_scope('norm_3'):
                norm_3 = tf.contrib.layers.batch_norm(conv_2, 0.9, epsilon=1e-5, activation_fn=tf.nn.relu, is_training=self.training)
                conv_3 = self._conv(norm_3, int(numOut), kernel_size=1, strides=1, pad='VALID', name='conv')
            return conv_3
Example 11: fixed_padding
def fixed_padding(inputs, kernel_size, data_format, conv_time_dim):
    """Pads the input along the spatial dimensions independently of input size.

    Args:
      inputs: A tensor of size [batch, channels, height_in, width_in] or
        [batch, height_in, width_in, channels] depending on data_format.
      kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
        Should be a positive integer.
      data_format: The input format ('channels_last' or 'channels_first').
      conv_time_dim: Whether the convolution covers the time dimension; if True,
        the time axis is left unpadded.

    Returns:
      A tensor with the same format as the input with the data either intact
      (if kernel_size == 1) or padded (if kernel_size > 1).
    """
    pad_total = kernel_size - 1
    feature_pad_beg = pad_total // 2
    feature_pad_end = pad_total - feature_pad_beg

    if conv_time_dim:
        time_pad_beg = 0
        time_pad_end = 0
    else:
        time_pad_beg = feature_pad_beg
        time_pad_end = feature_pad_end

    if data_format == 'channels_first':
        padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
                                        [time_pad_beg, time_pad_end],
                                        [feature_pad_beg, feature_pad_end]])
    else:
        padded_inputs = tf.pad(inputs, [[0, 0], [time_pad_beg, time_pad_end],
                                        [feature_pad_beg, feature_pad_end], [0, 0]])
    return padded_inputs
Example 12: _conv
def _conv(self, x, kernel_size, filters, strides, is_atrous=False):
    """Convolution."""
    padding = 'SAME'
    if not is_atrous and strides > 1:
        pad = kernel_size - 1
        pad_beg = pad // 2
        pad_end = pad - pad_beg
        if self._data_format == 'channels_first':
            x = tf.pad(
                x,
                [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
        else:
            x = tf.pad(
                x,
                [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
        padding = 'VALID'
    return tf.layers.conv2d(
        inputs=x,
        kernel_size=kernel_size,
        filters=filters,
        strides=strides,
        padding=padding,
        use_bias=False,
        data_format=self._data_format)
Example 13: pad_to_same_length
def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
    """Pad tensors x and y on axis 1 so that they have the same length."""
    if axis not in [1, 2]:
        raise ValueError("Only axis=1 and axis=2 supported for now.")
    with tf.name_scope("pad_to_same_length", [x, y]):
        x_length = tf.shape(x)[axis]
        y_length = tf.shape(y)[axis]
        max_length = tf.maximum(x_length, y_length)
        if final_length_divisible_by > 1:
            # Find the nearest larger-or-equal integer divisible by given number.
            max_length += final_length_divisible_by - 1
            max_length //= final_length_divisible_by
            max_length *= final_length_divisible_by
        length_diff1 = max_length - x_length
        length_diff2 = max_length - y_length

        def padding_list(length_diff, arg):
            if axis == 1:
                return [[[0, 0], [0, length_diff]],
                        tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]
            return [[[0, 0], [0, 0], [0, length_diff]],
                    tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]

        paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)
        paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)
        res_x = tf.pad(x, paddings1)
        res_y = tf.pad(y, paddings2)
        # Static shapes are the same except for axis=1.
        x_shape = x.shape.as_list()
        x_shape[axis] = None
        res_x.set_shape(x_shape)
        y_shape = y.shape.as_list()
        y_shape[axis] = None
        res_y.set_shape(y_shape)
        return res_x, res_y
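A hypothetical usage sketch for the helper above (shapes assumed): two batches of sequence features padded with zeros on axis 1 to the same length, rounded up to a multiple of 8.

x = tf.placeholder(tf.float32, [None, 37, 512])
y = tf.placeholder(tf.float32, [None, 41, 512])
x_pad, y_pad = pad_to_same_length(x, y, final_length_divisible_by=8, axis=1)
# Both are zero-padded on axis 1 up to length 48, the next multiple of 8 >= max(37, 41).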
Example 14: generator
def generator(img, scope, gf_dim=64, reuse=False, train=True):
    bn = functools.partial(slim.batch_norm, scale=True, is_training=train,
                           decay=0.9, epsilon=1e-5, updates_collections=None)

    def residule_block(x, dim, scope='res'):
        y = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        y = relu(instance_norm(conv(y, dim, 3, 1, padding='VALID', scope=scope + '_conv1'), scope=scope + '_instance_norm1'))
        y = tf.pad(y, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        y = instance_norm(conv(y, dim, 3, 1, padding='VALID', scope=scope + '_conv2'), scope=scope + '_instance_norm2')
        return y + x

    with tf.variable_scope(scope + '_generator', reuse=reuse):
        c0 = tf.pad(img, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")
        c1 = relu(instance_norm(conv(c0, gf_dim, 7, 1, padding='VALID', scope='c1_conv'), scope='c1_instance_norm'))
        c2 = relu(instance_norm(conv(c1, gf_dim * 2, 3, 2, scope='c2_conv'), scope='c2_instance_norm'))
        c3 = relu(instance_norm(conv(c2, gf_dim * 4, 3, 2, scope='c3_conv'), scope='c3_instance_norm'))

        r1 = residule_block(c3, gf_dim * 4, scope='r1')
        r2 = residule_block(r1, gf_dim * 4, scope='r2')
        r3 = residule_block(r2, gf_dim * 4, scope='r3')
        r4 = residule_block(r3, gf_dim * 4, scope='r4')
        r5 = residule_block(r4, gf_dim * 4, scope='r5')
        r6 = residule_block(r5, gf_dim * 4, scope='r6')
        r7 = residule_block(r6, gf_dim * 4, scope='r7')
        r8 = residule_block(r7, gf_dim * 4, scope='r8')
        r9 = residule_block(r8, gf_dim * 4, scope='r9')

        d1 = relu(instance_norm(deconv(r9, gf_dim * 2, 3, 2, scope='d1_dconv'), scope='d1_instance_norm'))
        d2 = relu(instance_norm(deconv(d1, gf_dim, 3, 2, scope='d2_dconv'), scope='d2_instance_norm'))
        d2 = tf.pad(d2, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")
        pred = conv(d2, 3, 7, 1, padding='VALID', scope='pred_conv')
        pred = tf.nn.tanh(pred)

        return pred
Example 15: build_graph
def build_graph(self, image, label):
    xys = np.array([(y, x, 1) for y in range(WARP_TARGET_SIZE)
                    for x in range(WARP_TARGET_SIZE)], dtype='float32')
    xys = tf.constant(xys, dtype=tf.float32, name='xys')  # p x 3

    image = image / 255.0 - 0.5  # bhw2

    def get_stn(image):
        stn = (LinearWrap(image)
               .AvgPooling('downsample', 2)
               .Conv2D('conv0', 20, 5, padding='VALID')
               .MaxPooling('pool0', 2)
               .Conv2D('conv1', 20, 5, padding='VALID')
               .FullyConnected('fc1', 32)
               .FullyConnected('fct', 6, activation=tf.identity,
                               kernel_initializer=tf.constant_initializer(),
                               bias_initializer=tf.constant_initializer([1, 0, HALF_DIFF, 0, 1, HALF_DIFF]))())
        # output 6 parameters for affine transformation
        stn = tf.reshape(stn, [-1, 2, 3], name='affine')  # bx2x3
        stn = tf.reshape(tf.transpose(stn, [2, 0, 1]), [3, -1])  # 3 x (bx2)
        coor = tf.reshape(tf.matmul(xys, stn),
                          [WARP_TARGET_SIZE, WARP_TARGET_SIZE, -1, 2])
        coor = tf.transpose(coor, [2, 0, 1, 3], 'sampled_coords')  # b h w 2
        sampled = BilinearSample('warp', [image, coor], borderMode='constant')
        return sampled

    with argscope([Conv2D, FullyConnected], activation=tf.nn.relu):
        with tf.variable_scope('STN1'):
            sampled1 = get_stn(image)
        with tf.variable_scope('STN2'):
            sampled2 = get_stn(image)

    # For visualization in tensorboard
    with tf.name_scope('visualization'):
        padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
        padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
        img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1)  # b x 2h x w
        transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1)
        transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1)
        stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz')
        tf.summary.image('visualize',
                         tf.expand_dims(stacked, -1), max_outputs=30)

    sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat')
    logits = (LinearWrap(sampled)
              .FullyConnected('fc1', 256, activation=tf.nn.relu)
              .FullyConnected('fc2', 128, activation=tf.nn.relu)
              .FullyConnected('fct', 19, activation=tf.identity)())
    tf.nn.softmax(logits, name='prob')

    cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    cost = tf.reduce_mean(cost, name='cross_entropy_loss')

    wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), name='incorrect_vector')
    summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error'))

    wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),
                          name='regularize_loss')
    summary.add_moving_summary(cost, wd_cost)
    return tf.add_n([wd_cost, cost], name='cost')