This article collects typical usage examples of the tensorflow.truncated_normal_initializer function in Python. If you are unsure what truncated_normal_initializer does, how to call it, or what real-world uses look like, the curated code examples below should help.
The following lists 15 code examples of the truncated_normal_initializer function, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
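Before the examples, here is a minimal, self-contained sketch of the basic call pattern (this sketch assumes TensorFlow 1.x; in TensorFlow 2.x the same API is available as tf.compat.v1.truncated_normal_initializer). The initializer samples values from a normal distribution with the given mean and stddev, re-drawing any sample that falls more than two standard deviations from the mean, and it is typically passed to tf.get_variable or to a layer's kernel_initializer / weights_initializer argument, as the examples below show.

import tensorflow as tf  # assumes TensorFlow 1.x; in TF 2.x use tf.compat.v1 and disable eager execution

# Create the initializer: values are drawn from N(0.0, 0.01^2) and re-sampled
# if they fall outside two standard deviations of the mean.
init = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)

# Typical use: hand it to tf.get_variable (or a layer's kernel_initializer).
weights = tf.get_variable("weights", shape=[128, 10], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(weights).std())  # roughly 0.01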

Example 1: build_network
def build_network(self, images, class_num, is_training=True, keep_prob=0.5, scope='Fast-RCNN'):
    self.conv1 = self.convLayer(images, 11, 11, 4, 4, 96, "conv1", "VALID")
    lrn1 = self.LRN(self.conv1, 2, 2e-05, 0.75, "norm1")
    self.pool1 = self.maxPoolLayer(lrn1, 3, 3, 2, 2, "pool1", "VALID")
    self.conv2 = self.convLayer(self.pool1, 5, 5, 1, 1, 256, "conv2", groups=2)
    lrn2 = self.LRN(self.conv2, 2, 2e-05, 0.75, "lrn2")
    self.pool2 = self.maxPoolLayer(lrn2, 3, 3, 2, 2, "pool2", "VALID")
    self.conv3 = self.convLayer(self.pool2, 3, 3, 1, 1, 384, "conv3")
    self.conv4 = self.convLayer(self.conv3, 3, 3, 1, 1, 384, "conv4", groups=2)
    self.conv5 = self.convLayer(self.conv4, 3, 3, 1, 1, 256, "conv5", groups=2)
    self.roi_pool6 = roi_pooling(self.conv5, self.rois, pool_height=6, pool_width=6)
    with slim.arg_scope([slim.fully_connected, slim.conv2d],
                        activation_fn=nn_ops.relu,
                        weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                        weights_regularizer=slim.l2_regularizer(0.0005)):
        flatten = slim.flatten(self.roi_pool6, scope='flat_32')
        self.fc1 = slim.fully_connected(flatten, 4096, scope='fc_6')
        drop6 = slim.dropout(self.fc1, keep_prob=keep_prob, is_training=is_training, scope='dropout6')
        self.fc2 = slim.fully_connected(drop6, 4096, scope='fc_7')
        drop7 = slim.dropout(self.fc2, keep_prob=keep_prob, is_training=is_training, scope='dropout7')
        cls = slim.fully_connected(drop7, class_num, activation_fn=nn_ops.softmax, scope='fc_8')
        bbox = slim.fully_connected(drop7, (self.class_num - 1) * 4,
                                    weights_initializer=tf.truncated_normal_initializer(0.0, 0.001),
                                    activation_fn=None, scope='fc_9')
    return cls, bbox

Example 2: lstm_fs_
def lstm_fs_(xs, ys, batches, l, m, n):
    # (name, shape=None, initializer=None, dtype=tf.float32, var_type="variable")
    [Wf, Wi, WC, Wo] = map(lambda name: variable_on_cpu(name, shape=[m+n, m], initializer=tf.truncated_normal_initializer(stddev=1e-2)), ["Wf", "Wi", "WC", "Wo"])
    Wo1 = variable_on_cpu("Wo1", shape=[m, n], initializer=tf.truncated_normal_initializer(stddev=1e-2))
    [bf, bi, bC, bo] = map(lambda name: variable_on_cpu(name, shape=[m], initializer=tf.truncated_normal_initializer(stddev=1e-2)), ["bf", "bi", "bC", "bo"])
    bo1 = variable_on_cpu("bo1", shape=[n], initializer=tf.truncated_normal_initializer(stddev=1e-2))
    # C = variable_on_cpu("C", shape=[m], var_type="variable")
    # h = variable_on_cpu("h", shape=[m], var_type="variable")
    # C = tf.ones([batches, m])
    C = tf.zeros([batches, m])
    # h = tf.zeros([m])
    # h = tf.ones([batches, m])
    h = tf.zeros([batches, m])
    (outs, end) = scan(lambda mem, x: step_lstm1(x, mem, Wf, bf, Wi, bi, WC, bC, Wo, bo, Wo1, bo1),
                       (C, h), xs, l)
    yhats = tf.pack(outs)
    # print(ys)
    # print(yhats)
    loss = cross_entropy(ys, yhats, t=1e-6)
    # tf.nn.sparse_softmax_cross_entropy_with_logits(outs, yhats, name='xentropy')
    # loss = cross_entropy(outs, yhats)
    # is not actually accuracy
    accuracy = cross_entropy(ys[-1], yhats[-1])
    # tf.nn.sparse_softmax_cross_entropy_with_logits(outs[-1], yhats[-1])
    return {"loss": loss, "inference": yhats, "accuracy": accuracy}

Example 3: __init__
def __init__(self, distinctTagNum, w2vPath, c2vPath, numHidden):
    self.distinctTagNum = distinctTagNum
    self.numHidden = numHidden
    self.w2v = self.load_w2v(w2vPath, FLAGS.embedding_word_size)
    self.c2v = self.load_w2v(c2vPath, FLAGS.embedding_char_size)
    self.words = tf.Variable(self.w2v, name="words")
    self.chars = tf.Variable(self.c2v, name="chars")
    with tf.variable_scope('Softmax') as scope:
        self.W = tf.get_variable(
            shape=[numHidden * 2, distinctTagNum],
            initializer=tf.truncated_normal_initializer(stddev=0.01),
            name="weights",
            regularizer=tf.contrib.layers.l2_regularizer(0.001))
        self.b = tf.Variable(tf.zeros([distinctTagNum], name="bias"))
    with tf.variable_scope('CNN_Layer') as scope:
        self.filter = tf.get_variable(
            "filters_1",
            shape=[2, FLAGS.embedding_char_size, 1,
                   FLAGS.embedding_char_size],
            regularizer=tf.contrib.layers.l2_regularizer(0.0001),
            initializer=tf.truncated_normal_initializer(stddev=0.01),
            dtype=tf.float32)
    self.trains_params = None
    self.inp_w = tf.placeholder(tf.int32,
                                shape=[None, FLAGS.max_sentence_len],
                                name="input_words")
    self.inp_c = tf.placeholder(
        tf.int32,
        shape=[None, FLAGS.max_sentence_len * FLAGS.max_chars_per_word],
        name="input_chars")
    pass

Example 4: _shared_encoder_network
def _shared_encoder_network(self):
    # config SSE network to be shared encoder mode
    # Build shared encoder
    with tf.variable_scope('shared_encoder'):
        # TODO: need play with forgetGate and peeholes here
        if self.use_lstm:
            src_single_cell = tf.nn.rnn_cell.LSTMCell(self.src_cell_size, forget_bias=1.0, use_peepholes=False)
        else:
            src_single_cell = tf.nn.rnn_cell.GRUCell(self.src_cell_size)
        src_cell = src_single_cell
        if self.num_layers > 1:
            src_cell = tf.nn.rnn_cell.MultiRNNCell([src_single_cell] * self.num_layers)
        # compute source sequence related tensors
        src_output, _ = tf.nn.dynamic_rnn(src_cell, self.src_input_distributed, sequence_length=self._src_lens,
                                          dtype=tf.float32)
        src_last_output = self._last_relevant(src_output, self._src_lens)
        self.src_M = tf.get_variable('src_M', shape=[self.src_cell_size, self.seq_embed_size],
                                     initializer=tf.truncated_normal_initializer())
        # self.src_b = tf.get_variable('src_b', shape=[self.seq_embed_size])
        self.src_seq_embedding = tf.matmul(src_last_output, self.src_M)  # + self.src_b
        # declare tgt_M tensor before reuse them
        self.tgt_M = tf.get_variable('tgt_M', shape=[self.src_cell_size, self.seq_embed_size],
                                     initializer=tf.truncated_normal_initializer())
        # self.tgt_b = tf.get_variable('tgt_b', shape=[self.seq_embed_size])
    with tf.variable_scope('shared_encoder', reuse=True):
        # compute target sequence related tensors by reusing shared_encoder model
        tgt_output, _ = tf.nn.dynamic_rnn(src_cell, self.tgt_input_distributed, sequence_length=self._tgt_lens,
                                          dtype=tf.float32)
        tgt_last_output = self._last_relevant(tgt_output, self._tgt_lens)
        self.tgt_seq_embedding = tf.matmul(tgt_last_output, self.tgt_M)  # + self.tgt_b

Example 5: model
def model(data, prev_outputs, image_size, n_channels, n_actions, n_prev_actions):
    kernel_defs = [(8, 16, 4), (2, 32, 1)]  # each conv layer, (patch_side, n_kernels, stride)
    fc_sizes = [256]
    n_input_kernels = n_channels
    for i, k in enumerate(kernel_defs):
        with tf.variable_scope("conv_%i" % i):
            kernel_shape = (k[0], k[0], n_input_kernels, k[1])
            data = conv_relu(data, kernel_shape, k[2])
            n_input_kernels = k[1]
    for i, n in enumerate(fc_sizes):
        with tf.variable_scope("fc_%i" % i):
            if i == 0:
                previous_n = kernel_defs[-1][1] * np.prod(image_size) / np.prod([k[2] for k in kernel_defs])**2
                data = tf.reshape(data, [-1, previous_n])
                reshape_prev_outputs = tf.reshape(prev_outputs, [-1, n_actions * n_prev_actions])
                prev_outputs_weights = tf.get_variable("prev_outputs_weights", [n_actions * n_prev_actions, n],
                                                       initializer=tf.truncated_normal_initializer(mean=0., stddev=0.01/np.sqrt(n_prev_actions * n_actions)))
            else:
                previous_n = fc_sizes[i-1]
            weights = tf.get_variable("weights", [previous_n, n],
                                      initializer=tf.truncated_normal_initializer(mean=0., stddev=0.01 / np.sqrt(previous_n)))
            biases = tf.get_variable("biases", [n], initializer=tf.constant_initializer(0.0))
            relu_input = tf.matmul(data, weights) + biases
            if i == 0:
                relu_input += 0.1 * (previous_n / n_actions / n_prev_actions) * tf.matmul(reshape_prev_outputs, prev_outputs_weights)
            data = tf.nn.relu(relu_input)
    with tf.variable_scope("flat_out"):
        weights = tf.get_variable("weights", [fc_sizes[-1], n_actions],
                                  initializer=tf.truncated_normal_initializer(mean=0., stddev=0.01 / np.sqrt(fc_sizes[-1])))
        biases = tf.get_variable("biases", [n_actions], initializer=tf.constant_initializer(0.0))
        return tf.matmul(data, weights) + biases

Example 6: testCheckInitializers
def testCheckInitializers(self):
    initializers = {
        "key_a": tf.truncated_normal_initializer(mean=0, stddev=1),
        "key_c": tf.truncated_normal_initializer(mean=0, stddev=1),
    }
    keys = ["key_a", "key_b"]
    self.assertRaisesRegexp(KeyError,
                            "Invalid initializer keys.*",
                            snt.check_initializers,
                            initializers=initializers,
                            keys=keys)
    del initializers["key_c"]
    initializers["key_b"] = "not a function"
    self.assertRaisesRegexp(TypeError,
                            "Initializer for.*",
                            snt.check_initializers,
                            initializers=initializers,
                            keys=keys)
    initializers["key_b"] = {"key_c": "not a function"}
    self.assertRaisesRegexp(TypeError,
                            "Initializer for.*",
                            snt.check_initializers,
                            initializers=initializers,
                            keys=keys)
    initializers["key_b"] = {
        "key_c": tf.truncated_normal_initializer(mean=0, stddev=1),
        "key_d": tf.truncated_normal_initializer(mean=0, stddev=1),
    }
    snt.check_initializers(initializers=initializers, keys=keys)

Example 7: layers
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
    """
    Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
    :param vgg_layer3_out: TF Tensor for VGG Layer 3 output
    :param vgg_layer4_out: TF Tensor for VGG Layer 4 output
    :param vgg_layer7_out: TF Tensor for VGG Layer 7 output
    :param num_classes: Number of classes to classify
    :return: The Tensor for the last layer of output
    """
    # upsampling on layer7 by 2
    input = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, strides=(1, 1), padding='same',
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    output = tf.layers.conv2d_transpose(input, num_classes, 4, strides=(2, 2), padding='same',
                                        kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    # skip connection followed by upsampling on layer4 by 2
    input = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, strides=(1, 1), padding='same',
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    input = tf.add(input, output)
    output = tf.layers.conv2d_transpose(input, num_classes, 4, strides=(2, 2), padding='same',
                                        kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    # skip connection followed by upsampling on layer3 by 8
    input = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, strides=(1, 1), padding='same',
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    input = tf.add(input, output)
    nn_last_layer = tf.layers.conv2d_transpose(input, num_classes, 32, strides=(8, 8), padding='same',
                                               kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    return nn_last_layer

Example 8: Discriminator
def Discriminator(image_Pattern, initial_Filter_Count = 64, attribute_Count = 10, reuse = False):
    with tf.variable_scope('discriminator', reuse=reuse):
        hidden_Activation = image_Pattern
        for index in range(6):
            hidden_Activation = tf.nn.leaky_relu(
                tf.layers.conv2d(
                    inputs = hidden_Activation,
                    filters = initial_Filter_Count * (2 ** index),
                    kernel_size = 4,
                    strides = 2,
                    padding = "same",
                    kernel_initializer = tf.truncated_normal_initializer(stddev=0.02)
                ),
                alpha=0.01,
                name="hidden_Layer{}".format(index)
            )
        output_Activation = tf.layers.conv2d(
            inputs = hidden_Activation,
            filters = 1 + attribute_Count,
            kernel_size = hidden_Activation.get_shape()[1:3],
            strides = 1,
            padding = "valid",
            name = "output_Layer",
            use_bias = False,
            kernel_initializer = tf.truncated_normal_initializer(stddev=0.02)
        )
        discrimination_Logit, attribute_Logit = tf.split(
            tf.squeeze(output_Activation, axis=[1, 2]),
            num_or_size_splits = [1, attribute_Count],
            axis = 1
        )
    return discrimination_Logit, attribute_Logit

Example 9: discriminator
def discriminator(images, reuse_variables=None):
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables) as scope:
        # First convolutional and pool layers
        # This finds 32 different 5 x 5 pixel features
        d_w1 = tf.get_variable('d_w1', [5, 5, 1, 32], initializer=tf.truncated_normal_initializer(stddev=0.02))
        d_b1 = tf.get_variable('d_b1', [32], initializer=tf.constant_initializer(0))
        d1 = tf.nn.conv2d(input=images, filter=d_w1, strides=[1, 1, 1, 1], padding='SAME')
        d1 = d1 + d_b1
        d1 = tf.nn.relu(d1)
        d1 = tf.nn.avg_pool(d1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        # Second convolutional and pool layers
        # This finds 64 different 5 x 5 pixel features
        d_w2 = tf.get_variable('d_w2', [5, 5, 32, 64], initializer=tf.truncated_normal_initializer(stddev=0.02))
        d_b2 = tf.get_variable('d_b2', [64], initializer=tf.constant_initializer(0))
        d2 = tf.nn.conv2d(input=d1, filter=d_w2, strides=[1, 1, 1, 1], padding='SAME')
        d2 = d2 + d_b2
        d2 = tf.nn.relu(d2)
        d2 = tf.nn.avg_pool(d2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        # First fully connected layer
        d_w3 = tf.get_variable('d_w3', [7 * 7 * 64, 1024], initializer=tf.truncated_normal_initializer(stddev=0.02))
        d_b3 = tf.get_variable('d_b3', [1024], initializer=tf.constant_initializer(0))
        d3 = tf.reshape(d2, [-1, 7 * 7 * 64])
        d3 = tf.matmul(d3, d_w3)
        d3 = d3 + d_b3
        d3 = tf.nn.relu(d3)
        # Second fully connected layer
        d_w4 = tf.get_variable('d_w4', [1024, 1], initializer=tf.truncated_normal_initializer(stddev=0.02))
        d_b4 = tf.get_variable('d_b4', [1], initializer=tf.constant_initializer(0))
        d4 = tf.matmul(d3, d_w4) + d_b4
        # d4 contains unscaled values
        return d4

Example 10: inference
def inference(images):
    def _variable_with_weight_decay(name, shape, stddev, wd):
        var = tf.get_variable(name, shape=shape, initializer=tf.truncated_normal_initializer(stddev=stddev))
        if wd:
            weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
            tf.add_to_collection('losses', weight_decay)
        return var

    with tf.variable_scope('conv1') as scope:
        kernel = tf.get_variable('weights', shape=[3, 3, 3, 32], initializer=tf.truncated_normal_initializer(stddev=1e-4))
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', shape=[32], initializer=tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope.name)
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
    with tf.variable_scope('conv2') as scope:
        kernel = tf.get_variable('weights', shape=[3, 3, 32, 64], initializer=tf.truncated_normal_initializer(stddev=1e-4))
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', shape=[64], initializer=tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope.name)
    pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
    with tf.variable_scope('conv3') as scope:
        kernel = tf.get_variable('weights', shape=[3, 3, 64, 128], initializer=tf.truncated_normal_initializer(stddev=1e-4))
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', shape=[128], initializer=tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias, name=scope.name)
    pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')
    with tf.variable_scope('conv4') as scope:
        kernel = tf.get_variable('weights', shape=[3, 3, 128, 256], initializer=tf.truncated_normal_initializer(stddev=1e-4))
        conv = tf.nn.conv2d(pool3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', shape=[256], initializer=tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(bias, name=scope.name)
    pool4 = tf.nn.max_pool(conv4, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4')
    with tf.variable_scope('fc5') as scope:
        dim = 1
        for d in pool4.get_shape()[1:].as_list():
            dim *= d
        reshape = tf.reshape(pool4, [BATCH_SIZE, dim])
        weights = _variable_with_weight_decay('weights', shape=[dim, 1024], stddev=0.05, wd=0.005)
        biases = tf.get_variable('biases', shape=[1024], initializer=tf.constant_initializer(0.1))
        fc5 = tf.nn.relu_layer(reshape, weights, biases, name=scope.name)
    with tf.variable_scope('fc6') as scope:
        weights = _variable_with_weight_decay('weights', shape=[1024, 256], stddev=0.05, wd=0.005)
        biases = tf.get_variable('biases', shape=[256], initializer=tf.constant_initializer(0.1))
        fc6 = tf.nn.relu_layer(fc5, weights, biases, name=scope.name)
    with tf.variable_scope('fc7') as scope:
        weights = _variable_with_weight_decay('weights', shape=[256, NUM_CLASSES], stddev=0.05, wd=0.005)
        biases = tf.get_variable('biases', shape=[NUM_CLASSES], initializer=tf.constant_initializer(0.1))
        fc7 = tf.nn.xw_plus_b(fc6, weights, biases, name=scope.name)
    return fc7

Example 11: Discriminator_with_Vanilla
def Discriminator_with_Vanilla(input_Pattern, hidden_Unit_Size = 128, label_Unit_Size = 10, is_Training = True, reuse = False):
    with tf.variable_scope('discriminator', reuse=reuse):
        hidden_Activation = tf.layers.dense(
            inputs = input_Pattern,
            units = hidden_Unit_Size,
            activation = tf.nn.relu,
            use_bias = True,
            kernel_initializer = tf.truncated_normal_initializer(stddev=0.1),
            bias_initializer = tf.zeros_initializer(),
            name = "hidden"
        )
        discrimination_Logits = tf.layers.dense(
            inputs = hidden_Activation,
            units = 1,
            activation = None,
            use_bias = True,
            kernel_initializer = tf.truncated_normal_initializer(stddev=0.1),
            bias_initializer = tf.zeros_initializer(),
            name = "discrimination"
        )
        discrimination_Activation = tf.nn.sigmoid(discrimination_Logits)
        label_Logits = tf.layers.dense(
            inputs = hidden_Activation,
            units = label_Unit_Size,
            activation = None,
            use_bias = True,
            kernel_initializer = tf.truncated_normal_initializer(stddev=0.1),
            bias_initializer = tf.zeros_initializer(),
            name = "label"
        )
        label_Activation = tf.nn.softmax(label_Logits)
    return discrimination_Logits, label_Logits, discrimination_Activation, label_Activation

Example 12: Generator
def Generator(image_Pattern, is_Training = True, name = "generator", reuse = False):
    with tf.variable_scope(name, reuse=reuse):
        convolution_Activation = tf.nn.leaky_relu(
            tf.layers.conv2d(
                inputs = image_Pattern,
                filters = 2 ** 6,
                kernel_size = [4,4],
                strides = (2,2),
                padding = "same",
                use_bias = False,
                kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
            )
        )
        for power in range(7, 10):
            convolution_Activation = tf.nn.leaky_relu(
                tf.layers.batch_normalization(
                    tf.layers.conv2d(
                        inputs = convolution_Activation,
                        filters = 2 ** power,
                        kernel_size = [4,4],
                        strides = (2,2),
                        padding = "same",
                        use_bias = False,
                        kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                    ),
                    training = is_Training
                )
            )
        convolution_Transpose_Activation = convolution_Activation
        for power in reversed(range(6, 9)):
            convolution_Transpose_Activation = tf.nn.leaky_relu(
                tf.layers.batch_normalization(
                    tf.layers.conv2d_transpose(
                        inputs = convolution_Transpose_Activation,
                        filters = 2 ** power,
                        kernel_size = [4,4],
                        strides = (2,2),
                        padding = "same",
                        use_bias = False,
                        kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                    ),
                    training = is_Training
                )
            )
        generator_Logit = tf.layers.conv2d_transpose(
            inputs = convolution_Transpose_Activation,
            filters = 3,  # RGB
            kernel_size = [4,4],
            strides = (2,2),
            padding = "same",
            use_bias = False,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
        )
        generator_Activation = tf.nn.tanh(generator_Logit)
    return generator_Logit, generator_Activation

Example 13: _build_network
def _build_network(self, is_training=True):
    # select initializers
    if cfg.TRAIN.TRUNCATED:
        initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
        initializer_bbox = tf.truncated_normal_initializer(mean=0.0, stddev=0.001)
    else:
        initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
        initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)

    net_conv = self._image_to_head(is_training)
    with tf.variable_scope(self._scope, self._scope):
        # build the anchors for the image
        self._anchor_component()
        # region proposal network
        rois = self._region_proposal(net_conv, is_training, initializer)
        # region of interest pooling
        if cfg.POOLING_MODE == 'crop':
            pool5 = self._crop_pool_layer(net_conv, rois, "pool5")
        else:
            raise NotImplementedError

    fc7 = self._head_to_tail(pool5, is_training)
    with tf.variable_scope(self._scope, self._scope):
        # region classification
        cls_prob, bbox_pred = self._region_classification(fc7, is_training,
                                                          initializer, initializer_bbox)
    self._score_summaries.update(self._predictions)
    return rois, cls_prob, bbox_pred

Example 14: fc
def fc(self, input, num_out, name, relu=True, trainable=True):
    with tf.variable_scope(name) as scope:
        # only use the first input
        if isinstance(input, tuple):
            input = input[0]
        input_shape = input.get_shape()
        if input_shape.ndims == 4:
            dim = 1
            for d in input_shape[1:].as_list():
                dim *= d
            feed_in = tf.reshape(tf.transpose(input, [0, 3, 1, 2]), [-1, dim])
        else:
            feed_in, dim = (input, int(input_shape[-1]))
        if name == 'bbox_pred':
            init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
            init_biases = tf.constant_initializer(0.0)
        else:
            init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
            init_biases = tf.constant_initializer(0.0)
        weights = self.make_var('weights', [dim, num_out], init_weights, trainable,
                                regularizer=self.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY))
        biases = self.make_var('biases', [num_out], init_biases, trainable)
        op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
        fc = op(feed_in, weights, biases, name=scope.name)
        return fc

Example 15: residual_block
def residual_block(input_, dilation, kwidth, num_kernels=1,
                   bias_init=None, stddev=0.02, do_skip=True,
                   name='residual_block'):
    print('input shape to residual block: ', input_.get_shape())
    with tf.variable_scope(name):
        h_a = atrous_conv1d(input_, dilation, kwidth, num_kernels,
                            bias_init=bias_init, stddev=stddev)
        h = tf.tanh(h_a)
        # apply gated activation
        z_a = atrous_conv1d(input_, dilation, kwidth, num_kernels,
                            name='conv_gate', bias_init=bias_init,
                            stddev=stddev)
        z = tf.nn.sigmoid(z_a)
        print('gate shape: ', z.get_shape())
        # element-wise apply the gate
        gated_h = tf.mul(z, h)
        print('gated h shape: ', gated_h.get_shape())
        # make res connection
        h_ = conv1d(gated_h, kwidth=1, num_kernels=1,
                    init=tf.truncated_normal_initializer(stddev=stddev),
                    name='residual_conv1')
        res = h_ + input_
        print('residual result: ', res.get_shape())
        if do_skip:
            # make skip connection
            skip = conv1d(gated_h, kwidth=1, num_kernels=1,
                          init=tf.truncated_normal_initializer(stddev=stddev),
                          name='skip_conv1')
            return res, skip
        else:
            return res