This article collects typical usage examples of the Python method tensorflow.contrib.slim.xavier_initializer. If you have been wondering what exactly slim.xavier_initializer does, how to call it, or what real uses of it look like, the curated code samples below should help. You can also explore further usage examples from the module it belongs to, tensorflow.contrib.slim.
The following shows 15 code examples of the slim.xavier_initializer method, sorted by popularity by default.
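Before diving into the examples, here is a minimal, self-contained sketch of the common pattern (assuming TensorFlow 1.x, where tf.contrib is still available; the layer sizes are arbitrary illustrations, not taken from the examples below):

import tensorflow as tf
import tensorflow.contrib.slim as slim

# xavier_initializer() returns a Xavier/Glorot initializer: it scales the
# initial weights by the layer's fan-in and fan-out so that activation
# variance stays roughly constant across layers. It is normally passed to
# a slim layer via the weights_initializer argument.
images = tf.placeholder(tf.float32, [None, 28, 28, 1])
net = slim.conv2d(images, 32, [3, 3],
                  weights_initializer=slim.xavier_initializer())
net = slim.flatten(net)
logits = slim.fully_connected(net, 10, activation_fn=None,
                              weights_initializer=slim.xavier_initializer(uniform=False))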
Example 1: se_module
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def se_module(input_net, ratio=16, reuse=None, scope=None):
    with tf.variable_scope(scope, 'SE', [input_net], reuse=reuse):
        h, w, c = tuple([dim.value for dim in input_net.shape[1:4]])
        assert c % ratio == 0
        hidden_units = int(c / ratio)
        squeeze = slim.avg_pool2d(input_net, [h, w], padding='VALID')
        excitation = slim.flatten(squeeze)
        excitation = slim.fully_connected(excitation, hidden_units, scope='se_fc1',
                                          weights_regularizer=None,
                                          weights_initializer=slim.xavier_initializer(),
                                          activation_fn=tf.nn.relu)
        excitation = slim.fully_connected(excitation, c, scope='se_fc2',
                                          weights_regularizer=None,
                                          weights_initializer=slim.xavier_initializer(),
                                          activation_fn=tf.nn.sigmoid)
        excitation = tf.reshape(excitation, [-1, 1, 1, c])
        output_net = input_net * excitation
        return output_net
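This is a squeeze-and-excitation (SE) block: global average pooling squeezes each channel to a scalar, two Xavier-initialized fully connected layers compute per-channel gates, and the input is rescaled channel-wise. A usage sketch under illustrative assumptions (the shape is made up; the channel count must be divisible by ratio):

# Hypothetical input: 64 channels, so the hidden layer has 64 / 16 = 4 units.
x = tf.placeholder(tf.float32, [None, 7, 7, 64])
y = se_module(x, ratio=16, scope='se_demo')  # same shape as x: [None, 7, 7, 64]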
Example 2: conv_module
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def conv_module(net, num_res_layers, num_kernels, trans_kernel_size=3, trans_stride=2,
                use_se=False, reuse=None, scope=None):
    with tf.variable_scope(scope, 'conv', [net], reuse=reuse):
        net = slim.conv2d(net, num_kernels, kernel_size=trans_kernel_size, stride=trans_stride,
                          padding='SAME', weights_initializer=slim.xavier_initializer())
        shortcut = net
        for i in range(num_res_layers):
            net = slim.conv2d(net, num_kernels, kernel_size=3, stride=1, padding='SAME',
                              weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                              biases_initializer=None)
            net = slim.conv2d(net, num_kernels, kernel_size=3, stride=1, padding='SAME',
                              weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                              biases_initializer=None)
            print('| ---- block_%d' % i)
            if use_se:
                net = se_module(net)
            net = net + shortcut
            shortcut = net
        return net
Example 3: se_module
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def se_module(input_net, ratio=16, reuse=None, scope=None):
    with tf.variable_scope(scope, 'SE', [input_net], reuse=reuse):
        h, w, c = tuple([dim.value for dim in input_net.shape[1:4]])
        assert c % ratio == 0
        hidden_units = int(c / ratio)
        squeeze = slim.avg_pool2d(input_net, [h, w], padding='VALID')
        excitation = slim.flatten(squeeze)
        excitation = slim.fully_connected(excitation, hidden_units, scope='se_fc1',
                                          weights_regularizer=None,
                                          # weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                          weights_initializer=slim.xavier_initializer(),
                                          activation_fn=tf.nn.relu)
        excitation = slim.fully_connected(excitation, c, scope='se_fc2',
                                          weights_regularizer=None,
                                          # weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                          weights_initializer=slim.xavier_initializer(),
                                          activation_fn=tf.nn.sigmoid)
        excitation = tf.reshape(excitation, [-1, 1, 1, c])
        output_net = input_net * excitation
        return output_net
Example 4: R_Net
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def R_Net(inputs, label=None, bbox_target=None, landmark_target=None, training=True):
    with slim.arg_scope([slim.conv2d],
                        activation_fn=prelu,
                        weights_initializer=slim.xavier_initializer(),
                        biases_initializer=tf.zeros_initializer(),
                        weights_regularizer=slim.l2_regularizer(0.0005),
                        padding='valid'):
        print(inputs.get_shape())
        net = slim.conv2d(inputs, num_outputs=28, kernel_size=[3, 3], stride=1, scope="conv1")
        print(net.get_shape())
        net = slim.max_pool2d(net, kernel_size=[3, 3], stride=2, scope="pool1", padding='SAME')
        print(net.get_shape())
        net = slim.conv2d(net, num_outputs=48, kernel_size=[3, 3], stride=1, scope="conv2")
        print(net.get_shape())
        net = slim.max_pool2d(net, kernel_size=[3, 3], stride=2, scope="pool2")
        print(net.get_shape())
        net = slim.conv2d(net, num_outputs=64, kernel_size=[2, 2], stride=1, scope="conv3")
        print(net.get_shape())
        fc_flatten = slim.flatten(net)
        print(fc_flatten.get_shape())
        fc1 = slim.fully_connected(fc_flatten, num_outputs=128, scope="fc1")
        print(fc1.get_shape())
        # batch*2
        cls_prob = slim.fully_connected(fc1, num_outputs=2, scope="cls_fc", activation_fn=tf.nn.softmax)
        print(cls_prob.get_shape())
        # batch*4
        bbox_pred = slim.fully_connected(fc1, num_outputs=4, scope="bbox_fc", activation_fn=None)
        print(bbox_pred.get_shape())
        # batch*10
        landmark_pred = slim.fully_connected(fc1, num_outputs=10, scope="landmark_fc", activation_fn=None)
        print(landmark_pred.get_shape())
        # train
        if training:
            cls_loss = cls_ohem(cls_prob, label)
            bbox_loss = bbox_ohem(bbox_pred, bbox_target, label)
            accuracy = cal_accuracy(cls_prob, label)
            landmark_loss = landmark_ohem(landmark_pred, landmark_target, label)
            L2_loss = tf.add_n(slim.losses.get_regularization_losses())
            return cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy
        else:
            return cls_prob, bbox_pred, landmark_pred
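For context: R_Net is the refinement stage of the MTCNN face detector, whose standard input is a 24x24x3 crop; it returns a 2-way face probability, 4 bounding-box regression offsets, and 10 landmark coordinates. The helpers prelu, cls_ohem, bbox_ohem, landmark_ohem, and cal_accuracy are defined elsewhere in the source repository.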
Example 5: flatten_fully_connected
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def flatten_fully_connected(inputs,
                            num_outputs,
                            activation_fn=tf.nn.relu,
                            normalizer_fn=None,
                            normalizer_params=None,
                            weights_initializer=slim.xavier_initializer(),
                            weights_regularizer=None,
                            biases_initializer=tf.zeros_initializer(),
                            biases_regularizer=None,
                            reuse=None,
                            variables_collections=None,
                            outputs_collections=None,
                            trainable=True,
                            scope=None):
    with tf.variable_scope(scope, 'flatten_fully_connected', [inputs]):
        if inputs.shape.ndims > 2:
            inputs = slim.flatten(inputs)
        return slim.fully_connected(inputs,
                                    num_outputs,
                                    activation_fn,
                                    normalizer_fn,
                                    normalizer_params,
                                    weights_initializer,
                                    weights_regularizer,
                                    biases_initializer,
                                    biases_regularizer,
                                    reuse,
                                    variables_collections,
                                    outputs_collections,
                                    trainable,
                                    scope)
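The wrapper simply flattens any input with more than two dimensions before delegating to slim.fully_connected, so convolutional feature maps can be fed in directly. A usage sketch with made-up shapes:

x = tf.placeholder(tf.float32, [None, 4, 4, 128])
y = flatten_fully_connected(x, 10)  # x is flattened to [None, 2048]; y is [None, 10]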
Example 6: flatten_fully_connected_v2
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def flatten_fully_connected_v2(inputs,
                               num_outputs,
                               activation_fn=nn.relu,
                               normalizer_fn=None,
                               normalizer_params=None,
                               weights_normalizer_fn=None,
                               weights_normalizer_params=None,
                               weights_initializer=initializers.xavier_initializer(),
                               weights_regularizer=None,
                               biases_initializer=init_ops.zeros_initializer(),
                               biases_regularizer=None,
                               reuse=None,
                               variables_collections=None,
                               outputs_collections=None,
                               trainable=True,
                               scope=None):
    with variable_scope.variable_scope(scope, 'flatten_fully_connected_v2'):
        if inputs.shape.ndims > 2:
            inputs = layers.flatten(inputs)
        return fully_connected(inputs=inputs,
                               num_outputs=num_outputs,
                               activation_fn=activation_fn,
                               normalizer_fn=normalizer_fn,
                               normalizer_params=normalizer_params,
                               weights_normalizer_fn=weights_normalizer_fn,
                               weights_normalizer_params=weights_normalizer_params,
                               weights_initializer=weights_initializer,
                               weights_regularizer=weights_regularizer,
                               biases_initializer=biases_initializer,
                               biases_regularizer=biases_regularizer,
                               reuse=reuse,
                               variables_collections=variables_collections,
                               outputs_collections=outputs_collections,
                               trainable=trainable,
                               scope=scope)
Example 7: flatten_fully_connected_v1
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def flatten_fully_connected_v1(inputs,
                               num_outputs,
                               activation_fn=tf.nn.relu,
                               normalizer_fn=None,
                               normalizer_params=None,
                               weights_initializer=slim.xavier_initializer(),
                               weights_regularizer=None,
                               biases_initializer=tf.zeros_initializer(),
                               biases_regularizer=None,
                               reuse=None,
                               variables_collections=None,
                               outputs_collections=None,
                               trainable=True,
                               scope=None):
    with tf.variable_scope(scope, 'flatten_fully_connected_v1'):
        if inputs.shape.ndims > 2:
            inputs = slim.flatten(inputs)
        return slim.fully_connected(inputs,
                                    num_outputs,
                                    activation_fn,
                                    normalizer_fn,
                                    normalizer_params,
                                    weights_initializer,
                                    weights_regularizer,
                                    biases_initializer,
                                    biases_regularizer,
                                    reuse,
                                    variables_collections,
                                    outputs_collections,
                                    trainable,
                                    scope)
Example 8: inference
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def inference(images, embedding_size=512, reuse=None, scope='SphereNet'):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(0.0),
                        normalizer_fn=None,
                        normalizer_params=None,
                        activation_fn=parametric_relu):
        with tf.variable_scope('SphereNet', [images], reuse=reuse):
            # Fix the moving mean and std when training PFE
            with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=False):
                print('SphereNet input shape:', [dim.value for dim in images.shape])
                model_version = '64'
                num_layers, num_kernels = model_params[model_version]
                net = conv_module(images, num_layers[0], num_kernels[0], scope='conv1')
                print('module_1 shape:', [dim.value for dim in net.shape])
                net = conv_module(net, num_layers[1], num_kernels[1], scope='conv2')
                print('module_2 shape:', [dim.value for dim in net.shape])
                net = conv_module(net, num_layers[2], num_kernels[2], scope='conv3')
                print('module_3 shape:', [dim.value for dim in net.shape])
                net = conv_module(net, num_layers[3], num_kernels[3], scope='conv4')
                print('module_4 shape:', [dim.value for dim in net.shape])
                net_ = net
                net = slim.flatten(net)
                mu = slim.fully_connected(net, embedding_size, scope='Bottleneck',
                                          weights_initializer=slim.xavier_initializer(),
                                          normalizer_fn=slim.batch_norm,
                                          normalizer_params=batch_norm_params_last,
                                          activation_fn=None)
                # Output used for PFE
                mu = tf.nn.l2_normalize(mu, axis=1)
                conv_final = net
                return mu, conv_final
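Note that model_params, batch_norm_params_last, and parametric_relu come from the surrounding source file. The batch-norm statistics are frozen (is_training=False) because, per the inline comment, this backbone serves as a fixed feature extractor when training PFE (Probabilistic Face Embeddings), and the bottleneck output mu is L2-normalized before being returned.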
Example 9: conv_module
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def conv_module(net, num_res_layers, num_kernels, reuse=None, scope=None):
    with tf.variable_scope(scope, 'conv', [net], reuse=reuse):
        # Every 2 conv layers constitute a residual block
        if scope == 'conv1':
            for i in range(len(num_kernels)):
                with tf.variable_scope('layer_%d' % i, reuse=reuse):
                    net = slim.conv2d(net, num_kernels[i], kernel_size=3, stride=1, padding='VALID',
                                      weights_initializer=slim.xavier_initializer())
                    print('| ---- layer_%d' % i)
            net = slim.max_pool2d(net, 2, stride=2, padding='VALID')
        else:
            shortcut = net
            for i in range(num_res_layers):
                with tf.variable_scope('layer_%d' % i, reuse=reuse):
                    net = slim.conv2d(net, num_kernels[0], kernel_size=3, stride=1, padding='SAME',
                                      weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                                      biases_initializer=None)
                    print('| ---- layer_%d' % i)
                if i % 2 == 1:
                    net = se_module(net)
                    net = net + shortcut
                    shortcut = net
                    print('| shortcut')
            # Pooling for conv2 - conv4
            if len(num_kernels) > 1:
                with tf.variable_scope('expand', reuse=reuse):
                    net = slim.conv2d(net, num_kernels[1], kernel_size=3, stride=1, padding='VALID',
                                      weights_initializer=slim.xavier_initializer())
                net = slim.max_pool2d(net, 2, stride=2, padding='VALID')
                print('- expand')
        return net
Example 10: inference
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def inference(images, keep_probability, phase_train=True, bottleneck_layer_size=512,
              weight_decay=0.0, reuse=None, model_version=None):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        activation_fn=activation,
                        normalizer_fn=None,
                        normalizer_params=None):
        with tf.variable_scope('FaceResNet', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=phase_train):
                print('input shape:', [dim.value for dim in images.shape])
                net = conv_module(images, 0, [32, 64], scope='conv1')
                print('module_1 shape:', [dim.value for dim in net.shape])
                net = conv_module(net, 2, [64, 128], scope='conv2')
                print('module_2 shape:', [dim.value for dim in net.shape])
                net = conv_module(net, 4, [128, 256], scope='conv3')
                print('module_3 shape:', [dim.value for dim in net.shape])
                net = conv_module(net, 10, [256, 512], scope='conv4')
                print('module_4 shape:', [dim.value for dim in net.shape])
                net = conv_module(net, 6, [512], scope='conv5')
                print('module_5 shape:', [dim.value for dim in net.shape])
                net = slim.flatten(net)
                net = slim.fully_connected(net, bottleneck_layer_size, scope='Bottleneck',
                                           weights_initializer=slim.xavier_initializer(),
                                           activation_fn=None)
                return net
Example 11: conv_module
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def conv_module(net, num_res_layers, num_kernels, reuse=None, scope=None):
    with tf.variable_scope(scope, 'conv', [net], reuse=reuse):
        # Every 2 conv layers constitute a residual block
        if scope == 'conv1':
            for i in range(len(num_kernels)):
                with tf.variable_scope('layer_%d' % i, reuse=reuse):
                    net = slim.conv2d(net, num_kernels[i], kernel_size=3, stride=1, padding='VALID',
                                      weights_initializer=slim.xavier_initializer())
                    # net = activation(net)
                    print('| ---- layer_%d' % i)
            net = slim.max_pool2d(net, 2, stride=2, padding='VALID')
        else:
            shortcut = net
            for i in range(num_res_layers):
                with tf.variable_scope('layer_%d' % i, reuse=reuse):
                    net = slim.conv2d(net, num_kernels[0], kernel_size=3, stride=1, padding='SAME',
                                      weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                                      biases_initializer=None)
                    # net = activation(net)
                    print('| ---- layer_%d' % i)
                if i % 2 == 1:
                    net = se_module(net)
                    net = net + shortcut
                    shortcut = net
                    print('| shortcut')
            # Pooling for conv2 - conv4
            if len(num_kernels) > 1:
                with tf.variable_scope('expand', reuse=reuse):
                    # net = slim.batch_norm(net, **batch_norm_params)
                    net = slim.conv2d(net, num_kernels[1], kernel_size=3, stride=1, padding='VALID',
                                      weights_initializer=slim.xavier_initializer())
                    # net = activation(net)
                net = slim.max_pool2d(net, 2, stride=2, padding='VALID')
                print('- expand')
        return net
Example 12: cosine_softmax
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def cosine_softmax(prelogits, label, num_classes, weight_decay, scale=16.0, reuse=None):
    ''' Tensorflow implementation of L2-Softmax, proposed in:
        R. Ranjan, C. D. Castillo, and R. Chellappa. L2-constrained softmax loss for
        discriminative face verification. arXiv:1703.09507, 2017.
    '''
    nrof_features = prelogits.shape[1].value
    with tf.variable_scope('Logits', reuse=reuse):
        weights = tf.get_variable('weights', shape=(nrof_features, num_classes),
                                  regularizer=slim.l2_regularizer(weight_decay),
                                  initializer=slim.xavier_initializer(),
                                  # initializer=tf.truncated_normal_initializer(stddev=0.1),
                                  dtype=tf.float32)
        _scale = tf.get_variable('scale', shape=(),
                                 regularizer=slim.l2_regularizer(1e-2),
                                 initializer=tf.constant_initializer(1.00),
                                 trainable=True,
                                 dtype=tf.float32)
        weights_normed = tf.nn.l2_normalize(weights, dim=0)
        prelogits_normed = tf.nn.l2_normalize(prelogits, dim=1)
        if scale == 'auto':
            scale = tf.nn.softplus(_scale)
        else:
            assert type(scale) == float
            scale = tf.constant(scale)
        logits = scale * tf.matmul(prelogits_normed, weights_normed)
        cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label, logits=logits), name='cross_entropy')
    return logits, cross_entropy
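Since both the weight columns and the prelogits are L2-normalized before the matmul, each logit reduces to logits_j = scale * cos(theta_j), where theta_j is the angle between the feature vector and the weight column for class j; the scale (a fixed float, or learned via softplus when scale='auto') restores enough dynamic range for the softmax cross-entropy to converge.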
Example 13: flatten_fully_connected
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def flatten_fully_connected(inputs,
                            num_outputs,
                            activation_fn=tf.nn.relu,
                            normalizer_fn=None,
                            normalizer_params=None,
                            weights_initializer=slim.xavier_initializer(),
                            weights_regularizer=None,
                            biases_initializer=tf.zeros_initializer(),
                            biases_regularizer=None,
                            reuse=None,
                            variables_collections=None,
                            outputs_collections=None,
                            trainable=True,
                            scope=None):
    with tf.variable_scope(scope, 'flatten_fully_connected', [inputs]):
        if inputs.shape.ndims > 2:
            inputs = slim.flatten(inputs)
        return slim.fully_connected(inputs,
                                    num_outputs,
                                    activation_fn,
                                    normalizer_fn,
                                    normalizer_params,
                                    weights_initializer,
                                    weights_regularizer,
                                    biases_initializer,
                                    biases_regularizer,
                                    reuse,
                                    variables_collections,
                                    outputs_collections,
                                    trainable,
                                    scope)
Example 14: _region_proposal
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def _region_proposal(self, net_conv, is_training):
    initializer = slim.xavier_initializer(uniform=True)
    rpn = slim.conv2d(net_conv, cfg.RPN_CHANNELS, [3, 3], trainable=is_training,
                      weights_initializer=initializer, scope="rpn_conv/3x3")
    self._act_summaries.append(rpn)
    hidden_num = 128
    # bi_lstm shape: [-1, hidden_num * 2]
    bi_lstm = self._BiLstm(rpn, cfg.RPN_CHANNELS, hidden_num, name="bi_lstm")
    shape = tf.shape(rpn)
    N, H, W, _ = shape[0], shape[1], shape[2], shape[3]
    bi_lstm_reshape = tf.reshape(bi_lstm, [N, H, W, hidden_num * 2])
    fc = slim.conv2d(bi_lstm_reshape, 512, [1, 1], weights_initializer=initializer,
                     padding='VALID', scope='conv_fc')
    # use a 1x1 conv as FC: (N, H, W, num_anchors * 2)
    rpn_cls_score = slim.conv2d(fc, self._num_anchors * 2, [1, 1], weights_initializer=initializer,
                                padding='VALID', activation_fn=None, scope='rpn_cls_score')
    # use a 1x1 conv as FC: (N, H, W, num_anchors * 4)
    rpn_bbox_pred = slim.conv2d(fc, self._num_anchors * 4, [1, 1], weights_initializer=initializer,
                                padding='VALID', activation_fn=None, scope='rpn_bbox_pred')
    # (N, H, W, num_anchors * 2) -> (N, H, W * num_anchors, 2)
    rpn_cls_score_reshape = self._reshape_layer(rpn_cls_score, 2, 'rpn_cls_score_reshape')
    rpn_cls_prob = self._softmax_layer(rpn_cls_score_reshape, "rpn_cls_prob")
    # (N, H, W * num_anchors, 2) -> (N, H, W, num_anchors * 2)
    rpn_cls_prob_reshape = self._reshape_layer(rpn_cls_prob, self._num_anchors * 2, "rpn_cls_prob_reshape")
    if is_training:
        self._anchor_target_layer(rpn_cls_score, "anchor")
    else:
        if cfg.TEST.MODE == 'nms':
            rois, _ = self._proposal_layer(rpn_cls_prob_reshape, rpn_bbox_pred, "rois")
        elif cfg.TEST.MODE == 'top':
            rois, _ = self._proposal_top_layer(rpn_cls_prob, rpn_bbox_pred, "rois")
        else:
            raise NotImplementedError
        self._predictions["rois"] = rois
    self._predictions["rpn_cls_score"] = rpn_cls_score
    self._predictions["rpn_cls_score_reshape"] = rpn_cls_score_reshape
    self._predictions["rpn_cls_prob"] = rpn_cls_prob_reshape
    self._predictions["rpn_bbox_pred"] = rpn_bbox_pred
Example 15: P_Net
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import xavier_initializer [as alias]
def P_Net(inputs, label=None, bbox_target=None, landmark_target=None, training=True):
    # define common params
    with slim.arg_scope([slim.conv2d],
                        activation_fn=prelu,
                        weights_initializer=slim.xavier_initializer(),
                        biases_initializer=tf.zeros_initializer(),
                        weights_regularizer=slim.l2_regularizer(0.0005),
                        padding='valid'):
        print(inputs.get_shape())
        net = slim.conv2d(inputs, 10, 3, stride=1, scope='conv1')
        print(net.get_shape())
        net = slim.max_pool2d(net, kernel_size=[2, 2], stride=2, scope='pool1', padding='SAME')
        print(net.get_shape())
        net = slim.conv2d(net, num_outputs=16, kernel_size=[3, 3], stride=1, scope='conv2')
        print(net.get_shape())
        net = slim.conv2d(net, num_outputs=32, kernel_size=[3, 3], stride=1, scope='conv3')
        print(net.get_shape())
        # batch*H*W*2
        conv4_1 = slim.conv2d(net, num_outputs=2, kernel_size=[1, 1], stride=1, scope='conv4_1', activation_fn=tf.nn.softmax)
        # conv4_1 = slim.conv2d(net, num_outputs=1, kernel_size=[1, 1], stride=1, scope='conv4_1', activation_fn=tf.nn.sigmoid)
        print(conv4_1.get_shape())
        # batch*H*W*4
        bbox_pred = slim.conv2d(net, num_outputs=4, kernel_size=[1, 1], stride=1, scope='conv4_2', activation_fn=None)
        print(bbox_pred.get_shape())
        # batch*H*W*10
        landmark_pred = slim.conv2d(net, num_outputs=10, kernel_size=[1, 1], stride=1, scope='conv4_3', activation_fn=None)
        print(landmark_pred.get_shape())
        # cls_prob_original = conv4_1
        # bbox_pred_original = bbox_pred
        if training:
            # batch*2
            cls_prob = tf.squeeze(conv4_1, [1, 2], name='cls_prob')
            cls_loss = cls_ohem(cls_prob, label)
            # batch*4
            bbox_pred = tf.squeeze(bbox_pred, [1, 2], name='bbox_pred')
            bbox_loss = bbox_ohem(bbox_pred, bbox_target, label)
            # batch*10
            landmark_pred = tf.squeeze(landmark_pred, [1, 2], name="landmark_pred")
            landmark_loss = landmark_ohem(landmark_pred, landmark_target, label)
            accuracy = cal_accuracy(cls_prob, label)
            L2_loss = tf.add_n(slim.losses.get_regularization_losses())
            return cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy
        # test
        else:
            # when testing, batch_size = 1
            cls_pro_test = tf.squeeze(conv4_1, axis=0)
            bbox_pred_test = tf.squeeze(bbox_pred, axis=0)
            landmark_pred_test = tf.squeeze(landmark_pred, axis=0)
            return cls_pro_test, bbox_pred_test, landmark_pred_test
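Unlike R_Net above, P_Net is fully convolutional, so at test time it can slide over inputs of arbitrary size and return per-location score maps. During training the inputs are 12x12 crops, which is why the spatial dimensions collapse to 1x1 and can be removed with tf.squeeze(..., [1, 2]).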