This article collects typical usage examples of the tensorflow.contrib.slim.arg_scope function in Python. If you have been wondering what arg_scope does, how to call it, or what real usage looks like, the curated examples below should help.
Below are 15 code examples of the arg_scope function, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system surface better Python code examples.
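Before the examples, here is a minimal sketch of what slim.arg_scope does (my own illustration, not taken from the examples below): it installs default keyword arguments for the listed layer functions, and an explicit argument at the call site still overrides the scoped default. Note that tensorflow.contrib.slim only exists in TensorFlow 1.x; tf.contrib was removed in TensorFlow 2.x.
import tensorflow as tf
import tensorflow.contrib.slim as slim

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope([slim.conv2d],
                    padding='SAME',
                    weights_regularizer=slim.l2_regularizer(0.0001)):
    # Both scoped defaults apply to this call.
    net = slim.conv2d(inputs, 64, [3, 3], scope='conv1')
    # A call-site argument overrides the scoped default.
    net = slim.conv2d(net, 64, [3, 3], padding='VALID', scope='conv2')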
Example 1: get_network_byname
def get_network_byname(net_name,
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True):
    if net_name not in ['resnet_v1_50', 'mobilenet_224', 'inception_resnet', 'vgg16', 'resnet_v1_101']:
        raise ValueError('unknown network: {}; net_name must be one of '
                         '[resnet_v1_50, mobilenet_224, inception_resnet, '
                         'vgg16, resnet_v1_101]'.format(net_name))
if net_name == 'resnet_v1_50':
with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=cfgs.WEIGHT_DECAY[net_name])):
logits, end_points = resnet_v1.resnet_v1_50(inputs=inputs,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
spatial_squeeze=spatial_squeeze
)
return logits, end_points
if net_name == 'resnet_v1_101':
with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=cfgs.WEIGHT_DECAY[net_name])):
logits, end_points = resnet_v1.resnet_v1_101(inputs=inputs,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
spatial_squeeze=spatial_squeeze
)
return logits, end_points
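Only the two ResNet branches are shown in this excerpt; the remaining net_names are validated but their branches are omitted. Here cfgs is the project's configuration module, and its WEIGHT_DECAY entry is presumably a per-network mapping, along the lines of this hypothetical snippet:
# Hypothetical config assumed by the example above (not part of the source):
WEIGHT_DECAY = {'resnet_v1_50': 0.0001, 'resnet_v1_101': 0.0001}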
Example 2: encoder
def encoder(self, images, is_training):
activation_fn = leaky_relu # tf.nn.relu
weight_decay = 0.0
with tf.variable_scope('encoder'):
with slim.arg_scope([slim.batch_norm],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=self.batch_norm_params):
net = images
net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')
net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')
net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
net = slim.flatten(net)
fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
return fc1, fc2
Example 3: inference
def inference(image_batch, keep_probability,
phase_train=True, bottleneck_layer_size=512,
weight_decay=0.0):
batch_norm_params = {
'decay': 0.995,
'epsilon': 0.001,
'scale':True,
'is_training': phase_train,
'updates_collections': None,
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ]
}
with tf.variable_scope('Resface'):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=prelu,
normalizer_fn=slim.batch_norm,
#normalizer_fn=None,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.conv2d], kernel_size=3):
return resface20(images=image_batch,
keep_probability=keep_probability,
phase_train=phase_train,
bottleneck_layer_size=bottleneck_layer_size,
reuse=None)
Example 4: inference
def inference(image_batch, keep_probability,
phase_train=True, bottleneck_layer_size=512,
weight_decay=0.0):
with tf.variable_scope('LResnetE_IR'):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=None, #default no biases
activation_fn=None,
normalizer_fn=None
):
with slim.arg_scope([slim.conv2d], kernel_size=3):
with slim.arg_scope([slim.batch_norm],
decay=0.995,
epsilon=1e-5,
scale=True,
is_training=phase_train,
activation_fn=prelu,
updates_collections=None,
variables_collections=[ tf.GraphKeys.TRAINABLE_VARIABLES ]
):
return LResnet50E_IR(images=image_batch,
keep_probability=keep_probability,
phase_train=phase_train,
bottleneck_layer_size=bottleneck_layer_size,
reuse=None)
Example 5: _image_to_head
def _image_to_head(self, is_training, reuse=None):
# Base bottleneck
assert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)
net_conv = self._image
if cfg.MOBILENET.FIXED_LAYERS > 0:
with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
net_conv = mobilenet_v1_base(net_conv,
_CONV_DEFS[:cfg.MOBILENET.FIXED_LAYERS],
starting_layer=0,
depth_multiplier=self._depth_multiplier,
reuse=reuse,
scope=self._scope)
if cfg.MOBILENET.FIXED_LAYERS < 12:
with slim.arg_scope(mobilenet_v1_arg_scope(is_training=is_training)):
net_conv = mobilenet_v1_base(net_conv,
_CONV_DEFS[cfg.MOBILENET.FIXED_LAYERS:12],
starting_layer=cfg.MOBILENET.FIXED_LAYERS,
depth_multiplier=self._depth_multiplier,
reuse=reuse,
scope=self._scope)
self._act_summaries.append(net_conv)
self._layers['head'] = net_conv
return net_conv
Example 6: _image_to_head
def _image_to_head(self, is_training, reuse=None):
assert (0 <= cfg.RESNET.FIXED_BLOCKS <= 3)
# Now the base is always fixed during training
with slim.arg_scope(resnet_arg_scope(is_training=False)):
net_conv = self._build_base()
if cfg.RESNET.FIXED_BLOCKS > 0:
with slim.arg_scope(resnet_arg_scope(is_training=False)):
net_conv, _ = resnet_v1.resnet_v1(net_conv,
self._blocks[0:cfg.RESNET.FIXED_BLOCKS],
global_pool=False,
include_root_block=False,
reuse=reuse,
scope=self._scope)
if cfg.RESNET.FIXED_BLOCKS < 3:
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
net_conv, _ = resnet_v1.resnet_v1(net_conv,
self._blocks[cfg.RESNET.FIXED_BLOCKS:-1],
global_pool=False,
include_root_block=False,
reuse=reuse,
scope=self._scope)
self._act_summaries.append(net_conv)
self._layers['head'] = net_conv
return net_conv
Example 7: mobilenet_v1_arg_scope
def mobilenet_v1_arg_scope(is_training=True,
stddev=0.09):
batch_norm_params = {
'is_training': False,
'center': True,
'scale': True,
'decay': 0.9997,
'epsilon': 0.001,
'trainable': False,
}
# Set weight_decay for weights in Conv and DepthSepConv layers.
weights_init = tf.truncated_normal_initializer(stddev=stddev)
regularizer = tf.contrib.layers.l2_regularizer(cfg.MOBILENET.WEIGHT_DECAY)
if cfg.MOBILENET.REGU_DEPTH:
depthwise_regularizer = regularizer
else:
depthwise_regularizer = None
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
trainable=is_training,
weights_initializer=weights_init,
activation_fn=tf.nn.relu6,
normalizer_fn=slim.batch_norm,
padding='SAME'):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
with slim.arg_scope([slim.separable_conv2d],
weights_regularizer=depthwise_regularizer) as sc:
return sc
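Note that this function builds no layers itself; it returns the innermost scope object, which the caller then re-enters with slim.arg_scope. Examples 5, 6, and 13 consume it exactly this way; a minimal sketch of the calling pattern (images being any input batch, an assumption for illustration):
with slim.arg_scope(mobilenet_v1_arg_scope(is_training=True)):
    net = slim.conv2d(images, 32, [3, 3], stride=2, scope='Conv2d_0')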
Example 8: resnet_arg_scope
def resnet_arg_scope(is_training=True,
weight_decay=cfg.TRAIN.WEIGHT_DECAY,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
batch_norm_params = {
# NOTE 'is_training' here does not work because inside resnet it gets reset:
# https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
'is_training': False,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'trainable': cfg.RESNET.BN_TRAIN,
'updates_collections': ops.GraphKeys.UPDATE_OPS
}
with arg_scope(
[slim.conv2d],
weights_regularizer=regularizers.l2_regularizer(weight_decay),
weights_initializer=initializers.variance_scaling_initializer(),
trainable=is_training,
activation_fn=nn_ops.relu,
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
return arg_sc
Example 9: content_extractor
def content_extractor(self, images, reuse=False):
# images: (batch, 32, 32, 3) or (batch, 32, 32, 1)
if images.get_shape()[3] == 1:
        # For the MNIST dataset, replicate the grayscale channel 3 times.
images = tf.image.grayscale_to_rgb(images)
with tf.variable_scope('content_extractor', reuse=reuse):
with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
activation_fn=tf.nn.relu, is_training=(self.mode=='train' or self.mode=='pretrain')):
net = slim.conv2d(images, 64, [3, 3], scope='conv1') # (batch_size, 16, 16, 64)
net = slim.batch_norm(net, scope='bn1')
net = slim.conv2d(net, 128, [3, 3], scope='conv2') # (batch_size, 8, 8, 128)
net = slim.batch_norm(net, scope='bn2')
net = slim.conv2d(net, 256, [3, 3], scope='conv3') # (batch_size, 4, 4, 256)
net = slim.batch_norm(net, scope='bn3')
net = slim.conv2d(net, 128, [4, 4], padding='VALID', scope='conv4') # (batch_size, 1, 1, 128)
net = slim.batch_norm(net, activation_fn=tf.nn.tanh, scope='bn4')
if self.mode == 'pretrain':
net = slim.conv2d(net, 10, [1, 1], padding='VALID', scope='out')
net = slim.flatten(net)
return net
Example 10: conv_tower_fn
def conv_tower_fn(self, images, is_training=True, reuse=None):
"""Computes convolutional features using the InceptionV3 model.
Args:
images: A tensor of shape [batch_size, height, width, channels].
      is_training: whether the network is being trained.
      reuse: whether or not the network and its variables should be reused. To
        be able to reuse, 'scope' must be given.
    Returns:
      A tensor of shape [batch_size, OH, OW, N], where OH x OW is the resolution
      of the output feature map and N is the number of output features (this
      depends on the network architecture).
"""
mparams = self._mparams['conv_tower_fn']
logging.debug('Using final_endpoint=%s', mparams.final_endpoint)
with tf.variable_scope('conv_tower_fn/INCE'):
if reuse:
tf.get_variable_scope().reuse_variables()
with slim.arg_scope(inception.inception_v3_arg_scope()):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, _ = inception.inception_v3_base(
images, final_endpoint=mparams.final_endpoint)
return net
Example 11: decoder
def decoder(self, latent_var, is_training):
activation_fn = leaky_relu # tf.nn.relu
weight_decay = 0.0
with tf.variable_scope('decoder'):
with slim.arg_scope([slim.batch_norm],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=self.batch_norm_params):
net = slim.fully_connected(latent_var, 4096, activation_fn=None, normalizer_fn=None, scope='Fc_1')
net = tf.reshape(net, [-1,4,4,256], name='Reshape')
net = tf.image.resize_nearest_neighbor(net, size=(8,8), name='Upsample_1')
net = slim.conv2d(net, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1b')
net = tf.image.resize_nearest_neighbor(net, size=(16,16), name='Upsample_2')
net = slim.conv2d(net, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2b')
net = tf.image.resize_nearest_neighbor(net, size=(32,32), name='Upsample_3')
net = slim.conv2d(net, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3b')
net = tf.image.resize_nearest_neighbor(net, size=(64,64), name='Upsample_4')
net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4b')
net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=None, scope='Conv2d_4c')
return net
Example 12: factory_fn
def factory_fn(image, reuse):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=False):
with slim.arg_scope([slim.conv2d, slim.fully_connected,
slim.batch_norm, slim.layer_norm],
reuse=reuse):
features, logits = _create_network(
image, reuse=reuse, weight_decay=weight_decay)
return features, logits
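This factory pins batch_norm and dropout to inference mode and threads reuse through every variable-creating layer, so the same network can be instantiated more than once over shared variables. A hypothetical call sequence (image being any input batch; an assumption, not from the source):
features, logits = factory_fn(image, reuse=False)    # first call creates the variables
features2, logits2 = factory_fn(image, reuse=True)   # second call shares them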
Example 13: _build_network
def _build_network(self, sess, is_training=True):
# select initializers
if cfg.TRAIN.TRUNCATED:
initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
initializer_bbox = tf.truncated_normal_initializer(mean=0.0, stddev=0.001)
else:
initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)
# Base bottleneck
assert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)
net_conv = self._image
if cfg.MOBILENET.FIXED_LAYERS > 0:
with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
net_conv = mobilenet_v1_base(net_conv,
_CONV_DEFS[:cfg.MOBILENET.FIXED_LAYERS],
starting_layer=0,
depth_multiplier=self._depth_multiplier,
scope=self._scope)
if cfg.MOBILENET.FIXED_LAYERS < 12:
with slim.arg_scope(mobilenet_v1_arg_scope(is_training=is_training)):
net_conv = mobilenet_v1_base(net_conv,
_CONV_DEFS[cfg.MOBILENET.FIXED_LAYERS:12],
starting_layer=cfg.MOBILENET.FIXED_LAYERS,
depth_multiplier=self._depth_multiplier,
scope=self._scope)
self._act_summaries.append(net_conv)
self._layers['head'] = net_conv
with tf.variable_scope(self._scope, 'MobilenetV1'):
# build the anchors for the image
self._anchor_component()
# region proposal network
rois = self._region_proposal(net_conv, is_training, initializer)
# region of interest pooling
if cfg.POOLING_MODE == 'crop':
pool5 = self._crop_pool_layer(net_conv, rois, "pool5")
else:
raise NotImplementedError
with slim.arg_scope(mobilenet_v1_arg_scope(is_training=is_training)):
fc7 = mobilenet_v1_base(pool5,
_CONV_DEFS[12:],
starting_layer=12,
depth_multiplier=self._depth_multiplier,
scope=self._scope)
with tf.variable_scope(self._scope, 'MobilenetV1'):
# average pooling done by reduce_mean
fc7 = tf.reduce_mean(fc7, axis=[1, 2])
# region classification
cls_prob, bbox_pred = self._region_classification(fc7, is_training,
initializer, initializer_bbox)
self._score_summaries.update(self._predictions)
return rois, cls_prob, bbox_pred
Example 14: construct_embedding
def construct_embedding(self):
"""Builds a conv -> spatial softmax -> FC adaptation network."""
is_training = self._is_training
normalizer_params = {'is_training': is_training}
with tf.variable_scope('tcn_net', reuse=self._reuse) as vs:
self._adaptation_scope = vs.name
with slim.arg_scope(
[slim.layers.conv2d],
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm, normalizer_params=normalizer_params,
weights_regularizer=slim.regularizers.l2_regularizer(
self._l2_reg_weight),
biases_regularizer=slim.regularizers.l2_regularizer(
self._l2_reg_weight)):
with slim.arg_scope(
[slim.layers.fully_connected],
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm, normalizer_params=normalizer_params,
weights_regularizer=slim.regularizers.l2_regularizer(
self._l2_reg_weight),
biases_regularizer=slim.regularizers.l2_regularizer(
self._l2_reg_weight)):
# Input to embedder is pre-trained inception output.
net = self._pretrained_output
# Optionally add more conv layers.
for num_filters in self._additional_conv_sizes:
net = slim.layers.conv2d(
net, num_filters, kernel_size=[3, 3], stride=[1, 1])
net = slim.dropout(net, keep_prob=self._conv_hidden_keep_prob,
is_training=is_training)
# Take the spatial soft arg-max of the last convolutional layer.
# This is a form of spatial attention over the activations.
# See more here: http://arxiv.org/abs/1509.06113.
net = tf.contrib.layers.spatial_softmax(net)
self.spatial_features = net
# Add fully connected layers.
net = slim.layers.flatten(net)
for fc_hidden_size in self._fc_hidden_sizes:
net = slim.layers.fully_connected(net, fc_hidden_size)
if self._fc_hidden_keep_prob < 1.0:
net = slim.dropout(net, keep_prob=self._fc_hidden_keep_prob,
is_training=is_training)
# Connect last FC layer to embedding.
net = slim.layers.fully_connected(net, self._embedding_size,
activation_fn=None)
# Optionally L2 normalize the embedding.
if self._embedding_l2:
net = tf.nn.l2_normalize(net, dim=1)
return net
Example 15: factory_fn
def factory_fn(image, reuse, l2_normalize):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.fully_connected,
slim.batch_norm, slim.layer_norm],
reuse=reuse):
features, logits = _create_network(
image, num_classes, l2_normalize=l2_normalize,
reuse=reuse, create_summaries=is_training,
weight_decay=weight_decay)
return features, logits