This article collects typical usage examples of the Python method tensorflow.contrib.slim.dropout. If you have been wondering what slim.dropout does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore the other methods of the module tensorflow.contrib.slim.
The following shows 15 code examples of slim.dropout, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
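Before the examples, here is a minimal, self-contained sketch (not taken from any of the samples below; the tensor names are hypothetical) of what slim.dropout does: with is_training=True it randomly zeroes units and rescales the survivors by 1/keep_prob, and with is_training=False it is an identity op.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Hypothetical input batch: 4 samples, 10 features each.
x = tf.placeholder(tf.float32, [4, 10], name='x')

# Training mode: each unit is kept with probability 0.5 and the
# surviving units are scaled by 1/0.5 to preserve the expectation.
train_out = slim.dropout(x, keep_prob=0.5, is_training=True, scope='do_train')

# Inference mode: dropout becomes a no-op pass-through.
eval_out = slim.dropout(x, keep_prob=0.5, is_training=False, scope='do_eval')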
Example 1: AddDropout
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also uses: import re
def AddDropout(self, prev_layer, index):
  """Adds a dropout layer.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing.

  Returns:
    Output tensor, end index in model_str.
  """
  pattern = re.compile(R'(Do)({\w+})?')
  m = pattern.match(self.model_str, index)
  if m is None:
    return None, None
  name = self._GetLayerName(m.group(0), index, m.group(2))
  layer = slim.dropout(
      prev_layer, 0.5, is_training=self.is_training, scope=name)
  return layer, m.end()
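As a side note, the R'(Do)({\w+})?' pattern accepts either a bare 'Do' token or 'Do{name}' with an explicit layer name. A standalone sketch of that parsing step (the model string below is made up for illustration):

import re

pattern = re.compile(R'(Do)({\w+})?')
model_str = 'Fc1024 Do{dropout1} Fc256'

m = pattern.match(model_str, 7)  # start parsing at the 'Do' token
if m is not None:
    print(m.group(0))  # 'Do{dropout1}' -> the whole parsed token
    print(m.group(2))  # '{dropout1}'   -> the optional explicit name
    print(m.end())     # index just past the parsed token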
Example 2: E
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also uses: import tensorflow as tf
def E(self, images, is_training=False, reuse=False):
    if images.get_shape()[3] == 3:
        images = tf.image.rgb_to_grayscale(images)
    with tf.variable_scope('encoder', reuse=reuse):
        with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu):
            with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu, padding='VALID'):
                net = slim.conv2d(images, 64, 5, scope='conv1')
                net = slim.max_pool2d(net, 2, stride=2, scope='pool1')
                net = slim.conv2d(net, 128, 5, scope='conv2')
                net = slim.max_pool2d(net, 2, stride=2, scope='pool2')
                net = tf.contrib.layers.flatten(net)
                net = slim.fully_connected(net, 1024, activation_fn=tf.nn.relu, scope='fc3')
                net = slim.dropout(net, 0.5, is_training=is_training)
                net = slim.fully_connected(net, self.hidden_repr_size,
                                           activation_fn=tf.tanh, scope='fc4')
                # dropout here or not?
                #~ net = slim.dropout(net, 0.5, is_training=is_training)
                return net
Example 3: _arg_scope
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also uses: import tensorflow as tf
def _arg_scope(self, is_training, reuse=None):
    weight_decay = 0.0
    keep_probability = 1.0
    batch_norm_params = {
        'is_training': is_training,
        # Decay for the moving averages.
        'decay': 0.995,
        # Epsilon to prevent 0s in variance.
        'epsilon': 0.001,
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with tf.variable_scope(self._scope, self._scope, reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=is_training) as sc:
                return sc
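The scope object returned here can be re-entered later, which is the usual point of this pattern. A hedged sketch, assuming a `model` object that exposes the `_arg_scope` method above (the variable names and shapes are illustrative):

images = tf.placeholder(tf.float32, [None, 64, 64, 3])

# Re-enter the captured scope so conv/fc layers pick up the batch-norm,
# regularizer, and initializer defaults defined in _arg_scope.
with slim.arg_scope(model._arg_scope(is_training=True)):
    net = slim.conv2d(images, 32, [3, 3], scope='conv1')
    net = slim.fully_connected(slim.flatten(net), 128, scope='fc1')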
Example 4: conv_tower_fn
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also uses: import tensorflow as tf, import logging, and the repo's inception module
def conv_tower_fn(self, images, is_training=True, reuse=None):
  """Computes convolutional features using the InceptionV3 model.

  Args:
    images: A tensor of shape [batch_size, height, width, channels].
    is_training: whether is training or not.
    reuse: whether or not the network and its variables should be reused. To
      be able to reuse 'scope' must be given.

  Returns:
    A tensor of shape [batch_size, OH, OW, N], where OWxOH is resolution of
    output feature map and N is number of output features (depends on the
    network architecture).
  """
  mparams = self._mparams['conv_tower_fn']
  logging.debug('Using final_endpoint=%s', mparams.final_endpoint)
  with tf.variable_scope('conv_tower_fn/INCE'):
    if reuse:
      tf.get_variable_scope().reuse_variables()
    with slim.arg_scope(inception.inception_v3_arg_scope()):
      with slim.arg_scope([slim.batch_norm, slim.dropout],
                          is_training=is_training):
        net, _ = inception.inception_v3_base(
            images, final_endpoint=mparams.final_endpoint)
  return net
Example 5: mobilenet_v2_arg_scope
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also uses: import tensorflow as tf
def mobilenet_v2_arg_scope(weight_decay, is_training=True, depth_multiplier=1.0,
                           regularize_depthwise=False, dropout_keep_prob=1.0):
    regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    if regularize_depthwise:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm,
                        normalizer_params={'is_training': is_training,
                                           'center': True, 'scale': True}):
        with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
            with slim.arg_scope([slim.separable_conv2d],
                                weights_regularizer=depthwise_regularizer,
                                depth_multiplier=depth_multiplier):
                with slim.arg_scope([slim.dropout], is_training=is_training,
                                    keep_prob=dropout_keep_prob) as sc:
                    return sc
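A sketch of how such an arg_scope might be applied when assembling the network (the layer stack and the input shape below are illustrative, not MobileNet-V2 itself):

images = tf.placeholder(tf.float32, [None, 224, 224, 3])

with slim.arg_scope(mobilenet_v2_arg_scope(weight_decay=4e-5,
                                           is_training=True,
                                           dropout_keep_prob=0.8)):
    net = slim.conv2d(images, 32, [3, 3], stride=2, scope='conv1')
    # num_outputs=None makes this a depthwise-only convolution.
    net = slim.separable_conv2d(net, None, [3, 3], scope='dw_conv1')
    net = slim.dropout(net)  # keep_prob and is_training come from the scope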
Example 6: argscope
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also uses: import tensorflow as tf
def argscope(is_training=None, normalizer_fn=slim.layer_norm):
  """Default TF argscope used for convnet-based grasping models.

  Args:
    is_training: Whether this argscope is for training or inference.
    normalizer_fn: Which conv/fc normalizer to use.

  Returns:
    Dictionary of argument overrides.
  """
  with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
        weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
        activation_fn=tf.nn.relu,
        normalizer_fn=normalizer_fn):
      with slim.arg_scope(
          [slim.conv2d, slim.max_pool2d], stride=2, padding='VALID') as scope:
        return scope
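Usage follows the same pattern as the previous examples; a minimal sketch (input shape, kernel sizes, and keep probability are assumptions, not the grasping models' actual values):

images = tf.placeholder(tf.float32, [None, 64, 64, 3])

# Training-mode graph: slim.dropout is active, and stride-2 VALID
# convolutions/pools come from the defaults set in argscope.
with slim.arg_scope(argscope(is_training=True)):
    net = slim.conv2d(images, 64, [6, 6], scope='conv1')
    net = slim.max_pool2d(net, [3, 3], scope='pool1')
    net = slim.dropout(net, keep_prob=0.8, scope='dropout1')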
Example 7: build_predictions
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
def build_predictions(self, net, rois, is_training, initializer, initializer_bbox):
    # Crop image ROIs
    pool5 = self._crop_pool_layer(net, rois, "pool5")
    pool5_flat = slim.flatten(pool5, scope='flatten')

    # Fully connected layers
    fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
    if is_training:
        fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True, scope='dropout6')
    fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
    if is_training:
        fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True, scope='dropout7')

    # Scores and predictions
    cls_score = slim.fully_connected(fc7, self._num_classes,
                                     weights_initializer=initializer,
                                     trainable=is_training,
                                     activation_fn=None, scope='cls_score')
    cls_prob = self._softmax_layer(cls_score, "cls_prob")
    bbox_prediction = slim.fully_connected(fc7, self._num_classes * 4,
                                           weights_initializer=initializer_bbox,
                                           trainable=is_training,
                                           activation_fn=None, scope='bbox_pred')
    return cls_score, cls_prob, bbox_prediction
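Note that this excerpt guards dropout with a Python-level `if is_training:`, so the op is simply absent from the inference graph. A graph-level alternative (a sketch, not the repo's code) reuses the names above and lets one graph serve both modes:

# keep_prob=0.5 during training; slim.dropout becomes an identity op
# when is_training is False, so no Python branching is needed.
fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=is_training, scope='dropout6')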
Example 8: head_to_tail
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also uses: import tensorflow as tf (plus the repo's resnet_arg_scope)
def head_to_tail(self, fc7_H, fc7_O, pool5_SH, pool5_SO, sp, is_training, name):
    with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
        fc7_SH = tf.reduce_mean(pool5_SH, axis=[1, 2])
        fc7_SO = tf.reduce_mean(pool5_SO, axis=[1, 2])

        Concat_SH = tf.concat([fc7_H[:self.H_num, :], fc7_SH[:self.H_num, :]], 1)
        fc8_SH = slim.fully_connected(Concat_SH, self.num_fc, scope='fc8_SH')
        fc8_SH = slim.dropout(fc8_SH, keep_prob=0.5, is_training=is_training, scope='dropout8_SH')
        fc9_SH = slim.fully_connected(fc8_SH, self.num_fc, scope='fc9_SH')
        fc9_SH = slim.dropout(fc9_SH, keep_prob=0.5, is_training=is_training, scope='dropout9_SH')

        Concat_HOS = tf.concat([fc7_H, fc7_O, fc7_SH, fc7_SO, sp], 1)
        fc8_HOS = slim.fully_connected(Concat_HOS, self.num_fc, scope='fc8_HOS')
        fc8_HOS = slim.dropout(fc8_HOS, keep_prob=0.5, is_training=is_training, scope='dropout8_HOS')
        fc9_HOS = slim.fully_connected(fc8_HOS, self.num_fc, scope='fc9_HOS')
        fc9_HOS = slim.dropout(fc9_HOS, keep_prob=0.5, is_training=is_training, scope='dropout9_HOS')

    return fc9_SH, fc9_HOS
Example 9: head_to_tail
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also uses: import tensorflow as tf (plus the repo's resnet_arg_scope)
def head_to_tail(self, fc7_H, fc7_O, pool5_SH, pool5_SO, sp, is_training, name):
    with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
        fc7_SH = tf.reduce_mean(pool5_SH, axis=[1, 2])
        fc7_SO = tf.reduce_mean(pool5_SO, axis=[1, 2])

        Concat_SH = tf.concat([fc7_H, fc7_SH], 1)
        fc8_SH = slim.fully_connected(Concat_SH, self.num_fc, scope='fc8_SH')
        fc8_SH = slim.dropout(fc8_SH, keep_prob=0.5, is_training=is_training, scope='dropout8_SH')
        fc9_SH = slim.fully_connected(fc8_SH, self.num_fc, scope='fc9_SH')
        fc9_SH = slim.dropout(fc9_SH, keep_prob=0.5, is_training=is_training, scope='dropout9_SH')

        Concat_SO = tf.concat([fc7_O, fc7_SO], 1)
        fc8_SO = slim.fully_connected(Concat_SO, self.num_fc, scope='fc8_SO')
        fc8_SO = slim.dropout(fc8_SO, keep_prob=0.5, is_training=is_training, scope='dropout8_SO')
        fc9_SO = slim.fully_connected(fc8_SO, self.num_fc, scope='fc9_SO')
        fc9_SO = slim.dropout(fc9_SO, keep_prob=0.5, is_training=is_training, scope='dropout9_SO')

        Concat_SHsp = tf.concat([fc7_H, sp], 1)
        Concat_SHsp = slim.fully_connected(Concat_SHsp, self.num_fc, scope='Concat_SHsp')
        Concat_SHsp = slim.dropout(Concat_SHsp, keep_prob=0.5, is_training=is_training, scope='dropout6_SHsp')
        fc7_SHsp = slim.fully_connected(Concat_SHsp, self.num_fc, scope='fc7_SHsp')
        fc7_SHsp = slim.dropout(fc7_SHsp, keep_prob=0.5, is_training=is_training, scope='dropout7_SHsp')

    return fc9_SH, fc9_SO, fc7_SHsp
Example 10: AddDropout
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also uses: import re
def AddDropout(self, prev_layer, index, reuse=None):
  """Adds a dropout layer.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing.
    reuse: Whether to reuse variables (unused by this layer type).

  Returns:
    Output tensor, end index in model_str.
  """
  pattern = re.compile(R'(Do)({\w+})?')
  m = pattern.match(self.model_str, index)
  if m is None:
    return None, None
  name = self._GetLayerName(m.group(0), index, m.group(2))
  layer = slim.dropout(
      prev_layer, 0.5, is_training=self.is_training, scope=name)
  return layer, m.end()
Example 11: fc_network
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also uses: import numpy as np, import tensorflow as tf
def fc_network(x, neurons, wt_decay, name, num_pred=None, offset=0,
               batch_norm_param=None, dropout_ratio=0.0, is_training=None):
  if dropout_ratio > 0:
    assert is_training is not None, \
        'is_training needs to be defined when training with dropout.'

  repr = []
  for i, neuron in enumerate(neurons):
    init_var = np.sqrt(2.0 / neuron)
    if batch_norm_param is not None:
      x = slim.fully_connected(x, neuron, activation_fn=None,
                               weights_initializer=tf.random_normal_initializer(stddev=init_var),
                               weights_regularizer=slim.l2_regularizer(wt_decay),
                               normalizer_fn=slim.batch_norm,
                               normalizer_params=batch_norm_param,
                               biases_initializer=tf.zeros_initializer(),
                               scope='{:s}_{:d}'.format(name, offset + i))
    else:
      x = slim.fully_connected(x, neuron, activation_fn=tf.nn.relu,
                               weights_initializer=tf.random_normal_initializer(stddev=init_var),
                               weights_regularizer=slim.l2_regularizer(wt_decay),
                               biases_initializer=tf.zeros_initializer(),
                               scope='{:s}_{:d}'.format(name, offset + i))
    if dropout_ratio > 0:
      x = slim.dropout(x, keep_prob=1 - dropout_ratio, is_training=is_training,
                       scope='{:s}_{:d}'.format('dropout_' + name, offset + i))
    repr.append(x)

  if num_pred is not None:
    init_var = np.sqrt(2.0 / num_pred)
    x = slim.fully_connected(x, num_pred,
                             weights_regularizer=slim.l2_regularizer(wt_decay),
                             weights_initializer=tf.random_normal_initializer(stddev=init_var),
                             biases_initializer=tf.zeros_initializer(),
                             activation_fn=None,
                             scope='{:s}_pred'.format(name))
  return x, repr
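A sketch of calling this helper for a small two-layer head (the sizes, decay value, and the `x`/`is_training` tensors are assumptions for illustration):

# Two hidden layers of 256 and 128 units, 30% dropout after each,
# and a final linear prediction layer with 10 outputs.
logits, hidden_reprs = fc_network(
    x, neurons=[256, 128], wt_decay=1e-4, name='head',
    num_pred=10, dropout_ratio=0.3, is_training=is_training)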
Example 12: create_inner_block
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also uses: import tensorflow as tf (plus the repo's _batch_norm_fn)
def create_inner_block(
    incoming, scope, nonlinearity=tf.nn.elu,
    weights_initializer=tf.truncated_normal_initializer(1e-3),
    bias_initializer=tf.zeros_initializer(), regularizer=None,
    increase_dim=False, summarize_activations=True):
  n = incoming.get_shape().as_list()[-1]
  stride = 1
  if increase_dim:
    n *= 2
    stride = 2

  incoming = slim.conv2d(
      incoming, n, [3, 3], stride, activation_fn=nonlinearity, padding="SAME",
      normalizer_fn=_batch_norm_fn, weights_initializer=weights_initializer,
      biases_initializer=bias_initializer, weights_regularizer=regularizer,
      scope=scope + "/1")
  if summarize_activations:
    tf.summary.histogram(incoming.name + "/activations", incoming)

  incoming = slim.dropout(incoming, keep_prob=0.6)

  incoming = slim.conv2d(
      incoming, n, [3, 3], 1, activation_fn=None, padding="SAME",
      normalizer_fn=None, weights_initializer=weights_initializer,
      biases_initializer=bias_initializer, weights_regularizer=regularizer,
      scope=scope + "/2")
  return incoming
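Note that this slim.dropout call passes only keep_prob=0.6 and no is_training flag, so whether it is active is decided by an enclosing arg_scope; without one it defaults to training behavior. A sketch of disabling it for inference (assuming `net` is an existing 4-D feature map):

# With is_training=False set on slim.dropout via arg_scope, the
# keep_prob=0.6 call inside create_inner_block becomes a no-op.
with slim.arg_scope([slim.dropout], is_training=False):
    net = create_inner_block(net, scope='block1')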
Example 13: _network_factory
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also relies on the repo's _create_network
def _network_factory(weight_decay=1e-8):

    def factory_fn(image, reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=False):
            with slim.arg_scope([slim.conv2d, slim.fully_connected,
                                 slim.batch_norm, slim.layer_norm],
                                reuse=reuse):
                features, logits = _create_network(
                    image, reuse=reuse, weight_decay=weight_decay)
                return features, logits

    return factory_fn
Example 14: dropout
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
def dropout(x, p=0.7):
    x = slim.dropout(x, keep_prob=p)
    return x
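Because this wrapper never forwards a training flag, slim.dropout runs with its default is_training=True, i.e. dropout stays active even at inference time unless an outer arg_scope overrides it. A hedged variant (not from the original repo) that exposes the flag:

def dropout(x, p=0.7, is_training=True):
    # Pass the training flag through so the op can be disabled at eval time.
    return slim.dropout(x, keep_prob=p, is_training=is_training)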
Example 15: inference
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import dropout [as alias]
# Also uses: import tensorflow as tf (plus the repo's fire_module)
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # Epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # Force in-place updates of mean and variance estimates.
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection.
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with tf.variable_scope('squeezenet', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=phase_train):
                net = slim.conv2d(images, 96, [7, 7], stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
                net = fire_module(net, 16, 64, scope='fire2')
                net = fire_module(net, 16, 64, scope='fire3')
                net = fire_module(net, 32, 128, scope='fire4')
                net = slim.max_pool2d(net, [2, 2], stride=2, scope='maxpool4')
                net = fire_module(net, 32, 128, scope='fire5')
                net = fire_module(net, 48, 192, scope='fire6')
                net = fire_module(net, 48, 192, scope='fire7')
                net = fire_module(net, 64, 256, scope='fire8')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
                net = fire_module(net, 64, 256, scope='fire9')
                net = slim.dropout(net, keep_probability)
                net = slim.conv2d(net, 1000, [1, 1], activation_fn=None,
                                  normalizer_fn=None, scope='conv10')
                net = slim.avg_pool2d(net, net.get_shape()[1:3], scope='avgpool10')
                net = tf.squeeze(net, [1, 2], name='logits')
                net = slim.fully_connected(net, bottleneck_layer_size,
                                           activation_fn=None,
                                           scope='Bottleneck', reuse=False)
    return net, None
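Finally, a sketch of wiring this inference function into a graph. The placeholder shape and keep probability are assumptions; note that slim's batch_norm and dropout both accept a boolean tensor for is_training, so phase_train can be fed at run time.

images = tf.placeholder(tf.float32, [None, 160, 160, 3], name='input')
phase_train = tf.placeholder(tf.bool, name='phase_train')

# Build the SqueezeNet tower; dropout and batch norm follow phase_train.
embeddings, _ = inference(images, keep_probability=0.8,
                          phase_train=phase_train,
                          bottleneck_layer_size=128)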