This article collects typical usage examples of the layers.dropout method from the Python module tensorflow.contrib.layers.python.layers.layers. If you have been wondering what layers.dropout does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the containing module, tensorflow.contrib.layers.python.layers.
The following shows 5 code examples of layers.dropout, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
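Before the examples, a minimal sketch of the method itself may help (it assumes TensorFlow 1.x, where tf.contrib is still available; the tensor name and shapes are illustrative). During training, layers.dropout keeps each activation with probability keep_prob and scales the kept values by 1/keep_prob; with is_training=False it is a pass-through:

import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import layers as layers_lib

x = tf.placeholder(tf.float32, [None, 128], name='x')
# keep_prob=0.5: each activation survives with probability 0.5 and is
# scaled by 2.0, so the expected activation magnitude is unchanged.
y = layers_lib.dropout(x, keep_prob=0.5, is_training=True, scope='dropout')

with tf.Session() as sess:
  out = sess.run(y, feed_dict={x: np.ones((2, 128), np.float32)})
  print(out.shape)  # (2, 128)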
Example 1: inception
# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import dropout [as alias]
def inception():
  image = tf.placeholder(tf.float32, [None, 224, 224, 3], 'image')
  with slim.arg_scope(inception_arg_scope(is_training=False)):
    with variable_scope.variable_scope(
        'InceptionV1', 'InceptionV1', [image, 1000], reuse=None) as scope:
      with arg_scope(
          [layers_lib.batch_norm, layers_lib.dropout], is_training=False):
        net, end_points = inception_v1_base(image, scope=scope)
        with variable_scope.variable_scope('Logits'):
          # The scope name says 'MaxPool' but the op is an average pool; the
          # name matches the layout of the pretrained InceptionV1 checkpoints.
          net_conv = layers_lib.avg_pool2d(
              net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
          print(net_conv.shape)
  return net_conv, image
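A hypothetical driver for this snippet could look as follows (a sketch only: inception is the function above, and the checkpoint path is an illustrative assumption, not part of the original excerpt):

import numpy as np
import tensorflow as tf

features, image = inception()  # builds the inference graph (is_training=False)
saver = tf.train.Saver()

with tf.Session() as sess:
  # Illustrative path to pretrained InceptionV1 weights; substitute your own.
  saver.restore(sess, 'inception_v1.ckpt')
  feats = sess.run(features,
                   feed_dict={image: np.zeros((1, 224, 224, 3), np.float32)})
  print(feats.shape)  # (1, 1, 1, 1024): Mixed_5c features after the 7x7 average pool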
Example 2: vgg_a
# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import dropout [as alias]
def vgg_a(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          scope='vgg_a'):
  """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'vgg_a', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers_lib.repeat(
          inputs, 1, layers.conv2d, 64, [3, 3], scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers_lib.repeat(net, 1, layers.conv2d, 128, [3, 3], scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers_lib.repeat(net, 2, layers.conv2d, 256, [3, 3], scope='conv3')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
      net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv4')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
      net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout6')
      net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout7')
      net = layers.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
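A short usage sketch for vgg_a (assuming TF 1.x and the module-level imports this excerpt relies on: layers, layers_lib, variable_scope, arg_scope, utils, array_ops):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
# With is_training=False, dropout6 and dropout7 become identity ops.
logits, end_points = vgg_a(images, num_classes=1000, is_training=False)
print(logits.shape)  # (?, 1000) after the spatial squeeze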
Example 3: vgg_19
# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import dropout [as alias]
def vgg_19(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_19'):
  """Oxford Net VGG 19-Layers version E Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'vgg_19', [inputs]) as sc:
    end_points_collection = sc.name + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers_lib.repeat(
          inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers_lib.repeat(net, 4, layers.conv2d, 256, [3, 3], scope='conv3')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
      net = layers_lib.repeat(net, 4, layers.conv2d, 512, [3, 3], scope='conv4')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
      net = layers_lib.repeat(net, 4, layers.conv2d, 512, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout6')
      net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout7')
      net = layers.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
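Compared with vgg_a in Example 2, only the repeat counts change (2, 2, 4, 4, 4 here versus 1, 1, 2, 2, 2) and layers_lib.fully_connected joins the arg_scope; the two layers_lib.dropout calls after fc6 and fc7 are placed identically, so the dropout pattern is the same across all the VGG variants on this page.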
Example 4: inception_v1
# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import dropout [as alias]
def inception_v1(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=layers_lib.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV1'):
  """Defines the Inception V1 architecture.

  This architecture is defined in:
    Going deeper with convolutions
    Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
    Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
    http://arxiv.org/pdf/1409.4842v1.pdf.

  The default image size used to train this network is 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the percentage of activation values that are retained.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the
      corresponding activation.
  """
  # Final pooling and prediction
  with variable_scope.variable_scope(
      scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
    with arg_scope(
        [layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
      net, end_points = inception_v1_base(inputs, scope=scope)
      with variable_scope.variable_scope('Logits'):
        net = layers_lib.avg_pool2d(
            net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
        # is_training is inherited from the enclosing arg_scope.
        net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')
        logits = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            scope='Conv2d_0c_1x1')
        if spatial_squeeze:
          logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
        end_points['Logits'] = logits
        end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
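Because the arg_scope threads is_training to both batch_norm and dropout, a single flag switches the whole network between training and inference. An evaluation-mode sketch (assuming TF 1.x and that inception_v1_base is importable as in this file):

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = inception_v1(inputs, num_classes=1000, is_training=False)
probs = end_points['Predictions']  # softmax probabilities over the 1000 classes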
Example 5: vgg_16
# Required import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.layers import dropout [as alias]
def vgg_16(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_16'):
  """Oxford Net VGG 16-Layers version D Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers_lib.repeat(
          inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
      net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
      net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout6')
      net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout7')
      net = layers.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
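Finally, a small sanity-check sketch (assumed setup, TF 1.x) of the behavior all of these models rely on: layers_lib.dropout applies a scaled Bernoulli mask when is_training=True and is a pass-through otherwise, which is why one graph-building function can serve both training and evaluation:

import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import layers as layers_lib

x = tf.constant(np.ones((1, 4), np.float32))
train_out = layers_lib.dropout(x, keep_prob=0.5, is_training=True)
eval_out = layers_lib.dropout(x, keep_prob=0.5, is_training=False)

with tf.Session() as sess:
  print(sess.run(train_out))  # a mix of 0.0 and 2.0 (kept values scaled by 1/0.5)
  print(sess.run(eval_out))   # all ones: identity at evaluation time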