This article collects typical usage examples of the layers.dropout method from the Python package tensorflow.contrib.layers. If you are unsure what layers.dropout does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore the containing module, tensorflow.contrib.layers, for related methods.
The following presents 15 code examples of layers.dropout, ordered by popularity by default.
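Before the examples, here is a minimal sketch of the call itself (assuming TensorFlow 1.x, where tf.contrib is still available; the placeholder names are illustrative):

import tensorflow as tf
from tensorflow.contrib import layers

inputs = tf.placeholder(tf.float32, [None, 128])
training_flag = tf.placeholder(tf.bool, [])  # True while training, False at eval

# At training time layers.dropout keeps each unit with probability keep_prob
# and scales the survivors by 1/keep_prob; with is_training=False it is a no-op.
net = layers.dropout(inputs, keep_prob=0.8, is_training=training_flag)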
Example 1: encoder
# Required import: from tensorflow.contrib import layers
# Or: from tensorflow.contrib.layers import dropout
def encoder(input_tensor, output_size):
    '''Create encoder network.

    Args:
        input_tensor: a batch of flattened images [batch_size, 28*28]
        output_size: dimensionality of the returned encoding
    Returns:
        A tensor that expresses the encoder network
    '''
    net = tf.reshape(input_tensor, [-1, 28, 28, 1])
    net = layers.conv2d(net, 32, 5, stride=2)
    net = layers.conv2d(net, 64, 5, stride=2)
    net = layers.conv2d(net, 128, 5, stride=2, padding='VALID')
    net = layers.dropout(net, keep_prob=0.9)
    net = layers.flatten(net)
    return layers.fully_connected(net, output_size, activation_fn=None)
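A hedged usage sketch for the encoder above (the placeholder shape and output_size=10 are illustrative assumptions, not taken from the source):

images = tf.placeholder(tf.float32, [None, 28 * 28])
encoding = encoder(images, output_size=10)  # -> [batch_size, 10]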
Example 2: _block_output
# Required import: from tensorflow.contrib import layers
# Or: from tensorflow.contrib.layers import dropout
def _block_output(net, endpoints, num_classes, dropout_keep_prob=0.5):
    with tf.variable_scope('Output'):
        net = layers.flatten(net, scope='Flatten')
        # 7 x 7 x 512
        net = layers.fully_connected(net, 4096, scope='Fc1')
        net = endpoints['Output/Fc1'] = layers.dropout(net, dropout_keep_prob, scope='Dropout1')
        # 1 x 1 x 4096
        net = layers.fully_connected(net, 4096, scope='Fc2')
        net = endpoints['Output/Fc2'] = layers.dropout(net, dropout_keep_prob, scope='Dropout2')
        logits = layers.fully_connected(net, num_classes, activation_fn=None, scope='Logits')
        # 1 x 1 x num_classes
        endpoints['Logits'] = logits
    return logits
Example 3: get_arg_scope
# Required import: from tensorflow.contrib import layers
# Or: from tensorflow.contrib.layers import dropout
def get_arg_scope(is_training):
    # slim refers to tf.contrib.slim in the source repository.
    weight_decay_l2 = 0.1
    batch_norm_decay = 0.999
    batch_norm_epsilon = 0.0001
    with slim.arg_scope([slim.conv2d, slim.fully_connected, layers.separable_convolution2d],
                        weights_regularizer=slim.l2_regularizer(weight_decay_l2),
                        biases_regularizer=slim.l2_regularizer(weight_decay_l2),
                        weights_initializer=layers.variance_scaling_initializer()):
        batch_norm_params = {
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon
        }
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            with slim.arg_scope([slim.batch_norm],
                                **batch_norm_params):
                with slim.arg_scope([slim.conv2d, layers.separable_convolution2d, layers.fully_connected],
                                    activation_fn=tf.nn.elu,
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=batch_norm_params) as scope:
                    return scope
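The scope dictionary returned by get_arg_scope is meant to be re-entered when the network graph is built. A minimal usage sketch, assuming slim = tf.contrib.slim and an input tensor named images:

with slim.arg_scope(get_arg_scope(is_training=True)):
    net = slim.conv2d(images, 64, [3, 3], scope='conv1')  # inherits ELU + batch-norm defaults
    net = slim.dropout(net, keep_prob=0.5, scope='dropout1')  # is_training already bound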
Example 4: forward
# Required import: from tensorflow.contrib import layers
# Or: from tensorflow.contrib.layers import dropout
def forward(self, images, num_classes=None, is_training=True):
    assert num_classes is not None, 'num_classes must be given when is_training=True'
    # Forward pass through the backbone
    features, _ = self.backbone(images, is_training=is_training)
    # Logits
    with tf.variable_scope('classifier'):
        features_drop = layers.dropout(features, keep_prob=0.5, is_training=is_training)
        logit = layers.fully_connected(features_drop, num_classes, activation_fn=None,
                                       weights_initializer=tf.random_normal_initializer(stddev=0.001),
                                       weights_regularizer=layers.l2_regularizer(self.weight_decay),
                                       biases_initializer=None,
                                       scope='fc_classifier')
    logits = {}
    logits['logits'] = logit
    return logits
Example 5: forward
# Required import: from tensorflow.contrib import layers
# Or: from tensorflow.contrib.layers import dropout
def forward(self, images, num_classes=None, is_training=True):
    # Forward pass through the backbone
    features, end_points = self.backbone(images, is_training=is_training)
    # Logits
    if is_training:
        assert num_classes is not None, 'num_classes must be given when is_training=True'
        with tf.variable_scope('classifier'):
            features_drop = layers.dropout(features, keep_prob=0.5, is_training=is_training)
            logit = layers.fully_connected(features_drop, num_classes, activation_fn=None,
                                           weights_initializer=tf.random_normal_initializer(stddev=0.001),
                                           weights_regularizer=layers.l2_regularizer(self.weight_decay),
                                           biases_initializer=None,
                                           scope='fc_classifier')
        logits = {}
        logits['logits'] = logit
        logits['features'] = features
        return logits
    else:
        # for _, var in end_points.items():
        #     print(var)
        return features
Example 6: forward
# Required import: from tensorflow.contrib import layers [aliased here as tcl]
# Or: from tensorflow.contrib.layers import dropout
def forward(self, decoder_hidden, dec_in, decoder_category, reuse=False, trainable=True, is_training=True):
    with tf.variable_scope(self.name_scope) as vs:
        if reuse:
            vs.reuse_variables()
        lrelu = VAE.lrelu
        dec_in_enc = self.encoder.forward(dec_in, reuse=reuse, trainable=trainable, is_training=is_training)
        y = tf.concat([decoder_hidden, dec_in_enc], 1)
        h0 = tcl.fully_connected(y, 512, scope="fc3", activation_fn=lrelu,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tcl.dropout(h0, 0.5, is_training=is_training)
        h0 = tcl.fully_connected(h0, 54, scope="fc4", activation_fn=None,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tf.expand_dims(tf.expand_dims(h0, 1), 3)
        return h0
Author: chaneyddtt | Project: Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics | Source: humanEncoder.py
Example 7: forward
# Required import: from tensorflow.contrib import layers [aliased here as tcl]
# Or: from tensorflow.contrib.layers import dropout
def forward(self, dec_in, reuse=False, trainable=True, is_training=True):
    with tf.variable_scope(self.name_scope) as vs:
        if reuse:
            vs.reuse_variables()
        lrelu = VAE.lrelu
        dec_in_enc = self.encoder.forward(dec_in, reuse=reuse, trainable=trainable, is_training=is_training)
        h0 = tcl.fully_connected(dec_in_enc, 512, scope="fc3", activation_fn=lrelu,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tcl.dropout(h0, 0.5, is_training=is_training)
        h0 = tcl.fully_connected(h0, 54, scope="fc4", activation_fn=None,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tf.expand_dims(tf.expand_dims(h0, 1), 3)
        return h0
Author: chaneyddtt | Project: Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics | Source: humanEncoder_ablation.py
Example 8: forward
# Required import: from tensorflow.contrib import layers [aliased here as tcl]
# Or: from tensorflow.contrib.layers import dropout
def forward(self, decoder_hidden, dec_in, decoder_category, reuse=False, trainable=True, is_training=True):
    with tf.variable_scope(self.name_scope) as vs:
        if reuse:
            vs.reuse_variables()
        lrelu = VAE.lrelu
        dec_in_enc = self.encoder.forward(dec_in, reuse=reuse, trainable=trainable, is_training=is_training)
        y = tf.concat([decoder_hidden, dec_in_enc], 1)
        h0 = tcl.fully_connected(y, 512, scope="fc3", activation_fn=lrelu,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tcl.dropout(h0, 0.5, is_training=is_training)
        h0 = tcl.fully_connected(h0, 70, scope="fc4", activation_fn=None,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tf.expand_dims(tf.expand_dims(h0, 1), 3)
        return h0
Author: chaneyddtt | Project: Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics | Source: humanEncoder_cmu.py
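Examples 6 through 8 share the same decoder structure; they differ mainly in whether the decoder hidden state is concatenated with the encoded input and in the width of the final fully connected layer (54 vs. 70 units), which presumably reflects the pose parameterization of the respective datasets.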
Example 9: feature_extractor
# Required import: from tensorflow.contrib import layers [aliased here as ly]
# Or: from tensorflow.contrib.layers import dropout
def feature_extractor(net, output_dim, cfg):
    # Center the [0, 1] input around zero; lrelu is a leaky-ReLU helper defined elsewhere.
    net = net - 0.5
    min_feature_map_size = 4
    assert output_dim % (min_feature_map_size**2) == 0, 'output dim=%d' % output_dim
    size = int(net.get_shape()[2])
    print('Agent CNN:')
    channels = cfg.base_channels
    print('    ', str(net.get_shape()))
    size //= 2  # integer division keeps `size` an int under Python 3
    net = ly.conv2d(
        net, num_outputs=channels, kernel_size=4, stride=2, activation_fn=lrelu)
    print('    ', str(net.get_shape()))
    while size > min_feature_map_size:
        if size == min_feature_map_size * 2:
            channels = output_dim // (min_feature_map_size**2)
        else:
            channels *= 2
        assert size % 2 == 0
        size //= 2
        net = ly.conv2d(
            net, num_outputs=channels, kernel_size=4, stride=2, activation_fn=lrelu)
        print('    ', str(net.get_shape()))
    print('before fc: ', net.get_shape()[1])
    net = tf.reshape(net, [-1, output_dim])
    net = tf.nn.dropout(net, cfg.dropout_keep_prob)
    return net
# Output: float \in [0, 1]
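The lrelu activation referenced above (directly in Example 9, and via VAE.lrelu in Examples 6 through 8) is defined elsewhere in the source repositories. A minimal sketch of the conventional leaky-ReLU helper; the 0.2 slope is an assumption, not taken from these sources:

def lrelu(x, leak=0.2, name='lrelu'):
    # Identity for positive inputs, a small linear slope for negative ones.
    return tf.maximum(x, leak * x, name=name)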
Example 10: _build_vgg16
# Required import: from tensorflow.contrib import layers
# Or: from tensorflow.contrib.layers import dropout
def _build_vgg16(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Build the VGG-16 graph; returns logits and an endpoints dict."""
    endpoints = {}
    with tf.name_scope(scope, 'vgg16', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d],
                    stride=1,
                    padding='SAME'):
                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_b(net, endpoints, d=256, scope='Scale3')
                net = _block_b(net, endpoints, d=512, scope='Scale4')
                net = _block_b(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)
    endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
    return logits, endpoints
Example 11: _build_vgg19
# Required import: from tensorflow.contrib import layers
# Or: from tensorflow.contrib.layers import dropout
def _build_vgg19(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Build the VGG-19 graph; returns logits and an endpoints dict."""
    endpoints = {}
    with tf.name_scope(scope, 'vgg19', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d],
                    stride=1,
                    padding='SAME'):
                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_c(net, endpoints, d=256, scope='Scale3')
                net = _block_c(net, endpoints, d=512, scope='Scale4')
                net = _block_c(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)
    endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
    return logits, endpoints
Example 12: _block_output
# Required import: from tensorflow.contrib import layers
# Or: from tensorflow.contrib.layers import dropout
def _block_output(net, endpoints, num_classes=1000, dropout_keep_prob=0.5, scope='Output'):
    with tf.variable_scope(scope):
        # 8 x 8 x 1536
        shape = net.get_shape()
        net = layers.avg_pool2d(net, shape[1:3], padding='VALID', scope='Pool1_Global')
        endpoints['Output/Pool1'] = net
        # 1 x 1 x 1536
        net = layers.dropout(net, dropout_keep_prob)
        net = layers.flatten(net)
        # 1536
        net = layers.fully_connected(net, num_classes, activation_fn=None, scope='Logits')
        # num_classes
        endpoints['Logits'] = net
    return net
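Compared with the VGG-style head in Example 2, this variant replaces the two fixed-width fully connected layers with a global average pool, so dropout here acts on a single pooled feature vector rather than on a 4096-unit hidden layer, and the head is independent of the input resolution.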
Example 13: my_model
# Required import: from tensorflow.contrib import layers
# Or: from tensorflow.contrib.layers import dropout
def my_model(features, target):
    """DNN with three hidden layers and dropout (keep_prob=0.5) on each layer.

    Note: to run this example with multiple GPUs, CUDA Toolkit 7.0 and
    cuDNN 6.5 V2 from NVIDIA need to be installed beforehand.

    Args:
        features: `Tensor` of input features.
        target: `Tensor` of targets.

    Returns:
        Tuple of predictions, loss and training op.
    """
    # Convert the target to a one-hot tensor of shape (length of features, 3) with
    # an on-value of 1 for each one-hot vector of length 3.
    target = tf.one_hot(target, 3, 1, 0)
    # Create three fully connected layers of size 10, 20, and 10 respectively,
    # applying dropout (keep_prob=0.5) after each layer.
    normalizer_fn = layers.dropout
    normalizer_params = {'keep_prob': 0.5}
    with tf.device('/gpu:1'):
        features = layers.stack(features, layers.fully_connected, [10, 20, 10],
                                normalizer_fn=normalizer_fn,
                                normalizer_params=normalizer_params)
    with tf.device('/gpu:2'):
        # Compute logits (1 per class) and compute loss.
        logits = layers.fully_connected(features, 3, activation_fn=None)
        loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
        # Create a tensor for the training op.
        train_op = tf.contrib.layers.optimize_loss(
            loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
            learning_rate=0.1)
    return ({
        'class': tf.argmax(logits, 1),
        'prob': tf.nn.softmax(logits)}, loss, train_op)
Example 14: my_model
# Required import: from tensorflow.contrib import layers
# Or: from tensorflow.contrib.layers import dropout
def my_model(features, target):
    """DNN with three hidden layers, and dropout of 0.1 probability."""
    # Convert the target to a one-hot tensor of shape (length of features, 3) with
    # an on-value of 1 for each one-hot vector of length 3.
    target = tf.one_hot(target, 3, 1, 0)
    # Create three fully connected layers of size 10, 20, and 10 respectively,
    # each followed by dropout with probability 0.1 (keep_prob=0.9).
    normalizer_fn = layers.dropout
    normalizer_params = {'keep_prob': 0.9}
    features = layers.stack(features, layers.fully_connected, [10, 20, 10],
                            normalizer_fn=normalizer_fn,
                            normalizer_params=normalizer_params)
    # Compute logits (1 per class) and compute loss.
    logits = layers.fully_connected(features, 3, activation_fn=None)
    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
    # Create a tensor for the training op.
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
        learning_rate=0.1)
    return ({
        'class': tf.argmax(logits, 1),
        'prob': tf.nn.softmax(logits)}, loss, train_op)
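A hedged sketch of how a model function with this (features, target) signature was typically wired into the old tf.contrib.learn API (the data names are illustrative assumptions):

from tensorflow.contrib import learn

classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)  # x_train, y_train: numpy arrays
predictions = classifier.predict(x_test)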
Example 15: conv_model
# Required import: from tensorflow.contrib import layers
# Or: from tensorflow.contrib.layers import dropout
def conv_model(feature, target, mode):
    """2-layer convolution model."""
    # Convert the target to a one-hot tensor of shape (batch_size, 10) with
    # an on-value of 1 for each one-hot vector of length 10.
    target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
    # Reshape feature to a 4-D tensor: the 2nd and 3rd dimensions are image
    # width and height, and the final dimension is the number of color channels.
    feature = tf.reshape(feature, [-1, 28, 28, 1])
    # First conv layer computes 32 features for each 5x5 patch.
    with tf.variable_scope('conv_layer1'):
        h_conv1 = layers.convolution(feature, 32, kernel_size=[5, 5],
                                     activation_fn=tf.nn.relu)
        h_pool1 = max_pool_2x2(h_conv1)
    # Second conv layer computes 64 features for each 5x5 patch.
    with tf.variable_scope('conv_layer2'):
        h_conv2 = layers.convolution(h_pool1, 64, kernel_size=[5, 5],
                                     activation_fn=tf.nn.relu)
        h_pool2 = max_pool_2x2(h_conv2)
    # Reshape the tensor into a batch of vectors.
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    # Densely connected layer with 1024 neurons; dropout is active only in TRAIN mode.
    h_fc1 = layers.dropout(
        layers.fully_connected(
            h_pool2_flat, 1024, activation_fn=tf.nn.relu), keep_prob=0.5,
        is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
    # Compute logits (1 per class) and compute loss.
    logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
    # Create a tensor for the training op.
    train_op = layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(), optimizer='SGD',
        learning_rate=0.001)
    return tf.argmax(logits, 1), loss, train_op
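The max_pool_2x2 helper used above is defined elsewhere in the original script; a minimal sketch of the conventional definition (an assumption matching the stock MNIST examples):

def max_pool_2x2(tensor_in):
    # 2x2 max pooling with stride 2, halving both spatial dimensions.
    return tf.nn.max_pool(tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME')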