This page collects typical usage examples of the Python method tensorflow.contrib.layers.python.layers.layers.softmax. If you are unsure what layers.softmax does, how to call it, or what its usage looks like in practice, the curated examples below may help. You can also explore the other methods of the containing module, tensorflow.contrib.layers.python.layers.layers.
The section below shows 5 code examples of layers.softmax, ordered by popularity by default.
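Before the examples, a minimal sketch of the method itself (an assumption-laden sketch, not canonical usage: it presumes TensorFlow 1.x, where tf.contrib is still available). layers.softmax normalizes the last dimension of an N-D tensor while preserving its shape:

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import layers

x = tf.placeholder(tf.float32, shape=[2, 3], name="x")
# Softmax over the last dimension; output shape stays [2, 3].
probs = layers.softmax(x)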
Example 1: network_softmax1
# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers.python.layers.layers import softmax [as alias]
def network_softmax1():
    # Softmax over the default last axis (the 3 channels of the NHWC input).
    x = tf.placeholder(tf.float32, shape=[6, 64, 64, 3], name="x")
    return tf.nn.softmax(x)
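For reference, a sketch of evaluating this graph (assuming TF 1.x session semantics; the random input is purely illustrative):

import numpy as np
import tensorflow as tf

probs = network_softmax1()
with tf.Session() as sess:
    out = sess.run(probs, feed_dict={"x:0": np.random.rand(6, 64, 64, 3)})
# tf.nn.softmax defaults to the last axis, so each channel vector sums to 1.
print(out.sum(axis=-1)[0, 0, 0])  # ~1.0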
Example 2: network_softmax2_old
# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers.python.layers.layers import softmax [as alias]
def network_softmax2_old():
    # Same graph, but selects the axis via the deprecated `dim` keyword.
    x = tf.placeholder(tf.float32, shape=[6, 64, 64, 3], name="x")
    return tf.nn.softmax(x, dim=1)
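The `dim` argument to tf.nn.softmax was deprecated in later TensorFlow 1.x releases in favor of `axis` (used in the next example); both select the dimension the softmax normalizes over. A small sketch checking that the two spellings agree (assuming a TF 1.x version that still accepts both):

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(2, 3, 4), dtype=tf.float32)
old = tf.nn.softmax(x, dim=1)   # deprecated spelling, warns on newer 1.x releases
new = tf.nn.softmax(x, axis=1)  # current spelling
with tf.Session() as sess:
    a, b = sess.run([old, new])
print(np.allclose(a, b))  # True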
Example 3: network_softmax2
# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers.python.layers.layers import softmax [as alias]
def network_softmax2():
    # Softmax along axis 1 (height) using the current `axis` keyword.
    x = tf.placeholder(tf.float32, shape=[6, 64, 64, 3], name="x")
    return tf.nn.softmax(x, axis=1)
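Note that with axis=1 on a [6, 64, 64, 3] NHWC input, normalization runs over the height dimension: each slice output[b, :, w, c] sums to 1. The default (axis=-1) would instead normalize over the 3 channels, as in Example 1.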
Example 4: network_softmax3
# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers.python.layers.layers import softmax [as alias]
def network_softmax3():
    # Uses the contrib layers implementation instead of tf.nn.softmax.
    x = tf.placeholder(tf.float32, shape=[6, 64, 64, 3], name="x")
    return tf_layers.softmax(x)
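Here `tf_layers` is presumably the contrib layers module imported under an alias. For this input the contrib op matches tf.nn.softmax's default behavior of normalizing the last dimension; a small equivalence check (a sketch, assuming TF 1.x):

import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import layers as tf_layers

x = tf.constant(np.random.rand(6, 64, 64, 3), dtype=tf.float32)
with tf.Session() as sess:
    a, b = sess.run([tf_layers.softmax(x), tf.nn.softmax(x)])
print(np.allclose(a, b))  # True: both normalize the last dimension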
Example 5: inception_v1
# Required module import: from tensorflow.contrib.layers.python.layers import layers [as alias]
# Or alternatively: from tensorflow.contrib.layers.python.layers.layers import softmax [as alias]
def inception_v1(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=layers_lib.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV1'):
  """Defines the Inception V1 architecture.

  This architecture is defined in:
    Going deeper with convolutions
    Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
    Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
    http://arxiv.org/pdf/1409.4842v1.pdf.

  The default image size used to train this network is 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether the model is being trained.
    dropout_keep_prob: the percentage of activation values that are retained.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes].
    end_points: a dictionary from components of the network to the
      corresponding activation.
  """
  # Final pooling and prediction
  with variable_scope.variable_scope(
      scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
    with arg_scope(
        [layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
      net, end_points = inception_v1_base(inputs, scope=scope)
      with variable_scope.variable_scope('Logits'):
        net = layers_lib.avg_pool2d(
            net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
        net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')
        logits = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            scope='Conv2d_0c_1x1')
        if spatial_squeeze:
          logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
        end_points['Logits'] = logits
        # prediction_fn defaults to the contrib layers softmax shown in the
        # earlier examples.
        end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
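For completeness, a hedged usage sketch of the function above. It assumes the surrounding imports from tensorflow/contrib/slim (variable_scope, arg_scope, layers, layers_lib, array_ops) and inception_v1_base are in scope, as in the original source file:

import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[8, 224, 224, 3], name="images")
logits, end_points = inception_v1(images, num_classes=1000, is_training=False)
print(logits.shape)                     # (8, 1000) after the spatial squeeze
print(end_points['Predictions'].shape)  # (8, 1000) softmax probabilities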