This article collects typical usage examples of tensorflow.keras.layers.Softmax in Python. If you have been wondering what layers.Softmax is for, or how to use it in practice, the curated code samples below should help. You can also explore the other members of the tensorflow.keras.layers module for related functionality.
The following 11 code examples show layers.Softmax in use; by default they are ordered by popularity.
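Before looking at the project code, here is a minimal, self-contained sketch (not taken from any of the examples below) of what layers.Softmax does: it turns raw scores (logits) into probabilities that sum to 1 along the axis you choose.

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers

logits = tf.constant([[2.0, 1.0, 0.1]])               # shape (1, 3)
probs = layers.Softmax(axis=-1)(logits)               # normalize over the last axis
print(probs.numpy())                                  # roughly [[0.66 0.24 0.10]]
print(np.isclose(probs.numpy().sum(axis=-1), 1.0))    # each row sums to 1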
Example 1: build_pnet
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Softmax [as alias]
def build_pnet(self, input_shape=None):
    if input_shape is None:
        input_shape = (None, None, 3)
    p_inp = Input(input_shape)
    p_layer = Conv2D(10, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_inp)
    p_layer = PReLU(shared_axes=[1, 2])(p_layer)
    p_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(p_layer)
    p_layer = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_layer)
    p_layer = PReLU(shared_axes=[1, 2])(p_layer)
    p_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_layer)
    p_layer = PReLU(shared_axes=[1, 2])(p_layer)
    p_layer_out1 = Conv2D(2, kernel_size=(1, 1), strides=(1, 1))(p_layer)
    p_layer_out1 = Softmax(axis=3)(p_layer_out1)
    p_layer_out2 = Conv2D(4, kernel_size=(1, 1), strides=(1, 1))(p_layer)
    p_net = Model(p_inp, [p_layer_out2, p_layer_out1])
    return p_net
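In this P-Net head, Softmax(axis=3) normalizes the two classification channels independently at every spatial location, which is what a fully convolutional face detector needs. A small sketch on dummy data (not part of the project above) illustrating that behaviour:

import tensorflow as tf
from tensorflow.keras.layers import Softmax

scores = tf.random.normal((1, 5, 5, 2))    # dummy (batch, height, width, 2) face / non-face scores
probs = Softmax(axis=3)(scores)
print(tf.reduce_sum(probs, axis=3))        # all ones: each position holds a 2-class distribution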
Example 2: build
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Softmax [as alias]
def build(self, hp, inputs=None):
    inputs = nest.flatten(inputs)
    utils.validate_num_inputs(inputs, 1)
    input_node = inputs[0]
    output_node = input_node
    # Reduce the tensor to a vector.
    if len(output_node.shape) > 2:
        output_node = reduction.SpatialReduction().build(hp, output_node)
    if self.dropout_rate is not None:
        dropout_rate = self.dropout_rate
    else:
        dropout_rate = hp.Choice('dropout_rate', [0.0, 0.25, 0.5], default=0)
    if dropout_rate > 0:
        output_node = layers.Dropout(dropout_rate)(output_node)
    output_node = layers.Dense(self.output_shape[-1])(output_node)
    if isinstance(self.loss, tf.keras.losses.BinaryCrossentropy):
        output_node = layers.Activation(activations.sigmoid,
                                        name=self.name)(output_node)
    else:
        output_node = layers.Softmax(name=self.name)(output_node)
    return output_node
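The head above derives its final activation from the configured loss: a sigmoid for binary cross-entropy, a Softmax otherwise. A stripped-down sketch of the same decision in plain Keras (the classification_head helper, num_classes value, and loss choice are illustrative assumptions, not the AutoKeras API):

import tensorflow as tf
from tensorflow.keras import activations, layers

def classification_head(x, num_classes, loss, name="classification_head"):
    x = layers.Dense(num_classes)(x)
    if isinstance(loss, tf.keras.losses.BinaryCrossentropy):
        return layers.Activation(activations.sigmoid, name=name)(x)
    return layers.Softmax(name=name)(x)

inputs = tf.keras.Input(shape=(16,))
outputs = classification_head(inputs, num_classes=3,
                              loss=tf.keras.losses.CategoricalCrossentropy())
model = tf.keras.Model(inputs, outputs)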
Example 3: test_compute_model_performance_multitask_classifier
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Softmax [as alias]
def test_compute_model_performance_multitask_classifier(self):
    n_data_points = 20
    n_features = 1
    n_tasks = 2
    n_classes = 2
    X = np.ones(shape=(n_data_points // 2, n_features)) * -1
    X1 = np.ones(shape=(n_data_points // 2, n_features))
    X = np.concatenate((X, X1))
    class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
    class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
    y1 = np.concatenate((class_0, class_1))
    y2 = np.concatenate((class_1, class_0))
    y = np.stack([y1, y2], axis=1)
    dataset = NumpyDataset(X, y)
    features = layers.Input(shape=(n_features,))
    dense = layers.Dense(n_tasks * n_classes)(features)
    logits = layers.Reshape((n_tasks, n_classes))(dense)
    output = layers.Softmax()(logits)
    keras_model = tf.keras.Model(inputs=features, outputs=[output, logits])
    model = dc.models.KerasModel(
        keras_model,
        dc.models.losses.SoftmaxCrossEntropy(),
        output_types=['prediction', 'loss'],
        learning_rate=0.01,
        batch_size=n_data_points)
    model.fit(dataset, nb_epoch=1000)
    metric = dc.metrics.Metric(
        dc.metrics.roc_auc_score, np.mean, mode="classification")
    scores = model.evaluate_generator(
        model.default_generator(dataset), [metric], per_task_metrics=True)
    scores = list(scores[1].values())
    # Loosening atol to see if tests stop failing sporadically
    assert np.all(np.isclose(scores, [1.0, 1.0], atol=0.50))
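The multitask pattern in this test reshapes a flat Dense output into (n_tasks, n_classes) before applying Softmax, so every task gets its own probability distribution over the class axis. A dummy-data sketch of just that step:

import tensorflow as tf
from tensorflow.keras import layers

n_tasks, n_classes = 2, 2
x = tf.random.normal((4, n_tasks * n_classes))     # e.g. the output of a Dense layer
logits = layers.Reshape((n_tasks, n_classes))(x)   # (batch, n_tasks, n_classes)
probs = layers.Softmax()(logits)                   # softmax over the last (class) axis
print(tf.reduce_sum(probs, axis=-1))               # ones of shape (batch, n_tasks)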
Example 4: test_compute_model_performance_singletask_classifier
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Softmax [as alias]
def test_compute_model_performance_singletask_classifier(self):
    n_data_points = 20
    n_features = 10
    X = np.ones(shape=(int(n_data_points / 2), n_features)) * -1
    X1 = np.ones(shape=(int(n_data_points / 2), n_features))
    X = np.concatenate((X, X1))
    class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
    class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
    y = np.concatenate((class_0, class_1))
    dataset = NumpyDataset(X, y)
    features = layers.Input(shape=(n_features,))
    dense = layers.Dense(2)(features)
    output = layers.Softmax()(dense)
    keras_model = tf.keras.Model(inputs=features, outputs=[output])
    model = dc.models.KerasModel(
        keras_model, dc.models.losses.SoftmaxCrossEntropy(), learning_rate=0.1)
    model.fit(dataset, nb_epoch=1000)
    metric = dc.metrics.Metric(
        dc.metrics.roc_auc_score, np.mean, mode="classification")
    scores = model.evaluate_generator(
        model.default_generator(dataset), [metric], per_task_metrics=True)
    scores = list(scores[1].values())
    assert np.isclose(scores, [1.0], atol=0.05)
Example 5: create_output
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Softmax [as alias]
def create_output(self, layer):
    return Softmax()(layer)
Example 6: _build_graph
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Softmax [as alias]
def _build_graph(self):
    smile_images = Input(shape=self.input_shape)
    stem = chemnet_layers.Stem(self.base_filters)(smile_images)
    inceptionA_out = self.build_inception_module(inputs=stem, type="A")
    reductionA_out = chemnet_layers.ReductionA(self.base_filters)(inceptionA_out)
    inceptionB_out = self.build_inception_module(inputs=reductionA_out, type="B")
    reductionB_out = chemnet_layers.ReductionB(self.base_filters)(inceptionB_out)
    inceptionC_out = self.build_inception_module(inputs=reductionB_out, type="C")
    avg_pooling_out = GlobalAveragePooling2D()(inceptionC_out)
    if self.mode == "classification":
        logits = Dense(self.n_tasks * self.n_classes)(avg_pooling_out)
        logits = Reshape((self.n_tasks, self.n_classes))(logits)
        if self.n_classes == 2:
            output = Activation(activation='sigmoid')(logits)
            loss = SigmoidCrossEntropy()
        else:
            output = Softmax()(logits)
            loss = SoftmaxCrossEntropy()
        outputs = [output, logits]
        output_types = ['prediction', 'loss']
    else:
        output = Dense(self.n_tasks * 1)(avg_pooling_out)
        output = Reshape((self.n_tasks, 1))(output)
        outputs = [output]
        output_types = ['prediction']
        loss = L2Loss()
    model = tf.keras.Model(inputs=[smile_images], outputs=outputs)
    return model, loss, output_types
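Note that the classification branch returns both the Softmax output (used as the prediction) and the raw logits (used by the loss), which DeepChem wires up through output_types. A rough sketch of the same idea in plain Keras, with made-up n_tasks/n_classes values: train against the logits for numerical stability and report the probabilities.

import tensorflow as tf
from tensorflow.keras import layers

n_tasks, n_classes = 3, 4
features = tf.keras.Input(shape=(128,))
dense = layers.Dense(n_tasks * n_classes)(features)
logits = layers.Reshape((n_tasks, n_classes))(dense)
probs = layers.Softmax()(logits)

model = tf.keras.Model(features, [probs, logits])
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)  # applied to the logits output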
Example 7: build_rnet
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Softmax [as alias]
def build_rnet(self, input_shape=None):
    if input_shape is None:
        input_shape = (24, 24, 3)
    r_inp = Input(input_shape)
    r_layer = Conv2D(28, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_inp)
    r_layer = PReLU(shared_axes=[1, 2])(r_layer)
    r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(r_layer)
    r_layer = Conv2D(48, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_layer)
    r_layer = PReLU(shared_axes=[1, 2])(r_layer)
    r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(r_layer)
    r_layer = Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding="valid")(r_layer)
    r_layer = PReLU(shared_axes=[1, 2])(r_layer)
    r_layer = Flatten()(r_layer)
    r_layer = Dense(128)(r_layer)
    r_layer = PReLU()(r_layer)
    r_layer_out1 = Dense(2)(r_layer)
    r_layer_out1 = Softmax(axis=1)(r_layer_out1)
    r_layer_out2 = Dense(4)(r_layer)
    r_net = Model(r_inp, [r_layer_out2, r_layer_out1])
    return r_net
Example 8: build_onet
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Softmax [as alias]
def build_onet(self, input_shape=None):
    if input_shape is None:
        input_shape = (48, 48, 3)
    o_inp = Input(input_shape)
    o_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_inp)
    o_layer = PReLU(shared_axes=[1, 2])(o_layer)
    o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(o_layer)
    o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer)
    o_layer = PReLU(shared_axes=[1, 2])(o_layer)
    o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(o_layer)
    o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer)
    o_layer = PReLU(shared_axes=[1, 2])(o_layer)
    o_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(o_layer)
    o_layer = Conv2D(128, kernel_size=(2, 2), strides=(1, 1), padding="valid")(o_layer)
    o_layer = PReLU(shared_axes=[1, 2])(o_layer)
    o_layer = Flatten()(o_layer)
    o_layer = Dense(256)(o_layer)
    o_layer = PReLU()(o_layer)
    o_layer_out1 = Dense(2)(o_layer)
    o_layer_out1 = Softmax(axis=1)(o_layer_out1)
    o_layer_out2 = Dense(4)(o_layer)
    o_layer_out3 = Dense(10)(o_layer)
    o_net = Model(o_inp, [o_layer_out2, o_layer_out3, o_layer_out1])
    return o_net
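Both R-Net and O-Net apply Softmax(axis=1) to a Dense(2) output; for a 2-D (batch, classes) tensor, axis 1 is the last axis, so this is equivalent to the default axis=-1. A quick check on dummy scores:

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Softmax

scores = tf.random.normal((3, 2))                  # (batch, 2) face / non-face scores
p1 = Softmax(axis=1)(scores)
p2 = Softmax(axis=-1)(scores)
print(np.allclose(p1.numpy(), p2.numpy()))         # True: same axis for a 2-D tensor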
Example 9: darknet19
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Softmax [as alias]
def darknet19(inputs):
    """Generate Darknet-19 model for ImageNet classification."""
    body = darknet19_body()(inputs)
    x = DarknetConv2D(1000, (1, 1))(body)
    x = GlobalAveragePooling2D()(x)
    logits = Softmax()(x)
    return Model(inputs, logits)
Example 10: __init__
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Softmax [as alias]
def __init__(self,
             in_channels,
             out_channels,
             strides,
             groups=32,
             num_branches=2,
             reduction=16,
             min_channels=32,
             data_format="channels_last",
             **kwargs):
    super(SKConvBlock, self).__init__(**kwargs)
    self.num_branches = num_branches
    self.out_channels = out_channels
    self.data_format = data_format
    self.axis = get_channel_axis(data_format)
    mid_channels = max(in_channels // reduction, min_channels)

    self.branches = Concurrent(
        stack=True,
        data_format=data_format,
        name="branches")
    for i in range(num_branches):
        dilation = 1 + i
        self.branches.children.append(conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            padding=dilation,
            dilation=dilation,
            groups=groups,
            data_format=data_format,
            name="branch{}".format(i + 2)))
    self.pool = nn.GlobalAveragePooling2D(
        data_format=data_format,
        name="pool")
    self.fc1 = conv1x1_block(
        in_channels=out_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name="fc1")
    self.fc2 = conv1x1(
        in_channels=mid_channels,
        out_channels=(out_channels * num_branches),
        data_format=data_format,
        name="fc2")
    self.softmax = nn.Softmax(axis=self.axis)
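The call method of SKConvBlock is not shown here, but the nn.Softmax built above is what turns the fc2 output into per-branch attention weights during selective-kernel fusion. Below is a simplified, stand-alone sketch of that idea with made-up shapes and plain Keras layers; the project's own implementation additionally handles data_format, grouping, and its conv block wrappers.

import tensorflow as tf
from tensorflow.keras import layers

batch, num_branches, h, w, channels = 2, 2, 8, 8, 32
branch_feats = tf.random.normal((batch, num_branches, h, w, channels))   # stacked branch outputs

u = tf.reduce_sum(branch_feats, axis=1)                  # fuse the branches
s = layers.GlobalAveragePooling2D()(u)                   # (batch, channels)
z = layers.Dense(16, activation="relu")(s)               # squeeze (fc1)
attn = layers.Dense(num_branches * channels)(z)          # excite (fc2): one score per branch and channel
attn = tf.reshape(attn, (batch, num_branches, 1, 1, channels))
attn = layers.Softmax(axis=1)(attn)                      # weights across branches sum to 1
out = tf.reduce_sum(branch_feats * attn, axis=1)         # attention-weighted branch fusion
print(out.shape)                                         # (2, 8, 8, 32)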
Example 11: build_fcn
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Softmax [as alias]
def build_fcn(input_shape,
              backbone,
              n_classes=4):
    """Helper function to build an FCN model.

    Arguments:
        input_shape (tuple): Shape of the input image.
        backbone (Model): A backbone network
            such as ResNetv2 or v1.
        n_classes (int): Number of object classes
            including background.
    """
    inputs = Input(shape=input_shape)
    features = backbone(inputs)
    main_feature = features[0]
    features = features[1:]
    out_features = [main_feature]
    feature_size = 8
    size = 2
    # Other half of the feature pyramid, including upsampling
    # to restore the feature maps to 1/4 of the image size.
    for feature in features:
        postfix = "fcn_" + str(feature_size)
        feature = conv_layer(feature,
                             filters=256,
                             use_maxpool=False,
                             postfix=postfix)
        postfix = postfix + "_up2d"
        feature = UpSampling2D(size=size,
                               interpolation='bilinear',
                               name=postfix)(feature)
        size = size * 2
        feature_size = feature_size * 2
        out_features.append(feature)
    # Concatenate all upsampled features.
    x = Concatenate()(out_features)
    # Perform 2 additional rounds of feature extraction and upsampling.
    x = tconv_layer(x, 256, postfix="up_x2")
    x = tconv_layer(x, 256, postfix="up_x4")
    # Generate the pixel-wise classifier.
    x = Conv2DTranspose(filters=n_classes,
                        kernel_size=1,
                        strides=1,
                        padding='same',
                        kernel_initializer='he_normal',
                        name="pre_activation")(x)
    x = Softmax(name="segmentation")(x)
    model = Model(inputs, x, name="fcn")
    return model
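The closing Softmax(name="segmentation") uses the default last axis, i.e. the n_classes channel produced by the final Conv2DTranspose, so every pixel ends up with its own class distribution. A dummy-shape sketch of that last step:

import tensorflow as tf
from tensorflow.keras.layers import Softmax

n_classes = 4
pixel_logits = tf.random.normal((1, 64, 64, n_classes))   # (batch, height, width, classes)
seg = Softmax(name="segmentation")(pixel_logits)
print(tf.reduce_sum(seg, axis=-1).shape)                   # (1, 64, 64): each pixel sums to 1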