Python layers.Dense Class Code Examples

This article collects typical usage examples of the Python class keras.layers.Dense. If you are wrestling with questions like what the Dense class does, how to use it, or what real-world usage looks like, the curated class examples below should help.


The sections below present 15 code examples of the Dense class, sorted by popularity by default.
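Before the collected examples, here is a minimal, self-contained sketch of typical Dense usage (the 100-dimensional input, the layer widths, and the 10 output classes are illustrative assumptions, not taken from any example below):

from keras.models import Sequential
from keras.layers import Dense

# a small classifier: one hidden layer, softmax output
model = Sequential()
model.add(Dense(64, activation='relu', input_dim=100))  # 100 input features
model.add(Dense(10, activation='softmax'))              # 10 output classes
model.compile(loss='categorical_crossentropy',
              optimizer='adam', metrics=['acc'])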

Example 1: __build_network

    def __build_network(self):
        embedding_layer = Embedding(
            self.corpus_size,
            EMBEDDING_DIM,
            weights=[self.embedding_matrix],
            input_length=MAX_TITLE_LENGTH,
            trainable=False)
        # classify the embedded title sequence with an LSTM
        sequence_input = Input(shape=(MAX_TITLE_LENGTH, ), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)
        x = LSTM(
            128,
            dropout_W=0.2,
            dropout_U=0.2,
            W_regularizer=regularizers.l2(0.01),
            b_regularizer=regularizers.l2(0.01))(embedded_sequences)
        x = Dropout(0.5)(x)
        preds = Dense(self.class_num, activation='softmax')(x)
        print(preds.get_shape())
        if self.optimizer == 'adam':
            self.optimizer = Adam(lr=self.lr)
        elif self.optimizer == 'rmsprop':
            self.optimizer = RMSprop(lr=self.lr)

        # rmsprop = RMSprop(lr=self.lr)
        self.model = Model(sequence_input, preds)
        self.model.compile(
            loss='categorical_crossentropy',
            optimizer=self.optimizer,
            metrics=['acc'])
Developer: SiyuanWei, Project: tensorflow-101, Lines: 30, Source: lstm_text_classifier.py

Example 2: test_layer_trainability_switch

def test_layer_trainability_switch():
    # with constructor argument, in Sequential
    model = Sequential()
    model.add(Dense(2, trainable=False, input_dim=1))
    assert model.trainable_weights == []

    # by setting the `trainable` argument, in Sequential
    model = Sequential()
    layer = Dense(2, input_dim=1)
    model.add(layer)
    assert model.trainable_weights == layer.trainable_weights
    layer.trainable = False
    assert model.trainable_weights == []

    # with constructor argument, in Model
    x = Input(shape=(1,))
    y = Dense(2, trainable=False)(x)
    model = Model(x, y)
    assert model.trainable_weights == []

    # by setting the `trainable` argument, in Model
    x = Input(shape=(1,))
    layer = Dense(2)
    y = layer(x)
    model = Model(x, y)
    assert model.trainable_weights == layer.trainable_weights
    layer.trainable = False
    assert model.trainable_weights == []
Developer: BlakePrice, Project: keras, Lines: 28, Source: test_dynamic_trainability.py

Example 3: __init__

    def __init__(self, memory_cells, query, project_query=False):
        """Define Attention.

        Args:
            memory_cells (SequenceBatch): a SequenceBatch containing a Tensor of shape (batch_size, num_cells, cell_dim)
            query (Tensor): a tensor of shape (batch_size, query_dim).
            project_query (bool): defaults to False. If True, the query goes through an extra projection layer to
                coerce it to cell_dim.
        """
        cell_dim = memory_cells.values.get_shape().as_list()[2]
        if project_query:
            # project the query up/down to cell_dim
            self._projection_layer = Dense(cell_dim, activation='linear')
            query = self._projection_layer(query)  # (batch_size, cand_dim)

        memory_values, memory_mask = memory_cells.values, memory_cells.mask

        # batch matrix multiply to compute logit scores for all choices in all batches
        query = tf.expand_dims(query, 2)  # (batch_size, cell_dim, 1)
        logit_values = tf.batch_matmul(memory_values, query)  # (batch_size, num_cells, 1)
        logit_values = tf.squeeze(logit_values, [2])  # (batch_size, num_cells)

        # set all pad logits to negative infinity
        logits = SequenceBatch(logit_values, memory_mask)
        logits = logits.with_pad_value(-float('inf'))

        # normalize to get probs
        probs = tf.nn.softmax(logits.values)  # (batch_size, num_cells)

        retrieved = tf.batch_matmul(tf.expand_dims(probs, 1), memory_values)  # (batch_size, 1, cell_dim)
        retrieved = tf.squeeze(retrieved, [1])  # (batch_size, cell_dim)

        self._logits = logits.values
        self._probs = probs
        self._retrieved = retrieved
Developer: siddk, Project: lang2program, Lines: 35, Source: model.py

Example 4: __build_network

    def __build_network(self):
        embedding_layer = Embedding(self.corpus_size,
                            EMBEDDING_DIM,
                            weights=[self.embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)
        # a 1D convnet with max pooling
        sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)
        # sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
        # embedded_sequences = embedding_layer(sequence_input)
        x = Convolution1D(128, 5)(embedded_sequences)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling1D(5)(x)
        x = Convolution1D(128, 5)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling1D(5)(x)
        print "before 256", x.get_shape()
        x = Convolution1D(128, 5)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        print "before 35 ", x.get_shape()
        x = MaxPooling1D(35)(x)
        x = Flatten()(x)
        # print x.shape()

        x = Dense(128, activation='relu')(x)
        print(x.get_shape())
        x = Dropout(0.5)(x)
        print(x.get_shape())
        preds = Dense(self.class_num, activation='softmax')(x)
        print(preds.get_shape())
        # conv_blocks = []
        # for sz in self.filter_sizes:
        #     conv = Convolution1D(filters=self.num_filters, kernel_size=sz, activation="relu", padding='valid', strides=1)(embedded_sequences)
        #     conv = MaxPooling1D(pool_size=2)(conv)
        #     conv = Flatten()(conv)
        #     conv_blocks.append(conv)
        # z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
        # z = Dropout(rate=0.5)(z)
        # z = Dense(units=self.hidden_dims, activation="relu")(z)
        # preds = Dense(self.class_num, activation="softmax")(z)
        rmsprop = RMSprop(lr=0.001)
        self.model = Model(sequence_input, preds)
        self.model.compile(loss='categorical_crossentropy', optimizer=rmsprop, metrics=['acc'])
Developer: SiyuanWei, Project: tensorflow-101, Lines: 47, Source: cnn_text_classifier.py

Example 5: test_get_losses_for

def test_get_losses_for():
    a = Input(shape=(2,))
    dense_layer = Dense(1)
    dense_layer.add_loss(0, inputs=a)
    dense_layer.add_loss(1, inputs=None)

    assert dense_layer.get_losses_for(a) == [0]
    assert dense_layer.get_losses_for(None) == [1]
Developer: 95vidhi, Project: keras, Lines: 8, Source: test_topology.py

Example 6: __build_network

    def __build_network(self):
        embedding_layer = Embedding(self.corpus_size,
                                    EMBEDDING_DIM,
                                    weights=[self.embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH,
                                    trainable=False)
        # a 1D convnet followed by an LSTM
        sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)
        x = Convolution1D(self.num_filters, 5, activation="relu")(embedded_sequences)
        x = MaxPooling1D(5)(x)
        x = Convolution1D(self.num_filters, 5, activation="relu")(x)
        x = MaxPooling1D(5)(x)
        x = LSTM(64, dropout_W=0.2, dropout_U=0.2)(x)
        preds = Dense(self.class_num, activation='softmax')(x)
        print(preds.get_shape())
        rmsprop = RMSprop(lr=0.01)
        self.model = Model(sequence_input, preds)
        self.model.compile(loss='categorical_crossentropy', optimizer=rmsprop, metrics=['acc'])
Developer: SiyuanWei, Project: tensorflow-101, Lines: 19, Source: cnn_lstm_text_classifier.py

Example 7: get_ResNet_classifier

def get_ResNet_classifier():
    inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))

    x = conv_bn_relu(inputs, RESNET_INITIAL_FILTERS)

    print('base')
    print(x.get_shape())

    for i in range(RESNET_BLOCKS):
        x = bottleneck(x, shrinkage=(i % RESNET_SHRINKAGE_STEPS == 0))

    print('top')
    x = GlobalMaxPooling3D()(x)
    print(x.get_shape())

    x = Dense(2, activation='softmax')(x)
    print(x.get_shape())

    model = Model(inputs=inputs, outputs=x)
    model.compile(optimizer=Adam(lr=TRAIN_CLASSIFY_LEARNING_RATE), loss='binary_crossentropy', metrics=['accuracy'])

    return model
Developer: csyyyyyyy, Project: Tianchi-Medical-LungTumorDetect, Lines: 22, Source: model_ResNet.py

Example 8: Input

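# excerpt: recog, recog_left, sgd and epsilon_std are defined earlier in the source script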
recog_left.add(Dense(64,input_shape=(64,),activation='relu'))

recog_right=recog
recog_right.add(Dense(64,input_shape=(64,),activation='relu'))
recog_right.add(Lambda(lambda x: x + K.exp(x / 2) * K.random_normal(shape=(1, 64), mean=0.,
                              std=epsilon_std), output_shape=(64,)))
recog_right.add(Highway())
recog_right.add(Activation('sigmoid'))

recog1=Sequential()
recog1.add(Merge([recog_left,recog_right],mode = 'ave'))
recog1.add(Dense(784))

#### HERE***
recog11=Sequential()
layer=Dense(64,init='glorot_uniform',input_shape=(784,))
layer.trainable=False
recog11.add(layer)
layer2=Dense(784, activation='sigmoid',init='glorot_uniform')
layer2.trainable=False
recog11.add(layer2)
recog11.layers[0].W.set_value(np.ones((784,64)).astype(np.float32))

recog11.compile(loss='mean_squared_error', optimizer=sgd,metrics = ['mae'])

recog11.get_weights()[0].shape

gan_input = Input(batch_shape=(1,784))

gan_level2 = recog11(recog1(gan_input))
Developer: kcavagnolo, Project: ml_fun, Lines: 30, Source: keras_freeze_layer_weights.py

Example 9: test_node_construction

def test_node_construction():
    ####################################################
    # test basics

    a = Input(shape=(32,), name='input_a')
    b = Input(shape=(32,), name='input_b')

    assert a._keras_shape == (None, 32)
    a_layer, a_node_index, a_tensor_index = a._keras_history
    b_layer, b_node_index, b_tensor_index = b._keras_history
    assert len(a_layer._inbound_nodes) == 1
    assert a_tensor_index == 0
    node = a_layer._inbound_nodes[a_node_index]
    assert node.outbound_layer == a_layer

    assert isinstance(node.inbound_layers, list)
    assert node.inbound_layers == []
    assert isinstance(node.input_tensors, list)
    assert node.input_tensors == [a]
    assert isinstance(node.input_masks, list)
    assert node.input_masks == [None]
    assert isinstance(node.input_shapes, list)
    assert node.input_shapes == [(None, 32)]

    assert isinstance(node.output_tensors, list)
    assert node.output_tensors == [a]
    assert isinstance(node.output_shapes, list)
    assert node.output_shapes == [(None, 32)]
    assert isinstance(node.output_masks, list)
    assert node.output_masks == [None]

    dense = Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)

    assert len(dense._inbound_nodes) == 2
    assert len(dense._outbound_nodes) == 0
    assert dense._inbound_nodes[0].inbound_layers == [a_layer]
    assert dense._inbound_nodes[0].outbound_layer == dense
    assert dense._inbound_nodes[1].inbound_layers == [b_layer]
    assert dense._inbound_nodes[1].outbound_layer == dense

    assert dense._inbound_nodes[0].input_tensors == [a]
    assert dense._inbound_nodes[1].input_tensors == [b]

    assert dense._inbound_nodes[0].get_config()['inbound_layers'] == ['input_a']
    assert dense._inbound_nodes[1].get_config()['inbound_layers'] == ['input_b']

    # test layer properties
    test_layer = Dense(16, name='test_layer')
    a_test = test_layer(a)
    assert K.int_shape(test_layer.kernel) == (32, 16)
    assert test_layer.input == a
    assert test_layer.output == a_test
    assert test_layer.input_mask is None
    assert test_layer.output_mask is None
    assert test_layer.input_shape == (None, 32)
    assert test_layer.output_shape == (None, 16)

    with pytest.raises(AttributeError):
        dense.input
    with pytest.raises(AttributeError):
        dense.output
    with pytest.raises(AttributeError):
        dense.input_mask
    with pytest.raises(AttributeError):
        dense.output_mask

    assert dense.get_input_at(0) == a
    assert dense.get_input_at(1) == b
    assert dense.get_output_at(0) == a_2
    assert dense.get_output_at(1) == b_2
    assert dense.get_input_shape_at(0) == (None, 32)
    assert dense.get_input_shape_at(1) == (None, 32)
    assert dense.get_output_shape_at(0) == (None, 16)
    assert dense.get_output_shape_at(1) == (None, 16)
    assert dense.get_input_mask_at(0) is None
    assert dense.get_input_mask_at(1) is None
    assert dense.get_output_mask_at(0) is None
    assert dense.get_output_mask_at(1) is None
Developer: 95vidhi, Project: keras, Lines: 80, Source: test_topology.py

Example 10: Attention

class Attention(Model):
    """Implements standard attention.

    Given some memory, a memory mask and a query, outputs the weighted memory cells.
    """

    def __init__(self, memory_cells, query, project_query=False):
        """Define Attention.

        Args:
            memory_cells (SequenceBatch): a SequenceBatch containing a Tensor of shape (batch_size, num_cells, cell_dim)
            query (Tensor): a tensor of shape (batch_size, query_dim).
            project_query (bool): defaults to False. If True, the query goes through an extra projection layer to
                coerce it to cell_dim.
        """
        cell_dim = memory_cells.values.get_shape().as_list()[2]
        if project_query:
            # project the query up/down to cell_dim
            self._projection_layer = Dense(cell_dim, activation='linear')
            query = self._projection_layer(query)  # (batch_size, cand_dim)

        memory_values, memory_mask = memory_cells.values, memory_cells.mask

        # batch matrix multiply to compute logit scores for all choices in all batches
        query = tf.expand_dims(query, 2)  # (batch_size, cell_dim, 1)
        logit_values = tf.batch_matmul(memory_values, query)  # (batch_size, num_cells, 1)
        logit_values = tf.squeeze(logit_values, [2])  # (batch_size, num_cells)

        # set all pad logits to negative infinity
        logits = SequenceBatch(logit_values, memory_mask)
        logits = logits.with_pad_value(-float('inf'))

        # normalize to get probs
        probs = tf.nn.softmax(logits.values)  # (batch_size, num_cells)

        retrieved = tf.batch_matmul(tf.expand_dims(probs, 1), memory_values)  # (batch_size, 1, cell_dim)
        retrieved = tf.squeeze(retrieved, [1])  # (batch_size, cell_dim)

        self._logits = logits.values
        self._probs = probs
        self._retrieved = retrieved

    @property
    def logits(self):
        return self._logits  # (batch_size, num_cells)

    @property
    def probs(self):
        return self._probs  # (batch_size, num_cells)

    @property
    def retrieved(self):
        return self._retrieved  # (batch_size, cell_dim)

    @property
    def projection_weights(self):
        """Get projection weights.

        Returns:
            (np.array, np.array): a pair of numpy arrays, (W, b) used to project the query tensor to
                match the predicate embedding dimension.
        """
        return self._projection_layer.get_weights()

    @projection_weights.setter
    def projection_weights(self, value):
        W, b = value
        self._projection_layer.set_weights([W, b])
Developer: siddk, Project: lang2program, Lines: 68, Source: model.py
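For intuition, here is a small NumPy-only sketch of the computation this class performs, masked dot-product attention over memory cells (the shapes and the mask are illustrative assumptions; this sketch is not part of the lang2program codebase):

import numpy as np

batch_size, num_cells, cell_dim = 1, 4, 3
memory = np.random.randn(batch_size, num_cells, cell_dim)
query = np.random.randn(batch_size, cell_dim)
mask = np.array([[1., 1., 1., 0.]])  # last cell is padding

# logit scores: batch matmul of memory (B, N, D) with query (B, D)
logits = np.einsum('bnd,bd->bn', memory, query)
logits[mask == 0] = -np.inf  # pad logits -> -inf, so padding gets zero weight

# softmax over cells
probs = np.exp(logits - logits.max(axis=1, keepdims=True))
probs /= probs.sum(axis=1, keepdims=True)

# weighted sum of memory cells: (B, N) x (B, N, D) -> (B, D)
retrieved = np.einsum('bn,bnd->bd', probs, memory)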

Example 11: lstm_layer

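# excerpt: the embedding inputs, lstm_layer/lstm_layer2, x1, and the hyper-parameters
# (num_dense, rate_drop_dense, act, re_weight) are defined earlier in the source script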
_y1 = lstm_layer(embedded_sequences_2)
y1 = lstm_layer2(_y1)

merged = concatenate([x1, y1])
merged = Dropout(rate_drop_dense)(merged)
merged = BatchNormalization()(merged)

merged = Dense(num_dense, activation=act)(merged)
merged = Dropout(rate_drop_dense)(merged)

merged = Dense(num_dense, activation=act)(merged)
merged = Dropout(rate_drop_dense)(merged)

merged = BatchNormalization()(merged)

preds = Dense(1, activation='sigmoid')(merged)

########################################
## add class weight
########################################
if re_weight:
    class_weight = {0: 1.309028344, 1: 0.472001959}
else:
    class_weight = None

########################################
## train the model
########################################
model = Model(inputs=[sequence_1_input, sequence_2_input], \
        outputs=preds)
model.compile(loss='binary_crossentropy',
Developer: gtesei, Project: fast-furious, Lines: 31, Source: blstm.py

Example 12: add_top_layers

def add_top_layers(model, image_size, patch_net='resnet50', block_type='resnet', 
                   depths=[512,512], repetitions=[1,1], 
                   block_fn=bottleneck_org, nb_class=2, 
                   shortcut_with_bn=True, bottleneck_enlarge_factor=4,
                   dropout=.0, weight_decay=.0001,
                   add_heatmap=False, avg_pool_size=(7,7), return_heatmap=False,
                   add_conv=True, add_shortcut=False,
                   hm_strides=(1,1), hm_pool_size=(5,5),
                   fc_init_units=64, fc_layers=2):

    def add_residual_blocks(block):
        for depth,repetition in zip(depths, repetitions):
            block = _residual_block(
                block_fn, depth, repetition,
                dropout=dropout, weight_decay=weight_decay,
                shortcut_with_bn=shortcut_with_bn,
                bottleneck_enlarge_factor=bottleneck_enlarge_factor)(block)
        pool = GlobalAveragePooling2D()(block)
        dropped = Dropout(dropout)(pool)
        return dropped

    def add_vgg_blocks(block):
        for depth,repetition in zip(depths, repetitions):
            block = _vgg_block(depth, repetition,
                               dropout=dropout, 
                               weight_decay=weight_decay)(block)
        pool = GlobalAveragePooling2D()(block)
        dropped = Dropout(dropout)(pool)
        return dropped
    
    def add_fc_layers(block):
        flattened = Flatten()(block)
        dropped = Dropout(dropout)(flattened)
        units=fc_init_units
        for i in range(fc_layers):  # xrange in the original; range works on Python 2 and 3
            fc = Dense(units, kernel_initializer="he_normal", 
                       kernel_regularizer=l2(weight_decay))(dropped)
            norm = BatchNormalization()(fc)
            relu = Activation('relu')(norm)
            dropped = Dropout(dropout)(relu)
            units //= 2  # integer division so Dense always receives an int
        return dropped, flattened

    if patch_net == 'resnet50':
        last_kept_layer = model.layers[-5]
    elif patch_net == 'yaroslav':
        last_kept_layer = model.layers[-3]
    else:
        last_kept_layer = model.layers[-4]
    block = last_kept_layer.output
    channels = 1 if patch_net == 'yaroslav' else 3
    image_input = Input(shape=(image_size[0], image_size[1], channels))
    model0 = Model(inputs=model.inputs, outputs=block)
    block = model0(image_input)
    if add_heatmap or return_heatmap:  # add softmax heatmap.
        pool1 = AveragePooling2D(pool_size=avg_pool_size, 
                                 strides=hm_strides)(block)
        if return_heatmap:
            dropped = pool1
        else:
            dropped = Dropout(dropout)(pool1)
        clf_layer = model.layers[-1]
        clf_weights = clf_layer.get_weights()
        clf_classes = clf_layer.output_shape[1]
        if return_heatmap:
            # wrap in a lambda so Dense receives a callable activation
            # (the original passed activations.softmax(x, ...) with x undefined)
            activation = lambda x: activations.softmax(x, axis=CHANNEL_AXIS)
        else:
            activation = 'relu'
        heatmap_layer = Dense(clf_classes, activation=activation, 
                              kernel_regularizer=l2(weight_decay))
        heatmap = heatmap_layer(dropped)
        heatmap_layer.set_weights(clf_weights)
        if return_heatmap:
            model_heatmap = Model(inputs=image_input, outputs=heatmap)
            return model_heatmap
        block = MaxPooling2D(pool_size=hm_pool_size)(heatmap)
        top_layer_nb = 8
    else:
        top_layer_nb = 2
    if add_conv:
        if block_type == 'resnet':
            block = add_residual_blocks(block)
        elif block_type == 'vgg':
            block = add_vgg_blocks(block)
        else:
            raise Exception('Unsupported block type: ' + block_type)
    else:
        block, flattened = add_fc_layers(block)
    if add_shortcut and not add_conv:
        dense = Dense(nb_class, kernel_initializer="he_normal", 
                      kernel_regularizer=l2(weight_decay))(block)
        shortcut = Dense(nb_class, kernel_initializer="he_normal", 
                         kernel_regularizer=l2(weight_decay))(flattened)
        addition = add([dense, shortcut])
        dense = Activation('softmax')(addition)
    else:
        dense = Dense(nb_class, kernel_initializer="he_normal", 
                      activation='softmax', 
                      kernel_regularizer=l2(weight_decay))(block)
    model_addtop = Model(inputs=image_input, outputs=dense)
#......... part of the code is omitted here .........
Developer: liuyiaaa, Project: end2end-all-conv, Lines: 101, Source: dm_resnet.py

Example 13: pop_layer

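# excerpt: model, weights_path and pop_layer are defined earlier in the source script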
# note: trainable expects a bool; the original passed the string 'False', which is truthy
model.add(Dropout(0.5, trainable=False))
model.add(Dense(10, trainable=False))
model.add(Activation('softmax', trainable=False))

# LOADING WEIGHTS TO FINE-TUNE THEM
model.load_weights(weights_path)

pop_layer(model)
pop_layer(model)

# for layer in model.layers:
#   layer.trainable= False

nb_classes=13

layer_last=Dense(nb_classes)
layer_last.trainable=True

layer_last2=Activation('softmax')
layer_last2.trainable=True

model.add(layer_last)
model.add(layer_last2)

print(model.summary())


# let's train the model using SGD + momentum (how original).
#sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer="sgd",
Developer: telecombcn-dl, Project: dlcv04, Lines: 31, Source: cifar10_cnn_finetunning.py

Example 14: Sequential

import numpy as np
import tensorflow as tf

###Test parameters:
sample_width = 5
nb_train_samples = 20000
nb_test_samples = 1000

###Making the layers:
labels = tf.placeholder(tf.float32, shape=(None,1))
features = tf.placeholder(tf.float32, shape=(None,sample_width))

from keras.models import Sequential
from keras.layers import Dense
import random

model = Sequential()
first_layer = Dense(20, activation='sigmoid', input_shape=(None,sample_width))
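# set_input binds the TF placeholder to the layer; it comes from an old Keras API
# and was removed in later versions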
first_layer.set_input(features)
model.add(first_layer)
model.add(Dense(1, activation='sigmoid'))
output_layer = model.output

###making training data & test data:

train_features = np.random.randn(nb_train_samples, sample_width)
train_labels = np.zeros(nb_train_samples).reshape(nb_train_samples, 1)
test_features = np.random.randn(nb_test_samples, sample_width)
test_labels = np.zeros(nb_test_samples).reshape(nb_test_samples, 1)

train_ones = 0
test_ones = 0
Developer: evancofer, Project: keras_examples, Lines: 29, Source: randomDataExample.py

Example 15: Input

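# excerpt: recog, recog_left, sgd and epsilon_std are defined earlier in the source script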
recog_right=recog
recog_right.add(Dense(64,input_shape=(64,),activation='relu'))
recog_right.add(Lambda(lambda x: x + K.exp(x / 2) * K.random_normal(shape=(1, 64), mean=0.,
                              std=epsilon_std), output_shape=(64,)))
recog_right.add(Highway())
recog_right.add(Activation('sigmoid'))

recog1=Sequential()
recog1.add(Merge([recog_left,recog_right],mode = 'ave'))
recog1.add(Dense(784))
recog1.add(Activation('relu'))

#### GATE***
recog11=Sequential()
layer=Dense(2,init='glorot_uniform',input_shape=(784,))
layer.trainable=False
recog11.add(layer)
layer2=Dense(784, activation='sigmoid',init='glorot_uniform')
layer2.trainable=True
recog11.add(layer2)
recog11.layers[0].W.set_value(np.ones((784,2)).astype(np.float32))

recog11.compile(loss='mean_squared_error', optimizer=sgd,metrics = ['mae'])

recog11.get_weights()[0].shape

gan_input = Input(batch_shape=(1,784))

gan_level2 = recog11(recog1(gan_input))
Developer: kcavagnolo, Project: ml_fun, Lines: 29, Source: autoencoder_dimensionality.py


Note: The keras.layers.Dense class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any use or redistribution must follow the corresponding project's license. Do not repost without permission.