

Python layers.ConvLSTM2D Method Code Examples

This article collects typical usage examples of the keras.layers.ConvLSTM2D method in Python. If you are looking for what layers.ConvLSTM2D does, how to call it, or working examples of it in real projects, the curated snippets below should help. You can also browse further usage examples for the containing module, keras.layers.


The following presents 4 code examples of the layers.ConvLSTM2D method, sorted by popularity by default.
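Before the collected project snippets, here is a minimal, self-contained sketch of the layer's basic call signature. The filter count, kernel size and input shape below are illustrative assumptions, not taken from any of the examples that follow.

import numpy as np
from keras.models import Sequential
from keras.layers import ConvLSTM2D

# ConvLSTM2D expects 5D input: (samples, time, rows, cols, channels)
# with the default 'channels_last' data format.
model = Sequential()
model.add(ConvLSTM2D(filters=16, kernel_size=(3, 3), padding='same',
                     return_sequences=False,
                     input_shape=(10, 32, 32, 1)))

# One dummy batch: 2 sequences of 10 frames, each 32x32 with a single channel.
x = np.random.random((2, 10, 32, 32, 1)).astype('float32')
y = model.predict(x)
print(y.shape)  # (2, 32, 32, 16): last timestep only, spatial size kept by padding='same'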

Example 1: convert_weights

# Required import: from keras import layers [as alias]
# Or: from keras.layers import ConvLSTM2D [as alias]
import numpy as np
def convert_weights(layer, weights):
    if layer.__class__.__name__ == 'GRU':
        # Split each concatenated tensor (kernel, recurrent kernel, bias)
        # into its 3 gate slices, then regroup the slices per gate.
        W = [np.split(w, 3, axis=-1) for w in weights]
        return sum(map(list, zip(*W)), [])
    elif layer.__class__.__name__ in ('LSTM', 'ConvLSTM2D'):
        # Same idea with 4 gates; swap the 2nd and 3rd gate slices because
        # the legacy gate ordering differs from the current one.
        W = [np.split(w, 4, axis=-1) for w in weights]
        for w in W:
            w[2], w[1] = w[1], w[2]
        return sum(map(list, zip(*W)), [])
    elif layer.__class__.__name__ == 'Conv2DTranspose':
        # Legacy Conv2DTranspose kernels are stored with a transposed layout.
        return [np.transpose(weights[0], (2, 3, 0, 1)), weights[1]]
    return weights
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 14, Source: test_topology.py
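A hedged usage sketch for the helper above, assuming convert_weights from Example 1 is in scope; the small model exists only to produce a ConvLSTM2D layer with weights. In Keras 2 a ConvLSTM2D layer stores 3 tensors (kernel, recurrent kernel, bias), each concatenating the 4 gates along the last axis, and the helper splits them back into 12 per-gate tensors in the legacy ordering.

import numpy as np
from keras.models import Sequential
from keras.layers import ConvLSTM2D

model = Sequential()
model.add(ConvLSTM2D(8, (3, 3), input_shape=(5, 16, 16, 1)))
layer = model.layers[0]

print(len(layer.get_weights()))                       # 3 concatenated tensors
legacy = convert_weights(layer, layer.get_weights())
print(len(legacy))                                    # 12 per-gate tensors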

Example 2: load_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import ConvLSTM2D [as alias]
def load_model():
    # Stacked ConvLSTM2D feature extractor followed by an LSTM classifier
    in_shape = (SequenceLength, IMSIZE[0], IMSIZE[1], 3)
    model = Sequential()
    model.add(ConvLSTM2D(32, kernel_size=(7, 7), padding='valid', return_sequences=True, input_shape=in_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(ConvLSTM2D(64, kernel_size=(5, 5), padding='valid', return_sequences=True))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(ConvLSTM2D(96, kernel_size=(3, 3), padding='valid', return_sequences=True))
    model.add(Activation('relu'))
    model.add(ConvLSTM2D(96, kernel_size=(3, 3), padding='valid', return_sequences=True))
    model.add(Activation('relu'))
    model.add(ConvLSTM2D(96, kernel_size=(3, 3), padding='valid', return_sequences=True))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(Dense(320))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    out_shape = model.output_shape
    # print('====Model shape: ', out_shape)
    model.add(Reshape((SequenceLength, out_shape[2] * out_shape[3] * out_shape[4])))
    model.add(LSTM(64, return_sequences=False))
    model.add(Dropout(0.5))
    model.add(Dense(N_CLASSES, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

    # model structure summary
    print(model.summary())

    return model 
Author: woodfrog, Project: ActionRecognition, Lines: 33, Source: LRCN_keras.py
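The snippet above relies on module-level constants (SequenceLength, IMSIZE, N_CLASSES) and imports defined elsewhere in LRCN_keras.py. A hedged sketch of that surrounding setup might look like the following; in a real script the imports and constants would appear before the function definition, and the constant values here are placeholders, not the project's actual configuration.

import numpy as np
from keras.models import Sequential
from keras.layers import (ConvLSTM2D, Activation, MaxPooling3D,
                          Dense, Dropout, Reshape, LSTM)

# Placeholder values; the real project defines its own.
SequenceLength = 10        # frames per clip
IMSIZE = (112, 112)        # frame height and width
N_CLASSES = 101            # number of action classes

model = load_model()

# Training batches are shaped (batch, SequenceLength, IMSIZE[0], IMSIZE[1], 3)
# with one-hot labels of length N_CLASSES.
x = np.random.random((2, SequenceLength, IMSIZE[0], IMSIZE[1], 3)).astype('float32')
y = np.eye(N_CLASSES)[[0, 1]]
model.train_on_batch(x, y)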

Example 3: pie_convlstm_encdec

# Required import: from keras import layers [as alias]
# Or: from keras.layers import ConvLSTM2D [as alias]
def pie_convlstm_encdec(self):
        '''
        Create a ConvLSTM encoder / LSTM decoder model for intention estimation.
        '''
        # Generate input data. The shape is (sequence_length, length of flattened features).
        encoder_input = input_data = Input(shape=(self._sequence_length,) + self.context_model.output_shape[1:],
                                           name='encoder_input')
        interm_input = encoder_input

        # Generate Encoder LSTM Unit
        encoder_model = ConvLSTM2D(filters=self._convlstm_num_filters,
                                   kernel_size=self._convlstm_kernel_size,
                                   kernel_regularizer=self._kernel_regularizer,
                                   recurrent_regularizer=self._recurrent_regularizer,
                                   bias_regularizer=self._bias_regularizer,
                                   dropout=self._lstm_dropout,
                                   recurrent_dropout=self._lstm_recurrent_dropout,
                                   return_sequences=False)(interm_input)
        encoder_output = Flatten(name='encoder_flatten')(encoder_model)

        # Generate Decoder LSTM unit
        decoder_input = Input(shape=(self._decoder_seq_length,
                                     self._decoder_input_size),
                              name='decoder_input')
        encoder_vec = RepeatVector(self._decoder_seq_length)(encoder_output)
        decoder_concat_inputs = Concatenate(axis=2)([encoder_vec, decoder_input])

        decoder_model = self.create_lstm_model(name='decoder_network',
                                               r_state = False,
                                               r_sequence=False)(decoder_concat_inputs)

        decoder_dense_output = Dense(self._decoder_dense_output_size,
                                     activation='sigmoid',
                                     name='decoder_dense')(decoder_model)

        decoder_output = decoder_dense_output

        self.train_model = Model(inputs=[encoder_input, decoder_input],
                                 outputs=decoder_output)

        self.train_model.summary()
        return self.train_model 
Author: aras62, Project: PIEPredict, Lines: 44, Source: pie_intent.py
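Stripped of the class plumbing and configuration attributes, the wiring pattern used above — a ConvLSTM2D encoder whose flattened final state is repeated across the decoder's time axis and concatenated with the per-step decoder inputs — can be sketched as a standalone functional model. All layer sizes and sequence lengths here are illustrative assumptions, and a plain LSTM stands in for the create_lstm_model decoder.

from keras.models import Model
from keras.layers import (Input, ConvLSTM2D, Flatten, RepeatVector,
                          Concatenate, LSTM, Dense)

seq_len, dec_len = 15, 45                      # illustrative sequence lengths
enc_in = Input(shape=(seq_len, 14, 14, 64), name='encoder_input')
dec_in = Input(shape=(dec_len, 4), name='decoder_input')

# Encoder: keep only the final state's feature map, then flatten it.
enc = ConvLSTM2D(filters=64, kernel_size=(2, 2), return_sequences=False)(enc_in)
enc_vec = Flatten(name='encoder_flatten')(enc)

# Repeat the encoder summary at every decoder timestep and concatenate it
# with the per-step decoder inputs before the recurrent decoder.
repeated = RepeatVector(dec_len)(enc_vec)
dec_concat = Concatenate(axis=2)([repeated, dec_in])
dec = LSTM(128, return_sequences=False)(dec_concat)
out = Dense(1, activation='sigmoid', name='decoder_dense')(dec)

model = Model(inputs=[enc_in, dec_in], outputs=out)
model.summary()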

Example 4: test_load_layers

# Required import: from keras import layers [as alias]
# Or: from keras.layers import ConvLSTM2D [as alias]
# `np`, `K`, and `topology` come from the test module's own imports
# (numpy, keras.backend, and keras.engine.topology).
def test_load_layers():
    from keras.layers import ConvLSTM2D, TimeDistributed, Bidirectional, Conv2D, Input
    from keras.models import Model

    if K.backend() == 'tensorflow' or K.backend() == 'cntk':
        inputs = Input(shape=(10, 20, 20, 1))
    else:
        inputs = Input(shape=(10, 1, 20, 20))
    td_conv = TimeDistributed(Conv2D(15, (5, 5)))(inputs)
    bi_convlstm2d = Bidirectional(ConvLSTM2D(10, (3, 3)), merge_mode='concat')(td_conv)
    model = Model(inputs=inputs, outputs=bi_convlstm2d)

    weight_value_tuples = []

    # TimeDistributed Conv2D layer
    # use 'channels_first' data format to check that the function is being called correctly for Conv2D
    # old: (filters, stack_size, kernel_rows, kernel_cols)
    # new: (kernel_rows, kernel_cols, stack_size, filters)
    weight_tensor_td_conv_old = list()
    weight_tensor_td_conv_old.append(np.zeros((15, 1, 5, 5)))
    weight_tensor_td_conv_old.append(np.zeros((15,)))
    td_conv_layer = model.layers[1]
    td_conv_layer.layer.data_format = 'channels_first'
    weight_tensor_td_conv_new = topology.preprocess_weights_for_loading(
        td_conv_layer,
        weight_tensor_td_conv_old,
        original_keras_version='1')
    symbolic_weights = td_conv_layer.weights
    assert (len(symbolic_weights) == len(weight_tensor_td_conv_new))
    weight_value_tuples += zip(symbolic_weights, weight_tensor_td_conv_new)

    # Bidirectional ConvLSTM2D layer
    # The old ConvLSTM2D stored a list of 12 weight tensors per direction;
    # these are converted into 3 concatenated larger tensors per direction.
    weight_tensor_bi_convlstm_old = []
    for j in range(2):  # bidirectional
        for i in range(4):
            weight_tensor_bi_convlstm_old.append(np.zeros((3, 3, 15, 10)))  # kernel
            weight_tensor_bi_convlstm_old.append(np.zeros((3, 3, 10, 10)))  # recurrent kernel
            weight_tensor_bi_convlstm_old.append(np.zeros((10,)))  # bias

    bi_convlstm_layer = model.layers[2]
    weight_tensor_bi_convlstm_new = topology.preprocess_weights_for_loading(
        bi_convlstm_layer,
        weight_tensor_bi_convlstm_old,
        original_keras_version='1')

    symbolic_weights = bi_convlstm_layer.weights
    assert (len(symbolic_weights) == len(weight_tensor_bi_convlstm_new))
    weight_value_tuples += zip(symbolic_weights, weight_tensor_bi_convlstm_new)

    K.batch_set_value(weight_value_tuples)

    assert np.all(K.eval(model.layers[1].weights[0]) == weight_tensor_td_conv_new[0])
    assert np.all(K.eval(model.layers[1].weights[1]) == weight_tensor_td_conv_new[1])
    assert np.all(K.eval(model.layers[2].weights[0]) == weight_tensor_bi_convlstm_new[0])
    assert np.all(K.eval(model.layers[2].weights[1]) == weight_tensor_bi_convlstm_new[1])
    assert np.all(K.eval(model.layers[2].weights[2]) == weight_tensor_bi_convlstm_new[2])
    assert np.all(K.eval(model.layers[2].weights[3]) == weight_tensor_bi_convlstm_new[3])
    assert np.all(K.eval(model.layers[2].weights[4]) == weight_tensor_bi_convlstm_new[4])
    assert np.all(K.eval(model.layers[2].weights[5]) == weight_tensor_bi_convlstm_new[5]) 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 62, Source: test_topology.py


Note: The keras.layers.ConvLSTM2D method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use must follow the corresponding project's license. Do not reproduce without permission.