

Python layers.Masking Method Code Examples

This article collects typical usage examples of the keras.layers.Masking method in Python. If you are wondering how exactly layers.Masking is used, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from keras.layers, the module this method belongs to.


A total of 15 code examples of layers.Masking are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
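Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical pattern: zero-pad variable-length sequences and place Masking in front of a recurrent layer so the padded timesteps are skipped. It assumes Keras 2.x with default settings; the layer sizes and data are illustrative only.

import numpy as np
from keras.models import Sequential
from keras.layers import Masking, LSTM, Dense

n_timesteps, n_features = 4, 3

model = Sequential()
# Timesteps whose features all equal mask_value are skipped by downstream layers
model.add(Masking(mask_value=0.0, input_shape=(n_timesteps, n_features)))
model.add(LSTM(8))  # receives the mask and ignores padded steps
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')

# Two sequences of true length 2 and 3, zero-padded to 4 timesteps
x = np.zeros((2, n_timesteps, n_features))
x[0, :2] = 1.0
x[1, :3] = 1.0
y = np.array([[1, 0], [0, 1]])
model.fit(x, y, epochs=1, verbose=0)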

Example 1: get_audio_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def get_audio_model(self):

		# Modality specific hyperparameters
		self.epochs = 100
		self.batch_size = 50

		# Modality specific parameters
		self.embedding_dim = self.train_x.shape[2]

		print("Creating Model...")
		
		inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
		masked = Masking(mask_value=0)(inputs)
		lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4))(masked)
		lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4), name="utter")(lstm)
		output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)

		model = Model(inputs, output)
		return model 
Developer: declare-lab, Project: MELD, Lines of code: 21, Source file: baseline.py

Example 2: get_bimodal_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def get_bimodal_model(self):

		# Modality specific hyperparameters
		self.epochs = 100
		self.batch_size = 10

		# Modality specific parameters
		self.embedding_dim = self.train_x.shape[2]

		print("Creating Model...")
		
		inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
		masked = Masking(mask_value=0)(inputs)
		lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4), name="utter")(masked)
		output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)

		model = Model(inputs, output)
		return model 
Developer: declare-lab, Project: MELD, Lines of code: 20, Source file: baseline.py

Example 3: _build_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.
        :param max_history_len: The maximum number of historical turns used to
                                decide on next action"""
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # size of hidden layer in LSTM
        # Build Model
        batch_shape = (None, max_history_len, num_features)

        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
        model.add(Dense(input_dim=n_hidden, output_dim=num_actions))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
Developer: Rowl1ng, Project: rasa_wechat, Lines of code: 25, Source file: mom_example.py

Example 4: _build_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.
        :param max_history_len: The maximum number of historical turns used to
                                decide on next action"""
        from keras.layers import Activation, Masking, Dense, SimpleRNN
        from keras.models import Sequential

        n_hidden = 8  # size of hidden layer in RNN
        # Build Model
        batch_input_shape = (None, max_history_len, num_features)

        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_input_shape))
        model.add(SimpleRNN(n_hidden, batch_input_shape=batch_input_shape))
        model.add(Dense(input_dim=n_hidden, output_dim=num_actions))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
Developer: Rowl1ng, Project: rasa_wechat, Lines of code: 25, Source file: policy.py

Example 5: _build_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.

        :param max_history_len: The maximum number of historical
                                turns used to decide on next action
        """
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # Neural Net and training params
        batch_shape = (None, max_history_len, num_features)
        # Build Model
        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
        model.add(Dense(input_dim=n_hidden, units=num_actions))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
Developer: Rowl1ng, Project: rasa_wechat, Lines of code: 26, Source file: keras_policy.py

Example 6: model_masking

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def model_masking(discrete_time, init_alpha, max_beta):
    model = Sequential()

    model.add(Masking(mask_value=mask_value,
                      input_shape=(n_timesteps, n_features)))
    model.add(TimeDistributed(Dense(2)))
    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete', reduce_loss=False).loss_function
    else:
        loss = wtte.loss(kind='continuous', reduce_loss=False).loss_function

    model.compile(loss=loss, optimizer=RMSprop(
        lr=lr), sample_weight_mode='temporal')
    return model 
Developer: ragulpr, Project: wtte-rnn, Lines of code: 19, Source file: test_keras.py

Example 7: model_architecture

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def model_architecture(self, num_features, num_actions, max_history_len):
        """Build a Keras model and return a compiled model."""
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # size of hidden layer in LSTM
        # Build Model
        batch_shape = (None, max_history_len, num_features)

        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
        model.add(Dense(input_dim=n_hidden, output_dim=num_actions))
        model.add(Activation("softmax"))

        model.compile(loss="categorical_crossentropy",
                      optimizer="adam",
                      metrics=["accuracy"])

        logger.debug(model.summary())
        return model 
Developer: Ma-Dan, Project: rasa_bot, Lines of code: 23, Source file: bot.py

Example 8: create_network

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def create_network(nb_features, nb_labels, padding_value):

    # Define the network architecture
    input_data = Input(name='input', shape=(None, nb_features)) # nb_features = image height

    masking = Masking(mask_value=padding_value)(input_data)
    noise = GaussianNoise(0.01)(masking)
    blstm = Bidirectional(LSTM(128, return_sequences=True, dropout=0.1))(noise)
    blstm = Bidirectional(LSTM(128, return_sequences=True, dropout=0.1))(blstm)
    blstm = Bidirectional(LSTM(128, return_sequences=True, dropout=0.1))(blstm)

    dense = TimeDistributed(Dense(nb_labels + 1, name="dense"))(blstm)
    outrnn = Activation('softmax', name='softmax')(dense)

    network = CTCModel([input_data], [outrnn])
    network.compile(Adam(lr=0.0001))

    return network 
Developer: ysoullard, Project: CTCModel, Lines of code: 20, Source file: example.py

Example 9: creat_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def creat_model(input_shape, num_class):

    init = initializers.Orthogonal(gain=args.norm)
    sequence_input = Input(shape=input_shape)
    mask = Masking(mask_value=0.)(sequence_input)
    if args.aug:
        mask = augmentaion()(mask)
    X = Noise(0.075)(mask)
    if args.model[0:2]=='VA':
        # VA
        trans = LSTM(args.nhid,recurrent_activation='sigmoid',return_sequences=True,implementation=2,recurrent_initializer=init)(X)
        trans = Dropout(0.5)(trans)
        trans = TimeDistributed(Dense(3,kernel_initializer='zeros'))(trans)
        rot = LSTM(args.nhid,recurrent_activation='sigmoid',return_sequences=True,implementation=2,recurrent_initializer=init)(X)
        rot = Dropout(0.5)(rot)
        rot = TimeDistributed(Dense(3,kernel_initializer='zeros'))(rot)
        transform = Concatenate()([rot,trans])
        X = VA()([mask,transform])

    X = LSTM(args.nhid,recurrent_activation='sigmoid',return_sequences=True,implementation=2,recurrent_initializer=init)(X)
    X = Dropout(0.5)(X)
    X = LSTM(args.nhid,recurrent_activation='sigmoid',return_sequences=True,implementation=2,recurrent_initializer=init)(X)
    X = Dropout(0.5)(X)
    X = LSTM(args.nhid,recurrent_activation='sigmoid',return_sequences=True,implementation=2,recurrent_initializer=init)(X)
    X = Dropout(0.5)(X)
    X = TimeDistributed(Dense(num_class))(X)
    X = MeanOverTime()(X)
    X = Activation('softmax')(X)

    model=Model(sequence_input,X)
    return model 
Developer: microsoft, Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition, Lines of code: 33, Source file: va-rnn.py

Example 10: create_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def create_model(self):
        model = Sequential()
        #model.add(Masking(mask_value=0, input_shape=(1, self.settings.getint("LSTM", "max_vector_length"))))
        model.add(LSTM_CELL(self.settings.getint("LSTM", "hidden_layers"),
                            input_shape=(self.settings.getint("LSTM", "time_series"), self.settings.getint("LSTM", "max_vector_length")),
                            return_sequences=True))
        model.add(LSTM_CELL(self.settings.getint("LSTM", "hidden_layers")))
        model.add(Dropout(self.settings.getfloat("LSTM", "dropout")))
        model.add(Dense(self.settings.getint('LSTM', 'max_vector_length')))

        return model 
Developer: morrigan, Project: user-behavior-anomaly-detector, Lines of code: 13, Source file: lstm.py

Example 11: learn_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def learn_model(self, features, labels, degrade_mask, epochs=30, batch_size=None, model=None):
        print('learning model')
        if True or not model and not self.model:
            model = Sequential()
            masking = Masking(mask_value=0.0, input_shape=(features.shape[1], features.shape[2],))
            model.add(masking)
            crf = CRF(#input_shape=(features.shape[1], features.shape[2],),
                      units=labels.shape[-1],
                      sparse_target=False,
                      kernel_regularizer=keras.regularizers.l1_l2(0.0001, 0.0001),
                      #bias_regularizer=keras.regularizers.l2(0.005),
                      #chain_regularizer=keras.regularizers.l2(0.005),
                      #boundary_regularizer=keras.regularizers.l2(0.005),
                      learn_mode='marginal',
                      test_mode='marginal',
                      unroll=self.unroll_flag,
                     )
            model.add(crf)
            model.compile(optimizer=self.opt,
                          loss=crf_loss,
                          #loss=crf.loss_function,
                          metrics=[crf_accuracy],
                          #metrics=[crf.accuracy],
                          )
        elif self.model:
            model = self.model
        else:
            assert model

        #assert features.shape[0] == len(self.degrade_mask)
        #weights = self._weight_logic(features, degrade_mask)

        model.fit(features,
                  labels,
                  epochs=epochs,
                  batch_size=batch_size,
                  verbose=1,
                  #sample_weight=weights,
                  )
        return model 
Developer: plastering, Project: plastering, Lines of code: 42, Source file: char2ir_gpu.py

Example 12: assemble_rnn

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def assemble_rnn(params, final_reshape=True):
    """Construct an RNN/LSTM/GRU model of the form: X-[H1-H2-...-HN]-Y.
    All the H-layers are optional recurrent layers and depend on whether they
    are specified in the params dictionary.
    """
    # Input layer
    input_shape = params['input_shape']
    inputs = layers.Input(shape=input_shape)
    # inputs = layers.Input(batch_shape=[20] + list(input_shape))

    # Masking layer
    previous = layers.Masking(mask_value=0.0)(inputs)

    # Hidden layers
    for layer in params['hidden_layers']:
        Layer = layers.deserialize(
            {'class_name': layer['name'], 'config': layer['config']})
        previous = Layer(previous)
        if 'dropout' in layer and layer['dropout'] is not None:
            previous = layers.Dropout(layer['dropout'])(previous)
        if 'batch_norm' in layer and layer['batch_norm'] is not None:
            previous = layers.BatchNormalization(**layer['batch_norm'])(previous)

    # Output layer
    output_shape = params['output_shape']
    output_dim = np.prod(output_shape)
    outputs = layers.Dense(output_dim)(previous)

    if final_reshape:
        outputs = layers.Reshape(output_shape)(outputs)

    return KerasModel(inputs=inputs, outputs=outputs) 
Developer: alshedivat, Project: keras-gp, Lines of code: 34, Source file: assemble.py

Example 13: test_merge_mask_2d

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def test_merge_mask_2d():
    rand = lambda *shape: np.asarray(np.random.random(shape) > 0.5, dtype='int32')

    # inputs
    input_a = layers.Input(shape=(3,))
    input_b = layers.Input(shape=(3,))

    # masks
    masked_a = layers.Masking(mask_value=0)(input_a)
    masked_b = layers.Masking(mask_value=0)(input_b)

    # three different types of merging
    merged_sum = legacy_layers.merge([masked_a, masked_b], mode='sum')
    merged_concat = legacy_layers.merge([masked_a, masked_b], mode='concat', concat_axis=1)
    merged_concat_mixed = legacy_layers.merge([masked_a, input_b], mode='concat', concat_axis=1)

    # test sum
    model_sum = models.Model([input_a, input_b], [merged_sum])
    model_sum.compile(loss='mse', optimizer='sgd')
    model_sum.fit([rand(2, 3), rand(2, 3)], [rand(2, 3)], epochs=1)

    # test concatenation
    model_concat = models.Model([input_a, input_b], [merged_concat])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], epochs=1)

    # test concatenation with masked and non-masked inputs
    model_concat = models.Model([input_a, input_b], [merged_concat_mixed])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], epochs=1) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 32, Source file: layers_test.py

Example 14: test_masking

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def test_masking():
    layer_test(layers.Masking,
               kwargs={},
               input_shape=(3, 2, 3)) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 6, Source file: core_test.py

Example 15: test_masking

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Masking [as alias]
def test_masking():
    np.random.seed(1337)
    x = np.array([[[1], [1]],
                  [[0], [0]]])
    model = Sequential()
    model.add(Masking(mask_value=0, input_shape=(2, 1)))
    model.add(TimeDistributed(Dense(1, kernel_initializer='one')))
    model.compile(loss='mse', optimizer='sgd')
    y = np.array([[[1], [1]],
                  [[1], [1]]])
    loss = model.train_on_batch(x, y)
    assert loss == 0 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 14, Source file: test_loss_masking.py
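A side note on Example 15: the boolean mask that Masking derives from its input marks a timestep as valid when at least one of its features differs from mask_value. In the test, the second sequence is fully masked, so only the first sequence contributes to the loss; with the Dense kernel initialized to ones, its prediction exactly matches the target, hence a loss of zero. A rough NumPy equivalent of the mask computation (my own illustration, not part of the quoted test) looks like this:

import numpy as np

x = np.array([[[1], [1]],
              [[0], [0]]])       # same input as in Example 15
mask = np.any(x != 0, axis=-1)   # True where the timestep is kept
print(mask)                      # [[ True  True]
                                 #  [False False]]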


Note: The keras.layers.Masking examples in this article were compiled by 纯净天空 from open-source code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors; please refer to the license of the corresponding project before distributing or using the code. Do not reproduce without permission.