

Python optimizers.rmsprop Method Code Examples

This article collects typical usage examples of the keras.optimizers.rmsprop method in Python. If you are wondering what optimizers.rmsprop does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from its containing module, keras.optimizers.


Nine code examples of the optimizers.rmsprop method are shown below, sorted by popularity by default.
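Before the project examples, here is a minimal sketch of the pattern they all share: construct an rmsprop optimizer instance and pass it to model.compile. The architecture, learning rate, and data below are illustrative only and are not taken from any of the projects listed.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import rmsprop  # lowercase alias of RMSprop in Keras 2.x

# Small classifier compiled with an rmsprop optimizer instance
model = Sequential()
model.add(Dense(16, activation='relu', input_shape=(8,)))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer=rmsprop(lr=1e-3),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Random data, just to show the training call
x = np.random.random((32, 8))
y = np.random.random((32, 3))
model.train_on_batch(x, y)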

Example 1: GRU64

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import rmsprop [as alias]
def GRU64(n_nodes, conv_len, n_classes, n_feat, in_len,
        optimizer=rmsprop(lr=1e-3), return_param_str=False):
    n_layers = len(n_nodes)

    inputs = Input(shape=(in_len, n_feat))
    model = inputs

    model = CuDNNGRU(64, return_sequences=True)(model)
    model = SpatialDropout1D(0.5)(model)

    model.set_shape((None, in_len, 64))
    model = TimeDistributed(Dense(n_classes, name='fc', activation='softmax'))(model)

    model = Model(inputs=inputs, outputs=model)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', sample_weight_mode="temporal")

    if return_param_str:
        param_str = "GRU_C{}_L{}".format(conv_len, n_layers)
        return model, param_str
    else:
        return model 
Developer: Zephyr-D, Project: TCFPN-ISBA, Lines: 23, Source: weak_model.py
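A hypothetical call to GRU64 (not taken from the TCFPN-ISBA repository): the sequence length, feature dimension, and class count below are illustrative; note that n_nodes and conv_len only affect the returned parameter string.

# Hypothetical values: 500-frame sequences of 128-dim features, 20 classes
model, param_str = GRU64(n_nodes=[64], conv_len=25, n_classes=20,
                         n_feat=128, in_len=500, return_param_str=True)
model.summary()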

Example 2: test_sequential_model_saving_2

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import rmsprop [as alias]
def test_sequential_model_saving_2():
    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = losses.mse
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(Dense(3))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname,
                       custom_objects={'custom_opt': custom_opt,
                                       'custom_loss': custom_loss})
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 26, Source: test_model_saving.py

Example 3: dueling_dqn

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import rmsprop [as alias]
def dueling_dqn(input_shape, action_size, learning_rate):

        state_input = Input(shape=(input_shape))
        x = Convolution2D(32, 8, 8, subsample=(4, 4), activation='relu')(state_input)
        x = Convolution2D(64, 4, 4, subsample=(2, 2), activation='relu')(x)
        x = Convolution2D(64, 3, 3, activation='relu')(x)
        x = Flatten()(x)

        # state value tower - V
        state_value = Dense(256, activation='relu')(x)
        state_value = Dense(1, init='uniform')(state_value)
        state_value = Lambda(lambda s: K.expand_dims(s[:, 0], dim=-1), output_shape=(action_size,))(state_value)

        # action advantage tower - A
        action_advantage = Dense(256, activation='relu')(x)
        action_advantage = Dense(action_size)(action_advantage)
        action_advantage = Lambda(lambda a: a[:, :] - K.mean(a[:, :], keepdims=True), output_shape=(action_size,))(action_advantage)

        # merge to state-action value function Q
        state_action_value = merge([state_value, action_advantage], mode='sum')

        model = Model(input=state_input, output=state_action_value)
        #model.compile(rmsprop(lr=learning_rate), "mse")
        adam = Adam(lr=learning_rate)
        model.compile(loss='mse',optimizer=adam)

        return model 
Developer: flyyufelix, Project: VizDoom-Keras-RL, Lines: 29, Source: networks.py
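A hypothetical call to this builder (not from the VizDoom-Keras-RL repository), assuming a stack of four 64x64 frames and a small discrete action space:

# Hypothetical values: 4 stacked 64x64 frames, 3 discrete actions
model = dueling_dqn(input_shape=(64, 64, 4), action_size=3, learning_rate=1e-4)
model.summary()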

Example 4: get_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import rmsprop [as alias]
def get_optimizer(opt_params, lr):
    """Helper to get optimizer from text params"""
    if opt_params['opt_func'] == 'sgd':
        return SGD(lr=lr, momentum=opt_params['momentum'])
    elif opt_params['opt_func'] == 'adam':
        return Adam(lr=lr)
    elif opt_params['opt_func'] == 'rmsprop':
        return rmsprop(lr=lr)
    else:
        raise ValueError 
Developer: developmentseed, Project: ml-hv-grid-pub, Lines: 12, Source: train_xcept.py
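A hedged usage sketch of this helper; the dictionary keys follow the code above, and the learning rates are illustrative:

# Hypothetical parameter dictionaries using the keys read by get_optimizer
opt = get_optimizer({'opt_func': 'rmsprop'}, lr=1e-4)               # rmsprop(lr=1e-4)
opt = get_optimizer({'opt_func': 'sgd', 'momentum': 0.9}, lr=1e-2)  # SGD with momentum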

Example 5: subactivity_train_lstm

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import rmsprop [as alias]
def subactivity_train_lstm(metadata_root):
    nb_epoch = 100
    nb_classes = 10
    batch_size = 32
    train_path = metadata_root + 'data/train'
    val_path = metadata_root + 'data/val'
    model_path = metadata_root + 'models/cnn/'
    x_train_path = train_path + '/subactivity_lstm_feature_train.npy'
    x_val_path = val_path + '/subactivity_lstm_feature_val.npy'
    y_train_path = train_path + '/subactivity_lstm_gt_train.npy'
    y_val_path = val_path + '/subactivity_lstm_gt_val.npy'
    model_name = 'subactivity_lstm_epoch_100_sequencelen_50.h5'
    print 'loading the data'
    x_train = np.load(x_train_path)
    x_val = np.load(x_val_path)
    y_train = np.load(y_train_path)
    y_val = np.load(y_val_path)
    print 'successful initializing the model'
    final_model = lstm_model(x_train.shape[2], max_len=50)
    optimizer = rmsprop(lr=0.001)
    print 'compiling'
    final_model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    print 'saving the model figure'
    plot(final_model, to_file=model_path + model_name[:-3] + '.png', show_shapes=True)
    print 'fitting'
    final_model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                    validation_data=(x_val, y_val))
    final_model.save(model_path + model_name) 
Developer: SiyuanQi, Project: grammar-activity-prediction, Lines: 30, Source: vgg_fine_tune.py

Example 6: get_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import rmsprop [as alias]
def get_optimizer(opt_params, lr):
    """Helper to get optimizer from text params

    Parameters
    ----------
    opt_params: dict
        Dictionary containing optimization function name and learning rate decay
    lr:  float
        Initial learning rate

    Return
    ------
    opt_function: Keras optimizer
    """

    if opt_params['opt_func'] == 'sgd':
        return SGD(lr=lr, momentum=opt_params['momentum'])
    elif opt_params['opt_func'] == 'adam':
        return Adam(lr=lr)
    elif opt_params['opt_func'] == 'rmsprop':
        return rmsprop(lr=lr)
    elif opt_params['opt_func'] == 'nadam':
        return Nadam(lr=lr)
    elif opt_params['opt_func'] == 'powersign':
        from tensorflow.contrib.opt.python.training import sign_decay as sd
        d_steps = opt_params['pwr_sign_decay_steps']
        # Define the decay function (if specified)
        if opt_params['pwr_sign_decay_func'] == 'lin':
            decay_func = sd.get_linear_decay_fn(d_steps)
        elif opt_params['pwr_sign_decay_func'] == 'cos':
            decay_func = sd.get_cosine_decay_fn(d_steps)
        elif opt_params['pwr_sign_decay_func'] == 'res':
            decay_func = sd.get_restart_decay_fn(d_steps,
                                                 num_periods=opt_params['pwr_sign_decay_periods'])
        elif opt_params['pwr_sign_decay_func'] is None:
            decay_func = None
        else:
            raise ValueError('decay function not specified correctly')

        # Use decay function in TF optimizer
        return TFOptimizer(PowerSignOptimizer(learning_rate=lr,
                                              sign_decay_fn=decay_func))
    else:
        raise ValueError 
Developer: developmentseed, Project: ml-hv-grid-pub, Lines: 46, Source: train_xcept.py

Example 7: test_loading_weights_by_name_and_reshape

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import rmsprop [as alias]
def test_loading_weights_by_name_and_reshape():
    """
    test loading model weights by name on:
        - sequential model
    """

    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = losses.mse

    # sequential model
    model = Sequential()
    model.add(Conv2D(2, (1, 1), input_shape=(1, 1, 1), name='rick'))
    model.add(Flatten())
    model.add(Dense(3, name='morty'))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 1, 1, 1))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    old_weights = [layer.get_weights() for layer in model.layers]
    _, fname = tempfile.mkstemp('.h5')

    model.save_weights(fname)

    # delete and recreate model
    del(model)
    model = Sequential()
    model.add(Conv2D(2, (1, 1), input_shape=(1, 1, 1), name='rick'))
    model.add(Conv2D(3, (1, 1), name='morty'))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    # load weights from first model
    with pytest.raises(ValueError):
        model.load_weights(fname, by_name=True, reshape=False)
    with pytest.raises(ValueError):
        model.load_weights(fname, by_name=False, reshape=False)
    model.load_weights(fname, by_name=False, reshape=True)
    model.load_weights(fname, by_name=True, reshape=True)
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(np.squeeze(out), np.squeeze(out2), atol=1e-05)
    for i in range(len(model.layers)):
        new_weights = model.layers[i].get_weights()
        for j in range(len(new_weights)):
            # only compare layers that have weights, skipping Flatten()
            if old_weights[i]:
                assert_allclose(old_weights[i][j], new_weights[j], atol=1e-05) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 53, Source: test_model_saving.py

Example 8: test_loading_weights_by_name_2

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import rmsprop [as alias]
def test_loading_weights_by_name_2():
    """
    test loading model weights by name on:
        - both sequential and functional api models
        - different architecture with shared names
    """

    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = losses.mse

    # sequential model
    model = Sequential()
    model.add(Dense(2, input_shape=(3,), name='rick'))
    model.add(Dense(3, name='morty'))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    old_weights = [layer.get_weights() for layer in model.layers]
    _, fname = tempfile.mkstemp('.h5')

    model.save_weights(fname)

    # delete and recreate model using Functional API
    del(model)
    data = Input(shape=(3,))
    rick = Dense(2, name='rick')(data)
    jerry = Dense(3, name='jerry')(rick)  # add 2 layers (but maintain shapes)
    jessica = Dense(2, name='jessica')(jerry)
    morty = Dense(3, name='morty')(jessica)

    model = Model(inputs=[data], outputs=[morty])
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    # load weights from first model
    model.load_weights(fname, by_name=True)
    os.remove(fname)

    out2 = model.predict(x)
    assert np.max(np.abs(out - out2)) > 1e-05

    rick = model.layers[1].get_weights()
    jerry = model.layers[2].get_weights()
    jessica = model.layers[3].get_weights()
    morty = model.layers[4].get_weights()

    assert_allclose(old_weights[0][0], rick[0], atol=1e-05)
    assert_allclose(old_weights[0][1], rick[1], atol=1e-05)
    assert_allclose(old_weights[1][0], morty[0], atol=1e-05)
    assert_allclose(old_weights[1][1], morty[1], atol=1e-05)
    assert_allclose(np.zeros_like(jerry[1]), jerry[1])  # biases init to 0
    assert_allclose(np.zeros_like(jessica[1]), jessica[1])  # biases init to 0 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 58, Source: test_model_saving.py

Example 9: test_loading_weights_by_name_skip_mismatch

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import rmsprop [as alias]
def test_loading_weights_by_name_skip_mismatch():
    """
    test skipping layers while loading model weights by name on:
        - sequential model
    """

    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = losses.mse

    # sequential model
    model = Sequential()
    model.add(Dense(2, input_shape=(3,), name='rick'))
    model.add(Dense(3, name='morty'))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    old_weights = [layer.get_weights() for layer in model.layers]
    _, fname = tempfile.mkstemp('.h5')

    model.save_weights(fname)

    # delete and recreate model
    del(model)
    model = Sequential()
    model.add(Dense(2, input_shape=(3,), name='rick'))
    model.add(Dense(4, name='morty'))  # different shape w.r.t. previous model
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    # load weights from first model
    with pytest.warns(UserWarning):  # expect UserWarning for skipping weights
        model.load_weights(fname, by_name=True, skip_mismatch=True)
    os.remove(fname)

    # assert layers 'rick' are equal
    for old, new in zip(old_weights[0], model.layers[0].get_weights()):
        assert_allclose(old, new, atol=1e-05)

    # assert layers 'morty' are not equal, since we skipped loading this layer
    for old, new in zip(old_weights[1], model.layers[1].get_weights()):
        assert_raises(AssertionError, assert_allclose, old, new, atol=1e-05)


Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 50, Source: test_model_saving.py


Note: The keras.optimizers.rmsprop method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers, and copyright of the source code remains with the original authors; please consult each project's License before redistributing or using it, and do not reproduce this article without permission.