

Python optimizers.RMSprop Method Code Examples

This article collects typical usage examples of the keras.optimizers.RMSprop method in Python. If you are wondering what the concrete usage of optimizers.RMSprop looks like, how to call it, or what real code using it looks like, the curated examples below may help. You can also explore further usage examples for the keras.optimizers module that this method belongs to.


Below are 15 code examples of the optimizers.RMSprop method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
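Before the examples, here is a minimal compile-and-train sketch of the typical RMSprop usage pattern. It assumes the older standalone Keras API in which RMSprop accepts an `lr` argument, as all of the examples below do; recent tf.keras releases name this argument `learning_rate`. The layer sizes and data shapes are placeholder values chosen only for illustration.

# Minimal sketch: compiling a Keras model with RMSprop (older Keras API with `lr`).
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import RMSprop

model = Sequential()
model.add(Dense(64, activation='relu', input_dim=100))   # placeholder architecture
model.add(Dense(10, activation='softmax'))

# rho is the decay rate of the squared-gradient moving average;
# epsilon is a small constant added for numerical stability.
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-7)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

# Train on random placeholder data just to show the call signature.
x = np.random.random((32, 100))
y = np.eye(10)[np.random.randint(0, 10, size=32)]
model.fit(x, y, epochs=1, batch_size=32)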

Example 1: optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        prediction = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(prediction * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # build a neural network with the state as input and Q-values as output
Developer: rlcode, Project: reinforcement-learning-kr, Lines: 23, Source: breakout_dqn.py

Example 2: actor_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def actor_optimizer(self):
        action = K.placeholder(shape=[None, self.action_size])
        advantages = K.placeholder(shape=[None, ])

        policy = self.actor.output

        # policy cross-entropy loss
        action_prob = K.sum(action * policy, axis=1)
        cross_entropy = K.log(action_prob + 1e-10) * advantages
        cross_entropy = -K.sum(cross_entropy)

        # entropy term to keep encouraging exploration
        entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)
        entropy = K.sum(entropy)

        # add the two terms to form the final loss
        loss = cross_entropy + 0.01 * entropy

        optimizer = RMSprop(lr=self.actor_lr, rho=0.99, epsilon=0.01)
        updates = optimizer.get_updates(self.actor.trainable_weights, [], loss)
        train = K.function([self.actor.input, action, advantages],
                           [loss], updates=updates)
        return train

    # function that updates the value network
Developer: rlcode, Project: reinforcement-learning-kr, Lines: 27, Source: breakout_a3c.py

Example 3: optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using Convolution Neural Network
    # state is input and Q Value of each action is output of network 
Developer: rlcode, Project: reinforcement-learning, Lines: 24, Source: breakout_ddqn.py

Example 4: optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using Convolution Neural Network
    # state is input and Q Value of each action is output of network 
Developer: rlcode, Project: reinforcement-learning, Lines: 24, Source: breakout_dqn.py

Example 5: actor_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def actor_optimizer(self):
        action = K.placeholder(shape=[None, self.action_size])
        advantages = K.placeholder(shape=[None, ])

        policy = self.actor.output

        good_prob = K.sum(action * policy, axis=1)
        eligibility = K.log(good_prob + 1e-10) * advantages
        actor_loss = -K.sum(eligibility)

        entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)
        entropy = K.sum(entropy)

        loss = actor_loss + 0.01*entropy
        optimizer = RMSprop(lr=self.actor_lr, rho=0.99, epsilon=0.01)
        updates = optimizer.get_updates(self.actor.trainable_weights, [], loss)
        train = K.function([self.actor.input, action, advantages], [loss], updates=updates)

        return train

    # make loss function for Value approximation 
Developer: rlcode, Project: reinforcement-learning, Lines: 23, Source: breakout_a3c.py

Example 6: optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using Convolution Neural Network
    # state is input and Q Value of each action is output of network
    # dueling network's Q Value is sum of advantages and state value 
Developer: rlcode, Project: reinforcement-learning, Lines: 25, Source: breakout_dueling_ddqn.py

Example 7: SiameseNetwork

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def SiameseNetwork(input_shape=(5880,)):
    base_network = create_base_network(input_shape)

    input_a = Input(shape=input_shape)
    input_b = Input(shape=input_shape)

    processed_a = base_network(input_a)
    processed_b = base_network(input_b)

    distance = Lambda(euclidean_distance,
                  output_shape=eucl_dist_output_shape)([processed_a, processed_b])

    model = Model([input_a, input_b], distance)

    rms = RMSprop()
    model.compile(loss=contrastive_loss, optimizer=rms, metrics=[accuracy])
    
    return model, base_network 
Developer: ericzhao28, Project: DogEmbeddings, Lines: 20, Source: siamese.py

Example 8: buildmodel

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def buildmodel():
	print("Model building begins")

	model = Sequential()
	keras.initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=None)

	S = Input(shape = (IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS, ), name = 'Input')
	h0 = Convolution2D(16, kernel_size = (8,8), strides = (4,4), activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform')(S)
	h1 = Convolution2D(32, kernel_size = (4,4), strides = (2,2), activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform')(h0)
	h2 = Flatten()(h1)
	h3 = Dense(256, activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h2)
	P = Dense(1, name = 'o_P', activation = 'sigmoid', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h3)
	V = Dense(1, name = 'o_V', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h3)

	model = Model(inputs = S, outputs = [P,V])
	rms = RMSprop(lr = LEARNING_RATE, rho = 0.99, epsilon = 0.1)
	model.compile(loss = {'o_P': logloss, 'o_V': sumofsquares}, loss_weights = {'o_P': 1., 'o_V' : 0.5}, optimizer = rms)
	return model

#function to preprocess an image before giving as input to the neural network 
Developer: shalabhsingh, Project: A3C_Keras_FlappyBird, Lines: 22, Source: train_network.py

Example 9: model_masking

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def model_masking(discrete_time, init_alpha, max_beta):
    model = Sequential()

    model.add(Masking(mask_value=mask_value,
                      input_shape=(n_timesteps, n_features)))
    model.add(TimeDistributed(Dense(2)))
    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete', reduce_loss=False).loss_function
    else:
        loss = wtte.loss(kind='continuous', reduce_loss=False).loss_function

    model.compile(loss=loss, optimizer=RMSprop(
        lr=lr), sample_weight_mode='temporal')
    return model 
Developer: ragulpr, Project: wtte-rnn, Lines: 19, Source: test_keras.py

Example 10: test_train

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def test_train(self):
        train = pd.read_csv("/input/tests/data/train.csv")

        x_train = train.iloc[:,1:].values.astype('float32')
        y_train = to_categorical(train.iloc[:,0].astype('int32'))

        model = Sequential()
        model.add(Dense(units=10, input_dim=784, activation='softmax'))

        model.compile(
            loss='categorical_crossentropy',
            optimizer=RMSprop(lr=0.001),
            metrics=['accuracy'])

        model.fit(x_train, y_train, epochs=1, batch_size=32)

    # Uses convnet which depends on libcudnn when running on GPU 
Developer: Kaggle, Project: docker-python, Lines: 19, Source: test_keras.py

Example 11: __init__

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def __init__(self, action_space, batch_size=32, screen=(84, 84), swap_freq=200):
        from keras.optimizers import RMSprop
        # -----
        self.screen = screen
        self.input_depth = 1
        self.past_range = 3
        self.observation_shape = (self.input_depth * self.past_range,) + self.screen
        self.batch_size = batch_size

        self.action_value = build_network(self.observation_shape, action_space.n)
        self.action_value.compile(optimizer=RMSprop(clipnorm=1.), loss='mse')

        self.losses = deque(maxlen=25)
        self.q_values = deque(maxlen=25)
        self.swap_freq = swap_freq
        self.swap_counter = self.swap_freq
        self.unroll = np.arange(self.batch_size)
        self.frames = 0 
Developer: Grzego, Project: async-rl, Lines: 20, Source: train.py

Example 12: __init__

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def __init__(self, action_space, batch_size=32, screen=(84, 84), swap_freq=200):
        from keras.optimizers import RMSprop		
        # -----
        self.screen = screen
        self.input_depth = 1
        self.past_range = 3
        self.observation_shape = (self.input_depth * self.past_range,) + self.screen
        self.batch_size = batch_size

        _, _, self.train_net, adventage = build_network(self.observation_shape, action_space.n)

        self.train_net.compile(optimizer=RMSprop(epsilon=0.1, rho=0.99),
                               loss=[value_loss(), policy_loss(adventage, args.beta)])

        self.pol_loss = deque(maxlen=25)
        self.val_loss = deque(maxlen=25)
        self.values = deque(maxlen=25)
        self.entropy = deque(maxlen=25)
        self.swap_freq = swap_freq
        self.swap_counter = self.swap_freq
        self.unroll = np.arange(self.batch_size)
        self.targets = np.zeros((self.batch_size, action_space.n))
        self.counter = 0 
Developer: Grzego, Project: async-rl, Lines: 25, Source: train.py

Example 13: get_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def get_optimizer(args):

	clipvalue = 0
	clipnorm = 10

	if args.algorithm == 'rmsprop':
		optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'sgd':
		optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adagrad':
		optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adadelta':
		optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adam':
		optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adamax':
		optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	
	return optimizer 
Developer: madrugado, Project: Attention-Based-Aspect-Extraction, Lines: 21, Source: optimizers.py

Example 14: get_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def get_optimizer(config):
    if config.OPTIMIZER == 'SGD':
        return SGD(lr=config.LEARNING_RATE, momentum=config.LEARNING_MOMENTUM, clipnorm=config.GRADIENT_CLIP_NORM, nesterov=config.NESTEROV)
    elif config.OPTIMIZER == 'RMSprop':
        return RMSprop(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adagrad':
        return Adagrad(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adadelta':
        return Adadelta(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adam':
        return Adam(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM, amsgrad=config.AMSGRAD)
    elif config.OPTIMIZER == 'Adamax':
        return Adamax(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Nadam':
        return Nadam(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    else:
        raise Exception('Unrecognized optimizer: {}'.format(config.OPTIMIZER)) 
Developer: nearthlab, Project: image-segmentation, Lines: 19, Source: trainer.py

Example 15: fit_new

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import RMSprop [as alias]
def fit_new(self, x, y=None):
        timesteps = x.shape[1]
        input_dim = x.shape[2]
        self.ae = Sequential()
        self.ae.add(Dense(self.latent_dim,
                    input_shape=(timesteps,input_dim,),
                    activation='relu',
                    name='enc'))
        self.ae.add(Dropout(0.2))
        self.ae.add(Dense(input_dim,
                    activation='softmax',
                    name='dec'))

        self.encoder = Model(inputs=self.ae.input,
                             outputs=self.ae.get_layer('enc').output)
        #rmsprop = RMSprop(lr=0.05)
        self.ae.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['categorical_accuracy'],)
        self.ae.fit(x, x, epochs=1) 
Developer: plastering, Project: plastering, Lines: 22, Source: ir2tagsets_seq.py


Note: The keras.optimizers.RMSprop method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.