

Python optimizers.Adagrad Method Code Examples

This article collects typical usage examples of the keras.optimizers.Adagrad method in Python. If you are wondering how exactly optimizers.Adagrad works, how to use it, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples from the keras.optimizers module to which this method belongs.


The following presents 15 code examples of the optimizers.Adagrad method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
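
Before the individual examples, here is a minimal sketch of the pattern they all share: construct an Adagrad instance (optionally overriding the learning rate, epsilon, and gradient clipping) and pass it to model.compile. The toy model, layer sizes, and hyperparameter values below are illustrative assumptions, not code taken from any of the listed projects.

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adagrad

# Toy two-layer model; the sizes are arbitrary.
model = Sequential()
model.add(Dense(16, activation='relu', input_shape=(8,)))
model.add(Dense(1, activation='sigmoid'))

# Adagrad with an explicit learning rate, epsilon, and gradient-norm clipping,
# mirroring the settings that recur in the examples below.
optimizer = Adagrad(lr=0.01, epsilon=1e-06, clipnorm=10.)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])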

Example 1: fit

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, feature_weights=None):
        print('Training autoencoder')
        optimizer = Adadelta(lr=1.5)
        # optimizer = Adam()
        # optimizer = Adagrad()
        if feature_weights is None:
            self.autoencoder.compile(optimizer=optimizer, loss='binary_crossentropy') # kld, binary_crossentropy, mse
        else:
            print('Using weighted loss')
            self.autoencoder.compile(optimizer=optimizer, loss=weighted_binary_crossentropy(feature_weights)) # kld, binary_crossentropy, mse

        self.autoencoder.fit(train_X[0], train_X[1],
                        nb_epoch=nb_epoch,
                        batch_size=batch_size,
                        shuffle=True,
                        validation_data=(val_X[0], val_X[1]),
                        callbacks=[
                                    ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                                    EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=1, mode='auto'),
                                    # ModelCheckpoint(self.model_save_path, monitor='val_loss', save_best_only=True, verbose=0),
                        ]
                        )

        return self 
Developer: hugochan, Project: KATE, Lines of code: 26, Source file: deepae.py

Example 2: get_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def get_optimizer(args):

	clipvalue = 0
	clipnorm = 10

	if args.algorithm == 'rmsprop':
		optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'sgd':
		optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adagrad':
		optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adadelta':
		optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adam':
		optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adamax':
		optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	
	return optimizer 
Developer: madrugado, Project: Attention-Based-Aspect-Extraction, Lines of code: 21, Source file: optimizers.py
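
A hedged usage sketch for the function above: the original project builds `args` with argparse, so the SimpleNamespace below is only a stand-in for the parsed command-line arguments, and the `opt` alias is assumed to be `import keras.optimizers as opt` as indicated in the comment block above.

from types import SimpleNamespace
import keras.optimizers as opt  # the alias used inside get_optimizer

args = SimpleNamespace(algorithm='adagrad')
optimizer = get_optimizer(args)
# -> opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=10, clipvalue=0)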

Example 3: get_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def get_optimizer(config):
    if config.OPTIMIZER == 'SGD':
        return SGD(lr=config.LEARNING_RATE, momentum=config.LEARNING_MOMENTUM, clipnorm=config.GRADIENT_CLIP_NORM, nesterov=config.NESTEROV)
    elif config.OPTIMIZER == 'RMSprop':
        return RMSprop(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adagrad':
        return Adagrad(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adadelta':
        return Adadelta(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adam':
        return Adam(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM, amsgrad=config.AMSGRAD)
    elif config.OPTIMIZER == 'Adamax':
        return Adamax(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Nadam':
        return Nadam(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    else:
        raise Exception('Unrecognized optimizer: {}'.format(config.OPTIMIZER)) 
Developer: nearthlab, Project: image-segmentation, Lines of code: 19, Source file: trainer.py
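
A hedged usage sketch for the config-driven variant above: the Config class below is only a stand-in for the project's real configuration object, defining just the attributes needed by the Adagrad branch.

class Config:
    OPTIMIZER = 'Adagrad'
    LEARNING_RATE = 0.01
    GRADIENT_CLIP_NORM = 5.0

optimizer = get_optimizer(Config())  # -> Adagrad(lr=0.01, clipnorm=5.0)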

Example 4: get_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def get_optimizer(name='Adadelta'):
    if name == 'SGD':
        return optimizers.SGD(clipnorm=1.)
    if name == 'RMSprop':
        return optimizers.RMSprop(clipnorm=1.)
    if name == 'Adagrad':
        return optimizers.Adagrad(clipnorm=1.)
    if name == 'Adadelta':
        return optimizers.Adadelta(clipnorm=1.)
    if name == 'Adam':
        return optimizers.Adam(clipnorm=1.)
    if name == 'Adamax':
        return optimizers.Adamax(clipnorm=1.)
    if name == 'Nadam':
        return optimizers.Nadam(clipnorm=1.)

    return optimizers.Adam(clipnorm=1.) 
Developer: ClimbsRocks, Project: auto_ml, Lines of code: 19, Source file: utils_models.py

Example 5: get_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def get_optimizer(args):

	clipvalue = 0
	clipnorm = 10

	if args.algorithm == 'rmsprop':
		optimizer = opt.RMSprop(lr=0.0001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'sgd':
		optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adagrad':
		optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adadelta':
		optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adam':
		optimizer = opt.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adamax':
		optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	
	return optimizer 
Developer: ruidan, Project: IMN-E2E-ABSA, Lines of code: 21, Source file: optimizers.py

Example 6: optimizors

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def optimizors(random_optimizor):
    if random_optimizor:
        i = random.randint(0, 4)  # randint(0, 4) reaches all five optimizer branches below
        if i==0:
            opt = optimizers.SGD()
        elif i==1:
            opt= optimizers.RMSprop()
        elif i==2:
            opt= optimizers.Adagrad()
        elif i==3:
            opt = optimizers.Adam()
        elif i==4:
            opt =optimizers.Nadam()
        print(opt)
    else:
        opt= optimizers.Adam()
    return opt 
Developer: kk7nc, Project: RMDL, Lines of code: 19, Source file: BuildModel.py

Example 7: get_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def get_optimizer(args):

	clipvalue = 0
	clipnorm = 10

	if args.algorithm == 'rmsprop':
		optimizer = opt.RMSprop(lr=0.0005, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'sgd':
		optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adagrad':
		optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adadelta':
		optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adam':
		optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adamax':
		optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	
	return optimizer 
Developer: ruidan, Project: DAS, Lines of code: 21, Source file: optimizers.py

Example 8: get_learning_rate

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def get_learning_rate(self):

        if hasattr(self.model, 'optimizer'):
            config = self.model.optimizer.get_config()

            from keras.optimizers import Adadelta, Adam, Adamax, Adagrad, RMSprop, SGD

            if isinstance(self.model.optimizer, Adadelta) or isinstance(self.model.optimizer, Adam) \
                    or isinstance(self.model.optimizer, Adamax) or isinstance(self.model.optimizer, Adagrad)\
                    or isinstance(self.model.optimizer, RMSprop) or isinstance(self.model.optimizer, SGD):
                return config['lr'] * (1. / (1. + config['decay'] * float(K.get_value(self.model.optimizer.iterations))))

            elif 'lr' in config:
                return config['lr'] 
Developer: aetros, Project: aetros-cli, Lines of code: 16, Source file: KerasCallback.py
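
The expression above is Keras' built-in time-based decay, lr_t = lr / (1 + decay * t). As a standalone sketch (assuming a model that has already been compiled with one of the listed optimizers), the same effective learning rate can be computed like this:

from keras import backend as K

def effective_lr(model):
    cfg = model.optimizer.get_config()
    t = float(K.get_value(model.optimizer.iterations))
    # lr_t = lr / (1 + decay * t)
    return cfg['lr'] * (1. / (1. + cfg.get('decay', 0.) * t))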

Example 9: create_model

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def create_model(input_shape, optimizer='Adagrad', learn_rate=None, decay=0.0, momentum=0.0, activation='relu', dropout_rate=0.5):
    logging.debug('input_shape {}'.format(input_shape))
    logging.debug('input_shape {}'.format(type(input_shape)))

    # input_shape = (7, 7, 512)

    # Optimizer
    optimizer, learn_rate = get_optimizer(optimizer, learn_rate, decay, momentum)


    # Model
    model = Sequential()
    model.add(Flatten(input_shape=input_shape))

    model.add(Dense(256, activation=activation))
    model.add(Dropout(dropout_rate))
    model.add(Dense(len(class_names), activation='softmax'))                                        # Binary to Multi classification changes
    # model.add(Dense(1, activation='sigmoid'))

    logging.debug('model summary {}'.format(model.summary()))


    # Compile
    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy', metrics=['accuracy'])                     # Binary to Multi classification changes

    logging.info('optimizer:{}  learn_rate:{}  decay:{}  momentum:{}  activation:{}  dropout_rate:{}'.format(
        optimizer, learn_rate, decay, momentum, activation, dropout_rate))

    return model 
Developer: abhishekrana, Project: DeepFashion, Lines of code: 32, Source file: train_multi_v2.py

Example 10: test_adagrad

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def test_adagrad(self):
        print('test Adagrad')
        self.assertTrue(_test_optimizer(Adagrad())) 
Developer: lllcho, Project: CAPTCHA-breaking, Lines of code: 5, Source file: test_optimizers.py

Example 11: buildnetwork

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def buildnetwork(self):
        model = Sequential()
        model.add(LSTM(20, dropout_W=0.2, input_shape=(self.seq_len, self.n_feature)))  # dropout_W is the Keras 1 keyword; the Keras 2 equivalent is commented out below
        #model.add(LSTM(20, dropout=0.2, input_shape=(int(self.seq_len), int(self.n_feature))))
        model.add(Dense(1, activation=None))
        model.compile(loss='mean_squared_error', optimizer=Adagrad(lr=0.002,clipvalue=10), metrics=['mean_squared_error'])

        return model 
Developer: doncat99, Project: StockRecommendSystem, Lines of code: 10, Source file: agent.py

Example 12: test_adagrad

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def test_adagrad():
    _test_optimizer(optimizers.Adagrad())
    _test_optimizer(optimizers.Adagrad(decay=1e-3)) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 5, Source file: optimizers_test.py

Example 13: lr_normalizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def lr_normalizer(lr, optimizer):
    """Assuming a default learning rate 1, rescales the learning rate
    such that learning rates amongst different optimizers are more or less
    equivalent.

    Parameters
    ----------
    lr : float
        The learning rate.
    optimizer : keras optimizer
        The optimizer. For example, Adagrad, Adam, RMSprop.
    """

    from keras.optimizers import SGD, Adam, Adadelta, Adagrad, Adamax, RMSprop
    from keras.optimizers import Nadam
    from talos.utils.exceptions import TalosModelError

    if optimizer == Adadelta:
        pass
    elif optimizer == SGD or optimizer == Adagrad:
        lr /= 100.0
    elif optimizer == Adam or optimizer == RMSprop:
        lr /= 1000.0
    elif optimizer == Adamax or optimizer == Nadam:
        lr /= 500.0
    else:
        raise TalosModelError(str(optimizer) + " is not supported by lr_normalizer")

    return lr 
Developer: autonomio, Project: talos, Lines of code: 31, Source file: normalizers.py
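
For example, assuming the lr_normalizer defined above is in scope, passing the same nominal learning rate with different optimizer classes yields the usual per-optimizer magnitudes:

from keras.optimizers import Adadelta, Adagrad, Adam

lr_normalizer(1.0, Adadelta)  # -> 1.0   (unchanged)
lr_normalizer(1.0, Adagrad)   # -> 0.01  (divided by 100)
lr_normalizer(1.0, Adam)      # -> 0.001 (divided by 1000)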

Example 14: compile_model

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def compile_model(self, loss_name, opt=None):
        print("loss function: ", loss_name)
        print("optimizer: ", opt.optimizer)
        print("learning_rate: ", opt.lr)
        if loss_name == 'mse':
            loss = loss_name

        clipnorm = opt.clipnorm
        optimizer = opt.optimizer
        learning_rate = opt.lr
        if optimizer == 'sgd':
            # let's train the model using SGD + momentum (how original).
            if clipnorm > 0:
                sgd = SGD(lr=learning_rate, clipnorm=clipnorm, decay=1e-6, momentum=0.9, nesterov=True)
            else:
                sgd = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss=loss, optimizer=sgd)
        elif optimizer == 'rmsprop':
            if clipnorm > 0:
                rmsprop = RMSprop(lr=learning_rate, clipnorm=clipnorm, rho=0.9, epsilon=1e-6)
            else:
                rmsprop = RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-6)
            self.model.compile(loss=loss, optimizer=rmsprop)
        elif optimizer == 'adagrad':
            if clipnorm > 0:
                adagrad = Adagrad(lr=learning_rate, clipnorm=clipnorm, epsilon=1e-06)
            else:
                adagrad = Adagrad(lr=learning_rate, epsilon=1e-06)
            self.model.compile(loss=loss, optimizer=adagrad)
        elif optimizer == 'adma':  # 'adma' is kept as spelled in the original source; it appears to mean 'adam'
            if clipnorm > 0:
                adma = Adam(lr=learning_rate, clipnorm=clipnorm, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
            else:
                adma = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
            self.model.compile(loss=loss, optimizer=adma) 
Developer: danieljf24, Project: w2vv, Lines of code: 37, Source file: w2vv.py

Example 15: set_learner

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adagrad [as alias]
def set_learner(model, learning_rate, learner):

    if learner.lower() == "adagrad":
        model.compile(optimizer=Adagrad(lr=learning_rate), loss='binary_crossentropy')
    elif learner.lower() == "rmsprop":
        model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')
    elif learner.lower() == "adam":
        model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy')
    else:
        model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy')

    return model 
Developer: MaurizioFD, Project: RecSys2019_DeepLearning_Evaluation, Lines of code: 14, Source file: NeuMF_RecommenderWrapper.py
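
A minimal usage sketch: build_model() below is a placeholder for however the NeuMF model is actually constructed in the original project.

model = set_learner(build_model(), learning_rate=0.01, learner="Adagrad")
# The learner check is case-insensitive, so "Adagrad" and "adagrad" both compile
# the model with Adagrad(lr=0.01) and a binary cross-entropy loss.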


Note: The keras.optimizers.Adagrad examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code; do not republish without permission.