Python optimizers.Nadam Method Code Examples

This article collects typical usage examples of the Python method keras.optimizers.Nadam. If you are unsure how optimizers.Nadam is used in practice, the curated code examples below should help. You can also explore further usage examples from its containing module, keras.optimizers.


The following presents 15 code examples of the optimizers.Nadam method, sorted by popularity by default.
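Before diving in, here is a minimal, self-contained sketch of the pattern most of these examples share: construct a Nadam instance and pass it to model.compile. It assumes standalone Keras 2.x, matching the examples below:

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Nadam

# A toy classifier; 0.002 is the documented Keras 2.x default rate for Nadam.
model = Sequential()
model.add(Dense(10, activation='relu', input_shape=(4,)))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer=Nadam(lr=0.002),
              loss='categorical_crossentropy',
              metrics=['accuracy'])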

Example 1: get_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def get_optimizer(config):
    if config.OPTIMIZER == 'SGD':
        return SGD(lr=config.LEARNING_RATE, momentum=config.LEARNING_MOMENTUM, clipnorm=config.GRADIENT_CLIP_NORM, nesterov=config.NESTEROV)
    elif config.OPTIMIZER == 'RMSprop':
        return RMSprop(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adagrad':
        return Adagrad(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adadelta':
        return Adadelta(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adam':
        return Adam(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM, amsgrad=config.AMSGRAD)
    elif config.OPTIMIZER == 'Adamax':
        return Adamax(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Nadam':
        return Nadam(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    else:
        raise Exception('Unrecognized optimizer: {}'.format(config.OPTIMIZER)) 
Developer: nearthlab, Project: image-segmentation, Lines: 19, Source: trainer.py
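As a usage sketch, get_optimizer in Example 1 might be driven as follows; the Config class here is hypothetical, standing in for whatever configuration object the project actually defines:

# Hypothetical stand-in for the project's real configuration object.
class Config:
    OPTIMIZER = 'Nadam'
    LEARNING_RATE = 0.002
    GRADIENT_CLIP_NORM = 5.0

optimizer = get_optimizer(Config())  # returns Nadam(lr=0.002, clipnorm=5.0)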

Example 2: get_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def get_optimizer(name='Adadelta'):
    if name == 'SGD':
        return optimizers.SGD(clipnorm=1.)
    if name == 'RMSprop':
        return optimizers.RMSprop(clipnorm=1.)
    if name == 'Adagrad':
        return optimizers.Adagrad(clipnorm=1.)
    if name == 'Adadelta':
        return optimizers.Adadelta(clipnorm=1.)
    if name == 'Adam':
        return optimizers.Adam(clipnorm=1.)
    if name == 'Adamax':
        return optimizers.Adamax(clipnorm=1.)
    if name == 'Nadam':
        return optimizers.Nadam(clipnorm=1.)

    return optimizers.Adam(clipnorm=1.) 
Developer: ClimbsRocks, Project: auto_ml, Lines: 19, Source: utils_models.py

Example 3: iris

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def iris():

    from keras.optimizers import Adam, Nadam
    from keras.losses import logcosh, categorical_crossentropy
    from keras.activations import relu, elu, softmax

    # use a standard Python dictionary to define the parameter boundaries
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16, 32, 64],
         'hidden_layers': [0, 1, 2, 3, 4],
         'batch_size': (2, 30, 10),
         'epochs': [2],
         'dropout': (0, 0.5, 5),
         'weight_regulizer': [None],
         'emb_output_dims': [None],
         'shapes': ['brick', 'triangle', 0.2],
         'optimizer': [Adam, Nadam],
         'losses': [logcosh, categorical_crossentropy],
         'activation': [relu, elu],
         'last_activation': [softmax]}

    return p 
Developer: autonomio, Project: talos, Lines: 24, Source: params.py

Example 4: breast_cancer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def breast_cancer():

    from keras.optimizers import Adam, Nadam, RMSprop
    from keras.losses import logcosh, binary_crossentropy
    from keras.activations import relu, elu, sigmoid

    # then we can go ahead and set the parameter space
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16, 32, 64],
         'hidden_layers': [0, 1, 2],
         'batch_size': (2, 30, 10),
         'epochs': [50, 100, 150],
         'dropout': (0, 0.5, 5),
         'shapes': ['brick', 'triangle', 'funnel'],
         'optimizer': [Adam, Nadam, RMSprop],
         'losses': [logcosh, binary_crossentropy],
         'activation': [relu, elu],
         'last_activation': [sigmoid]}

    return p 
Developer: autonomio, Project: talos, Lines: 22, Source: params.py

Example 5: optimizors

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
# Also requires: import random
def optimizors(random_optimizor):
    if random_optimizor:
        # Pick one of the five optimizers uniformly at random;
        # randint is inclusive on both ends, so every branch is reachable.
        i = random.randint(0, 4)
        if i == 0:
            opt = optimizers.SGD()
        elif i == 1:
            opt = optimizers.RMSprop()
        elif i == 2:
            opt = optimizers.Adagrad()
        elif i == 3:
            opt = optimizers.Adam()
        else:
            opt = optimizers.Nadam()
        print(opt)
    else:
        opt = optimizers.Adam()
    return opt
Developer: kk7nc, Project: RMDL, Lines: 19, Source: BuildModel.py

Example 6: build_network

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def build_network(deepest=False):
    dropout = [0., 0.1, 0.2, 0.3, 0.4]
    conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32) if K._BACKEND == 'theano' else (32, 32, 3))
    output = fractal_net(
        c=3, b=5, conv=conv,
        drop_path=0.15, dropout=dropout,
        deepest=deepest)(input)
    output = Flatten()(output)
    output = Dense(NB_CLASSES, init='he_normal')(output)
    output = Activation('softmax')(output)
    model = Model(input=input, output=output)
    #optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
    #optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM, nesterov=True)
    optimizer = Adam()
    #optimizer = Nadam()
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    plot(model, to_file='model.png', show_shapes=True)
    return model 
Developer: snf, Project: keras-fractalnet, Lines: 21, Source: cifar10_fractal.py

Example 7: build_network

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def build_network(deepest=False):
    dropout = [0., 0.1, 0.2, 0.3, 0.4]
    conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32))
    output = fractal_net(
        c=3, b=5, conv=conv,
        drop_path=0.15, dropout=dropout,
        deepest=deepest)(input)
    output = Flatten()(output)
    output = Dense(NB_CLASSES, init='he_normal')(output)
    output = Activation('softmax')(output)
    model = Model(input=input, output=output)
    optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
    #optimizer = RMSprop(lr=LEARN_START)
    #optimizer = Adam()
    #optimizer = Nadam()
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    plot(model, to_file='model.png')
    return model 
Developer: snf, Project: keras-fractalnet, Lines: 21, Source: cifar100_fractal.py

Example 8: makecnn

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def makecnn(learningrate, regular, decay, channel_number):
    # model structure
    model = Sequential()
    model.add(Conv3D(100, kernel_size=(3, 3, 3), input_shape=(20, 20, 20, channel_number),
                     padding='valid', data_format='channels_last',
                     kernel_initializer='glorot_normal', activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    #model.add(Dropout(0.3))

    model.add(Conv3D(200, kernel_size=(3, 3, 3), padding='valid', data_format='channels_last',
                     kernel_initializer='glorot_normal', activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    #model.add(Dropout(0.3))

    model.add(MaxPooling3D(pool_size=(2, 2, 2), data_format='channels_last'))
    model.add(BatchNormalization(axis=1))
    model.add(Conv3D(400, kernel_size=(3, 3, 3), padding='valid', data_format='channels_last',
                     kernel_initializer='glorot_normal', activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    #model.add(Dropout(0.3))

    model.add(MaxPooling3D(pool_size=(2, 2, 2), data_format='channels_last'))
    model.add(Flatten())
    model.add(Dropout(0.3))
    model.add(Dense(1000, kernel_initializer='glorot_normal', activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))

    model.add(Dense(100, kernel_initializer='glorot_normal', activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))

    model.add(Dense(1, activation='sigmoid', kernel_initializer='glorot_normal',
                    activity_regularizer=l2(regular)))
    nadam = Nadam(lr=learningrate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=decay)
    model.compile(loss='binary_crossentropy', optimizer=nadam,
                  metrics=['accuracy', f1score, precision, recall])
    return model
Developer: kiharalab, Project: DOVE, Lines: 39, Source: Build_Model.py
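A portability note on Example 8: the schedule_decay argument exists only in the standalone Keras 2.x implementation of Nadam. In tf.keras that parameter was removed and lr was renamed learning_rate, so a rough tf.keras equivalent (with no direct counterpart for schedule_decay) would be:

from tensorflow.keras.optimizers import Nadam

# tf.keras signature: no schedule_decay, and the rate is `learning_rate`.
nadam = Nadam(learning_rate=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08)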

Example 9: test_nadam

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def test_nadam():
    _test_optimizer(optimizers.Nadam()) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 4, Source: optimizers_test.py

Example 10: titanic

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def titanic():

    # use a standard Python dictionary to define the parameter boundaries
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16],
         'batch_size': [20, 30, 40],
         'dropout': (0, 0.5, 5),
         'optimizer': ['Adam', 'Nadam'],
         'losses': ['logcosh', 'binary_crossentropy'],
         'activation': ['relu', 'elu'],
         'last_activation': ['sigmoid']}

    return p 
Developer: autonomio, Project: talos, Lines: 15, Source: params.py

Example 11: lr_normalizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def lr_normalizer(lr, optimizer):
    """Assuming a default learning rate 1, rescales the learning rate
    such that learning rates amongst different optimizers are more or less
    equivalent.

    Parameters
    ----------
    lr : float
        The learning rate.
    optimizer : keras optimizer
        The optimizer. For example, Adagrad, Adam, RMSprop.
    """

    from keras.optimizers import SGD, Adam, Adadelta, Adagrad, Adamax, RMSprop
    from keras.optimizers import Nadam
    from talos.utils.exceptions import TalosModelError

    if optimizer == Adadelta:
        pass
    elif optimizer == SGD or optimizer == Adagrad:
        lr /= 100.0
    elif optimizer == Adam or optimizer == RMSprop:
        lr /= 1000.0
    elif optimizer == Adamax or optimizer == Nadam:
        lr /= 500.0
    else:
        raise TalosModelError(str(optimizer) + " is not supported by lr_normalizer")

    return lr 
Developer: autonomio, Project: talos, Lines: 31, Source: normalizers.py
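As a quick usage sketch (assuming the imports above resolve), Nadam falls into the lr / 500 branch, so a nominal rate of 1 maps onto the Keras default for Nadam:

from keras.optimizers import Nadam

# 1.0 / 500.0 == 0.002, the Keras 2.x default learning rate for Nadam.
print(lr_normalizer(1.0, Nadam))  # 0.002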

Example 12: optimizers

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def optimizers(self, optimizers='auto'):

        '''If `optimizers='auto'`, the optimizers are picked
        automatically. Otherwise, pass a list with one or
        more optimizers to be used.
        '''

        if optimizers == 'auto':
            self._append_params('optimizer', [Adam, Nadam, Adadelta, SGD])
        else:
            self._append_params('optimizer', optimizers)
Developer: autonomio, Project: talos, Lines: 13, Source: autoparams.py

Example 13: _compile

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def _compile(self, model, loss_function, optimizer, lr=0.01, decay=0.0, clipnorm=0.0):
        """Compiles a model specified with Keras.

        See https://keras.io/optimizers/ for more info on each optimizer.

        Args:
            model: Keras model object to compile
            loss_function: Keras loss_function object to compile model with
            optimizer (str): the optimizer to use during training
            lr (float): learning rate to use during training
            decay (float): per epoch decay rate
            clipnorm (float): gradient normalization threshold
        """
        # The parameters of these optimizers can be freely tuned.
        if optimizer == 'sgd':
            optimizer_ = optimizers.SGD(lr=lr, decay=decay, clipnorm=clipnorm)
        elif optimizer == 'adam':
            optimizer_ = optimizers.Adam(lr=lr, decay=decay, clipnorm=clipnorm)
        elif optimizer == 'adamax':
            optimizer_ = optimizers.Adamax(lr=lr, decay=decay, clipnorm=clipnorm)
        # It is recommended to leave the parameters of this optimizer at their
        # default values (except the learning rate, which can be freely tuned).
        # This optimizer is usually a good choice for recurrent neural networks
        elif optimizer == 'rmsprop':
            optimizer_ = optimizers.RMSprop(lr=lr, clipnorm=clipnorm)
        # It is recommended to leave the parameters of these optimizers at their
        # default values.
        elif optimizer == 'adagrad':
            optimizer_ = optimizers.Adagrad(clipnorm=clipnorm)
        elif optimizer == 'adadelta':
            optimizer_ = optimizers.Adadelta(clipnorm=clipnorm)
        elif optimizer == 'nadam':
            optimizer_ = optimizers.Nadam(clipnorm=clipnorm)
        else:
            err_msg = "Argument for `optimizer` is invalid, got: {}".format(optimizer)
            LOGGER.error('ValueError %s', err_msg)
            raise ValueError(err_msg)

        model.compile(optimizer=optimizer_, loss=loss_function) 
Developer: BaderLab, Project: saber, Lines: 41, Source: base_model.py

Example 14: __init__

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def __init__(self, model_dict, model_callback=None, preprocessing=None,
                 lr=0.001, loss='mean_squared_error', prefix='', postfix='',
                 method=Nadam, train_file=None, do_transform=True, **kwargs):
        self._model_dict = model_dict
        self.model_name = model_dict['name']
        self._file_name = model_dict.get('file_name', self.model_name)
        self._func = model_dict.get('transformation')
        self._inv_func = model_dict.get('inverse_transformation')
        
        self.name = prefix + self.model_name
        self.postfix = postfix
        
        if train_file is not None:
            self.train_file_name = train_file
        else:
            self.train_file_name = flatten_name(self.name)
            self.train_file_name = self.train_file_name.lower().replace('/', '_')
            self.train_file_name = du.data_dir + self.train_file_name

        self.do_transform = do_transform
        
        self.valid_size = kwargs.get('valid_size', 0.2)
        self.test_size = kwargs.get('test_size', 0.2)
        self.total_size = kwargs.get('total_size', 1.0)

        # Get training data if required
        self.__get_data()
        
        self.model = None
        self.history = None
        self.method = method
        self._model_callback = model_callback
        self.lr = lr
        self.loss = loss
        self._preprocessing = preprocessing 
Developer: Andres-Hernandez, Project: CalibrationNN, Lines: 57, Source: neural_network.py

Example 15: lstm_model

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Nadam [as alias]
def lstm_model(self):
        model = Sequential()
        first = True
        for idx in range(len(self.paras.model['hidden_layers'])):
            if idx == (len(self.paras.model['hidden_layers']) - 1):
                model.add(LSTM(int(self.paras.model['hidden_layers'][idx]), return_sequences=False))
                model.add(Activation(self.paras.model['activation']))
                model.add(Dropout(self.paras.model['dropout']))
            elif first:
                model.add(LSTM(input_shape=(None, int(self.paras.n_features)),
                               units=int(self.paras.model['hidden_layers'][idx]),
                               return_sequences=True))
                model.add(Activation(self.paras.model['activation']))
                model.add(Dropout(self.paras.model['dropout']))
                first = False
            else:
                model.add(LSTM(int(self.paras.model['hidden_layers'][idx]), return_sequences=True))
                model.add(Activation(self.paras.model['activation']))
                model.add(Dropout(self.paras.model['dropout']))

        if self.paras.model['optimizer'] == 'sgd':
            #optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
            optimizer = optimizers.SGD(lr=self.paras.model['learning_rate'], decay=1e-6, momentum=0.9, nesterov=True)
        elif self.paras.model['optimizer'] == 'rmsprop':
            #optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
            optimizer = optimizers.RMSprop(lr=self.paras.model['learning_rate']/10, rho=0.9, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adagrad':
            #optimizer = optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)
            optimizer = optimizers.Adagrad(lr=self.paras.model['learning_rate'], epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adam':
            #optimizer = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
            optimizer = optimizers.Adam(lr=self.paras.model['learning_rate']/10, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adadelta':
            optimizer = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adamax':
            optimizer = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'nadam':
            optimizer = optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
        else:
            optimizer = optimizers.Adam(lr=self.paras.model['learning_rate']/10, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        # output layer
        model.add(Dense(units=self.paras.model['out_layer']))
        model.add(Activation(self.paras.model['out_activation']))
        model.compile(loss=self.paras.model['loss'], optimizer=optimizer, metrics=['accuracy'])

        return model 
Developer: doncat99, Project: StockRecommendSystem, Lines: 49, Source: Stock_Prediction_Model_Stateless_LSTM.py


Note: The keras.optimizers.Nadam examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce without permission.