

Python optimizers.Adam Method Code Examples

This article collects typical usage examples of the keras.optimizers.Adam method in Python. If you are wondering what optimizers.Adam does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also browse further usage examples from the keras.optimizers module, where the method is defined.


The following presents 15 code examples of the optimizers.Adam method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
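Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: construct an Adam optimizer and pass it to model.compile. The layer sizes and hyperparameter values below are illustrative assumptions rather than values taken from any particular example; note that older Keras releases name the learning-rate argument lr, while newer releases (and tf.keras) use learning_rate.

# Minimal usage sketch: the model and hyperparameters are illustrative assumptions
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam

model = Sequential([
    Dense(32, activation='relu', input_dim=10),
    Dense(1, activation='sigmoid'),
])

# lr, beta_1, beta_2 and epsilon are spelled out with commonly used values
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])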

Example 1: build_model

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def build_model(config):
    """Builds the cnn."""
    params = config.model_arch
    get_model = getattr(models, 'get_model_'+str(params['architecture']))
    model = get_model(params)
    #model = model_kenun.build_convnet_model(params)
    # Learning setup
    t_params = config.training_params
    sgd = SGD(lr=t_params["learning_rate"], decay=t_params["decay"],
              momentum=t_params["momentum"], nesterov=t_params["nesterov"])
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    optimizer = eval(t_params['optimizer'])
    metrics = ['mean_squared_error']
    if config.model_arch["final_activation"] == 'softmax':
        metrics.append('categorical_accuracy')
    if t_params['loss_func'] == 'cosine':
        loss_func = eval(t_params['loss_func'])
    else:
        loss_func = t_params['loss_func']
    model.compile(loss=loss_func, optimizer=optimizer, metrics=metrics)

    return model 
Developer: sergiooramas, Project: tartarus, Lines: 24, Source: train.py

Example 2: train_model

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def train_model():
    if cxl_model:
        embedding_matrix = load_embedding()
    else:
        embedding_matrix = {}
    train, label = vocab_train_label(train_path, vocab=vocab, tags=tag, max_chunk_length=length)
    n = np.array(label, dtype=np.float)
    labels = n.reshape((n.shape[0], n.shape[1], 1))
    model = Sequential([
        Embedding(input_dim=len(vocab), output_dim=300, mask_zero=True, input_length=length, weights=[embedding_matrix],
                  trainable=False),
        SpatialDropout1D(0.2),
        Bidirectional(layer=LSTM(units=150, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
        TimeDistributed(Dense(len(tag), activation=relu)),
    ])
    crf_ = CRF(units=len(tag), sparse_target=True)
    model.add(crf_)
    model.compile(optimizer=Adam(), loss=crf_.loss_function, metrics=[crf_.accuracy])
    model.fit(x=np.array(train), y=labels, batch_size=16, epochs=4, callbacks=[RemoteMonitor()])
    model.save(model_path) 
Developer: jtyoui, Project: Jtyoui, Lines: 22, Source: NER.py

Example 3: create_model

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def create_model():
    inputs = Input(shape=(length,), dtype='int32', name='inputs')
    embedding_1 = Embedding(len(vocab), EMBED_DIM, input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1, padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)
    model = Model(input=[inputs], output=[crf_output])
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model 
Developer: jtyoui, Project: Jtyoui, Lines: 18, Source: cnn_rnn_crf.py

Example 4: evaluate

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def evaluate(self, inputs, outputs):
        keras.backend.clear_session()

        X = Input(self.X_train[0].shape)
        co.forward({inputs['in']: X})
        logits = outputs['out'].val
        probs = Activation('softmax')(logits)

        model = Model(inputs=[inputs['in'].val], outputs=[probs])
        model.compile(optimizer=Adam(lr=self.learning_rate),
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        history = model.fit(self.X_train,
                            self.y_train,
                            batch_size=self.batch_size,
                            epochs=self.num_training_epochs,
                            validation_data=(self.X_val, self.y_val))
        results = {'validation_accuracy': history.history['val_accuracy'][-1]}
        return results 
Developer: negrinho, Project: deep_architect, Lines: 22, Source: main_keras.py

Example 5: fit

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def fit(self, X, y):
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
        assert len(X.shape) == 2
        N, d = X.shape

        from keras.models import Sequential
        from keras.layers import Dense
        from keras.optimizers import Adam
        model = Sequential()
        model.add(Dense(10, input_dim=d, activation="relu"))
        model.add(Dense(10, activation="relu"))
        model.add(Dense(1, activation="relu"))
        model.compile(loss="mse", optimizer=Adam(lr=0.005))
        self.model = model

        n_epochs = 100
        self.model.fit(X, y, epochs=n_epochs, verbose=False) 
Developer: ankonzoid, Project: LearningX, Lines: 19, Source: NN_regr.py

Example 6: optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def optimizer(self):
        action = K.placeholder(shape=[None, 5])
        discounted_rewards = K.placeholder(shape=[None, ])
        
        # compute the cross-entropy loss
        action_prob = K.sum(action * self.model.output, axis=1)
        cross_entropy = K.log(action_prob) * discounted_rewards
        loss = -K.sum(cross_entropy)
        
        # build the training function that updates the policy network
        optimizer = Adam(lr=self.learning_rate)
        updates = optimizer.get_updates(self.model.trainable_weights,[],
                                        loss)
        train = K.function([self.model.input, action, discounted_rewards], [],
                           updates=updates)

        return train

    # select an action with the policy network
Developer: rlcode, Project: reinforcement-learning-kr, Lines: 21, Source: reinforce_agent.py

Example 7: load_model

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def load_model(stamp):
	"""
	"""

	json_file = open(stamp+'.json', 'r')
	loaded_model_json = json_file.read()
	json_file.close()
	model = model_from_json(loaded_model_json, {'AttentionWithContext': AttentionWithContext})

	model.load_weights(stamp+'.h5')
	print("Loaded model from disk")

	model.summary()


	adam = Adam(lr=0.001)
	model.compile(loss='binary_crossentropy',
		optimizer=adam,
		metrics=[f1_score])


	return model 
Developer: AlexGidiotis, Project: Document-Classifier-LSTM, Lines: 24, Source: classifier.py

Example 8: optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def optimizer(self):
        action = K.placeholder(shape=[None, 5])
        discounted_rewards = K.placeholder(shape=[None, ])

        # Calculate cross entropy error function
        action_prob = K.sum(action * self.model.output, axis=1)
        cross_entropy = K.log(action_prob) * discounted_rewards
        loss = -K.sum(cross_entropy)

        # create training function
        optimizer = Adam(lr=self.learning_rate)
        updates = optimizer.get_updates(self.model.trainable_weights, [],
                                        loss)
        train = K.function([self.model.input, action, discounted_rewards], [],
                           updates=updates)

        return train

    # get action from policy network 
Developer: rlcode, Project: reinforcement-learning, Lines: 21, Source: reinforce_agent.py

Example 9: actor_optimizer

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def actor_optimizer(self):
        action = K.placeholder(shape=(None, self.action_size))
        advantages = K.placeholder(shape=(None, ))

        policy = self.actor.output

        good_prob = K.sum(action * policy, axis=1)
        eligibility = K.log(good_prob + 1e-10) * K.stop_gradient(advantages)
        loss = -K.sum(eligibility)

        entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)

        actor_loss = loss + 0.01*entropy

        optimizer = Adam(lr=self.actor_lr)
        updates = optimizer.get_updates(self.actor.trainable_weights, [], actor_loss)
        train = K.function([self.actor.input, action, advantages], [], updates=updates)
        return train

    # make loss function for Value approximation 
Developer: rlcode, Project: reinforcement-learning, Lines: 22, Source: cartpole_a3c.py

Example 10: value_distribution_network

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def value_distribution_network(input_shape, num_atoms, action_size, learning_rate):
        """Model Value Distribution

        With States as inputs and output Probability Distributions for all Actions
        """

        state_input = Input(shape=(input_shape)) 
        cnn_feature = Convolution2D(32, 8, 8, subsample=(4,4), activation='relu')(state_input)
        cnn_feature = Convolution2D(64, 4, 4, subsample=(2,2), activation='relu')(cnn_feature)
        cnn_feature = Convolution2D(64, 3, 3, activation='relu')(cnn_feature)
        cnn_feature = Flatten()(cnn_feature)
        cnn_feature = Dense(512, activation='relu')(cnn_feature)

        distribution_list = []
        for i in range(action_size):
            distribution_list.append(Dense(num_atoms, activation='softmax')(cnn_feature))

        model = Model(input=state_input, output=distribution_list)

        adam = Adam(lr=learning_rate)
        model.compile(loss='categorical_crossentropy', optimizer=adam)

        return model 
Developer: flyyufelix, Project: C51-DDQN-Keras, Lines: 25, Source: networks.py

Example 11: _build

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def _build(self):
        # the model that will be trained
        rnn_x = Input(shape=(None, Z_DIM + ACTION_DIM))
        lstm = LSTM(HIDDEN_UNITS, return_sequences=True, return_state=True)

        lstm_output, _, _ = lstm(rnn_x)
        mdn = Dense(Z_DIM)(lstm_output)

        rnn = Model(rnn_x, mdn)

        # the model used during prediction
        state_input_h = Input(shape=(HIDDEN_UNITS,))
        state_input_c = Input(shape=(HIDDEN_UNITS,))
        state_inputs = [state_input_h, state_input_c]
        
        _, state_h, state_c = lstm(rnn_x, initial_state=state_inputs)
        forward = Model([rnn_x] + state_inputs, [state_h, state_c])

        optimizer = Adam(lr=0.0001)
        # optimizer = SGD(lr=0.0001, decay=1e-4, momentum=0.9, nesterov=True)
        rnn.compile(loss='mean_squared_error', optimizer=optimizer)

        return [rnn, forward] 
Developer: marooncn, Project: navbot, Lines: 25, Source: RNN.py

Example 12: build_3dcnn_model

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def build_3dcnn_model(self, fusion_type, Fusion):
        if len(Fusion[0]) == 1: 
            input_shape = (32, 32, len(Fusion))
            model_in,model = self.cnn_2D(input_shape) 
        else:
            input_shape = (32, 32, 5, len(Fusion))
            model_in,model = self.cnn_3D(input_shape) 
        model = Dropout(0.5)(model)
        model = Dense(32, activation='relu', name = 'fc2')(model)
        model = Dense(self.config.classes, activation='softmax', name = 'fc3')(model) 
        model = Model(input=model_in, output=model)
        # parameter summary
        # model.summary()
        plot_model(model,to_file='experiments/img/' + str(Fusion) + fusion_type + r'_model.png',show_shapes=True)
        print('    Saving model  Architecture')
        
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        # model.compile(optimizer=adam, loss=self.mycrossentropy, metrics=['accuracy'])  # improves results, but unstable
        model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy']) 
        
        return model 
Developer: xyj77, Project: MCF-3D-CNN, Lines: 23, Source: liver_model.py

Example 13: build_vgg16

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def build_vgg16(image_size=None):
	image_size = image_size or (240, 240)
	if K.image_dim_ordering() == 'th':
	    input_shape = (3,) + image_size
	else:
	    input_shape = image_size + (3, )
	bottleneck_model = vgg16.VGG16(include_top=False, 
	                               input_tensor=Input(input_shape))
	#bottleneck_model.trainable = False
	for layer in bottleneck_model.layers:
	    layer.trainable = False

	x = bottleneck_model.input
	y = bottleneck_model.output
	y = Flatten()(y)
	y = BatchNormalization()(y)
	y = Dense(2048, activation='relu')(y)
	y = Dropout(.5)(y)
	y = Dense(1024, activation='relu')(y)
	y = Dropout(.5)(y)
	y = Dense(1)(y)

	model = Model(input=x, output=y)
	model.compile(optimizer=Adam(lr=1e-4), loss='mse')
	return model 
Developer: dolaameng, Project: udacity-SDC-baseline, Lines: 27, Source: model.py

Example 14: pre_train_generator

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def pre_train_generator(self, g_epochs=3, g_pre_path=None, lr=1e-3):
        if g_pre_path is None:
            self.g_pre_path = os.path.join(self.top, 'data', 'save', 'generator_pre.hdf5')
        else:
            self.g_pre_path = g_pre_path

        g_adam = Adam(lr)
        self.generator_pre.compile(g_adam, 'categorical_crossentropy')
        print('Generator pre-training')
        self.generator_pre.summary()

        self.generator_pre.fit_generator(
            self.g_data,
            steps_per_epoch=None,
            epochs=g_epochs)
        self.generator_pre.save_weights(self.g_pre_path)
        self.reflect_pre_train() 
Developer: tyo-yo, Project: SeqGAN, Lines: 19, Source: train.py

Example 15: siamese_model

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Adam [as alias]
def siamese_model(type):
  if type=='plate':
    input_shape = (image_size_h_p,image_size_w_p,nchannels)
  else:
    input_shape = (image_size_h_c,image_size_w_c,nchannels)
  left_input = Input(input_shape)
  right_input = Input(input_shape)
  convnet = small_vgg(input_shape)
  encoded_l = convnet(left_input)
  encoded_r = convnet(right_input)

  # Add the distance function to the network
  L1_distance = L1_layer([encoded_l, encoded_r])

  prediction = Dense(2,activation='softmax')(L1_distance)
  optimizer = Adam(0.001, decay=2.5e-4)

  model = Model(inputs=[left_input, right_input], outputs=prediction)
  model.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=['accuracy'])

  return model
#------------------------------------------------------------------------------ 
Developer: icarofua, Project: siamese-two-stream, Lines: 24, Source: siamese.py


Note: The keras.optimizers.Adam examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not reproduce this article without permission.