當前位置: 首頁>>代碼示例>>Python>>正文


Python optimizers.Adam方法代碼示例

本文整理匯總了Python中keras.optimizers.Adam方法的典型用法代碼示例。如果您正苦於以下問題:Python optimizers.Adam方法的具體用法?Python optimizers.Adam怎麽用?Python optimizers.Adam使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在keras.optimizers的用法示例。


在下文中一共展示了optimizers.Adam方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: build_model

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def build_model(config):
    """Build and compile a CNN from a configuration object.

    Parameters:
        config: object exposing ``model_arch`` (architecture id, final
            activation) and ``training_params`` (optimizer, loss and
            learning-rate hyperparameters).

    Returns:
        A compiled Keras model.
    """
    params = config.model_arch
    # Resolve the builder by name, e.g. architecture 3 -> models.get_model_3.
    get_model = getattr(models, 'get_model_'+str(params['architecture']))
    model = get_model(params)
    #model = model_kenun.build_convnet_model(params)
    # Learning setup
    t_params = config.training_params
    # Both optimizers are built up front; eval() below selects one by the
    # *local variable name* ('sgd' or 'adam') stored in the config, so these
    # names must not be changed.
    sgd = SGD(lr=t_params["learning_rate"], decay=t_params["decay"],
              momentum=t_params["momentum"], nesterov=t_params["nesterov"])
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    # NOTE(review): eval() on a config string executes arbitrary code — safe
    # only if config files are fully trusted; a dict dispatch would be safer.
    optimizer = eval(t_params['optimizer'])
    metrics = ['mean_squared_error']
    if config.model_arch["final_activation"] == 'softmax':
        metrics.append('categorical_accuracy')
    if t_params['loss_func'] == 'cosine':
        # 'cosine' is resolved via eval to a loss object assumed to be in
        # the module scope — TODO confirm against the enclosing file.
        loss_func = eval(t_params['loss_func'])
    else:
        loss_func = t_params['loss_func']
    model.compile(loss=loss_func, optimizer=optimizer,metrics=metrics)

    return model 
開發者ID:sergiooramas,項目名稱:tartarus,代碼行數:24,代碼來源:train.py

示例2: train_model

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def train_model():
    """Train a BiLSTM-CRF sequence tagger and save it to ``model_path``.

    Relies on module-level globals: ``cxl_model``, ``train_path``, ``vocab``,
    ``tag``, ``length``, ``model_path`` and the helpers ``load_embedding`` /
    ``vocab_train_label``.
    """
    # Use pretrained word vectors only when an embedding model is configured.
    if cxl_model:
        embedding_matrix = load_embedding()
    else:
        embedding_matrix = {}
    train, label = vocab_train_label(train_path, vocab=vocab, tags=tag, max_chunk_length=length)
    # BUGFIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` (-> float64) is the drop-in replacement.
    n = np.array(label, dtype=float)
    # CRF with sparse_target expects labels shaped (samples, timesteps, 1).
    labels = n.reshape((n.shape[0], n.shape[1], 1))
    model = Sequential([
        Embedding(input_dim=len(vocab), output_dim=300, mask_zero=True, input_length=length, weights=[embedding_matrix],
                  trainable=False),
        SpatialDropout1D(0.2),
        Bidirectional(layer=LSTM(units=150, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
        TimeDistributed(Dense(len(tag), activation=relu)),
    ])
    crf_ = CRF(units=len(tag), sparse_target=True)
    model.add(crf_)
    model.compile(optimizer=Adam(), loss=crf_.loss_function, metrics=[crf_.accuracy])
    model.fit(x=np.array(train), y=labels, batch_size=16, epochs=4, callbacks=[RemoteMonitor()])
    model.save(model_path) 
開發者ID:jtyoui,項目名稱:Jtyoui,代碼行數:22,代碼來源:NER.py

示例3: create_model

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def create_model():
    """Build the hybrid CNN + BiLSTM + CRF tagging model.

    Two embeddings of the same input feed a BiLSTM branch and a Conv1D
    branch; their concatenation is projected per-timestep and scored by a
    CRF layer. Relies on module-level hyperparameter globals (``length``,
    ``vocab``, ``EMBED_DIM``, ``DROPOUT_RATE``, ``FILTERS``,
    ``HALF_WIN_SIZE``, ``DENSE_DIM``, ``chunk_tags``).
    """
    inputs = Input(shape=(length,), dtype='int32', name='inputs')
    # Branch 1: masked embedding -> bidirectional LSTM.
    embedding_1 = Embedding(len(vocab), EMBED_DIM, input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)
    # Branch 2: unmasked embedding -> same-padded convolution over a
    # (2 * HALF_WIN_SIZE + 1)-token window.
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1, padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)
    # FIX: `input=`/`output=` are the removed Keras 1 keyword names; Keras 2
    # uses `inputs=`/`outputs=`.
    model = Model(inputs=[inputs], outputs=[crf_output])
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model 
開發者ID:jtyoui,項目名稱:Jtyoui,代碼行數:18,代碼來源:cnn_rnn_crf.py

示例4: evaluate

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def evaluate(self, inputs, outputs):
        """Compile, train and score the architecture described by (inputs, outputs).

        Returns a dict with the final epoch's validation accuracy.
        """
        # Start from a clean graph so repeated evaluations don't accumulate state.
        keras.backend.clear_session()

        input_tensor = Input(self.X_train[0].shape)
        co.forward({inputs['in']: input_tensor})
        probs = Activation('softmax')(outputs['out'].val)

        net = Model(inputs=[inputs['in'].val], outputs=[probs])
        net.compile(optimizer=Adam(lr=self.learning_rate),
                    loss='sparse_categorical_crossentropy',
                    metrics=['accuracy'])
        net.summary()

        fit_history = net.fit(self.X_train,
                              self.y_train,
                              batch_size=self.batch_size,
                              epochs=self.num_training_epochs,
                              validation_data=(self.X_val, self.y_val))
        return {'validation_accuracy': fit_history.history['val_accuracy'][-1]} 
開發者ID:negrinho,項目名稱:deep_architect,代碼行數:22,代碼來源:main_keras.py

示例5: fit

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def fit(self, X, y):
        """Fit a small fully-connected regression network to (X, y).

        X must be a 2-D array of shape (n_samples, n_features); y holds the
        regression targets. The trained model is stored on ``self.model``.

        Raises:
            ValueError: if X is not 2-D.
        """
        # Silence TensorFlow's informational log spam.
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
        # FIX: validate with an explicit exception — `assert` is stripped
        # when Python runs with -O.
        if len(X.shape) != 2:
            raise ValueError("X must be 2-D (n_samples, n_features), got shape %s" % (X.shape,))
        N, d = X.shape

        from keras.models import Sequential
        from keras.layers import Dense
        from keras.optimizers import Adam
        model = Sequential()
        model.add(Dense(10, input_dim=d, activation="relu"))
        model.add(Dense(10, activation="relu"))
        # NOTE(review): ReLU on the output clamps predictions to >= 0 —
        # presumably intentional for non-negative targets; confirm.
        model.add(Dense(1, activation="relu"))
        model.compile(loss="mse", optimizer=Adam(lr=0.005))
        self.model = model

        n_epochs = 100
        self.model.fit(X, y, epochs=n_epochs, verbose=False) 
開發者ID:ankonzoid,項目名稱:LearningX,代碼行數:19,代碼來源:NN_regr.py

示例6: optimizer

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def optimizer(self):
        """Build a Keras backend training function implementing REINFORCE."""
        action = K.placeholder(shape=[None, 5])
        discounted_rewards = K.placeholder(shape=[None, ])

        # Cross-entropy of the taken action, weighted by the discounted return.
        selected_prob = K.sum(action * self.model.output, axis=1)
        weighted_log_prob = K.log(selected_prob) * discounted_rewards
        loss = -K.sum(weighted_log_prob)

        # Training function that applies one policy-gradient update.
        adam = Adam(lr=self.learning_rate)
        grad_updates = adam.get_updates(self.model.trainable_weights, [],
                                        loss)
        return K.function([self.model.input, action, discounted_rewards], [],
                          updates=grad_updates)

    # Select an action using the policy network 
開發者ID:rlcode,項目名稱:reinforcement-learning-kr,代碼行數:21,代碼來源:reinforce_agent.py

示例7: load_model

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def load_model(stamp):
    """Load a model whose architecture and weights were saved under *stamp*.

    Reads ``<stamp>.json`` (architecture) and ``<stamp>.h5`` (weights),
    recompiles the model with binary cross-entropy and returns it.
    """
    # FIX: context manager guarantees the file handle is closed even if
    # reading or parsing fails (the original open/read/close could leak).
    with open(stamp + '.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    # The custom layer must be registered so deserialization can resolve it.
    model = model_from_json(loaded_model_json, {'AttentionWithContext': AttentionWithContext})

    model.load_weights(stamp + '.h5')
    print("Loaded model from disk")

    model.summary()

    adam = Adam(lr=0.001)
    model.compile(loss='binary_crossentropy',
                  optimizer=adam,
                  metrics=[f1_score])

    return model 
開發者ID:AlexGidiotis,項目名稱:Document-Classifier-LSTM,代碼行數:24,代碼來源:classifier.py

示例8: optimizer

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def optimizer(self):
        """Return a backend function that applies one REINFORCE update."""
        action_ph = K.placeholder(shape=[None, 5])
        returns_ph = K.placeholder(shape=[None, ])

        # Policy-gradient loss: -sum(log pi(a|s) * G_t).
        prob_of_taken = K.sum(action_ph * self.model.output, axis=1)
        loss = -K.sum(K.log(prob_of_taken) * returns_ph)

        # Gradient updates for the policy network's weights.
        updates = Adam(lr=self.learning_rate).get_updates(
            self.model.trainable_weights, [], loss)
        train_fn = K.function([self.model.input, action_ph, returns_ph], [],
                              updates=updates)

        return train_fn

    # get action from policy network 
開發者ID:rlcode,項目名稱:reinforcement-learning,代碼行數:21,代碼來源:reinforce_agent.py

示例9: actor_optimizer

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def actor_optimizer(self):
        """Create the update function for the actor (policy) network."""
        action_ph = K.placeholder(shape=(None, self.action_size))
        advantage_ph = K.placeholder(shape=(None, ))

        policy = self.actor.output

        # log pi(a|s) weighted by the advantage (gradients do not flow
        # through the advantage estimate).
        taken_prob = K.sum(action_ph * policy, axis=1)
        pg_loss = -K.sum(K.log(taken_prob + 1e-10) * K.stop_gradient(advantage_ph))

        # Per-sample sum(p * log p) == negative entropy; adding it with a
        # small weight penalizes low-entropy (overconfident) policies.
        neg_entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)

        total_loss = pg_loss + 0.01 * neg_entropy

        adam = Adam(lr=self.actor_lr)
        updates = adam.get_updates(self.actor.trainable_weights, [], total_loss)
        return K.function([self.actor.input, action_ph, advantage_ph], [],
                          updates=updates)

    # make loss function for Value approximation 
開發者ID:rlcode,項目名稱:reinforcement-learning,代碼行數:22,代碼來源:cartpole_a3c.py

示例10: value_distribution_network

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def value_distribution_network(input_shape, num_atoms, action_size, learning_rate):
        """Model Value Distribution.

        With states as inputs, outputs a probability distribution over
        ``num_atoms`` support atoms for every action (one softmax head per
        action on top of a shared convolutional trunk), as in categorical
        (C51) distributional DQN.

        NOTE(review): written against the Keras 1 API (``Convolution2D``
        with positional kernel sizes and ``subsample=``, and
        ``Model(input=..., output=...)``); these spellings are deprecated
        or removed in Keras 2 — confirm the pinned Keras version before
        modernizing.
        """

        state_input = Input(shape=(input_shape)) 
        cnn_feature = Convolution2D(32, 8, 8, subsample=(4,4), activation='relu')(state_input)
        cnn_feature = Convolution2D(64, 4, 4, subsample=(2,2), activation='relu')(cnn_feature)
        cnn_feature = Convolution2D(64, 3, 3, activation='relu')(cnn_feature)
        cnn_feature = Flatten()(cnn_feature)
        cnn_feature = Dense(512, activation='relu')(cnn_feature)

        # One categorical head per action.
        distribution_list = []
        for i in range(action_size):
            distribution_list.append(Dense(num_atoms, activation='softmax')(cnn_feature))

        model = Model(input=state_input, output=distribution_list)

        adam = Adam(lr=learning_rate)
        model.compile(loss='categorical_crossentropy',optimizer=adam)

        return model 
開發者ID:flyyufelix,項目名稱:C51-DDQN-Keras,代碼行數:25,代碼來源:networks.py

示例11: _build

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def _build(self):
        """Construct the RNN: one model for training, one for prediction.

        Returns ``[rnn, forward]``: ``rnn`` maps (z, action) sequences to
        predicted z sequences and is the model that gets trained;
        ``forward`` shares the same LSTM but takes an explicit initial
        state and returns the new (h, c) pair for step-by-step prediction.
        """
        # Training-time model: full sequence in, per-step prediction out.
        seq_input = Input(shape=(None, Z_DIM + ACTION_DIM))
        lstm_layer = LSTM(HIDDEN_UNITS, return_sequences=True, return_state=True)

        seq_output, _, _ = lstm_layer(seq_input)
        z_pred = Dense(Z_DIM)(seq_output)

        rnn = Model(seq_input, z_pred)

        # Prediction-time model: same (shared) LSTM fed an explicit initial
        # state, exposing the updated hidden and cell states.
        h_in = Input(shape=(HIDDEN_UNITS,))
        c_in = Input(shape=(HIDDEN_UNITS,))

        _, h_out, c_out = lstm_layer(seq_input, initial_state=[h_in, c_in])
        forward = Model([seq_input, h_in, c_in], [h_out, c_out])

        # Only the training model needs compiling.
        rnn.compile(loss='mean_squared_error', optimizer=Adam(lr=0.0001))

        return [rnn, forward] 
開發者ID:marooncn,項目名稱:navbot,代碼行數:25,代碼來源:RNN.py

示例12: build_3dcnn_model

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def build_3dcnn_model(self, fusion_type, Fusion):
        """Build a 2D- or 3D-CNN classifier depending on the fusion spec.

        A single-element fusion entry selects the 2D trunk (32x32 inputs);
        otherwise the 3D trunk (depth 5) is used. Saves an architecture
        diagram under experiments/img/ and returns the compiled model.
        """
        if len(Fusion[0]) == 1: 
            input_shape = (32, 32, len(Fusion))
            model_in,model = self.cnn_2D(input_shape) 
        else:
            input_shape = (32, 32, 5, len(Fusion))
            model_in,model = self.cnn_3D(input_shape) 
        model = Dropout(0.5)(model)
        model = Dense(32, activation='relu', name = 'fc2')(model)
        model = Dense(self.config.classes, activation='softmax', name = 'fc3')(model) 
        # FIX: `input=`/`output=` are the removed Keras 1 keyword names;
        # Keras 2 uses `inputs=`/`outputs=`.
        model = Model(inputs=model_in, outputs=model)
        # Parameter summary
        # model.summary()
        plot_model(model,to_file='experiments/img/' + str(Fusion) + fusion_type + r'_model.png',show_shapes=True)
        print('    Saving model  Architecture')
        
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        # model.compile(optimizer=adam, loss=self.mycrossentropy, metrics=['accuracy'])  # custom loss: improved results but unstable
        model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy']) 
        
        return model 
開發者ID:xyj77,項目名稱:MCF-3D-CNN,代碼行數:23,代碼來源:liver_model.py

示例13: build_vgg16

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def build_vgg16(image_size=None):
	"""Build a single-output regression model on a frozen VGG16 base.

	The VGG16 convolutional trunk is loaded without its top and frozen;
	a new fully-connected head (2048 -> 1024 -> 1, linear output) is
	trained with MSE loss. ``image_size`` defaults to (240, 240).

	NOTE(review): written against Keras 1 era APIs
	(``K.image_dim_ordering``, ``Model(input=..., output=...)``); newer
	Keras renames these — confirm the pinned version before modernizing.
	"""
	image_size = image_size or (240, 240)
	# Channel axis position depends on the backend's dim ordering
	# ('th' = channels first).
	if K.image_dim_ordering() == 'th':
	    input_shape = (3,) + image_size
	else:
	    input_shape = image_size + (3, )
	bottleneck_model = vgg16.VGG16(include_top=False, 
	                               input_tensor=Input(input_shape))
	#bottleneck_model.trainable = False
	# Freeze every VGG layer; only the new head below is trained.
	for layer in bottleneck_model.layers:
	    layer.trainable = False

	x = bottleneck_model.input
	y = bottleneck_model.output
	y = Flatten()(y)
	y = BatchNormalization()(y)
	y = Dense(2048, activation='relu')(y)
	y = Dropout(.5)(y)
	y = Dense(1024, activation='relu')(y)
	y = Dropout(.5)(y)
	y = Dense(1)(y)

	model = Model(input=x, output=y)
	model.compile(optimizer=Adam(lr=1e-4), loss = 'mse')
	return model 
開發者ID:dolaameng,項目名稱:udacity-SDC-baseline,代碼行數:27,代碼來源:model.py

示例14: pre_train_generator

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def pre_train_generator(self, g_epochs=3, g_pre_path=None, lr=1e-3):
        """Pre-train the generator with MLE and save its weights.

        When *g_pre_path* is omitted, a default location under
        ``<top>/data/save/`` is used. The resolved path is stored on
        ``self.g_pre_path``.
        """
        default_path = os.path.join(self.top, 'data', 'save', 'generator_pre.hdf5')
        self.g_pre_path = default_path if g_pre_path is None else g_pre_path

        self.generator_pre.compile(Adam(lr), 'categorical_crossentropy')
        print('Generator pre-training')
        self.generator_pre.summary()

        # Maximum-likelihood pre-training over the data generator.
        self.generator_pre.fit_generator(
            self.g_data,
            steps_per_epoch=None,
            epochs=g_epochs)
        self.generator_pre.save_weights(self.g_pre_path)
        self.reflect_pre_train() 
開發者ID:tyo-yo,項目名稱:SeqGAN,代碼行數:19,代碼來源:train.py

示例15: siamese_model

# 需要導入模塊: from keras import optimizers [as 別名]
# 或者: from keras.optimizers import Adam [as 別名]
def siamese_model(type):
    """Build a two-branch siamese classifier over image pairs.

    ``type == 'plate'`` selects the plate-image geometry, anything else
    the car-image geometry. Both branches share one encoder; an L1
    distance between the embeddings feeds a 2-way softmax.
    """
    # Input geometry depends on which image stream we compare.
    if type == 'plate':
        input_shape = (image_size_h_p, image_size_w_p, nchannels)
    else:
        input_shape = (image_size_h_c, image_size_w_c, nchannels)

    left_input = Input(input_shape)
    right_input = Input(input_shape)

    # A single shared encoder embeds both images.
    encoder = small_vgg(input_shape)
    embeddings = [encoder(left_input), encoder(right_input)]

    # Element-wise L1 distance between the two embeddings.
    distance = L1_layer(embeddings)
    prediction = Dense(2, activation='softmax')(distance)

    model = Model(inputs=[left_input, right_input], outputs=prediction)
    model.compile(loss="binary_crossentropy",
                  optimizer=Adam(0.001, decay=2.5e-4),
                  metrics=['accuracy'])

    return model
#------------------------------------------------------------------------------ 
#------------------------------------------------------------------------------ 
開發者ID:icarofua,項目名稱:siamese-two-stream,代碼行數:24,代碼來源:siamese.py


注:本文中的keras.optimizers.Adam方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。