本文整理汇总了Python中keras.optimizers.adam方法的典型用法代码示例。如果您正苦于以下问题:Python optimizers.adam方法的具体用法?Python optimizers.adam怎么用?Python optimizers.adam使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.optimizers
的用法示例。
在下文中一共展示了optimizers.adam方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: compile
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def compile(self, lr: float = 0.01, accum: bool = False):
    """Compile the wrapped model with the project's standard training setup.

    Loss is the mean euclidean distance; metrics are the angular error and
    MSE. The optimizer is ADAM, optionally with gradient accumulation.

    :param lr: learning rate for the optimizer.
    :param accum: when True, accumulate gradients over 8 mini-batches
        before applying an update.
    :raises ValueError: if no model has been defined yet.
    """
    opt = Adam_accumulate(lr=lr, accum_iters=8) if accum else adam(lr)
    if self.model is None:
        raise ValueError('Only defined models can be compiled.')
    # Mean euclidean distance as loss; angular error and mse as metrics.
    self.model.compile(loss=euclidean_distance,
                       optimizer=opt,
                       metrics=[angle_error, 'mse'])
示例2: model_keras
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def model_keras(num_words=3000, num_units=128):
    """Build and compile a stacked-LSTM RNN language model.

    :param num_words: vocabulary size.
    :param num_units: word-vector dimension; by default also the number of
        LSTM units.
    :return: a compiled Keras ``Model``.
    """
    tokens = Input(shape=[None])
    emb = Embedding(input_dim=num_words, output_dim=num_units,
                    mask_zero=True)(tokens)
    hidden = LSTM(units=num_units, return_sequences=True)(emb)
    hidden = LSTM(units=num_units, return_sequences=True)(hidden)
    # Keras apparently cannot reshape the targets internally the way raw
    # TensorFlow can, so no Reshape layer is applied here:
    # hidden = Reshape(target_shape=[-1, num_units])(hidden)
    probs = Dense(units=num_words, activation='softmax')(hidden)
    net = Model(inputs=tokens, outputs=probs)
    net.compile(loss='sparse_categorical_crossentropy',
                optimizer=optimizers.adam(lr=0.01),
                metrics=['accuracy'])
    return net
示例3: compile_model
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def compile_model(model, cfg):
    """Build the optimizer described in *cfg* and compile *model* with it.

    Supported optimizer names: ``"SGD"`` and ``"adam"``.

    :param model: the Keras model to compile.
    :param cfg: configuration dict with an ``"optimizer"`` sub-dict.
    :return: the compiled model (summary is also printed).
    :raises ValueError: for any other optimizer name.
    """
    opt_cfg = cfg["optimizer"]
    if opt_cfg["name"] == "SGD":
        optimizer = optimizers.SGD(lr=opt_cfg["lr"],
                                   momentum=opt_cfg["momentum"],
                                   decay=opt_cfg["decay"])
    elif opt_cfg["name"] == "adam":
        optimizer = optimizers.adam(lr=opt_cfg["lr"],
                                    beta_1=opt_cfg["beta_1"],
                                    beta_2=opt_cfg["beta_2"],
                                    epsilon=opt_cfg["epsilon"],
                                    decay=opt_cfg["decay"],
                                    clipnorm=opt_cfg["clipnorm"])
    else:
        raise ValueError(
            "Configuration error: the specified optimizer is not yet implemented.")
    model.compile(optimizer, loss=config.LOSS, metrics=config.METRICS)
    model.summary()
    return model
示例4: __init__
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def __init__(self, scale=3, load_set=None, build_model=None,
             optimizer='adam', save_dir='.'):
    """Record the experiment configuration and lay out the save directory.

    :param scale: super-resolution factor, bound into *load_set* and
        *build_model* via ``functools.partial``.
    :param load_set: dataset-loading callable accepting a ``scale`` kwarg.
    :param build_model: model-building callable accepting a ``scale`` kwarg.
    :param optimizer: optimizer spec stored for later use.
    :param save_dir: root directory for config, weights and results
        (created if missing).
    """
    self.scale = scale
    self.load_set = partial(load_set, scale=scale)
    self.build_model = partial(build_model, scale=scale)
    self.optimizer = optimizer

    root = Path(save_dir)
    root.mkdir(parents=True, exist_ok=True)
    self.save_dir = root
    self.config_file = root / 'config.yaml'
    self.model_file = root / 'model.hdf5'

    # Training artifacts live under <save_dir>/train.
    self.train_dir = root / 'train'
    self.train_dir.mkdir(exist_ok=True)
    self.history_file = self.train_dir / 'history.csv'
    self.weights_dir = self.train_dir / 'weights'
    self.weights_dir.mkdir(exist_ok=True)

    # Evaluation outputs live under <save_dir>/test.
    self.test_dir = root / 'test'
    self.test_dir.mkdir(exist_ok=True)
示例5: get_classification_model
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def get_classification_model(input_dim, nodes_per_layer):
    """Build and compile a two-hidden-layer binary classifier.

    Two tanh layers with 20% dropout each, followed by a sigmoid output;
    trained with ADAM (lr=0.0005) on binary cross-entropy.

    :param input_dim: number of input features.
    :param nodes_per_layer: width of each hidden layer.
    :return: a compiled Keras ``Sequential`` model.
    """
    net = Sequential()
    net.add(Dense(nodes_per_layer, input_dim=input_dim, activation='tanh'))
    net.add(Dropout(0.2))
    net.add(Dense(nodes_per_layer, activation='tanh'))
    net.add(Dropout(0.2))
    net.add(Dense(1, activation='sigmoid'))
    net.compile(adam(lr=0.0005),
                loss='binary_crossentropy',
                metrics=['accuracy'])
    return net
示例6: get_regression_model
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def get_regression_model(input_dim, nodes_per_layer):
    """Build and compile a two-hidden-layer regression network.

    Same trunk as the classifier (two tanh layers with 20% dropout) but a
    linear output trained with ADAM (lr=0.0005) on MSE.

    :param input_dim: number of input features.
    :param nodes_per_layer: width of each hidden layer.
    :return: a compiled Keras ``Sequential`` model.
    """
    net = Sequential()
    net.add(Dense(nodes_per_layer, input_dim=input_dim, activation='tanh'))
    net.add(Dropout(0.2))
    net.add(Dense(nodes_per_layer, activation='tanh'))
    net.add(Dropout(0.2))
    net.add(Dense(1, activation='linear'))
    net.compile(adam(lr=0.0005), loss='mse')
    return net
示例7: build_model
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def build_model(self):
    """Create the value network: two 64-unit ReLU layers and a linear head.

    Maps a state vector of size ``self.state_space`` to one output per
    action; compiled with MSE loss and ADAM at ``self.learning_rate``.

    :return: a compiled Keras ``Sequential`` model.
    """
    net = Sequential()
    net.add(Dense(64, input_shape=(self.state_space,), activation='relu'))
    net.add(Dense(64, activation='relu'))
    net.add(Dense(self.action_space, activation='linear'))
    net.compile(loss='mse', optimizer=adam(lr=self.learning_rate))
    return net
示例8: test_single_h5
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def test_single_h5(FLAGS, h5_weights_path):
    """Evaluate one .h5 weights file on the test set and write a report.

    Loads the weights at *h5_weights_path* into a freshly built model,
    predicts on the test data, prints the accuracy, and writes
    ``<weights>_accuracy.txt`` (mis-predicted files plus overall accuracy)
    next to the weights file.

    :param FLAGS: parsed command-line flags (learning rate, data paths, ...).
    :param h5_weights_path: path of the .h5 weights file to evaluate.
    """
    if not os.path.isfile(h5_weights_path):
        print('%s is not a h5 weights file path' % h5_weights_path)
        return
    optimizer = adam(lr=FLAGS.learning_rate, clipnorm=0.001)
    objective = 'categorical_crossentropy'
    metrics = ['accuracy']
    model = model_fn(FLAGS, objective, optimizer, metrics)
    # BUG FIX: previously this loaded FLAGS.eval_weights_path, silently
    # ignoring the h5_weights_path argument that was validated above.
    load_weights(model, h5_weights_path)
    img_names, test_data, test_labels = load_test_data(FLAGS)
    predictions = model.predict(test_data, verbose=0)
    right_count = 0
    error_infos = []
    for index, pred in enumerate(predictions):
        pred_label = np.argmax(pred, axis=0)
        test_label = test_labels[index]
        if pred_label == test_label:
            right_count += 1
        else:
            error_infos.append('%s, %s, %s\n'
                               % (img_names[index], test_label, pred_label))
    # Guard against an empty test set instead of raising ZeroDivisionError.
    accuracy = right_count / len(img_names) if img_names else 0.0
    print('accuracy: %s' % accuracy)
    result_file_name = os.path.join(os.path.dirname(h5_weights_path),
                                    '%s_accuracy.txt' % os.path.basename(h5_weights_path))
    with open(result_file_name, 'w') as f:
        f.write('# predict error files\n')
        f.write('####################################\n')
        f.write('file_name, true_label, pred_label\n')
        f.writelines(error_infos)
        f.write('####################################\n')
        f.write('accuracy: %s\n' % accuracy)
    print('end')
示例9: load_weights_save_pb
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def load_weights_save_pb(FLAGS):
    """Rebuild the model, load the freeze weights, and export it as a .pb.

    :param FLAGS: parsed command-line flags providing the learning rate and
        the weights file path (``FLAGS.freeze_weights_file_path``).
    """
    # The loss/optimizer are only needed so Keras will compile the graph
    # before the weights are loaded; no training happens here.
    model = model_fn(FLAGS,
                     'categorical_crossentropy',
                     adam(lr=FLAGS.learning_rate, clipnorm=0.001),
                     ['accuracy'])
    load_weights(model, FLAGS.freeze_weights_file_path)
    save_pb_model(FLAGS, model)
示例10: train
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def train(self, x, y, learning_rate=0.01, epochs=200):
    """Fit the wrapped model on (x, y) with MSE loss and an ADAM optimizer.

    :param x: training inputs.
    :param y: training targets.
    :param learning_rate: ADAM learning rate (decay fixed at 1e-6).
    :param epochs: number of training epochs (batch size 32, 5% held out
        for validation).
    """
    opt = optimizers.adam(lr=learning_rate, decay=1e-6)
    self._model.compile(loss="mean_squared_error", optimizer=opt)
    self._model.fit(x, y, batch_size=32, validation_split=0.05,
                    epochs=epochs, verbose=1)
示例11: generate
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def generate(file_list, data_dir, output_dir, context_len=32, stats=None,
             base_model_path='./pls.model', gan_model_path='./noise_gen.model'):
    """Run the pulse model plus GAN noise generator and dump .pls files.

    For every utterance provided by ``nc_data_provider``, predicts a base
    pulse with the glot model, adds GAN-generated noise, and writes both the
    noisy (``.pls``) and noise-free (``.pls_nonoise``) pulses as raw float32
    into *output_dir*.

    :param file_list: utterances to process.
    :param data_dir: directory holding the input acoustic data.
    :param output_dir: directory receiving the generated files.
    :param context_len: temporal context length fed to the pulse model.
    :param stats: unused here; kept for interface compatibility.
    :param base_model_path: weights file for the base pulse model.
    :param gan_model_path: weights file for the GAN noise generator.
    """
    pulse_model = time_glot_model(timesteps=context_len)
    gan_model = generator()
    # Inference only: compiling with a placeholder loss/optimizer is just
    # what Keras requires before load_weights/predict.
    pulse_model.compile(loss='mse', optimizer="adam")
    gan_model.compile(loss='mse', optimizer="adam")
    pulse_model.load_weights(base_model_path)
    gan_model.load_weights(gan_model_path)

    for data in nc_data_provider(file_list, data_dir, input_only=True,
                                 context_len=context_len):
        # BUG FIX: dict.iteritems() is Python-2-only; use items() as the
        # rest of the file is Python-3 style.
        for fname, ac_data in data.items():
            print(fname)
            pls_pred, _ = pulse_model.predict([ac_data])
            noise = np.random.randn(pls_pred.shape[0], pls_pred.shape[1])
            pls_gan, _ = gan_model.predict([pls_pred, noise])
            # BUG FIX: write into the output_dir parameter instead of the
            # module-global ``args``, which is undefined when this function
            # is called as a library.
            out_file = os.path.join(output_dir, fname + '.pls')
            pls_gan.astype(np.float32).tofile(out_file)
            out_file = os.path.join(output_dir, fname + '.pls_nonoise')
            pls_pred.astype(np.float32).tofile(out_file)
示例12: penalized_loss
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def penalized_loss(y_true, y_pred):
    """MAPE loss plus hinge penalties for out-of-range predictions.

    Adds, with weight 0.5, a penalty whenever the per-sample maximum of
    ``y_pred`` exceeds the module-level ``input_D_max`` or its minimum
    falls below ``input_D_min``.
    """
    penalty_weight = 0.5
    base = mean_absolute_percentage_error(y_true, y_pred)
    over = K.mean(K.maximum(K.max(y_pred, axis=1) - input_D_max, 0.), axis=-1)
    under = K.mean(K.maximum(input_D_min - K.min(y_pred, axis=1), 0.), axis=-1)
    return base + penalty_weight * (over + under)
#model.compile(optimizer = 'rmsprop', loss = 'mape')
#model.compile(optimizer = 'adam', loss = penalized_loss)
示例13: buildModel
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def buildModel(embeddingMatrix):
    """Constructs the architecture of the model.

    Input:
        embeddingMatrix : The embedding matrix to be loaded in the
            embedding layer.
    Output:
        model : A basic LSTM model.
    """
    turn1 = Input(shape=(100,), dtype='int32', name='main_input1')
    turn2 = Input(shape=(100,), dtype='int32', name='main_input2')
    turn3 = Input(shape=(100,), dtype='int32', name='main_input3')
    # Frozen embedding layer shared by all three inputs.
    embed = Embedding(embeddingMatrix.shape[0],
                      EMBEDDING_DIM,
                      weights=[embeddingMatrix],
                      input_length=MAX_SEQUENCE_LENGTH,
                      trainable=False)
    # One shared bidirectional LSTM encodes each turn separately.
    encoder = Bidirectional(LSTM(LSTM_DIM, dropout=DROPOUT))
    encoded = [encoder(embed(turn)) for turn in (turn1, turn2, turn3)]
    stacked = Concatenate(axis=-1)(encoded)
    stacked = Reshape((3, 2 * LSTM_DIM,))(stacked)
    # A second LSTM summarises the sequence of three turn encodings.
    summary = LSTM(LSTM_DIM, dropout=DROPOUT)(stacked)
    probs = Dense(NUM_CLASSES, activation='softmax')(summary)
    model = Model([turn1, turn2, turn3], probs)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.adam(lr=LEARNING_RATE),
                  metrics=['acc'])
    print(model.summary())
    return model
示例14: load_model
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import adam [as 别名]
def load_model(cfg):
    """Instantiate the network described by *cfg* and compile it.

    Supported model types: ``"lstm"``, ``"cldnn"``, ``"tcn"``; supported
    optimizers: ``"SGD"`` and ``"adam"``.

    :param cfg: configuration dict with model hyper-parameters and an
        ``"optimizer"`` sub-dict.
    :return: the compiled model (summary is also printed).
    :raises ValueError: on an unknown model type or optimizer name.
    """
    model_type = cfg["type"]
    if model_type == "lstm":
        model = create_lstm(hidden_units=cfg["hidden_units"],
                            dropout=cfg["dropout"],
                            bidirectional=cfg["bidirectional"])
    elif model_type == "cldnn":
        model = create_cldnn(filters_list=cfg["filters_list"],
                             lstm_units=cfg["lstm_units"],
                             fc_units=cfg["fc_units"],
                             kernel_sizes=cfg["kernel_sizes"],
                             dropout=cfg["dropout"])
    elif model_type == "tcn":
        model = create_tcn(list_n_filters=cfg["list_n_filters"],
                           kernel_size=cfg["kernel_size"],
                           dilations=cfg["dilations"],
                           nb_stacks=cfg["nb_stacks"],
                           activation=cfg["activation"],
                           n_layers=cfg["n_layers"],
                           dropout_rate=cfg["dropout_rate"],
                           use_skip_connections=cfg["use_skip_connections"],
                           bidirectional=cfg["bidirectional"])
    else:
        raise ValueError(
            "Configuration error: the specified model is not yet implemented.")
    opt_cfg = cfg["optimizer"]
    if opt_cfg["name"] == "SGD":
        optimizer = optimizers.SGD(lr=opt_cfg["lr"],
                                   momentum=opt_cfg["momentum"],
                                   decay=opt_cfg["decay"])
    elif opt_cfg["name"] == "adam":
        optimizer = optimizers.adam(lr=opt_cfg["lr"],
                                    beta_1=opt_cfg["beta_1"],
                                    beta_2=opt_cfg["beta_2"],
                                    epsilon=opt_cfg["epsilon"],
                                    decay=opt_cfg["decay"],
                                    clipnorm=opt_cfg["clipnorm"])
    else:
        raise ValueError(
            "Configuration error: the specified optimizer is not yet implemented.")
    model.compile(optimizer, loss=config.LOSS, metrics=config.METRICS)
    model.summary()
    return model