This article collects typical usage examples of the Python attribute tensorflow.keras.optimizers.SGD. If you have been wondering what exactly optimizers.SGD does, how to use it, or where to find examples of it in practice, the curated samples below may help. You can also explore further usage examples from the module it lives in, tensorflow.keras.optimizers.
The following presents 15 code examples of optimizers.SGD, sorted by popularity by default.
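As a baseline before the excerpts, here is a minimal, self-contained sketch of the attribute in use; the model architecture and the random data are purely illustrative, not taken from any example below:

import numpy as np
from tensorflow.keras import Sequential, optimizers
from tensorflow.keras.layers import Dense

# Compile a tiny regression model with plain momentum SGD.
model = Sequential([Dense(8, activation='relu', input_shape=(4,)), Dense(1)])
model.compile(optimizer=optimizers.SGD(learning_rate=0.01, momentum=0.9), loss='mse')
model.fit(np.random.rand(32, 4), np.random.rand(32, 1), epochs=1, verbose=0)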
Example 1: __init__
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    with graph.as_default():
        if sess is not None:
            set_session(sess)
        inp = None
        output = None
        if self.shared_network is None:
            inp = Input((self.input_dim,))
            output = self.get_network_head(inp).output
        else:
            inp = self.shared_network.input
            output = self.shared_network.output
        output = Dense(
            self.output_dim, activation=self.activation,
            kernel_initializer='random_normal')(output)
        self.model = Model(inp, output)
        self.model.compile(
            optimizer=SGD(lr=self.lr), loss=self.loss)
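Note on the compile call above: `lr=` is the legacy spelling of the learning-rate argument. TensorFlow 2 long accepted it as a deprecated alias, but current Keras releases only accept `learning_rate=`, so new code should prefer:

from tensorflow.keras.optimizers import SGD

# `learning_rate` is the current argument name; `lr` is the deprecated alias.
optimizer = SGD(learning_rate=0.001)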
Example 2: test_clone_optimizer
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def test_clone_optimizer():
    lr, momentum, clipnorm, clipvalue = np.random.random(size=4)
    optimizer = SGD(lr=lr, momentum=momentum, clipnorm=clipnorm, clipvalue=clipvalue)
    clone = clone_optimizer(optimizer)

    assert isinstance(clone, SGD)
    assert K.get_value(optimizer.lr) == K.get_value(clone.lr)
    assert K.get_value(optimizer.momentum) == K.get_value(clone.momentum)
    assert optimizer.clipnorm == clone.clipnorm
    assert optimizer.clipvalue == clone.clipvalue
Example 3: test_clone_optimizer_from_string
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def test_clone_optimizer_from_string():
    clone = clone_optimizer('sgd')
    assert isinstance(clone, SGD)
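`clone_optimizer` in Examples 2 and 3 is a keras-rl helper, not part of tensorflow.keras. A minimal re-implementation sketch using only public Keras APIs (the function body here is an assumption, not the library's actual code):

from tensorflow.keras import optimizers

def clone_optimizer(optimizer):
    # Hypothetical sketch: rebuild an optimizer from its configuration;
    # accepts an optimizer instance or a string identifier.
    if isinstance(optimizer, str):
        # optimizers.get('sgd') resolves a string identifier to a fresh instance.
        return optimizers.get(optimizer)
    # Round-tripping through get_config()/from_config() copies hyperparameters
    # such as the learning rate, momentum, clipnorm and clipvalue.
    return optimizer.__class__.from_config(optimizer.get_config())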
Example 4: main
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def main():
    model = create_model(trainable=TRAINABLE)
    model.summary()

    if TRAINABLE:
        model.load_weights(WEIGHTS)

    train_datagen = DataGenerator(TRAIN_CSV)
    val_generator = DataGenerator(VALIDATION_CSV, rnd_rescale=False, rnd_multiply=False,
                                  rnd_crop=False, rnd_flip=False, debug=False)
    validation_datagen = Validation(generator=val_generator)

    learning_rate = LEARNING_RATE
    if TRAINABLE:
        learning_rate /= 10

    optimizer = SGD(lr=learning_rate, decay=LR_DECAY, momentum=0.9, nesterov=False)
    model.compile(loss=detection_loss(), optimizer=optimizer, metrics=[])

    checkpoint = ModelCheckpoint("model-{val_iou:.2f}.h5", monitor="val_iou", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.6, patience=5,
                                  min_lr=1e-6, verbose=1, mode="max")

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTITHREADING,
                        shuffle=True,
                        verbose=1)
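The `decay=` argument used here is the legacy per-step decay, which shrinks the rate as lr / (1 + decay * iterations). In current TensorFlow the equivalent is a learning-rate schedule; a hedged sketch of the same behavior (the constants are illustrative stand-ins for LEARNING_RATE and LR_DECAY):

from tensorflow.keras.optimizers import SGD
from tensorflow.keras.optimizers.schedules import InverseTimeDecay

# InverseTimeDecay with decay_steps=1 reproduces the legacy `decay` formula:
# lr(step) = initial_lr / (1 + decay_rate * step)
schedule = InverseTimeDecay(initial_learning_rate=1e-3, decay_steps=1, decay_rate=1e-4)
optimizer = SGD(learning_rate=schedule, momentum=0.9, nesterov=False)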
Example 5: _get_data_and_model
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def _get_data_and_model(args):
    # prepare dataset
    if args.method in ['FcDEC', 'FcIDEC', 'FcDEC-DA', 'FcIDEC-DA']:
        x, y = load_data(args.dataset)
    elif args.method in ['ConvDEC', 'ConvIDEC', 'ConvDEC-DA', 'ConvIDEC-DA']:
        x, y = load_data_conv(args.dataset)
    else:
        raise ValueError("Invalid value for method, which can only be in ['FcDEC', 'FcIDEC', 'ConvDEC', 'ConvIDEC', "
                         "'FcDEC-DA', 'FcIDEC-DA', 'ConvDEC-DA', 'ConvIDEC-DA']")

    # prepare optimizer
    if args.optimizer in ['sgd', 'SGD']:
        optimizer = SGD(args.lr, 0.9)  # positional arguments: learning rate, momentum
    else:
        optimizer = Adam()

    # prepare the model
    n_clusters = len(np.unique(y))
    if 'FcDEC' in args.method:
        model = FcDEC(dims=[x.shape[-1], 500, 500, 2000, 10], n_clusters=n_clusters)
        model.compile(optimizer=optimizer, loss='kld')
    elif 'FcIDEC' in args.method:
        model = FcIDEC(dims=[x.shape[-1], 500, 500, 2000, 10], n_clusters=n_clusters)
        model.compile(optimizer=optimizer, loss=['kld', 'mse'], loss_weights=[0.1, 1.0])
    elif 'ConvDEC' in args.method:
        model = ConvDEC(input_shape=x.shape[1:], filters=[32, 64, 128, 10], n_clusters=n_clusters)
        model.compile(optimizer=optimizer, loss='kld')
    elif 'ConvIDEC' in args.method:
        model = ConvIDEC(input_shape=x.shape[1:], filters=[32, 64, 128, 10], n_clusters=n_clusters)
        model.compile(optimizer=optimizer, loss=['kld', 'mse'], loss_weights=[0.1, 1.0])
    else:
        raise ValueError("Invalid value for method, which can only be in ['FcDEC', 'FcIDEC', 'ConvDEC', 'ConvIDEC', "
                         "'FcDEC-DA', 'FcIDEC-DA', 'ConvDEC-DA', 'ConvIDEC-DA']")

    # for -DA methods, force aug_pretrain and aug_cluster to True
    if '-DA' in args.method:
        args.aug_pretrain = True
        args.aug_cluster = True

    return (x, y), model
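For context, a hedged sketch of how this helper might be driven; only the fields the function actually reads are set, and the values are illustrative (in the original project they come from argparse):

from argparse import Namespace

# Illustrative arguments for a fully-connected DEC run on MNIST.
args = Namespace(method='FcDEC', dataset='mnist', optimizer='sgd', lr=0.01,
                 aug_pretrain=False, aug_cluster=False)
(x, y), model = _get_data_and_model(args)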
Example 6: train
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def train(args):
    # get data and model
    (x, y), model = _get_data_and_model(args)
    model.model.summary()

    # pretraining
    t0 = time()
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    if args.pretrained_weights is not None and os.path.exists(args.pretrained_weights):  # load pretrained weights
        model.autoencoder.load_weights(args.pretrained_weights)
    else:  # train
        pretrain_optimizer = SGD(1.0, 0.9) if args.method in ['FcDEC', 'FcIDEC', 'FcDEC-DA', 'FcIDEC-DA'] else 'adam'
        model.pretrain(x, y, optimizer=pretrain_optimizer, epochs=args.pretrain_epochs, batch_size=args.batch_size,
                       save_dir=args.save_dir, verbose=args.verbose, aug_pretrain=args.aug_pretrain)
    t1 = time()
    print("Time for pretraining: %ds" % (t1 - t0))

    # clustering
    y_pred = model.fit(x, y=y, maxiter=args.maxiter, batch_size=args.batch_size, update_interval=args.update_interval,
                       save_dir=args.save_dir, aug_cluster=args.aug_cluster)
    if y is not None:
        print('Final: acc=%.4f, nmi=%.4f, ari=%.4f' %
              (metrics.acc(y, y_pred), metrics.nmi(y, y_pred), metrics.ari(y, y_pred)))
    t2 = time()
    print("Time for pretraining, clustering and total: (%ds, %ds, %ds)" % (t1 - t0, t2 - t1, t2 - t0))
    print('=' * 60)
Example 7: start_training
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def start_training(working_dir, pre_training_phase=True):
    ensures_dir(CHECKPOINTS_SOFTMAX_DIR)
    ensures_dir(CHECKPOINTS_TRIPLET_DIR)
    batch_input_shape = [None, NUM_FRAMES, NUM_FBANKS, 1]
    if pre_training_phase:
        logger.info('Softmax pre-training.')
        kc = KerasFormatConverter(working_dir)
        num_speakers_softmax = len(kc.categorical_speakers.speaker_ids)
        dsm = DeepSpeakerModel(batch_input_shape, include_softmax=True, num_speakers_softmax=num_speakers_softmax)
        dsm.m.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        pre_training_checkpoint = load_best_checkpoint(CHECKPOINTS_SOFTMAX_DIR)
        if pre_training_checkpoint is not None:
            initial_epoch = int(pre_training_checkpoint.split('/')[-1].split('.')[0].split('_')[-1])
            logger.info(f'Initial epoch is {initial_epoch}.')
            logger.info(f'Loading softmax checkpoint: {pre_training_checkpoint}.')
            dsm.m.load_weights(pre_training_checkpoint)  # latest one.
        else:
            initial_epoch = 0
        fit_model_softmax(dsm, kc.kx_train, kc.ky_train, kc.kx_test, kc.ky_test, initial_epoch=initial_epoch)
    else:
        logger.info('Training with the triplet loss.')
        dsm = DeepSpeakerModel(batch_input_shape, include_softmax=False)
        triplet_checkpoint = load_best_checkpoint(CHECKPOINTS_TRIPLET_DIR)
        pre_training_checkpoint = load_best_checkpoint(CHECKPOINTS_SOFTMAX_DIR)
        if triplet_checkpoint is not None:
            logger.info(f'Loading triplet checkpoint: {triplet_checkpoint}.')
            dsm.m.load_weights(triplet_checkpoint)
        elif pre_training_checkpoint is not None:
            logger.info(f'Loading pre-training checkpoint: {pre_training_checkpoint}.')
            # If `by_name` is True, weights are loaded into layers only if they share the
            # same name. This is useful for fine-tuning or transfer-learning models where
            # some of the layers have changed.
            dsm.m.load_weights(pre_training_checkpoint, by_name=True)
        dsm.m.compile(optimizer=SGD(), loss=deep_speaker_loss)
        fit_model(dsm, working_dir, NUM_FRAMES)
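The bare `SGD()` used for the triplet phase falls back to the Keras defaults; spelled out explicitly, that call is equivalent to:

from tensorflow.keras.optimizers import SGD

# Keras defaults for plain SGD: constant learning rate, no momentum, no Nesterov.
optimizer = SGD(learning_rate=0.01, momentum=0.0, nesterov=False)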
Example 8: _model
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def _model(
    self,
    forecaster_features=1,
    forecaster_hidden_units=(8, 8),
):
    # Forecaster
    forecaster_input = Input(
        (forecaster_features, ), name='forecaster_input_features'
    )
    forecaster_output = forecaster_input
    forecaster_dense_units = list(forecaster_hidden_units) + [1]  # append final output
    for idx, units in enumerate(forecaster_dense_units):
        forecaster_output = Dense(
            units=units,
            activation='relu',
            name='forecaster_dense_{}'.format(idx)
        )(forecaster_output)
    forecaster_model = Model(
        forecaster_input, forecaster_output, name='Forecaster'
    )
    optimizer = SGD(lr=0.001)
    forecaster_model.compile(loss='mse', optimizer=optimizer)
    return {
        'forecaster': forecaster_model,
    }
Example 9: _model
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def _model(
    self,
    forecaster_features=1,
    forecaster_hidden_units=(8, ),
    lr=0.1,
):
    # Forecaster
    forecaster_input = Input(
        (forecaster_features, ), name='forecaster_input_features'
    )
    forecaster_output = forecaster_input
    forecaster_dense_units = list(forecaster_hidden_units) + [1]  # append final output
    for idx, units in enumerate(forecaster_dense_units):
        forecaster_output = Dense(
            units=units,
            activation=None,
            name='forecaster_dense_{}'.format(idx)
        )(forecaster_output)
    forecaster_model = Model(
        forecaster_input, forecaster_output, name='Forecaster'
    )
    optimizer = SGD(lr=lr)
    forecaster_model.compile(
        optimizer=optimizer, loss='mse', metrics=['mae']
    )
    return {
        'forecaster': forecaster_model,
    }
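A hedged usage sketch for the returned forecaster, trained on synthetic single-feature data (`models` stands for the dict returned by the `_model` method; the shapes follow the `forecaster_features=1` default):

import numpy as np

# Illustrative data: 64 samples of one input feature with a linear target.
x = np.random.rand(64, 1).astype('float32')
y = 2.0 * x + 0.1

forecaster = models['forecaster']
forecaster.fit(x, y, epochs=5, batch_size=16, verbose=0)
print(forecaster.evaluate(x, y, verbose=0))  # [mse loss, mae]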
Example 10: _get_optimizer
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def _get_optimizer(optimizer, lr_mult=1.0):
    "Get optimizer with correct learning rate."
    if optimizer == "sgd":
        return optimizers.SGD(lr=0.01 * lr_mult)
    elif optimizer == "rmsprop":
        return optimizers.RMSprop(lr=0.001 * lr_mult)
    elif optimizer == "adagrad":
        return optimizers.Adagrad(lr=0.01 * lr_mult)
    elif optimizer == "adam":
        return optimizers.Adam(lr=0.001 * lr_mult)
    elif optimizer == "nadam":
        return optimizers.Nadam(lr=0.002 * lr_mult)
    raise NotImplementedError
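Usage is straightforward: `lr_mult` scales each optimizer's conventional default rate, e.g.:

optimizer = _get_optimizer("sgd")                # SGD with lr = 0.01
fine_tune = _get_optimizer("adam", lr_mult=0.1)  # Adam with lr = 0.0001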
Example 11: tf_keras_model
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def tf_keras_model(data):
    x, y = data
    model = TfSequential()
    model.add(TfDense(3, input_dim=4))
    model.add(TfDense(1))
    model.compile(loss='mean_squared_error', optimizer=TfSGD(learning_rate=0.001))
    model.fit(x, y)
    return model
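The `Tf*` names are aliased imports; given the calls above, they presumably correspond to something like:

from tensorflow.keras.models import Sequential as TfSequential
from tensorflow.keras.layers import Dense as TfDense
from tensorflow.keras.optimizers import SGD as TfSGD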
Example 12: get_optimizer
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def get_optimizer(optim_type, learning_rate, decay_type='cosine', decay_steps=100000):
    optim_type = optim_type.lower()
    lr_scheduler = get_lr_scheduler(learning_rate, decay_type, decay_steps)

    if optim_type == 'adam':
        optimizer = Adam(learning_rate=lr_scheduler, amsgrad=False)
    elif optim_type == 'rmsprop':
        optimizer = RMSprop(learning_rate=lr_scheduler, rho=0.9, momentum=0.0, centered=False)
    elif optim_type == 'sgd':
        optimizer = SGD(learning_rate=lr_scheduler, momentum=0.0, nesterov=False)
    else:
        raise ValueError('Unsupported optimizer type')

    return optimizer
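`get_lr_scheduler` is project code, not a Keras API. A minimal sketch of what it plausibly returns for the default `decay_type='cosine'`, built on the public schedule classes (the function body is an assumption):

from tensorflow.keras.optimizers.schedules import CosineDecay, ExponentialDecay

def get_lr_scheduler(learning_rate, decay_type, decay_steps):
    # Hypothetical re-implementation: map a decay name to a Keras schedule.
    if decay_type == 'cosine':
        return CosineDecay(initial_learning_rate=learning_rate, decay_steps=decay_steps)
    if decay_type == 'exponential':
        return ExponentialDecay(initial_learning_rate=learning_rate,
                                decay_steps=decay_steps, decay_rate=0.9)
    return learning_rate  # constant rate when no decay is requested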
Example 13: get_optimizer
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def get_optimizer(optim_type, learning_rate):
    if optim_type == 'sgd':
        optimizer = SGD(lr=learning_rate, decay=5e-4, momentum=0.9)
    elif optim_type == 'rmsprop':
        optimizer = RMSprop(lr=learning_rate)
    elif optim_type == 'adam':
        optimizer = Adam(lr=learning_rate, decay=5e-4)
    else:
        raise ValueError('Unsupported optimizer type')

    return optimizer
Example 14: test_lstm_hourglass_basic
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def test_lstm_hourglass_basic(self):
    """
    Test that lstm_hourglass implements the correct parameters
    """
    model = lstm_hourglass(
        n_features=3,
        func="tanh",
        out_func="relu",
        optimizer="SGD",
        optimizer_kwargs={"lr": 0.02, "momentum": 0.001},
        compile_kwargs={"loss": "mae"},
    )

    # Ensure that the input dimension to Keras model matches the number of features.
    self.assertEqual(model.layers[0].input_shape[2], 3)

    # Ensure that the dimension of each encoding layer matches the expected dimension.
    self.assertEqual(
        [model.layers[i].input_shape[2] for i in range(1, 4)], [3, 2, 2]
    )

    # Ensure that the dimension of each decoding layer (excluding last decoding layer)
    # matches the expected dimension.
    self.assertEqual([model.layers[i].input_shape[2] for i in range(4, 6)], [2, 2])

    # Ensure that the dimension of last decoding layer matches the expected dimension.
    self.assertEqual(model.layers[6].input_shape[1], 3)

    # Ensure activation functions in the encoding part (layers 0-2)
    # match expected activation functions
    self.assertEqual(
        [model.layers[i].activation.__name__ for i in range(0, 3)],
        ["tanh", "tanh", "tanh"],
    )

    # Ensure activation functions in the decoding part (layers 3-5)
    # match expected activation functions
    self.assertEqual(
        [model.layers[i].activation.__name__ for i in range(3, 6)],
        ["tanh", "tanh", "tanh"],
    )

    # Ensure activation function for the output layer matches expected activation function
    self.assertEqual(model.layers[6].activation.__name__, "relu")

    # Assert that the expected Keras optimizer is used
    self.assertEqual(model.optimizer.__class__, optimizers.SGD)

    # Assert that the correct loss function is used.
    self.assertEqual(model.loss, "mae")
Example 15: test_lstm_symmetric_basic
# Required module import: from tensorflow.keras import optimizers [as alias]
# Or: from tensorflow.keras.optimizers import SGD [as alias]
def test_lstm_symmetric_basic(n_features, n_features_out):
    """
    Tests that lstm_symmetric implements the correct parameters
    """
    model = lstm_symmetric(
        n_features=n_features,
        n_features_out=n_features_out,
        lookback_window=3,
        dims=(4, 3, 2, 1),
        funcs=("relu", "relu", "tanh", "tanh"),
        out_func="linear",
        optimizer="SGD",
        optimizer_kwargs={"lr": 0.01},
        loss="mse",
    )

    # Ensure that the input dimension to Keras model matches the number of features.
    assert model.layers[0].input_shape[2] == n_features

    # Ensure that the dimension of each encoding layer matches the expected dimension.
    assert [model.layers[i].input_shape[2] for i in range(1, 5)] == [4, 3, 2, 1]

    # Ensure that the dimension of each decoding layer (excluding last decoding layer)
    # matches the expected dimension.
    assert [model.layers[i].input_shape[2] for i in range(5, 8)] == [1, 2, 3]

    # Ensure that the dimension of last decoding layer matches the expected dimension.
    assert model.layers[8].input_shape[1] == 4

    # Ensure activation functions in the encoding part (layers 0-3)
    # match expected activation functions.
    assert [model.layers[i].activation.__name__ for i in range(0, 4)] == [
        "relu",
        "relu",
        "tanh",
        "tanh",
    ]

    # Ensure activation functions in the decoding part (layers 4-7)
    # match expected activation functions.
    assert [model.layers[i].activation.__name__ for i in range(4, 8)] == [
        "tanh",
        "tanh",
        "relu",
        "relu",
    ]

    # Ensure activation function for the output layer matches expected activation function.
    assert model.layers[8].activation.__name__ == "linear"

    # Assert that the expected Keras optimizer is used
    assert model.optimizer.__class__ == optimizers.SGD

    # Assert that the correct loss function is used.
    assert model.loss == "mse"