本文整理汇总了Python中keras.optimizers.Optimizer方法的典型用法代码示例。如果您正苦于以下问题:Python optimizers.Optimizer方法的具体用法?Python optimizers.Optimizer怎么用?Python optimizers.Optimizer使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块keras.optimizers
的用法示例。
在下文中一共展示了optimizers.Optimizer方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import Optimizer [as 别名]
def __init__(self, model, optimizer, loss, loss_weights, metrics=None, features_col="features", label_col="label",
             batch_size=32, num_epoch=1, learning_rate=1.0):
    """Configure a distributed Keras training worker.

    :param model: Keras model to train.
    :param optimizer: optimizer name (str) or a Keras ``Optimizer`` instance.
    :param loss: loss identifier passed through to Keras.
    :param loss_weights: per-output loss weights passed through to Keras.
    :param metrics: list of metric names; defaults to ``["accuracy"]``.
    :param features_col: feature column name, or list of names for multi-input models.
    :param label_col: label column name, or list of names for multi-output models.
    :param batch_size: mini-batch size.
    :param num_epoch: number of passes over the training data.
    :param learning_rate: learning-rate value stored for the worker.
    """
    # 'metrics' defaults to None instead of a mutable list literal so that
    # instances never share (and possibly mutate) a single default list.
    if metrics is None:
        metrics = ["accuracy"]
    assert isinstance(optimizer, (str, Optimizer)), "'optimizer' must be a string or a Keras Optimizer instance"
    assert isinstance(features_col, (str, list)), "'features_col' must be a string or a list of strings"
    assert isinstance(label_col, (str, list)), "'label_col' must be a string or a list of strings"
    self.model = model
    # A string optimizer is wrapped in the serialized-config dict format Keras
    # expects; an Optimizer instance is serialized via keras' serialize().
    self.optimizer = {'class_name': optimizer, 'config': {}} if isinstance(optimizer, str) else serialize(optimizer)
    self.loss = loss
    self.loss_weights = loss_weights
    self.metrics = metrics
    # Normalize single column names to lists so downstream code can iterate uniformly.
    self.features_column = [features_col] if isinstance(features_col, str) else features_col
    self.label_column = [label_col] if isinstance(label_col, str) else label_col
    self.batch_size = batch_size
    self.num_epoch = num_epoch
    self.max_mini_batches = 100      # cap on prefetched mini-batches kept in memory
    self.prefetching_thread = None   # background thread that fills the mini-batch buffer
    self.mini_batches = None
    self.is_prefetching = True
    self.worker_id = -1              # presumably assigned later by the distributed runner — TODO confirm
    self.learning_rate = learning_rate
    self.num_inputs = len(self.features_column)
    self.num_outputs = len(self.label_column)
    self.current_epoch = 0
示例2: prepare_model
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import Optimizer [as 别名]
def prepare_model(model, optimizer, loss, metrics=('mse','mae'),
                  loss_bg_thresh=0, loss_bg_decay=0.06, Y=None):
    """Compile *model* with the named loss/metrics and return training callbacks.

    :param model: Keras model to compile.
    :param optimizer: a Keras ``Optimizer`` instance (anything else is rejected).
    :param loss: loss name; resolved to a module-level ``loss_<name>()`` factory.
    :param metrics: metric names, resolved the same way as ``loss``.
    :param loss_bg_thresh: threshold in [0, 1]; if > 0, targets below it are
        treated as background and down-weighted in the loss.
    :param loss_bg_decay: decay rate for the background-weighting parameter.
    :param Y: training targets; required (to estimate class frequency) when
        ``loss_bg_thresh > 0``.
    :return: list of Keras callbacks to pass to ``fit``.
    """
    from keras.optimizers import Optimizer
    isinstance(optimizer,Optimizer) or _raise(ValueError())
    # NOTE(review): loss/metric names are resolved with eval() against
    # loss_* factories in this module — only trusted, internal names must
    # ever reach this function.
    loss_standard = eval('loss_%s()'%loss)
    _metrics = [eval('loss_%s()'%m) for m in metrics]
    callbacks = [TerminateOnNaN()]
    # checks
    assert 0 <= loss_bg_thresh <= 1
    assert loss_bg_thresh == 0 or Y is not None
    if loss == 'laplace':
        assert K.image_data_format() == "channels_last", "TODO"
        # The laplace loss needs an even number of output channels (>= 2) —
        # presumably paired (mean, scale) predictions; TODO confirm.
        assert model.output.shape.as_list()[-1] >= 2 and model.output.shape.as_list()[-1] % 2 == 0
    # loss
    if loss_bg_thresh == 0:
        _loss = loss_standard
    else:
        freq = np.mean(Y > loss_bg_thresh)
        # print("class frequency:", freq)
        alpha = K.variable(1.0)
        loss_per_pixel = eval('loss_{loss}(mean=False)'.format(loss=loss))
        # Weight fore-/background inversely to their frequency; alpha is
        # decayed over training by the ParameterDecayCallback below.
        _loss = loss_thresh_weighted_decay(loss_per_pixel, loss_bg_thresh,
                                           0.5 / (0.1 + (1 - freq)),
                                           0.5 / (0.1 + freq),
                                           alpha)
        callbacks.append(ParameterDecayCallback(alpha, loss_bg_decay, name='alpha'))
    if loss not in metrics:
        _metrics.append(loss_standard)
    # compile model
    model.compile(optimizer=optimizer, loss=_loss, metrics=_metrics)
    return callbacks
示例3: compile
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import Optimizer [as 别名]
def compile(self, *args, **kwargs):
    '''Refer to Model.compile docstring for parameters. Override
    functionality is documented below.
    :override compile: Override Model.compile method to check for options
    that the optimizer is multi-gpu enabled, and synchronize initial
    variables.
    '''
    initsync = self._initsync
    usenccl = self._usenccl
    opt = kwargs['optimizer']
    # if isinstance(opt, str):
    # Resolve string/identifier optimizers to an Optimizer instance and put
    # the resolved object back into kwargs for the parent compile().
    if not isinstance(opt, KO.Optimizer):
        opt = KO.get(opt)
        kwargs['optimizer'] = opt
    # A synchronized multi-GPU model requires an optimizer flagged as
    # multi-GPU capable (its 'ismgpu' attribute); fail fast otherwise.
    if self._syncopt and not getattr(opt, 'ismgpu', False):
        raise RuntimeError(
            'Multi-GPU synchronization model requires a multi-GPU '
            'optimizer. Instead got: {}'.format(opt))
    opt.usenccl = usenccl
    if self._enqueue_ops:
        # Produces a warning that kwargs are ignored for Tensorflow. Patch
        # Function in tensorflow_backend to use the enqueue_ops option.
        kwargs['fetches'] = self._enqueue_ops
    super(ModelMGPU, self).compile(*args, **kwargs)
    if initsync:
        # Per the docstring above, this synchronizes initial variable values
        # (presumably across replicas) — see _run_initsync for details.
        self._run_initsync()
示例4: check_model_parameters
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import Optimizer [as 别名]
def check_model_parameters(self, model: "Model", optimizer: "Type[Optimizer]", loss: str, metrics) -> None:
    """Assert that *model* was compiled with the expected settings.

    :param model: compiled Keras model under test.
    :param optimizer: the expected optimizer *class* (not an instance) —
        it is passed to ``assertIsInstance``, so the correct annotation is
        ``Type[Optimizer]`` rather than ``Optimizer``.
    :param loss: expected loss identifier.
    :param metrics: expected metrics list.
    """
    self.assertIsInstance(model.optimizer, optimizer)
    self.assertEqual(model.loss, loss)
    self.assertEqual(model.metrics, metrics)
示例5: __init__
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import Optimizer [as 别名]
def __init__(self,
             input_size_per_time_step: int,
             allowed_characters: List[str],
             use_raw_wave_input: bool = False,
             activation: str = "relu",
             output_activation: str = "softmax",
             optimizer: Optional[Optimizer] = None,
             dropout: Optional[float] = None,
             load_model_from_directory: Optional[Path] = None,
             load_epoch: Optional[int] = None,
             allowed_characters_for_loaded_model: Optional[List[str]] = None,
             frozen_layer_count: int = 0,
             reinitialize_trainable_loaded_layers: bool = False,
             use_asg: bool = False,
             asg_transition_probabilities: Optional[ndarray] = None,
             asg_initial_probabilities: Optional[ndarray] = None,
             kenlm_directory: Path = None):
    """Build the speech model and optionally restore weights from disk.

    :param input_size_per_time_step: feature dimension per time step.
    :param allowed_characters: output alphabet (annotation fixed from
        ``List[chr]`` — ``chr`` is a builtin function, not a type).
    :param use_raw_wave_input: feed raw waveform instead of precomputed features.
    :param activation: hidden-layer activation name.
    :param output_activation: output-layer activation name.
    :param optimizer: Keras optimizer; defaults to ``Adam(1e-4)``. Defaults
        to None and is created per instance so that multiple models never
        share one stateful optimizer object.
    :param dropout: optional dropout rate.
    :param load_model_from_directory: directory to restore weights from, if any.
    :param load_epoch: epoch of the checkpoint to restore.
    :param allowed_characters_for_loaded_model: alphabet the loaded model was trained with.
    :param frozen_layer_count: number of loaded layers to freeze (requires a loaded model).
    :param reinitialize_trainable_loaded_layers: reload only the frozen prefix
        of layers and reinitialize the rest.
    :param use_asg: use ASG grapheme encoding instead of CTC.
    :param asg_transition_probabilities: ASG transition matrix; defaulted if None.
    :param asg_initial_probabilities: ASG initial distribution; defaulted if None.
    :param kenlm_directory: directory of a kenlm decoder whose vocabulary must
        match ``allowed_characters``.
    :raises ValueError: if layers are to be frozen without a loaded model, or
        if the kenlm vocabulary differs from ``allowed_characters``.
    """
    # Create a fresh optimizer per instance: a shared module-level default
    # instance would carry its internal state across unrelated models.
    if optimizer is None:
        optimizer = Adam(1e-4)
    if frozen_layer_count > 0 and load_model_from_directory is None:
        raise ValueError("Layers cannot be frozen if model is trained from scratch.")
    self.kenlm_directory = kenlm_directory
    # Pick the grapheme encoding matching the chosen loss framework (ASG vs CTC).
    self.grapheme_encoding = AsgGraphemeEncoding(allowed_characters=allowed_characters) \
        if use_asg else CtcGraphemeEncoding(allowed_characters=allowed_characters)
    self.asg_transition_probabilities = self._default_asg_transition_probabilities(
        self.grapheme_encoding.grapheme_set_size) \
        if asg_transition_probabilities is None else asg_transition_probabilities
    self.asg_initial_probabilities = self._default_asg_initial_probabilities(
        self.grapheme_encoding.grapheme_set_size) \
        if asg_initial_probabilities is None else asg_initial_probabilities
    self.use_asg = use_asg
    self.frozen_layer_count = frozen_layer_count
    self.output_activation = output_activation
    self.activation = activation
    self.use_raw_wave_input = use_raw_wave_input
    self.input_size_per_time_step = input_size_per_time_step
    self.optimizer = optimizer
    self.load_epoch = load_epoch
    self.dropout = dropout
    self.predictive_net = self.create_predictive_net()
    self.prediction_phase_flag = 0.
    if self.kenlm_directory is not None:
        # The kenlm vocabulary file is expected to contain a single line
        # listing the decoder's characters; it must match our alphabet.
        expected_characters = list(
            single(read_text(self.kenlm_directory / "vocabulary", encoding='utf8').splitlines()).lower())
        if allowed_characters != expected_characters:
            raise ValueError("Allowed characters {} differ from those expected by kenlm decoder: {}".
                             format(allowed_characters, expected_characters))
    if load_model_from_directory is not None:
        self.load_weights(
            allowed_characters_for_loaded_model, load_epoch, load_model_from_directory,
            loaded_first_layers_count=frozen_layer_count if reinitialize_trainable_loaded_layers else None)
示例6: compile
# 需要导入模块: from keras import optimizers [as 别名]
# 或者: from keras.optimizers import Optimizer [as 别名]
def compile(self, learning_rate, momentum):
    """Gets the model ready for training. Adds losses, regularization, and
    metrics. Then calls the Keras compile() function.
    """
    # Optimizer object: chosen by config. SGDAccum (gradient accumulation)
    # is the fallback when neither 'Adam' nor 'SGD' is configured.
    # optimizer = keras.optimizers.SGD(
    #     lr=learning_rate, momentum=momentum,
    #     clipnorm=self.config.GRADIENT_CLIP_NORM)
    if self.config.OPTIMIZER == 'Adam':
        optimizer = keras.optimizers.Adam(lr=learning_rate, epsilon = self.config.EPSILON)
    elif self.config.OPTIMIZER == 'SGD':
        optimizer = keras.optimizers.SGD(
            lr=learning_rate, momentum=momentum,
            clipnorm=self.config.GRADIENT_CLIP_NORM)
    else:
        optimizer = SGDAccum(
            lr=learning_rate, momentum=momentum,
            clipnorm=self.config.GRADIENT_CLIP_NORM, accum_iters=self.config.ACCUM_ITERS)
    # Add Losses
    # First, clear previously set losses to avoid duplication
    # NOTE(review): mutates Keras-private attributes; tied to a specific
    # Keras version's internals.
    self.keras_model._losses = []
    self.keras_model._per_input_losses = {}
    loss_names = [
        "rpn_class_loss", "rpn_bbox_loss",
        "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
    for name in loss_names:
        layer = self.keras_model.get_layer(name)
        if layer.output in self.keras_model.losses:
            continue
        # Register each loss layer's mean output, scaled by its configured
        # weight (LOSS_WEIGHTS defaults to 1. for unknown names).
        loss = (
            tf.reduce_mean(layer.output, keepdims=True)
            * self.config.LOSS_WEIGHTS.get(name, 1.))
        self.keras_model.add_loss(loss)
    # Add L2 Regularization
    # Skip gamma and beta weights of batch normalization layers.
    # Each term is normalized by the weight tensor's element count.
    reg_losses = [
        keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
        for w in self.keras_model.trainable_weights
        if 'gamma' not in w.name and 'beta' not in w.name]
    self.keras_model.add_loss(tf.add_n(reg_losses))
    # Compile: all real losses were registered via add_loss above, so a
    # placeholder (all-None) loss list is passed for the model outputs.
    self.keras_model.compile(
        optimizer=optimizer,
        loss=[None] * len(self.keras_model.outputs))
    # Add metrics for losses so each one is reported during training.
    for name in loss_names:
        if name in self.keras_model.metrics_names:
            continue
        layer = self.keras_model.get_layer(name)
        self.keras_model.metrics_names.append(name)
        loss = (
            tf.reduce_mean(layer.output, keepdims=True)
            * self.config.LOSS_WEIGHTS.get(name, 1.))
        self.keras_model.metrics_tensors.append(loss)
开发者ID:wwoody827,项目名称:cvpr-2018-autonomous-driving-autopilot-solution,代码行数:59,代码来源:model_inceptionresnet.py