

Python optimizers.Optimizer Code Examples

This article collects typical usage examples of keras.optimizers.Optimizer in Python. If you are wondering what optimizers.Optimizer is for, how to use it, or want to see it in real code, the hand-picked examples below may help. You can also explore further usage examples from the keras.optimizers module.


The sections below present 6 code examples of optimizers.Optimizer, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site surface better Python code examples.
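Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below, and assuming classic standalone Keras, which all of them use) of the patterns they rely on: Optimizer is the common base class of the built-in optimizers, optimizers.get turns a string identifier into an instance, and serialize/deserialize convert between an instance and a plain dict.

from keras import optimizers
from keras.optimizers import Optimizer, SGD

opt = optimizers.get('sgd')                  # string identifier -> SGD instance
assert isinstance(opt, Optimizer)            # Optimizer is the common base class

config = optimizers.serialize(SGD(lr=0.01))  # -> {'class_name': 'SGD', 'config': {...}}
restored = optimizers.deserialize(config)    # dict -> SGD instance again
assert isinstance(restored, SGD)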

Example 1: __init__

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Optimizer [as alias]
def __init__(self, model, optimizer, loss, loss_weights, metrics=["accuracy"], features_col="features", label_col="label",
                 batch_size=32, num_epoch=1, learning_rate=1.0):
        assert isinstance(optimizer, (str, Optimizer)), "'optimizer' must be a string or a Keras Optimizer instance"
        assert isinstance(features_col, (str, list)), "'features_col' must be a string or a list of strings"
        assert isinstance(label_col, (str, list)), "'label_col' must be a string or a list of strings"
        self.model = model
        self.optimizer = {'class_name': optimizer, 'config': {}} if isinstance(optimizer, str) else serialize(optimizer)
        self.loss = loss
        self.loss_weights = loss_weights
        self.metrics = metrics
        self.features_column = [features_col] if isinstance(features_col, str) else features_col
        self.label_column = [label_col] if isinstance(label_col, str) else label_col
        self.batch_size = batch_size
        self.num_epoch = num_epoch
        self.max_mini_batches = 100
        self.prefetching_thread = None
        self.mini_batches = None
        self.is_prefetching = True
        self.worker_id = -1
        self.learning_rate = learning_rate
        self.num_inputs = len(self.features_column)
        self.num_outputs = len(self.label_column)
        self.current_epoch = 0 
Author: cerndb, Project: dist-keras, Lines of code: 25, Source file: workers.py
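The worker stores the optimizer as a plain dict so it can be shipped to Spark workers. A minimal sketch (an assumption for illustration, not dist-keras code) of how such a dict can be turned back into a usable optimizer, with a worker-level learning rate applied:

from keras import optimizers
import keras.backend as K

spec = {'class_name': 'Adam', 'config': {}}   # what self.optimizer would hold for a string identifier
opt = optimizers.deserialize(spec)            # rebuild an Adam instance with default settings
K.set_value(opt.lr, 0.001)                    # override the learning rate; classic Keras optimizers expose `lr`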

Example 2: prepare_model

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Optimizer [as alias]
def prepare_model(model, optimizer, loss, metrics=('mse','mae'),
                  loss_bg_thresh=0, loss_bg_decay=0.06, Y=None):
    """ TODO """

    from keras.optimizers import Optimizer
    isinstance(optimizer, Optimizer) or _raise(ValueError())


    loss_standard   = eval('loss_%s()'%loss)
    _metrics        = [eval('loss_%s()'%m) for m in metrics]
    callbacks       = [TerminateOnNaN()]

    # checks
    assert 0 <= loss_bg_thresh <= 1
    assert loss_bg_thresh == 0 or Y is not None
    if loss == 'laplace':
        assert K.image_data_format() == "channels_last", "TODO"
        assert model.output.shape.as_list()[-1] >= 2 and model.output.shape.as_list()[-1] % 2 == 0

    # loss
    if loss_bg_thresh == 0:
        _loss = loss_standard
    else:
        freq = np.mean(Y > loss_bg_thresh)
        # print("class frequency:", freq)
        alpha = K.variable(1.0)
        loss_per_pixel = eval('loss_{loss}(mean=False)'.format(loss=loss))
        _loss = loss_thresh_weighted_decay(loss_per_pixel, loss_bg_thresh,
                                           0.5 / (0.1 + (1 - freq)),
                                           0.5 / (0.1 +      freq),
                                           alpha)
        callbacks.append(ParameterDecayCallback(alpha, loss_bg_decay, name='alpha'))
        if loss not in metrics:
            _metrics.append(loss_standard)


    # compile model
    model.compile(optimizer=optimizer, loss=_loss, metrics=_metrics)

    return callbacks 
Author: CSBDeep, Project: CSBDeep, Lines of code: 42, Source file: train.py
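The `isinstance(optimizer, Optimizer) or _raise(ValueError())` line is an inline assertion: `_raise` is a small helper from CSBDeep's utilities that simply raises the exception it is given. A stand-alone sketch of the idiom (the helper below is a re-implementation for illustration, not the project's code):

def _raise(e):
    # Raise the given exception; lets a raise appear inside an expression.
    raise e

optimizer = "adam"
isinstance(optimizer, str) or _raise(ValueError("expected a string identifier"))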

Example 3: compile

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Optimizer [as alias]
def compile(self, *args, **kwargs):
        '''Refer to Model.compile docstring for parameters. Override
        functionality is documented below.

        :override compile: Override Model.compile method to check for options
            that the optimizer is multi-gpu enabled, and synchronize initial
            variables.
        '''
        initsync = self._initsync
        usenccl = self._usenccl

        opt = kwargs['optimizer']
        # if isinstance(opt, str):
        if not isinstance(opt, KO.Optimizer):
            opt = KO.get(opt)
            kwargs['optimizer'] = opt

        if self._syncopt and not getattr(opt, 'ismgpu', False):
            raise RuntimeError(
                'Multi-GPU synchronization model requires a multi-GPU '
                'optimizer. Instead got: {}'.format(opt))

        opt.usenccl = usenccl

        if self._enqueue_ops:
            # Produces a warning that kwargs are ignored for Tensorflow. Patch
            # Function in tensorflow_backend to use the enqueue_ops option.
            kwargs['fetches'] = self._enqueue_ops

        super(ModelMGPU, self).compile(*args, **kwargs)

        if initsync:
            self._run_initsync() 
Author: avolkov1, Project: keras_experiments, Lines of code: 35, Source file: _multigpu_with_nccl.py

Example 4: check_model_parameters

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Optimizer [as alias]
def check_model_parameters(self, model: Model, optimizer: Optimizer, loss: str, metrics) -> None:
        self.assertIsInstance(model.optimizer, optimizer)
        self.assertEqual(model.loss, loss)
        self.assertEqual(model.metrics, metrics) 
Author: rodrigobressan, Project: entity_embeddings_categorical, Lines of code: 6, Source file: test_assembler.py
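A hypothetical, self-contained use of such a check (the tiny model and names below are illustrative, not from the project), assuming classic Keras, where compile() stores the loss string on model.loss: the expected optimizer is passed as a class, not an instance, so assertIsInstance can verify the compiled model.

import unittest
import keras
from keras.optimizers import Adam

class CompiledModelTest(unittest.TestCase):
    def test_optimizer_and_loss(self):
        model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(4,))])
        model.compile(optimizer=Adam(), loss='mse', metrics=['mae'])
        # Same assertions as check_model_parameters above:
        self.assertIsInstance(model.optimizer, Adam)
        self.assertEqual(model.loss, 'mse')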

Example 5: __init__

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Optimizer [as alias]
def __init__(self,
                 input_size_per_time_step: int,
                 allowed_characters: List[chr],
                 use_raw_wave_input: bool = False,
                 activation: str = "relu",
                 output_activation: str = "softmax",
                 optimizer: Optimizer = Adam(1e-4),
                 dropout: Optional[float] = None,
                 load_model_from_directory: Optional[Path] = None,
                 load_epoch: Optional[int] = None,
                 allowed_characters_for_loaded_model: Optional[List[chr]] = None,
                 frozen_layer_count: int = 0,
                 reinitialize_trainable_loaded_layers: bool = False,
                 use_asg: bool = False,
                 asg_transition_probabilities: Optional[ndarray] = None,
                 asg_initial_probabilities: Optional[ndarray] = None,
                 kenlm_directory: Path = None):

        if frozen_layer_count > 0 and load_model_from_directory is None:
            raise ValueError("Layers cannot be frozen if model is trained from scratch.")

        self.kenlm_directory = kenlm_directory
        self.grapheme_encoding = AsgGraphemeEncoding(allowed_characters=allowed_characters) \
            if use_asg else CtcGraphemeEncoding(allowed_characters=allowed_characters)

        self.asg_transition_probabilities = self._default_asg_transition_probabilities(
            self.grapheme_encoding.grapheme_set_size) \
            if asg_transition_probabilities is None else asg_transition_probabilities

        self.asg_initial_probabilities = self._default_asg_initial_probabilities(
            self.grapheme_encoding.grapheme_set_size) \
            if asg_initial_probabilities is None else asg_initial_probabilities

        self.use_asg = use_asg
        self.frozen_layer_count = frozen_layer_count
        self.output_activation = output_activation
        self.activation = activation
        self.use_raw_wave_input = use_raw_wave_input
        self.input_size_per_time_step = input_size_per_time_step
        self.optimizer = optimizer
        self.load_epoch = load_epoch
        self.dropout = dropout
        self.predictive_net = self.create_predictive_net()
        self.prediction_phase_flag = 0.

        if self.kenlm_directory is not None:
            expected_characters = list(
                single(read_text(self.kenlm_directory / "vocabulary", encoding='utf8').splitlines()).lower())

            if allowed_characters != expected_characters:
                raise ValueError("Allowed characters {} differ from those expected by kenlm decoder: {}".
                                 format(allowed_characters, expected_characters))

        if load_model_from_directory is not None:
            self.load_weights(
                allowed_characters_for_loaded_model, load_epoch, load_model_from_directory,
                loaded_first_layers_count=frozen_layer_count if reinitialize_trainable_loaded_layers else None) 
Author: JuliusKunze, Project: speechless, Lines of code: 59, Source file: net.py
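One detail worth noting in the signature above: `optimizer: Optimizer = Adam(1e-4)` creates the default Adam once, at import time, so every instance built without an explicit optimizer shares that same object and its state. A common alternative, shown here only as a hypothetical sketch, is to default to None and create the optimizer per call:

from typing import Optional
from keras.optimizers import Optimizer, Adam

def build_optimizer(optimizer: Optional[Optimizer] = None) -> Optimizer:
    # Hypothetical helper: a fresh Adam per call instead of a shared default instance.
    return optimizer if optimizer is not None else Adam(1e-4)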

Example 6: compile

# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import Optimizer [as alias]
def compile(self, learning_rate, momentum):
        """Gets the model ready for training. Adds losses, regularization, and
        metrics. Then calls the Keras compile() function.
        """
        # Optimizer object
        # optimizer = keras.optimizers.SGD(
        #     lr=learning_rate, momentum=momentum,
        #     clipnorm=self.config.GRADIENT_CLIP_NORM)
        if self.config.OPTIMIZER == 'Adam':
            optimizer = keras.optimizers.Adam(lr=learning_rate, epsilon=self.config.EPSILON)
        elif self.config.OPTIMIZER == 'SGD':
            optimizer = keras.optimizers.SGD(
                lr=learning_rate, momentum=momentum,
                clipnorm=self.config.GRADIENT_CLIP_NORM)
        else:
            optimizer = SGDAccum(
                lr=learning_rate, momentum=momentum,
                clipnorm=self.config.GRADIENT_CLIP_NORM, accum_iters=self.config.ACCUM_ITERS)
        # Add Losses
        # First, clear previously set losses to avoid duplication
        self.keras_model._losses = []
        self.keras_model._per_input_losses = {}
        loss_names = [
            "rpn_class_loss",  "rpn_bbox_loss",
            "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
        for name in loss_names:
            layer = self.keras_model.get_layer(name)
            if layer.output in self.keras_model.losses:
                continue
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.add_loss(loss)

        # Add L2 Regularization
        # Skip gamma and beta weights of batch normalization layers.
        reg_losses = [
            keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
            for w in self.keras_model.trainable_weights
            if 'gamma' not in w.name and 'beta' not in w.name]
        self.keras_model.add_loss(tf.add_n(reg_losses))

        # Compile
        self.keras_model.compile(
            optimizer=optimizer,
            loss=[None] * len(self.keras_model.outputs))

        # Add metrics for losses
        for name in loss_names:
            if name in self.keras_model.metrics_names:
                continue
            layer = self.keras_model.get_layer(name)
            self.keras_model.metrics_names.append(name)
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.metrics_tensors.append(loss) 
Author: wwoody827, Project: cvpr-2018-autonomous-driving-autopilot-solution, Lines of code: 59, Source file: model_inceptionresnet.py
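The optimizer options used in this example are plain keyword arguments on the built-in classic-Keras optimizers; a minimal sketch of just that part (SGDAccum is a project-specific class and is not shown here):

import keras

# Gradient-norm clipping and epsilon are standard constructor arguments.
sgd = keras.optimizers.SGD(lr=0.001, momentum=0.9, clipnorm=5.0)
adam = keras.optimizers.Adam(lr=0.001, epsilon=1e-8)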


Note: The keras.optimizers.Optimizer examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.