当前位置: 首页>>代码示例>>Python>>正文


Python callbacks.LearningRateScheduler方法代码示例

本文整理汇总了Python中tensorflow.keras.callbacks.LearningRateScheduler方法的典型用法代码示例。如果您正苦于以下问题:Python callbacks.LearningRateScheduler方法的具体用法?Python callbacks.LearningRateScheduler怎么用?Python callbacks.LearningRateScheduler使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在tensorflow.keras.callbacks的用法示例。


在下文中一共展示了callbacks.LearningRateScheduler方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: train

# 需要导入模块: from tensorflow.keras import callbacks [as 别名]
# 或者: from tensorflow.keras.callbacks import LearningRateScheduler [as 别名]
def train(weights_path, epochs, batch_size, initial_epoch,
          kl_start_epoch, kl_alpha_increase_per_epoch):
    """Train the model, optionally resuming from ``weights_path``.

    Sets up checkpointing, an LR schedule, and (for variational models)
    a KL-weight annealing callback before calling ``model.fit``.
    """
    print('loading data...')
    # Load or build the train/validation split.
    input_shape, train, valid, train_targets, valid_targets = get_train_data()

    print('getting model...')
    # Load or build the model; kl_alpha is None for non-variational nets.
    model, checkpoint_path, kl_alpha = get_model(
        input_shape,
        scale_factor=len(train) / batch_size,
        weights_path=weights_path)

    # Checkpoint only the best weights; anneal KL weight starting at
    # kl_start_epoch when the model exposes a kl_alpha variable.
    callback_list = [
        ModelCheckpoint(checkpoint_path, verbose=1,
                        save_weights_only=True, save_best_only=True),
        LearningRateScheduler(schedule),
        (Callback() if kl_alpha is None
         else AnnealingCallback(kl_alpha, kl_start_epoch,
                                kl_alpha_increase_per_epoch)),
    ]

    print('fitting model...')
    model.fit(train, train_targets, batch_size, epochs,
              initial_epoch=initial_epoch,
              callbacks=callback_list,
              validation_data=(valid, valid_targets))
开发者ID:sandialabs,项目名称:bcnn,代码行数:27,代码来源:train.py

示例2: train

# 需要导入模块: from tensorflow.keras import callbacks [as 别名]
# 或者: from tensorflow.keras.callbacks import LearningRateScheduler [as 别名]
def train(lambd, sigma, n_centers, trial):
    """Run one hyperparameter trial on imbalanced CIFAR with affinity loss.

    Returns the best F1 score observed across all epochs.
    """
    K.clear_session()
    (X_train, y_train), (X_test, y_test) = inbalanced_cifar(200)

    model = create_models(sigma, n_centers)
    model.compile("adam", affinity_loss(lambd), [acc])
    # Silence TF logging so trial output stays readable.
    tf.logging.set_verbosity(tf.logging.FATAL)

    # Wrap the Keras model for the Colab TPU (TF 1.x contrib API).
    resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        "grpc://" + os.environ["COLAB_TPU_ADDR"])
    model = tf.contrib.tpu.keras_to_tpu_model(
        model, strategy=keras_support.TPUDistributionStrategy(resolver))

    lr_callback = LearningRateScheduler(step_decay)
    f1_callback = F1Callback(model, X_test, y_test, trial)

    model.fit(X_train, y_train, callbacks=[lr_callback, f1_callback],
              batch_size=640, epochs=100, verbose=0)

    best_f1 = max(f1_callback.f1_log)
    print(f"lambda:{lambd:.04}, sigma:{sigma:.04} n_centers:{n_centers} / f1 = {best_f1:.04}")
    return best_f1
开发者ID:koshian2,项目名称:affinity-loss,代码行数:24,代码来源:cnn_cifar_optuna_affinity.py

示例3: train

# 需要导入模块: from tensorflow.keras import callbacks [as 别名]
# 或者: from tensorflow.keras.callbacks import LearningRateScheduler [as 别名]
def train(inbalance_size):
    """Train the softmax baseline on imbalanced CIFAR and print metrics."""
    (X_train, y_train), (X_test, y_test) = inbalanced_cifar(inbalance_size)
    # Keep only the 10 class columns of the label matrices.
    y_train, y_test = y_train[:, :10], y_test[:, :10]

    model = create_models()
    model.compile("adam", "categorical_crossentropy", ["acc"])
    # Silence TF logging so the metric line below stands out.
    tf.logging.set_verbosity(tf.logging.FATAL)

    # Wrap the Keras model for the Colab TPU (TF 1.x contrib API).
    resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        "grpc://" + os.environ["COLAB_TPU_ADDR"])
    model = tf.contrib.tpu.keras_to_tpu_model(
        model, strategy=keras_support.TPUDistributionStrategy(resolver))

    lr_callback = LearningRateScheduler(step_decay)
    f1_callback = F1Callback(model, X_test, y_test)

    history = model.fit(X_train, y_train,
                        validation_data=(X_test, y_test),
                        callbacks=[lr_callback, f1_callback],
                        batch_size=640, epochs=100, verbose=0).history

    best_acc = max(history["val_acc"])
    best_f1 = max(f1_callback.f1_log)
    print(f"{inbalance_size} {best_acc:.04} {best_f1:.04}")
开发者ID:koshian2,项目名称:affinity-loss,代码行数:24,代码来源:cnn_cifar_softmax.py

示例4: train

# 需要导入模块: from tensorflow.keras import callbacks [as 别名]
# 或者: from tensorflow.keras.callbacks import LearningRateScheduler [as 别名]
def train(inbalance_size):
    """Train with affinity loss on imbalanced CIFAR and print metrics."""
    (X_train, y_train), (X_test, y_test) = inbalanced_cifar(inbalance_size)

    model = create_models()
    # lambda=0.43 is the fixed regularization weight used for this run.
    model.compile("adam", affinity_loss(0.43), [acc])
    # Silence TF logging so the metric line below stands out.
    tf.logging.set_verbosity(tf.logging.FATAL)

    # Wrap the Keras model for the Colab TPU (TF 1.x contrib API).
    resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        "grpc://" + os.environ["COLAB_TPU_ADDR"])
    model = tf.contrib.tpu.keras_to_tpu_model(
        model, strategy=keras_support.TPUDistributionStrategy(resolver))

    lr_callback = LearningRateScheduler(step_decay)
    f1_callback = F1Callback(model, X_test, y_test)

    history = model.fit(X_train, y_train,
                        validation_data=(X_test, y_test),
                        callbacks=[lr_callback, f1_callback],
                        batch_size=640, epochs=100, verbose=0).history

    best_acc = max(history["val_acc"])
    best_f1 = max(f1_callback.f1_log)
    print(f"{inbalance_size} {best_acc:.04} {best_f1:.04}")
开发者ID:koshian2,项目名称:affinity-loss,代码行数:23,代码来源:cnn_cifar_affinity.py

示例5: train

# 需要导入模块: from tensorflow.keras import callbacks [as 别名]
# 或者: from tensorflow.keras.callbacks import LearningRateScheduler [as 别名]
def train(self):
        """Compile and train the FCN with accuracy and LR callbacks."""
        # Adam with a fixed 1e-3 starting rate; the scheduler callback
        # then adjusts it per epoch (per the original notes: halves every
        # 20 epochs after the 40th).
        self.fcn.compile(optimizer=Adam(lr=1e-3),
                         loss='categorical_crossentropy')

        print_log("# of classes %d" % self.n_classes, self.args.verbose)
        print_log("Batch size: %d" % self.args.batch_size, self.args.verbose)

        # AccuracyCallback saves weights; presumably when test IoU peaks
        # (per the original comment) — confirm against its implementation.
        self.fcn.fit(x=self.train_generator,
                     use_multiprocessing=False,
                     callbacks=[AccuracyCallback(self),
                                LearningRateScheduler(lr_scheduler)],
                     epochs=self.args.epochs)
开发者ID:PacktPublishing,项目名称:Advanced-Deep-Learning-with-Keras,代码行数:28,代码来源:fcn-12.3.1.py

示例6: train

# 需要导入模块: from tensorflow.keras import callbacks [as 别名]
# 或者: from tensorflow.keras.callbacks import LearningRateScheduler [as 别名]
def train(self):
        """Fit the model on the training generator, tracking accuracy
            and adjusting the learning rate each epoch via callbacks.
        """
        callback_list = [
            AccuracyCallback(self),
            LearningRateScheduler(lr_schedule, verbose=1),
        ]
        self._model.fit(x=self.train_gen,
                        use_multiprocessing=False,
                        epochs=self.args.epochs,
                        callbacks=callback_list,
                        shuffle=True)
开发者ID:PacktPublishing,项目名称:Advanced-Deep-Learning-with-Keras,代码行数:16,代码来源:iic-13.5.1.py

示例7: train

# 需要导入模块: from tensorflow.keras import callbacks [as 别名]
# 或者: from tensorflow.keras.callbacks import LearningRateScheduler [as 别名]
def train(self):
        """Train MINE to estimate the mutual information between X and Y
            (e.g. an MNIST image and its transformed version), using
            accuracy and LR-schedule callbacks.
        """
        callback_list = [
            AccuracyCallback(self),
            LearningRateScheduler(lr_schedule, verbose=1),
        ]
        self._model.fit(x=self.train_gen,
                        use_multiprocessing=False,
                        epochs=self.args.epochs,
                        callbacks=callback_list,
                        shuffle=True)
开发者ID:PacktPublishing,项目名称:Advanced-Deep-Learning-with-Keras,代码行数:16,代码来源:mine-13.8.1.py


注:本文中的tensorflow.keras.callbacks.LearningRateScheduler方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。