

Python callbacks.LearningRateScheduler Method Code Examples

This article collects typical usage examples of the tensorflow.keras.callbacks.LearningRateScheduler method in Python. If you are wondering what callbacks.LearningRateScheduler does, how to call it, or what real code that uses it looks like, the hand-picked examples below may help. You can also explore further usage examples from the containing module, tensorflow.keras.callbacks.


The following presents 7 code examples of the callbacks.LearningRateScheduler method, sorted by popularity by default.
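Before turning to the project-specific examples, here is a minimal, self-contained sketch of how LearningRateScheduler is typically wired into model.fit. The toy model, random data, and the halve-every-10-epochs schedule are illustrative assumptions, not taken from any of the examples below.

import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import LearningRateScheduler

def schedule(epoch):
    # Halve the learning rate every 10 epochs, starting from 1e-3.
    return 1e-3 * (0.5 ** (epoch // 10))

# Toy model and data, used only to show the callback in context.
model = tf.keras.Sequential([
    tf.keras.Input(shape=(4,)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer="adam", loss="mse")

x = np.random.rand(64, 4).astype("float32")
y = np.random.rand(64, 1).astype("float32")

model.fit(x, y, epochs=3, batch_size=16,
          callbacks=[LearningRateScheduler(schedule, verbose=1)])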

Example 1: train

# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import LearningRateScheduler [as alias]
def train(weights_path, epochs, batch_size, initial_epoch,
          kl_start_epoch, kl_alpha_increase_per_epoch):
    """Trains a model."""
    print('loading data...')
    # Loads or creates training data.
    input_shape, train, valid, train_targets, valid_targets = get_train_data()
    print('getting model...')
    # Loads or creates model.
    model, checkpoint_path, kl_alpha = get_model(input_shape,
                                                 scale_factor=len(train)/batch_size,
                                                 weights_path=weights_path)

    # Sets callbacks.
    checkpointer = ModelCheckpoint(checkpoint_path, verbose=1,
                                   save_weights_only=True, save_best_only=True)

    scheduler = LearningRateScheduler(schedule)
    annealer = Callback() if kl_alpha is None else AnnealingCallback(kl_alpha, kl_start_epoch, kl_alpha_increase_per_epoch)

    print('fitting model...')
    # Trains model.
    model.fit(train, train_targets, batch_size, epochs,
              initial_epoch=initial_epoch,
              callbacks=[checkpointer, scheduler, annealer],
              validation_data=(valid, valid_targets)) 
Developer ID: sandialabs, Project: bcnn, Lines of code: 27, Source file: train.py
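Both schedule and AnnealingCallback in Example 1 are defined elsewhere in the bcnn project and are not shown on this page. Purely for orientation, a hypothetical annealing callback with the same constructor signature might look like the sketch below; it assumes kl_alpha is a tf.Variable read by the model's loss, and it is not the project's actual implementation.

from tensorflow.keras.callbacks import Callback

class AnnealingCallback(Callback):
    """Hypothetical sketch: ramps up a KL weight after a start epoch."""

    def __init__(self, kl_alpha, kl_start_epoch, kl_alpha_increase_per_epoch):
        super().__init__()
        self.kl_alpha = kl_alpha                # assumed tf.Variable read by the loss
        self.kl_start_epoch = kl_start_epoch
        self.increase = kl_alpha_increase_per_epoch

    def on_epoch_begin(self, epoch, logs=None):
        # Leave the weight untouched until the start epoch, then raise it
        # linearly per epoch, capped at 1.0.
        if epoch >= self.kl_start_epoch:
            target = min(1.0, (epoch - self.kl_start_epoch + 1) * self.increase)
            self.kl_alpha.assign(target)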

Example 2: train

# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import LearningRateScheduler [as alias]
def train(lambd, sigma, n_centers, trial):
    K.clear_session()
    (X_train, y_train), (X_test, y_test) = inbalanced_cifar(200)

    model = create_models(sigma, n_centers)
    model.compile("adam", affinity_loss(lambd), [acc])
    tf.logging.set_verbosity(tf.logging.FATAL)  # keep TensorFlow from flooding the log

    tpu_grpc_url = "grpc://"+os.environ["COLAB_TPU_ADDR"]
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
    strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    scheduler = LearningRateScheduler(step_decay)
    f1 = F1Callback(model, X_test, y_test, trial)

    history = model.fit(X_train, y_train, callbacks=[scheduler, f1],
                        batch_size=640, epochs=100, verbose=0).history

    max_f1 = max(f1.f1_log)
    print(f"lambda:{lambd:.04}, sigma:{sigma:.04} n_centers:{n_centers} / f1 = {max_f1:.04}")
    return max_f1 
Developer ID: koshian2, Project: affinity-loss, Lines of code: 24, Source file: cnn_cifar_optuna_affinity.py
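The step_decay function used in Examples 2 through 4 is defined elsewhere in the affinity-loss repository. A generic step-decay schedule of the kind commonly passed to LearningRateScheduler might look like the following sketch; the initial rate, drop factor, and drop interval are assumptions for illustration, not the repository's values.

import math

def step_decay(epoch):
    """Hypothetical step decay: start at 1e-3 and halve every 25 epochs."""
    initial_lr, drop, epochs_per_drop = 1e-3, 0.5, 25
    return initial_lr * (drop ** math.floor(epoch / epochs_per_drop))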

Example 3: train

# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import LearningRateScheduler [as alias]
def train(inbalance_size):
    (X_train, y_train), (X_test, y_test) = inbalanced_cifar(inbalance_size)
    y_train, y_test = y_train[:, :10], y_test[:, :10]

    model = create_models()
    model.compile("adam", "categorical_crossentropy", ["acc"])
    tf.logging.set_verbosity(tf.logging.FATAL)

    tpu_grpc_url = "grpc://"+os.environ["COLAB_TPU_ADDR"]
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
    strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    scheduler = LearningRateScheduler(step_decay)
    f1 = F1Callback(model, X_test, y_test)

    history = model.fit(X_train, y_train, validation_data=(X_test, y_test), callbacks=[scheduler, f1],
                        batch_size=640, epochs=100, verbose=0).history

    max_acc = max(history["val_acc"])
    max_f1 = max(f1.f1_log)
    print(f"{inbalance_size} {max_acc:.04} {max_f1:.04}") 
Developer ID: koshian2, Project: affinity-loss, Lines of code: 24, Source file: cnn_cifar_softmax.py

Example 4: train

# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import LearningRateScheduler [as alias]
def train(inbalance_size):
    (X_train, y_train), (X_test, y_test) = inbalanced_cifar(inbalance_size)

    model = create_models()
    model.compile("adam", affinity_loss(0.43), [acc])
    tf.logging.set_verbosity(tf.logging.FATAL)

    tpu_grpc_url = "grpc://"+os.environ["COLAB_TPU_ADDR"]
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
    strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    scheduler = LearningRateScheduler(step_decay)
    f1 = F1Callback(model, X_test, y_test)

    history = model.fit(X_train, y_train, validation_data=(X_test, y_test), callbacks=[scheduler, f1],
                        batch_size=640, epochs=100, verbose=0).history

    max_acc = max(history["val_acc"])
    max_f1 = max(f1.f1_log)
    print(f"{inbalance_size} {max_acc:.04} {max_f1:.04}") 
Developer ID: koshian2, Project: affinity-loss, Lines of code: 23, Source file: cnn_cifar_affinity.py

Example 5: train

# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import LearningRateScheduler [as alias]
def train(self):
    """Train an FCN"""
    optimizer = Adam(lr=1e-3)
    loss = 'categorical_crossentropy'
    self.fcn.compile(optimizer=optimizer, loss=loss)

    log = "# of classes %d" % self.n_classes
    print_log(log, self.args.verbose)
    log = "Batch size: %d" % self.args.batch_size
    print_log(log, self.args.verbose)

    # prepare callbacks for saving model weights
    # and learning rate scheduler
    # model weights are saved when test iou is highest
    # learning rate decreases by 50% every 20 epochs
    # after 40th epoch
    accuracy = AccuracyCallback(self)
    scheduler = LearningRateScheduler(lr_scheduler)

    callbacks = [accuracy, scheduler]
    # train the fcn network
    self.fcn.fit(x=self.train_generator,
                 use_multiprocessing=False,
                 callbacks=callbacks,
                 epochs=self.args.epochs)
                 # workers=self.args.workers)
Developer ID: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines of code: 28, Source file: fcn-12.3.1.py
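The lr_scheduler function above comes from the book's repository and is not reproduced on this page. Going only by the comments in Example 5 (keep the rate until the 40th epoch, then halve it every 20 epochs), one plausible implementation is the sketch below; the 1e-3 starting rate is assumed to match the Adam optimizer in the same method.

def lr_scheduler(epoch):
    """Sketch of the policy described in the comments:
    keep the initial rate until epoch 40, then halve it every 20 epochs."""
    base_lr = 1e-3
    if epoch < 40:
        return base_lr
    return base_lr * (0.5 ** ((epoch - 40) // 20 + 1))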

Example 6: train

# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import LearningRateScheduler [as alias]
def train(self):
    """Train function uses the data generator,
    accuracy computation, and learning rate
    scheduler callbacks
    """
    accuracy = AccuracyCallback(self)
    lr_scheduler = LearningRateScheduler(lr_schedule,
                                         verbose=1)
    callbacks = [accuracy, lr_scheduler]
    self._model.fit(x=self.train_gen,
                    use_multiprocessing=False,
                    epochs=self.args.epochs,
                    callbacks=callbacks,
                    shuffle=True)
Developer ID: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines of code: 16, Source file: iic-13.5.1.py

Example 7: train

# Required import: from tensorflow.keras import callbacks [as alias]
# Or: from tensorflow.keras.callbacks import LearningRateScheduler [as alias]
def train(self):
    """Train MINE to estimate MI between
    X and Y (eg MNIST image and its transformed
    version)
    """
    accuracy = AccuracyCallback(self)
    lr_scheduler = LearningRateScheduler(lr_schedule,
                                         verbose=1)
    callbacks = [accuracy, lr_scheduler]
    self._model.fit(x=self.train_gen,
                    use_multiprocessing=False,
                    epochs=self.args.epochs,
                    callbacks=callbacks,
                    shuffle=True)
Developer ID: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines of code: 16, Source file: mine-13.8.1.py


Note: The tensorflow.keras.callbacks.LearningRateScheduler examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors, who retain copyright of the source code; please consult each project's License before distributing or using it. Do not reproduce this article without permission.