

Python callbacks.TerminateOnNaN method code examples

This article collects typical usage examples of the keras.callbacks.TerminateOnNaN method in Python. If you are wondering how exactly callbacks.TerminateOnNaN is used, how to call it, or what working examples look like, the curated code examples here may help. You can also explore further usage examples from its containing module, keras.callbacks.


The sections below present 6 code examples of the callbacks.TerminateOnNaN method, sorted by popularity by default.
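Before the project-specific examples, here is a minimal usage sketch of the callback itself: TerminateOnNaN takes no arguments and is simply added to the callbacks list passed to model.fit; it halts training as soon as a batch loss becomes NaN or Inf. The toy model and random data in this sketch are illustrative assumptions only and are not taken from any of the quoted projects.

# Minimal usage sketch (assumes standalone Keras; the model and data are made up for illustration).
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import TerminateOnNaN

model = Sequential([Dense(8, input_dim=4, activation='relu'),
                    Dense(1, activation='linear')])
model.compile(optimizer='rmsprop', loss='mean_squared_error')

X = np.random.rand(64, 4)
y = np.random.rand(64, 1)

# If any batch produces a NaN or Inf loss, training stops immediately.
model.fit(X, y, epochs=5, batch_size=16, callbacks=[TerminateOnNaN()])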

Example 1: lengthy_test

# Required module import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TerminateOnNaN [as alias]
def lengthy_test(model, testrange=[5,10,20,40,80], epochs=100, verboose=True):
    ts = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    log_path = LOG_PATH_BASE + ts + "_-_" + model.name 
    tensorboard = TensorBoard(log_dir=log_path,
                              write_graph=False,  # This eats a lot of space. Enable with caution!
                              # histogram_freq=1,
                              write_images=True,
                              batch_size=model.batch_size,
                              write_grads=True)
    model_saver = ModelCheckpoint(log_path + "/model.ckpt.{epoch:04d}.hdf5", monitor='loss', period=1)
    callbacks = [tensorboard, TerminateOnNaN(), model_saver]

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))

    train_model(model, epochs=epochs, callbacks=callbacks, verboose=verboose)

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))
    return 
Developer ID: flomlo, Project: ntm_keras, Lines of code: 24, Source file: testing_utils.py

Example 2: prepare_model

# Required module import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TerminateOnNaN [as alias]
def prepare_model(model, optimizer, loss, metrics=('mse','mae'),
                  loss_bg_thresh=0, loss_bg_decay=0.06, Y=None):
    """ TODO """

    from keras.optimizers import Optimizer
    isinstance(optimizer,Optimizer) or _raise(ValueError())


    loss_standard   = eval('loss_%s()'%loss)
    _metrics        = [eval('loss_%s()'%m) for m in metrics]
    callbacks       = [TerminateOnNaN()]

    # checks
    assert 0 <= loss_bg_thresh <= 1
    assert loss_bg_thresh == 0 or Y is not None
    if loss == 'laplace':
        assert K.image_data_format() == "channels_last", "TODO"
        assert model.output.shape.as_list()[-1] >= 2 and model.output.shape.as_list()[-1] % 2 == 0

    # loss
    if loss_bg_thresh == 0:
        _loss = loss_standard
    else:
        freq = np.mean(Y > loss_bg_thresh)
        # print("class frequency:", freq)
        alpha = K.variable(1.0)
        loss_per_pixel = eval('loss_{loss}(mean=False)'.format(loss=loss))
        _loss = loss_thresh_weighted_decay(loss_per_pixel, loss_bg_thresh,
                                           0.5 / (0.1 + (1 - freq)),
                                           0.5 / (0.1 +      freq),
                                           alpha)
        callbacks.append(ParameterDecayCallback(alpha, loss_bg_decay, name='alpha'))
        if loss not in metrics:
            _metrics.append(loss_standard)


    # compile model
    model.compile(optimizer=optimizer, loss=_loss, metrics=_metrics)

    return callbacks 
Developer ID: CSBDeep, Project: CSBDeep, Lines of code: 42, Source file: train.py

Example 3: train

# Required module import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TerminateOnNaN [as alias]
def train(self):
    self.intermediatemodelpath = os.path.join(self.modelname, 'model_e{epoch:02d}_v{val_loss:.4f}.h5')
    if self.usetensorboard:
        tensorboard = TensorBoard(log_dir=self.intermediatemodelpath + "logs/{}".format(time()))
        self.model.fit(self.train_x, self.train_y, verbose=1, callbacks=[tensorboard])
    else:
        if self.num_pretrain_epochs > 0:
            print('pretraining model to reproduce input data')
            self.history = self.model.fit(
                self.train_y,
                self.train_y,
                batch_size=1024,
                epochs=self.num_pretrain_epochs,
                shuffle=True,
                verbose=1,
                callbacks=[TerminateOnNaN(), ModelCheckpoint(self.intermediatemodelpath)],
                validation_data=(self.val_y, self.val_y))
        self.history = self.model.fit(
            self.train_x,
            self.train_y,
            batch_size=1024,
            epochs=self.num_epochs,
            shuffle=True,
            verbose=1,
            callbacks=[TerminateOnNaN(), ModelCheckpoint(self.intermediatemodelpath)],
            validation_data=(self.val_x, self.val_y))
    self.savemodel(usehdf=True)
    self.savemodel(usehdf=False)
    self.trained = True
Developer ID: bbfrederick, Project: rapidtide, Lines of code: 31, Source file: dlfilter.py

Example 4: test_TerminateOnNaN

# Required module import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TerminateOnNaN [as alias]
def test_TerminateOnNaN():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN()]
    model = Sequential()
    initializer = initializers.Constant(value=1e5)
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',
                        kernel_initializer=initializer))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    # case 1 fit
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf

    # case 2 fit_generator
    def data_generator():
        max_batch_index = len(X_train) // batch_size
        i = 0
        while 1:
            yield (X_train[i * batch_size: (i + 1) * batch_size],
                   y_train[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index
    history = model.fit_generator(data_generator(),
                                  len(X_train),
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf or np.isnan(loss[0]) 
Developer ID: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 46, Source file: test_callbacks.py

Example 5: test_stop_training_csv

# Required module import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TerminateOnNaN [as alias]
def test_stop_training_csv(tmpdir):
    np.random.seed(1337)
    fp = str(tmpdir / 'test.csv')
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]
    model = Sequential()
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    def data_generator():
        i = 0
        max_batch_index = len(X_train) // batch_size
        tot = 0
        while 1:
            if tot > 3 * len(X_train):
                yield np.ones([batch_size, input_dim]) * np.nan, np.ones([batch_size, num_classes]) * np.nan
            else:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            i += 1
            tot += 1
            i = i % max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(X_train) // batch_size,
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) > 1
    assert loss[-1] == np.inf or np.isnan(loss[-1])

    values = []
    with open(fp) as f:
        for x in reader(f):
            values.append(x)

    assert 'nan' in values[-1], 'The last epoch was not logged.'
    os.remove(fp) 
Developer ID: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 51, Source file: test_callbacks.py

Example 6: train

# Required module import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TerminateOnNaN [as alias]
def train(model, game_model_name, epochs=None):
    if epochs is None:
        epochs = EPOCHS_PER_SAVE
    name = model.name
    base_name, index = name.split('_')
    new_name = "_".join([base_name, str(int(index) + 1)]) + ".h5"
    tf_callback = TensorBoard(log_dir=os.path.join(conf['LOG_DIR'], new_name),
            histogram_freq=conf['HISTOGRAM_FREQ'], batch_size=BATCH_SIZE, write_graph=False, write_grads=False)
    nan_callback = TerminateOnNaN()

    directory = os.path.join("games", game_model_name)
    indices, weights = load_moves(directory)
    for epoch in tqdm.tqdm(range(epochs), desc="Epochs"):
        for worker in tqdm.tqdm(range(NUM_WORKERS), desc="Worker_batch"):

            chosen = choices(indices, weights, k=BATCH_SIZE)

            X = np.zeros((BATCH_SIZE, SIZE, SIZE, 17))
            policy_y = np.zeros((BATCH_SIZE, SIZE*SIZE + 1))
            value_y = np.zeros((BATCH_SIZE, 1))
            for j, (game_n, move) in enumerate(chosen):
                filename = os.path.join(directory, GAME_FILE % game_n)
                with h5py.File(filename, 'r') as f:
                    board = f['move_%s/board' % move][:]
                    policy = f['move_%s/policy_target' % move][:]
                    value_target = f['move_%s/value_target' % move][()]


                    X[j] = board
                    policy_y[j] = policy
                    value_y[j] = value_target

            fake_epoch = epoch * NUM_WORKERS + worker # For tensorboard
            model.fit(X, [policy_y, value_y],
                initial_epoch=fake_epoch,
                epochs=fake_epoch + 1,
                validation_split=VALIDATION_SPLIT,  # Needed for TensorBoard histograms and gradients
                callbacks=[tf_callback, nan_callback],
                verbose=0,
            )
    model.name = new_name.split('.')[0]
    model.save(os.path.join(conf['MODEL_DIR'], new_name)) 
Developer ID: Narsil, Project: alphagozero, Lines of code: 44, Source file: train.py


Note: The keras.callbacks.TerminateOnNaN method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Please refer to each project's license before distributing or using the code, and do not republish without permission.