

Python callbacks.TerminateOnNaN Method Code Examples

This article collects typical usage examples of the keras.callbacks.TerminateOnNaN method in Python. TerminateOnNaN is a Keras callback that stops training as soon as the loss becomes NaN. If you are wondering what exactly callbacks.TerminateOnNaN does, how to use it, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples from the keras.callbacks module.


Six code examples of the callbacks.TerminateOnNaN method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
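Before the collected examples, here is a minimal sketch of the basic usage pattern they all share: construct the callback and pass it in the callbacks list of fit. The tiny model and random data below are illustrative placeholders only and are not taken from any of the projects featured here.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import TerminateOnNaN

# A tiny regression model on random data, just to show where the callback plugs in.
model = Sequential([Dense(1, input_dim=4)])
model.compile(optimizer='sgd', loss='mse')

x = np.random.rand(32, 4)
y = np.random.rand(32, 1)

# TerminateOnNaN stops training as soon as a batch loss becomes NaN or Inf,
# so a diverging run fails fast instead of running through the remaining epochs.
model.fit(x, y, epochs=10, callbacks=[TerminateOnNaN()])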

Example 1: lengthy_test

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TerminateOnNaN [as alias]
def lengthy_test(model, testrange=[5,10,20,40,80], epochs=100, verboose=True):
    ts = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    log_path = LOG_PATH_BASE + ts + "_-_" + model.name 
    tensorboard = TensorBoard(log_dir=log_path,
                              write_graph=False,  # This eats a lot of space. Enable with caution!
                              # histogram_freq=1,
                              write_images=True,
                              batch_size=model.batch_size,
                              write_grads=True)
    model_saver = ModelCheckpoint(log_path + "/model.ckpt.{epoch:04d}.hdf5", monitor='loss', period=1)
    callbacks = [tensorboard, TerminateOnNaN(), model_saver]

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))

    train_model(model, epochs=epochs, callbacks=callbacks, verboose=verboose)

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))
    return 
Author: flomlo | Project: ntm_keras | Lines: 24 | Source: testing_utils.py

Example 2: prepare_model

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TerminateOnNaN [as alias]
def prepare_model(model, optimizer, loss, metrics=('mse','mae'),
                  loss_bg_thresh=0, loss_bg_decay=0.06, Y=None):
    """ TODO """

    from keras.optimizers import Optimizer
    isinstance(optimizer, Optimizer) or _raise(ValueError())


    loss_standard   = eval('loss_%s()'%loss)
    _metrics        = [eval('loss_%s()'%m) for m in metrics]
    callbacks       = [TerminateOnNaN()]

    # checks
    assert 0 <= loss_bg_thresh <= 1
    assert loss_bg_thresh == 0 or Y is not None
    if loss == 'laplace':
        assert K.image_data_format() == "channels_last", "TODO"
        assert model.output.shape.as_list()[-1] >= 2 and model.output.shape.as_list()[-1] % 2 == 0

    # loss
    if loss_bg_thresh == 0:
        _loss = loss_standard
    else:
        freq = np.mean(Y > loss_bg_thresh)
        # print("class frequency:", freq)
        alpha = K.variable(1.0)
        loss_per_pixel = eval('loss_{loss}(mean=False)'.format(loss=loss))
        _loss = loss_thresh_weighted_decay(loss_per_pixel, loss_bg_thresh,
                                           0.5 / (0.1 + (1 - freq)),
                                           0.5 / (0.1 +      freq),
                                           alpha)
        callbacks.append(ParameterDecayCallback(alpha, loss_bg_decay, name='alpha'))
        if loss not in metrics:
            _metrics.append(loss_standard)


    # compile model
    model.compile(optimizer=optimizer, loss=_loss, metrics=_metrics)

    return callbacks 
Author: CSBDeep | Project: CSBDeep | Lines: 42 | Source: train.py

Example 3: train

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TerminateOnNaN [as alias]
def train(self):
        self.intermediatemodelpath = os.path.join(self.modelname, 'model_e{epoch:02d}_v{val_loss:.4f}.h5')
        if self.usetensorboard:
            tensorboard = TensorBoard(log_dir=self.intermediatemodelpath + "logs/{}".format(time()))
            self.model.fit(self.train_x, self.train_y, verbose=1, callbacks=[tensorboard])
        else:
            if self.num_pretrain_epochs > 0:
                print('pretraining model to reproduce input data')
                self.history = self.model.fit(
                    self.train_y,
                    self.train_y,
                    batch_size=1024,
                    epochs=self.num_pretrain_epochs,
                    shuffle=True,
                    verbose=1,
                    callbacks=[TerminateOnNaN(), ModelCheckpoint(self.intermediatemodelpath)],
                    validation_data=(self.val_y, self.val_y))
            self.history = self.model.fit(
                self.train_x,
                self.train_y,
                batch_size=1024,
                epochs=self.num_epochs,
                shuffle=True,
                verbose=1,
                callbacks=[TerminateOnNaN(), ModelCheckpoint(self.intermediatemodelpath)],
                validation_data=(self.val_x, self.val_y))
        self.savemodel(usehdf=True)
        self.savemodel(usehdf=False)
        self.trained = True 
Author: bbfrederick | Project: rapidtide | Lines: 31 | Source: dlfilter.py

Example 4: test_TerminateOnNaN

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TerminateOnNaN [as alias]
def test_TerminateOnNaN():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN()]
    model = Sequential()
    # A huge constant initializer makes the loss diverge immediately,
    # so TerminateOnNaN should stop training after the first epoch.
    initializer = initializers.Constant(value=1e5)
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',
                        kernel_initializer=initializer))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    # case 1 fit
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf

    # case 2 fit_generator
    def data_generator():
        max_batch_index = len(X_train) // batch_size
        i = 0
        while 1:
            yield (X_train[i * batch_size: (i + 1) * batch_size],
                   y_train[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index
    history = model.fit_generator(data_generator(),
                                  len(X_train),
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf or np.isnan(loss[0]) 
Author: hello-sea | Project: DeepLearning_Wavelet-LSTM | Lines: 46 | Source: test_callbacks.py

Example 5: test_stop_training_csv

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TerminateOnNaN [as alias]
def test_stop_training_csv(tmpdir):
    np.random.seed(1337)
    fp = str(tmpdir / 'test.csv')
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]
    model = Sequential()
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    def data_generator():
        i = 0
        max_batch_index = len(X_train) // batch_size
        tot = 0
        while 1:
            # After enough batches, start feeding NaN data to trigger TerminateOnNaN.
            if tot > 3 * len(X_train):
                yield np.ones([batch_size, input_dim]) * np.nan, np.ones([batch_size, num_classes]) * np.nan
            else:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            i += 1
            tot += 1
            i = i % max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(X_train) // batch_size,
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) > 1
    assert loss[-1] == np.inf or np.isnan(loss[-1])

    values = []
    with open(fp) as f:
        for x in reader(f):
            values.append(x)

    assert 'nan' in values[-1], 'The last epoch was not logged.'
    os.remove(fp) 
Author: hello-sea | Project: DeepLearning_Wavelet-LSTM | Lines: 51 | Source: test_callbacks.py

Example 6: train

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import TerminateOnNaN [as alias]
def train(model, game_model_name, epochs=None):
    if epochs is None:
        epochs = EPOCHS_PER_SAVE
    name = model.name
    base_name, index = name.split('_')
    new_name = "_".join([base_name, str(int(index) + 1)]) + ".h5"
    tf_callback = TensorBoard(log_dir=os.path.join(conf['LOG_DIR'], new_name),
            histogram_freq=conf['HISTOGRAM_FREQ'], batch_size=BATCH_SIZE, write_graph=False, write_grads=False)
    nan_callback = TerminateOnNaN()

    directory = os.path.join("games", game_model_name)
    indices, weights = load_moves(directory)
    for epoch in tqdm.tqdm(range(epochs), desc="Epochs"):
        for worker in tqdm.tqdm(range(NUM_WORKERS), desc="Worker_batch"):

            chosen = choices(indices, weights, k=BATCH_SIZE)

            X = np.zeros((BATCH_SIZE, SIZE, SIZE, 17))
            policy_y = np.zeros((BATCH_SIZE, SIZE*SIZE + 1))
            value_y = np.zeros((BATCH_SIZE, 1))
            for j, (game_n, move) in enumerate(chosen):
                filename = os.path.join(directory, GAME_FILE % game_n)
                with h5py.File(filename, 'r') as f:
                    board = f['move_%s/board' % move][:]
                    policy = f['move_%s/policy_target' % move][:]
                    value_target = f['move_%s/value_target' % move][()]


                    X[j] = board
                    policy_y[j] = policy
                    value_y[j] = value_target

            fake_epoch = epoch * NUM_WORKERS + worker # For tensorboard
            model.fit(X, [policy_y, value_y],
                initial_epoch=fake_epoch,
                epochs=fake_epoch + 1,
                validation_split=VALIDATION_SPLIT,  # Needed for TensorBoard histograms and gradients
                callbacks=[tf_callback, nan_callback],
                verbose=0,
            )
    model.name = new_name.split('.')[0]
    model.save(os.path.join(conf['MODEL_DIR'], new_name)) 
Author: Narsil | Project: alphagozero | Lines: 44 | Source: train.py


Note: The keras.callbacks.TerminateOnNaN examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Please consult the corresponding project's license before using or redistributing the code. Do not reproduce this article without permission.