

Python callbacks.EarlyStopping Method Code Examples

This article collects typical usage examples of the Python method keras.callbacks.EarlyStopping. If you are wondering what callbacks.EarlyStopping does, how to use it, or where to find examples of it in practice, the curated code examples below may help. You can also explore further usage examples from the module it belongs to, keras.callbacks.


Below, 15 code examples of callbacks.EarlyStopping are shown, sorted by popularity by default.
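Before the project-specific examples, here is a minimal usage sketch of the typical EarlyStopping pattern: instantiate the callback with a metric to monitor and a patience value, then pass it to model.fit() via the callbacks argument. The toy model, data, and hyperparameters below are illustrative placeholders, not taken from any of the projects listed in this article.

# Minimal sketch (toy data and model are placeholders, not from the examples below)
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping

# Toy binary-classification data
data = np.random.random((100, 1))
labels = (data > 0.5).astype(int)

model = Sequential([Dense(8, input_dim=1, activation='relu'),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])

# Stop training once val_loss has not improved for 5 consecutive epochs
early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='auto')
model.fit(data, labels, epochs=100, validation_split=0.2,
          callbacks=[early_stopping])

As the examples below show, the same pattern applies whether training with fit, fit_generator, or inside a hyperparameter search; only the monitored metric (e.g. 'val_loss', 'val_acc', 'loss') and the patience value change.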

Example 1: train

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train(self, X, y, validation_data):
    print('Training model...')
    multitask = y.shape[1] > 1
    if not multitask:
      num_positives = y.sum()
      num_sequences = len(y)
      num_negatives = num_sequences - num_positives
    self.model.fit(
        X,
        y,
        batch_size=128,
        nb_epoch=100,
        validation_data=validation_data,
        class_weight={
            True: num_sequences / num_positives,
            False: num_sequences / num_negatives
        } if not multitask else None,
        callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
        verbose=True) 
Author: deepchem, Project: deepchem, Lines: 21, Source: models.py

Example 2: train_sequential

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train_sequential(model, X, y, where_to_save, fit_params=None, monitor='val_acc'):
    # TODO: DOCUMENT once thoroughly tested
    # Watch out: where_to_save might be inside fit_params

    if fit_params is None:
        fit_params = {
            "batch_size": 32,
            "nb_epoch": 45,
            "verbose": True,
            "validation_split": 0.15,
            "show_accuracy": True,
            "callbacks": [EarlyStopping(verbose=True, patience=5, monitor=monitor),
                          ModelCheckpoint(where_to_save, monitor=monitor, verbose=True, save_best_only=True)]
        }
    print 'Fitting! Hit CTRL-C to stop early...'
    history = "Nothing to show"
    try:
        history = model.fit(X, y, **fit_params)
    except KeyboardInterrupt:
        print "Training stopped early!"
        history = model.history

    return history 
Author: textclf, Project: fancy-cnn, Lines: 25, Source: train_neural.py

Example 3: train_model

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train_model(self):

		checkpoint = ModelCheckpoint(self.PATH, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')

		if self.modality == "audio":
			model = self.get_audio_model()
			model.compile(optimizer='adadelta', loss='categorical_crossentropy', sample_weight_mode='temporal')
		elif self.modality == "text":
			model = self.get_text_model()
			model.compile(optimizer='adadelta', loss='categorical_crossentropy', sample_weight_mode='temporal')
		elif self.modality == "bimodal":
			model = self.get_bimodal_model()
			model.compile(optimizer='adam', loss='categorical_crossentropy', sample_weight_mode='temporal')

		early_stopping = EarlyStopping(monitor='val_loss', patience=10)
		model.fit(self.train_x, self.train_y,
		                epochs=self.epochs,
		                batch_size=self.batch_size,
		                sample_weight=self.train_mask,
		                shuffle=True, 
		                callbacks=[early_stopping, checkpoint],
		                validation_data=(self.val_x, self.val_y, self.val_mask))

		self.test_model() 
Author: declare-lab, Project: MELD, Lines: 26, Source: baseline.py

Example 4: LSTM

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def LSTM(self, argsDict):
        self.paras.batch_size             = argsDict["batch_size"]
        self.paras.model['dropout']       = argsDict['dropout']
        self.paras.model['activation']    = argsDict["activation"]
        self.paras.model['optimizer']     = argsDict["optimizer"]
        self.paras.model['learning_rate'] = argsDict["learning_rate"]

        print(self.paras.batch_size, self.paras.model['dropout'], self.paras.model['activation'], self.paras.model['optimizer'], self.paras.model['learning_rate'])

        model = self.lstm_model()
        model.fit(self.train_x, self.train_y,
              batch_size=self.paras.batch_size,
              epochs=self.paras.epoch,
              verbose=0,
              callbacks=[EarlyStopping(monitor='loss', patience=5)]
              )

        score, mse = model.evaluate(self.test_x, self.test_y, verbose=0)
        y_pred=model.predict(self.test_x)
        reca=Recall_s(self.test_y,y_pred)
        return -reca 
Author: doncat99, Project: StockRecommendSystem, Lines: 23, Source: Stock_Prediction_Model_Stateless_LSTM.py

Example 5: train

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train(model, image_data, y_true, log_dir='logs/'):
    '''retrain/fine-tune the model'''
    model.compile(optimizer='adam', loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5",
        monitor='val_loss', save_weights_only=True, save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')

    model.fit([image_data, *y_true],
              np.zeros(len(image_data)),
              validation_split=.1,
              batch_size=32,
              epochs=30,
              callbacks=[logging, checkpoint, early_stopping])
    model.save_weights(log_dir + 'trained_weights.h5')
    # Further training. 
Author: scutan90, Project: YOLO-3D-Box, Lines: 21, Source: train.py

Example 6: train_model

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train_model(self,model,X_train,X_test,y_train,y_test):
        input_y_train = self.include_start_token(y_train)
        print(input_y_train.shape)
        input_y_test = self.include_start_token(y_test)
        print(input_y_test.shape)
        early = EarlyStopping(monitor='val_loss',patience=10,mode='auto')

        checkpoint = ModelCheckpoint(self.outpath + 's2s_model_' + str(self.version) + '_.h5',monitor='val_loss',verbose=1,save_best_only=True,mode='auto')
        lr_reduce = ReduceLROnPlateau(monitor='val_loss',factor=0.5, patience=2, verbose=0, mode='auto')
        model.fit([X_train,input_y_train],y_train, 
		      epochs=self.epochs,
		      batch_size=self.batch_size, 
		      validation_data=[[X_test,input_y_test],y_test], 
		      callbacks=[early,checkpoint,lr_reduce], 
		      shuffle=True)
        return model 
Author: PacktPublishing, Project: Intelligent-Projects-Using-Python, Lines: 18, Source: chatbot.py

Example 7: NerCallbacks

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def NerCallbacks(id_to_tag, best_fit_params=None, mask_tag=None, log_path=None):
    """模型訓練過程中的回調函數
    """
    callbacks = [Accuracy(id_to_tag, mask_tag, log_path)]
    if best_fit_params is not None:
        early_stopping = EarlyStopping(
            monitor="val_crf_accuracy",
            patience=best_fit_params.get("early_stop_patience"))
        reduce_lr_on_plateau = ReduceLROnPlateau(
            monitor="val_crf_accuracy", verbose=1, mode="max",
            factor=best_fit_params.get("reduce_lr_factor"),
            patience=best_fit_params.get("reduce_lr_patience"))
        model_check_point = ModelCheckpoint(
            best_fit_params.get("save_path"),
            monitor="val_crf_accuracy", verbose=2, mode="max", save_best_only=True)
        callbacks.extend([early_stopping, reduce_lr_on_plateau, model_check_point])
    return callbacks 
Author: liushaoweihua, Project: keras-bert-ner, Lines: 19, Source: callbacks.py

Example 8: train

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train():
    # load data
    train_dataset = Dataset(training=True)
    dev_dataset = Dataset(training=False)

    # model
    MODEL = name_model[model_name]
    model = MODEL(train_dataset.vocab_size, conf.n_classes, train_dataset.emb_mat)

    # callback
    my_callback = MyCallback()
    f1 = F1(dev_dataset.gen_batch_data(), dev_dataset.steps_per_epoch)
    checkpointer = ModelCheckpoint('data/{}.hdf5'.format(model_name), save_best_only=True)
    early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')

    # train
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.categorical_crossentropy, metrics=['acc'])
    model.fit_generator(train_dataset.gen_batch_data(),
                        steps_per_epoch=train_dataset.steps_per_epoch,
                        verbose=0,
                        epochs=conf.epochs, callbacks=[my_callback, checkpointer, early_stop, f1])
    keras.models.save_model(model, conf.model_path.format(model_name)) 
Author: moxiu2012, Project: PJ_NLP, Lines: 25, Source: train.py

Example 9: train

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train(model, max_len=200000, batch_size=64, verbose=True, epochs=100, save_path='../saved/', save_best=True):
    
    # callbacks
    ear = EarlyStopping(monitor='val_acc', patience=5)
    mcp = ModelCheckpoint(join(save_path, 'malconv.h5'), 
                          monitor="val_acc", 
                          save_best_only=save_best, 
                          save_weights_only=False)
    
    history = model.fit_generator(
        utils.data_generator(x_train, y_train, max_len, batch_size, shuffle=True),
        steps_per_epoch=len(x_train)//batch_size + 1,
        epochs=epochs, 
        verbose=verbose, 
        callbacks=[ear, mcp],
        validation_data=utils.data_generator(x_test, y_test, max_len, batch_size),
        validation_steps=len(x_test)//batch_size + 1)
    return history 
Author: j40903272, Project: MalConv-keras, Lines: 20, Source: train.py

Example 10: finetuning_callbacks

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def finetuning_callbacks(checkpoint_path, patience, verbose):
    """ Callbacks for model training.

    # Arguments:
        checkpoint_path: Where weight checkpoints should be saved.
        patience: Number of epochs with no improvement after which
            training will be stopped.

    # Returns:
        Array with training callbacks that can be passed straight into
        model.fit() or similar.
    """
    cb_verbose = (verbose >= 2)
    checkpointer = ModelCheckpoint(monitor='val_loss', filepath=checkpoint_path,
                                   save_best_only=True, verbose=cb_verbose)
    earlystop = EarlyStopping(monitor='val_loss', patience=patience,
                              verbose=cb_verbose)
    return [checkpointer, earlystop] 
Author: bfelbo, Project: DeepMoji, Lines: 20, Source: finetuning.py

Example 11: init_logging_callbacks

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def init_logging_callbacks(self,log_dir=LOG_DIR_ROOT):

		self.checkpoint = ModelCheckpoint(filepath="%s/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5" % (log_dir),\
														monitor='loss',\
														verbose=1,\
														save_best_only=True,\
														mode='min')

		self.early_stopping = EarlyStopping(monitor='loss',\
													min_delta=0,\
													patience=PATIENCE,\
													verbose=0,\
													mode='auto')	

		now = datetime.utcnow().strftime("%Y%m%d%H%M%S")	
		log_dir = "{}/run/{}".format(LOG_DIR_ROOT,now)
		self.tensorboard = TensorBoard(log_dir=log_dir,\
											write_graph=True,\
											write_images=True)
		
		self.callbacks = [self.early_stopping,\
								self.tensorboard,\
								self.checkpoint] 
Author: k3170makan, Project: PyMLProjects, Lines: 25, Source: model.py

Example 12: get_callbacks

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def get_callbacks(config_data, appendix=''):
    ret_callbacks = []
    model_stored = False
    callbacks = config_data['callbacks']
    if K._BACKEND == 'tensorflow':
        tensor_board = TensorBoard(log_dir=os.path.join('logging', config_data['tb_log_dir']), histogram_freq=10)
        ret_callbacks.append(tensor_board)
    for callback in callbacks:
        if callback['name'] == 'early_stopping':
            ret_callbacks.append(EarlyStopping(monitor=callback['monitor'], patience=callback['patience'], verbose=callback['verbose'], mode=callback['mode']))
        elif callback['name'] == 'model_checkpoit':
            model_stored = True
            path = config_data['output_path']
            basename = config_data['output_basename']
            base_path = os.path.join(path, basename)
            opath = os.path.join(base_path, 'best_model{}.h5'.format(appendix))
            save_best = bool(callback['save_best_only'])
            ret_callbacks.append(ModelCheckpoint(filepath=opath, verbose=callback['verbose'], save_best_only=save_best, monitor=callback['monitor'], mode=callback['mode']))
    return ret_callbacks, model_stored 
Author: spinningbytes, Project: deep-mlsa, Lines: 21, Source: run_utils.py

Example 13: test_EarlyStopping_reuse

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def test_EarlyStopping_reuse():
    np.random.seed(1337)
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = Sequential((
        Dense(1, input_dim=1, activation='relu'),
        Dense(1, activation='sigmoid'),
    ))
    model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
    weights = model.get_weights()

    hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
    assert len(hist.epoch) >= patience

    # This should allow training to go for at least `patience` epochs
    model.set_weights(weights)
    hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
    assert len(hist.epoch) >= patience 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 22, Source: test_callbacks.py

Example 14: test_EarlyStopping_patience

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def test_EarlyStopping_patience():
    class DummyModel(object):
        def __init__(self):
            self.stop_training = False

    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
    early_stop.model = DummyModel()

    losses = [0.0860, 0.1096, 0.1040, 0.1019]

    # Should stop after epoch 3, as the loss has not improved after patience=2 epochs.
    epochs_trained = 0
    early_stop.on_train_begin()

    for epoch in range(len(losses)):
        epochs_trained += 1
        early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})

        if early_stop.model.stop_training:
            break

    assert epochs_trained == 3 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 24, Source: test_callbacks.py

Example 15: main

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def main(rootdir, case, results):
    train_x, train_y, valid_x, valid_y, test_x, test_y = get_data(args.dataset, case)

    input_shape = (train_x.shape[1], train_x.shape[2])
    num_class = train_y.shape[1]
    if not os.path.exists(rootdir):
        os.makedirs(rootdir)
    filepath = os.path.join(rootdir, str(case) + '.hdf5')
    saveto = os.path.join(rootdir, str(case) + '.csv')
    optimizer = Adam(lr=args.lr, clipnorm=args.clip)
    pred_dir = os.path.join(rootdir, str(case) + '_pred.txt')

    if args.train:
        model = creat_model(input_shape, num_class)
        early_stop = EarlyStopping(monitor='val_acc', patience=15, mode='auto')
        reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=5, mode='auto', cooldown=3., verbose=1)
        checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='auto')
        csv_logger = CSVLogger(saveto)
        if args.dataset=='NTU' or args.dataset == 'PKU':
            callbacks_list = [csv_logger, checkpoint, early_stop, reduce_lr]
        else:
            callbacks_list = [csv_logger, checkpoint]

        model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        model.fit(train_x, train_y, validation_data=[valid_x, valid_y], epochs=args.epochs,
                  batch_size=args.batch_size, callbacks=callbacks_list, verbose=2)

    # test
    model = creat_model(input_shape, num_class)
    model.load_weights(filepath)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

    scores = get_activation(model, test_x, test_y, pred_dir, VA=10, par=9)

    results.append(round(scores, 2)) 
Author: microsoft, Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition, Lines: 37, Source: va-rnn.py


Note: The keras.callbacks.EarlyStopping method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.