

Python callbacks.EarlyStopping Method Code Examples

This article collects typical usage examples of the keras.callbacks.EarlyStopping method in Python. If you are wondering how callbacks.EarlyStopping works in practice, or are looking for concrete examples of how to call it, the curated code samples below should help. You can also browse further usage examples from its parent module, keras.callbacks.


The sections below present 15 code examples of the callbacks.EarlyStopping method, ordered by popularity by default.
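Before the project-specific examples, the sketch below shows the basic pattern they all share: construct an EarlyStopping callback that watches a validation metric and pass it to model.fit() through the callbacks argument. This is a minimal, self-contained illustration assuming Keras 2 and random toy data; the layer sizes and parameter values are placeholders rather than code from any of the projects below.

# Required import: from keras.callbacks import EarlyStopping
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping

# Toy data: 200 samples with 10 features and binary labels (placeholder values).
X = np.random.random((200, 10))
y = (X.sum(axis=1) > 5).astype(int)

model = Sequential([
    Dense(16, input_dim=10, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Stop training once val_loss has failed to improve for 3 consecutive epochs.
early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1)

model.fit(X, y,
          epochs=50,
          batch_size=32,
          validation_split=0.2,
          callbacks=[early_stopping])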

Example 1: train

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train(self, X, y, validation_data):
    print('Training model...')
    multitask = y.shape[1] > 1
    if not multitask:
      num_positives = y.sum()
      num_sequences = len(y)
      num_negatives = num_sequences - num_positives
    self.model.fit(
        X,
        y,
        batch_size=128,
        nb_epoch=100,
        validation_data=validation_data,
        class_weight={
            True: num_sequences / num_positives,
            False: num_sequences / num_negatives
        } if not multitask else None,
        callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
        verbose=True) 
Developer: deepchem, Project: deepchem, Lines: 21, Source: models.py
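This snippet targets the older Keras 1 API: in Keras 2 the nb_epoch argument was renamed to epochs, and class_weight is documented as a dict keyed by class indices (the boolean keys above only work because True and False compare equal to 1 and 0). A hedged Keras 2 equivalent of the same call might look like the following; the variable names simply mirror the example above.

# Keras 2 style (sketch): same single-task class weighting, with the renamed argument.
class_weight = None
if not multitask:
    class_weight = {
        1: num_sequences / num_positives,  # positive class
        0: num_sequences / num_negatives,  # negative class
    }
self.model.fit(
    X, y,
    batch_size=128,
    epochs=100,  # was nb_epoch in Keras 1
    validation_data=validation_data,
    class_weight=class_weight,
    callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
    verbose=True)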

Example 2: train_sequential

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train_sequential(model, X, y, where_to_save, fit_params=None, monitor='val_acc'):
    # TODO: DOCUMENT once thoroughly tested
    # Watch out: where_to_save might be inside fit_params

    if fit_params is None:
        fit_params = {
            "batch_size": 32,
            "nb_epoch": 45,
            "verbose": True,
            "validation_split": 0.15,
            "show_accuracy": True,
            "callbacks": [EarlyStopping(verbose=True, patience=5, monitor=monitor),
                          ModelCheckpoint(where_to_save, monitor=monitor, verbose=True, save_best_only=True)]
        }
    print('Fitting! Hit CTRL-C to stop early...')
    history = "Nothing to show"
    try:
        history = model.fit(X, y, **fit_params)
    except KeyboardInterrupt:
        print "Training stopped early!"
        history = model.history

    return history 
Developer: textclf, Project: fancy-cnn, Lines: 25, Source: train_neural.py

Example 3: train_model

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train_model(self):

		checkpoint = ModelCheckpoint(self.PATH, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')

		if self.modality == "audio":
			model = self.get_audio_model()
			model.compile(optimizer='adadelta', loss='categorical_crossentropy', sample_weight_mode='temporal')
		elif self.modality == "text":
			model = self.get_text_model()
			model.compile(optimizer='adadelta', loss='categorical_crossentropy', sample_weight_mode='temporal')
		elif self.modality == "bimodal":
			model = self.get_bimodal_model()
			model.compile(optimizer='adam', loss='categorical_crossentropy', sample_weight_mode='temporal')

		early_stopping = EarlyStopping(monitor='val_loss', patience=10)
		model.fit(self.train_x, self.train_y,
		                epochs=self.epochs,
		                batch_size=self.batch_size,
		                sample_weight=self.train_mask,
		                shuffle=True, 
		                callbacks=[early_stopping, checkpoint],
		                validation_data=(self.val_x, self.val_y, self.val_mask))

		self.test_model() 
Developer: declare-lab, Project: MELD, Lines: 26, Source: baseline.py

Example 4: LSTM

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def LSTM(self, argsDict):
        self.paras.batch_size             = argsDict["batch_size"]
        self.paras.model['dropout']       = argsDict['dropout']
        self.paras.model['activation']    = argsDict["activation"]
        self.paras.model['optimizer']     = argsDict["optimizer"]
        self.paras.model['learning_rate'] = argsDict["learning_rate"]

        print(self.paras.batch_size, self.paras.model['dropout'], self.paras.model['activation'], self.paras.model['optimizer'], self.paras.model['learning_rate'])

        model = self.lstm_model()
        model.fit(self.train_x, self.train_y,
              batch_size=self.paras.batch_size,
              epochs=self.paras.epoch,
              verbose=0,
              callbacks=[EarlyStopping(monitor='loss', patience=5)]
              )

        score, mse = model.evaluate(self.test_x, self.test_y, verbose=0)
        y_pred=model.predict(self.test_x)
        reca=Recall_s(self.test_y,y_pred)
        return -reca 
Developer: doncat99, Project: StockRecommendSystem, Lines: 23, Source: Stock_Prediction_Model_Stateless_LSTM.py

Example 5: train

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train(model, image_data, y_true, log_dir='logs/'):
    '''retrain/fine-tune the model'''
    model.compile(optimizer='adam', loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5",
        monitor='val_loss', save_weights_only=True, save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')

    model.fit([image_data, *y_true],
              np.zeros(len(image_data)),
              validation_split=.1,
              batch_size=32,
              epochs=30,
              callbacks=[logging, checkpoint, early_stopping])
    model.save_weights(log_dir + 'trained_weights.h5')
    # Further training. 
Developer: scutan90, Project: YOLO-3D-Box, Lines: 21, Source: train.py

Example 6: train_model

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train_model(self,model,X_train,X_test,y_train,y_test):
        input_y_train = self.include_start_token(y_train)
        print(input_y_train.shape)
        input_y_test = self.include_start_token(y_test)
        print(input_y_test.shape)
        early = EarlyStopping(monitor='val_loss',patience=10,mode='auto')

        checkpoint = ModelCheckpoint(self.outpath + 's2s_model_' + str(self.version) + '_.h5',monitor='val_loss',verbose=1,save_best_only=True,mode='auto')
        lr_reduce = ReduceLROnPlateau(monitor='val_loss',factor=0.5, patience=2, verbose=0, mode='auto')
        model.fit([X_train,input_y_train],y_train, 
		      epochs=self.epochs,
		      batch_size=self.batch_size, 
		      validation_data=[[X_test,input_y_test],y_test], 
		      callbacks=[early,checkpoint,lr_reduce], 
		      shuffle=True)
        return model 
Developer: PacktPublishing, Project: Intelligent-Projects-Using-Python, Lines: 18, Source: chatbot.py

Example 7: NerCallbacks

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def NerCallbacks(id_to_tag, best_fit_params=None, mask_tag=None, log_path=None):
    """模型训练过程中的回调函数
    """
    callbacks = [Accuracy(id_to_tag, mask_tag, log_path)]
    if best_fit_params is not None:
        early_stopping = EarlyStopping(
            monitor="val_crf_accuracy",
            patience=best_fit_params.get("early_stop_patience"))
        reduce_lr_on_plateau = ReduceLROnPlateau(
            monitor="val_crf_accuracy", verbose=1, mode="max",
            factor=best_fit_params.get("reduce_lr_factor"),
            patience=best_fit_params.get("reduce_lr_patience"))
        model_check_point = ModelCheckpoint(
            best_fit_params.get("save_path"),
            monitor="val_crf_accuracy", verbose=2, mode="max", save_best_only=True)
        callbacks.extend([early_stopping, reduce_lr_on_plateau, model_check_point])
    return callbacks 
Developer: liushaoweihua, Project: keras-bert-ner, Lines: 19, Source: callbacks.py
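The structure of best_fit_params is not shown above, but from the keys the function reads it is presumably a plain dict along the following lines; the concrete values here are illustrative assumptions rather than settings from the keras-bert-ner project.

# Hypothetical best_fit_params for NerCallbacks (keys inferred from the code above).
best_fit_params = {
    "early_stop_patience": 5,      # epochs without val_crf_accuracy improvement before stopping
    "reduce_lr_factor": 0.5,       # multiply the learning rate by this factor on plateau
    "reduce_lr_patience": 2,       # epochs without improvement before lowering the learning rate
    "save_path": "best_model.h5",  # where ModelCheckpoint writes the best weights
}
callbacks = NerCallbacks(id_to_tag, best_fit_params=best_fit_params)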

Example 8: train

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train():
    # load data
    train_dataset = Dataset(training=True)
    dev_dataset = Dataset(training=False)

    # model
    MODEL = name_model[model_name]
    model = MODEL(train_dataset.vocab_size, conf.n_classes, train_dataset.emb_mat)

    # callback
    my_callback = MyCallback()
    f1 = F1(dev_dataset.gen_batch_data(), dev_dataset.steps_per_epoch)
    checkpointer = ModelCheckpoint('data/{}.hdf5'.format(model_name), save_best_only=True)
    early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')

    # train
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.categorical_crossentropy, metrics=['acc'])
    model.fit_generator(train_dataset.gen_batch_data(),
                        steps_per_epoch=train_dataset.steps_per_epoch,
                        verbose=0,
                        epochs=conf.epochs, callbacks=[my_callback, checkpointer, early_stop, f1])
    keras.models.save_model(model, conf.model_path.format(model_name)) 
Developer: moxiu2012, Project: PJ_NLP, Lines: 25, Source: train.py

Example 9: train

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def train(model, max_len=200000, batch_size=64, verbose=True, epochs=100, save_path='../saved/', save_best=True):
    
    # callbacks
    ear = EarlyStopping(monitor='val_acc', patience=5)
    mcp = ModelCheckpoint(join(save_path, 'malconv.h5'), 
                          monitor="val_acc", 
                          save_best_only=save_best, 
                          save_weights_only=False)
    
    history = model.fit_generator(
        utils.data_generator(x_train, y_train, max_len, batch_size, shuffle=True),
        steps_per_epoch=len(x_train)//batch_size + 1,
        epochs=epochs, 
        verbose=verbose, 
        callbacks=[ear, mcp],
        validation_data=utils.data_generator(x_test, y_test, max_len, batch_size),
        validation_steps=len(x_test)//batch_size + 1)
    return history 
Developer: j40903272, Project: MalConv-keras, Lines: 20, Source: train.py

Example 10: finetuning_callbacks

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def finetuning_callbacks(checkpoint_path, patience, verbose):
    """ Callbacks for model training.

    # Arguments:
        checkpoint_path: Where weight checkpoints should be saved.
        patience: Number of epochs with no improvement after which
            training will be stopped.

    # Returns:
        Array with training callbacks that can be passed straight into
        model.fit() or similar.
    """
    cb_verbose = (verbose >= 2)
    checkpointer = ModelCheckpoint(monitor='val_loss', filepath=checkpoint_path,
                                   save_best_only=True, verbose=cb_verbose)
    earlystop = EarlyStopping(monitor='val_loss', patience=patience,
                              verbose=cb_verbose)
    return [checkpointer, earlystop] 
Developer: bfelbo, Project: DeepMoji, Lines: 20, Source: finetuning.py
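As the docstring notes, the returned list can be passed straight into model.fit(). A minimal usage sketch, where the model, training data, and checkpoint path are placeholders:

# Hypothetical usage of finetuning_callbacks with an already compiled model.
callbacks = finetuning_callbacks('checkpoints/finetune.hdf5', patience=5, verbose=2)
model.fit(X_train, y_train,
          validation_split=0.1,
          epochs=50,
          batch_size=32,
          callbacks=callbacks)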

Example 11: init_logging_callbacks

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def init_logging_callbacks(self,log_dir=LOG_DIR_ROOT):

		self.checkpoint = ModelCheckpoint(filepath="%s/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5" % (log_dir),\
														monitor='loss',\
														verbose=1,\
														save_best_only=True,\
														mode='min')

		self.early_stopping = EarlyStopping(monitor='loss',\
													min_delta=0,\
													patience=PATIENCE,\
													verbose=0,\
													mode='auto')	

		now = datetime.utcnow().strftime("%Y%m%d%H%M%S")	
		log_dir = "{}/run/{}".format(LOG_DIR_ROOT,now)
		self.tensorboard = TensorBoard(log_dir=log_dir,\
											write_graph=True,\
											write_images=True)
		
		self.callbacks = [self.early_stopping,\
								self.tensorboard,\
								self.checkpoint] 
Developer: k3170makan, Project: PyMLProjects, Lines: 25, Source: model.py

Example 12: get_callbacks

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def get_callbacks(config_data, appendix=''):
    ret_callbacks = []
    model_stored = False
    callbacks = config_data['callbacks']
    if K._BACKEND == 'tensorflow':
        tensor_board = TensorBoard(log_dir=os.path.join('logging', config_data['tb_log_dir']), histogram_freq=10)
        ret_callbacks.append(tensor_board)
    for callback in callbacks:
        if callback['name'] == 'early_stopping':
            ret_callbacks.append(EarlyStopping(monitor=callback['monitor'], patience=callback['patience'], verbose=callback['verbose'], mode=callback['mode']))
        elif callback['name'] == 'model_checkpoit':
            model_stored = True
            path = config_data['output_path']
            basename = config_data['output_basename']
            base_path = os.path.join(path, basename)
            opath = os.path.join(base_path, 'best_model{}.h5'.format(appendix))
            save_best = bool(callback['save_best_only'])
            ret_callbacks.append(ModelCheckpoint(filepath=opath, verbose=callback['verbose'], save_best_only=save_best, monitor=callback['monitor'], mode=callback['mode']))
    return ret_callbacks, model_stored 
Developer: spinningbytes, Project: deep-mlsa, Lines: 21, Source: run_utils.py
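For reference, get_callbacks expects a configuration dictionary roughly shaped as follows. The keys are the ones the function actually reads; the concrete values are illustrative assumptions. Note that the checkpoint entry must use the name 'model_checkpoit' exactly as spelled in the code above.

# Hypothetical config_data for get_callbacks (keys inferred from the code above).
config_data = {
    'tb_log_dir': 'my_experiment',
    'output_path': 'models',
    'output_basename': 'sentiment_en',
    'callbacks': [
        {'name': 'early_stopping', 'monitor': 'val_loss',
         'patience': 3, 'verbose': 1, 'mode': 'min'},
        {'name': 'model_checkpoit', 'monitor': 'val_loss',
         'verbose': 1, 'save_best_only': 1, 'mode': 'min'},
    ],
}
callbacks_list, model_stored = get_callbacks(config_data, appendix='_run1')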

Example 13: test_EarlyStopping_reuse

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def test_EarlyStopping_reuse():
    np.random.seed(1337)
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = Sequential((
        Dense(1, input_dim=1, activation='relu'),
        Dense(1, activation='sigmoid'),
    ))
    model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
    weights = model.get_weights()

    hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
    assert len(hist.epoch) >= patience

    # This should allow training to go for at least `patience` epochs
    model.set_weights(weights)
    hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
    assert len(hist.epoch) >= patience 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 22, Source: test_callbacks.py

Example 14: test_EarlyStopping_patience

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def test_EarlyStopping_patience():
    class DummyModel(object):
        def __init__(self):
            self.stop_training = False

    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
    early_stop.model = DummyModel()

    losses = [0.0860, 0.1096, 0.1040, 0.1019]

    # Should stop after epoch 3, as the loss has not improved after patience=2 epochs.
    epochs_trained = 0
    early_stop.on_train_begin()

    for epoch in range(len(losses)):
        epochs_trained += 1
        early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})

        if early_stop.model.stop_training:
            break

    assert epochs_trained == 3 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 24, Source: test_callbacks.py

Example 15: main

# Required import: from keras import callbacks [as alias]
# Or: from keras.callbacks import EarlyStopping [as alias]
def main(rootdir, case, results):
    train_x, train_y, valid_x, valid_y, test_x, test_y = get_data(args.dataset, case)

    input_shape = (train_x.shape[1], train_x.shape[2])
    num_class = train_y.shape[1]
    if not os.path.exists(rootdir):
        os.makedirs(rootdir)
    filepath = os.path.join(rootdir, str(case) + '.hdf5')
    saveto = os.path.join(rootdir, str(case) + '.csv')
    optimizer = Adam(lr=args.lr, clipnorm=args.clip)
    pred_dir = os.path.join(rootdir, str(case) + '_pred.txt')

    if args.train:
        model = creat_model(input_shape, num_class)
        early_stop = EarlyStopping(monitor='val_acc', patience=15, mode='auto')
        reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=5, mode='auto', cooldown=3., verbose=1)
        checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='auto')
        csv_logger = CSVLogger(saveto)
        if args.dataset=='NTU' or args.dataset == 'PKU':
            callbacks_list = [csv_logger, checkpoint, early_stop, reduce_lr]
        else:
            callbacks_list = [csv_logger, checkpoint]

        model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        model.fit(train_x, train_y, validation_data=[valid_x, valid_y], epochs=args.epochs,
                  batch_size=args.batch_size, callbacks=callbacks_list, verbose=2)

    # test
    model = creat_model(input_shape, num_class)
    model.load_weights(filepath)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

    scores = get_activation(model, test_x, test_y, pred_dir, VA=10, par=9)

    results.append(round(scores, 2)) 
Developer: microsoft, Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition, Lines: 37, Source: va-rnn.py


Note: The keras.callbacks.EarlyStopping examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce this article without permission.