

Python ImageDataGenerator.flow Method Code Examples

This article collects typical usage examples of the Python method keras.preprocessing.image.ImageDataGenerator.flow. If you are looking for concrete examples of how ImageDataGenerator.flow is called and what it is used for, the curated code samples below may help. You can also explore further usage examples of keras.preprocessing.image.ImageDataGenerator, the class this method belongs to.


The sections below present 15 code examples of the ImageDataGenerator.flow method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
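Before turning to the examples, here is a minimal sketch of the basic flow() pattern. The array shapes, class count, and augmentation settings below are illustrative assumptions rather than values taken from any of the examples:

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

# Illustrative data: 100 RGB images of size 32x32 with one-hot labels for 10 classes.
x = np.random.rand(100, 32, 32, 3).astype('float32')
y = np.eye(10)[np.random.randint(0, 10, size=100)]

datagen = ImageDataGenerator(rotation_range=15,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             horizontal_flip=True)

# flow() returns an iterator that yields randomly augmented (x_batch, y_batch)
# tuples indefinitely, so the consumer decides when to stop.
for x_batch, y_batch in datagen.flow(x, y, batch_size=32):
    print(x_batch.shape, y_batch.shape)  # (32, 32, 32, 3) (32, 10)
    break  # break explicitly; the iterator never ends on its own

The examples that follow feed this iterator either to fit_generator (Examples 1, 3, 4, 7, 8, 12, 13, 15), to a manual train_on_batch loop (Examples 10, 14), or to prediction and preprocessing helpers (Examples 2, 5, 6, 9, 11).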

Example 1: test_image_data_generator_training

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
def test_image_data_generator_training():
    np.random.seed(1337)
    img_gen = ImageDataGenerator(rescale=1.)  # Dummy ImageDataGenerator
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.Conv2D(filters=8, kernel_size=3,
                      activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3),
                      activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    history = model.fit_generator(img_gen.flow(x_train, y_train, batch_size=16),
                                  epochs=10,
                                  validation_data=img_gen.flow(x_test, y_test,
                                                               batch_size=16),
                                  verbose=0)
    assert history.history['val_acc'][-1] > 0.75
    model.evaluate_generator(img_gen.flow(x_train, y_train, batch_size=16))
Author: TNonet, Project: keras, Lines: 34, Source: test_image_data_tasks.py

Example 2: augmentation

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
def augmentation(scans,masks,n):
    datagen = ImageDataGenerator(
        featurewise_center=False,   
        samplewise_center=False,  
        featurewise_std_normalization=False,  
        samplewise_std_normalization=False,  
        zca_whitening=False,  
        rotation_range=25,   
        width_shift_range=0.3,  
        height_shift_range=0.3,   
        horizontal_flip=True,   
        vertical_flip=True,  
        zoom_range=False)
    i=0
    for batch in datagen.flow(scans, batch_size=1, seed=1000): 
        scans=np.vstack([scans,batch])
        i += 1
        if i > n:
            break
    i=0
    for batch in datagen.flow(masks, batch_size=1, seed=1000): 
        masks=np.vstack([masks,batch])
        i += 1
        if i > n:
            break
    return((scans,masks))
Author: ericsolo, Project: python, Lines: 28, Source: unet_utils.py
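Example 2 applies the same augmentation to scans and to their masks by calling flow() twice with an identical seed, so the random transforms line up pair by pair. Below is a hedged sketch of the same idea as a reusable generator; the function name, argument shapes, and interleaved iteration are assumptions, and with some Keras versions it is worth checking one batch to confirm that image and mask really received the same transform:

from keras.preprocessing.image import ImageDataGenerator

def paired_flow(scans, masks, batch_size=8, seed=1000):
    """Yield (scan_batch, mask_batch) pairs transformed with the same random parameters."""
    datagen = ImageDataGenerator(rotation_range=25,
                                 width_shift_range=0.3,
                                 height_shift_range=0.3,
                                 horizontal_flip=True,
                                 vertical_flip=True)
    # The shared seed keeps both the shuffling order and the random transforms in sync.
    scan_iter = datagen.flow(scans, batch_size=batch_size, seed=seed)
    mask_iter = datagen.flow(masks, batch_size=batch_size, seed=seed)
    while True:
        yield next(scan_iter), next(mask_iter)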

Example 3: main

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
def main():
    model = Model()
    if (sys.argv[1] == "test"):
        global nb_epoch
        nb_epoch = 0
        global WEIGHTS_FILE
        WEIGHTS_FILE = sys.argv[2]

    elif(sys.argv[1] == "add"):
        global X_train, Y_train, X_val1, Y_val1
        X_train = np.concatenate((X_train, X_val1), axis=0)
        Y_train = np.concatenate((Y_train, Y_val1), axis=0)

    adam = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam)

    datagen = ImageDataGenerator(
        featurewise_center=False,
        featurewise_std_normalization=False,
        rotation_range=15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=False)

    datagen.fit(X_train)
    callbacks = [ModelCheckpoint(WEIGHTS_FILE, monitor='val_loss', verbose=1, save_best_only=True, mode='auto'),
                 EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')]
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                        samples_per_epoch=len(X_train), nb_epoch=nb_epoch, validation_data=(X_val1, Y_val1),
                        show_accuracy=True, callbacks=callbacks)

    model.load_weights(WEIGHTS_FILE)
    predict_test(model)
Author: Zvikush90, Project: BIU-evaluationary-Algo-Ex1, Lines: 36, Source: mnist_cnn.py

Example 4: train

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
def train():
    model_ = 'VGG_16'
    batch_size = 8
    nb_classes = 5
    nb_epoch = 200
    data_augmentation = True

    # input image dimensions
    if model_ in MODELS[0:2]:
        img_rows, img_cols = 224, 224
    if model_ in MODELS[3]:
        img_rows, img_cols = 299, 299
    # the Yelp images are RGB
    img_channels = 3

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = yelp_data(dtype=np.float32, grayscale=False, pixels=img_rows, batches=3,
                                                     model='VGG_16', data_dir='/home/rcamachobarranco/datasets')
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # generate model
    model = VGG_16(img_rows, img_cols, img_channels, nb_classes)

    # let's train the model using SGD + momentum
    sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(X_train, y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, show_accuracy=True,
                  validation_data=(X_test, y_test), shuffle=True)
    else:
        print('Using real-time data augmentation.')

        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images

        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(X_train)

        # fit the model on the batches generated by datagen.flow()
        model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=nb_epoch, show_accuracy=True,
                            validation_data=(X_test, y_test),
                            nb_worker=1)
Author: neostoic, Project: image_classification, Lines: 62, Source: convnet_keras_griffin.py

Example 5: augment_img

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
def augment_img(input_file, output_folder, img_format='jpg',
                number_imgs=10):
    """
    Generate number_imgs new images from a given image.
    This function is inspired from the following blog post:
    https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
    """
    datagen = ImageDataGenerator(
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')

    img = load_img(input_file)
    x = img_to_array(img)
    x = x.reshape((1,) + x.shape)
    i = 0
    for batch in datagen.flow(x, batch_size=1,
                              save_to_dir=output_folder,
                              save_format=img_format):
        i += 1
        if i > number_imgs:
            break
Author: yassineAlouini, Project: kaggle-tools, Lines: 28, Source: image_augmentation.py
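A hypothetical call to the augment_img helper above; the file paths and count are placeholders. Note that save_to_dir does not create the output directory, so it has to exist beforehand:

import os

os.makedirs('augmented', exist_ok=True)
augment_img('input/cat.jpg', 'augmented', img_format='png', number_imgs=5)
# 'augmented/' now holds the randomly transformed copies of input/cat.jpg
# (the loop in augment_img only breaks once i exceeds number_imgs).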

Example 6: gen_augment_arrays

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
def gen_augment_arrays(array, label, augmentations, rounds = 1):
    if augmentations is None:
        yield array, label
    else:

        auggen = ImageDataGenerator(featurewise_center = augmentations['featurewise_center'],
                                    samplewise_center = augmentations['samplewise_center'],
                                    featurewise_std_normalization = augmentations['featurewise_std_normalization'],
                                    samplewise_std_normalization = augmentations['samplewise_std_normalization'],
                                    zca_whitening = augmentations['zca_whitening'],
                                    rotation_range = augmentations['rotation_range'],
                                    width_shift_range = augmentations['width_shift_range'],
                                    height_shift_range = augmentations['height_shift_range'],
                                    shear_range = augmentations['shear_range'],
                                    zoom_range = augmentations['zoom_range'],
                                    channel_shift_range = augmentations['channel_shift_range'],
                                    fill_mode = augmentations['fill_mode'],
                                    cval = augmentations['cval'],
                                    horizontal_flip = augmentations['horizontal_flip'],
                                    vertical_flip = augmentations['vertical_flip'],
                                    rescale = augmentations['rescale'])

        array_augs, label_augs = next(auggen.flow(np.tile(array[np.newaxis],
                                                (rounds * augmentations['rounds'], 1, 1, 1)),
                                        np.tile(label[np.newaxis],
                                                (rounds * augmentations['rounds'], 1)),
                                        batch_size=rounds * augmentations['rounds']))

        for array_aug, label_aug in zip(array_augs, label_augs):
            yield array_aug, label_aug
Author: codealphago, Project: transfer, Lines: 32, Source: augment_arrays.py
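Example 6 obtains several augmented copies of a single array in one call by tiling the array and requesting a batch of the same size from flow(). A compact hedged sketch of just that trick, with assumed shapes:

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

auggen = ImageDataGenerator(rotation_range=30, horizontal_flip=True)

image = np.random.rand(64, 64, 3).astype('float32')  # a single image (assumption)
rounds = 4

# Tile the image into a (rounds, 64, 64, 3) batch and draw one augmented batch from it.
augmented = next(auggen.flow(np.tile(image[np.newaxis], (rounds, 1, 1, 1)),
                             batch_size=rounds, shuffle=False))
print(augmented.shape)  # (4, 64, 64, 3): four independently transformed copies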

Example 7: train

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
def train():
    (X_test, y_test, y_conf) = load.load_test_data()
    Y_test = np_utils.to_categorical(y_test, classes)
    print(X_test.shape[0], 'test samples')
    X_test = X_test.astype("float32")
    X_test /= 255
    datagen = ImageDataGenerator(rotation_range=30,  width_shift_range=0.01,  height_shift_range=0.01,  horizontal_flip=True, vertical_flip=True)
    t0=time.time()
    for e in range(nb_epoch):
        print ("******** Epoch %d ********" % (e+1))
        print ("Epoch Number: " + str(e))
        for X_batch, y_batch, class_weight in BatchGenerator():
            datagen.fit(X_batch)
            model.fit_generator(datagen.flow(X_batch, y_batch, batch_size=18, shuffle=True),
                                callbacks=[lh, checkpointer],
                                samples_per_epoch=split_size,
                                nb_epoch=nb_epoch_per,
                                validation_data=(X_test, Y_test),
                                class_weight=class_weight)
            y_pred = model.predict_classes(X_test, batch_size=20)
        (accuracy, correct)=PredictionMatrix()
        #model.save_weights((direct + '/weights/' + save_name[:-5] + 'E-%d.hdf5' )  % (e+1), overwrite=True)
        #print ("Weights saved to " + direct + '/weights/' + save_name[:-5] + 'E-%d.hdf5' % (e+1))
    t1=time.time()
    tyme = t1-t0   
    print("Training completed in %f seconds" % tyme)
    if save_name != '':
        model.save_weights(direct + '/weights/' + save_name, overwrite=True)
        print ("Weights saved to " + save_name)
    print ("Final training weights saved to " + save_name)
    return tyme
Author: HarryPratt, Project: FreshNetwork, Lines: 34, Source: model.py

Example 8: train

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
    def train(self):
        # load data
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)
        
        x_train, x_test = self.color_preprocessing(x_train, x_test)

        # build network
        model = self.build_model()
        model.summary()

        # Save the best model during each training checkpoint
        checkpoint = ModelCheckpoint(self.model_filename,
                                    monitor='val_loss', 
                                    verbose=0,
                                    save_best_only= True,
                                    mode='auto')
        plot_callback = PlotLearning()
        tb_cb = TensorBoard(log_dir=self.log_filepath, histogram_freq=0)

        cbks = [checkpoint, plot_callback, tb_cb]

        # set data augmentation
        print('Using real-time data augmentation.')
        datagen = ImageDataGenerator(horizontal_flip=True,width_shift_range=0.125,height_shift_range=0.125,fill_mode='constant',cval=0.)
        datagen.fit(x_train)

        # start training
        model.fit_generator(datagen.flow(x_train, y_train,batch_size=self.batch_size),steps_per_epoch=self.iterations,epochs=self.epochs,callbacks=cbks,validation_data=(x_test, y_test))
        
        model.save(self.model_filename)

        self._model = model
Author: AhlamMD, Project: Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials, Lines: 36, Source: network_in_network.py

Example 9: train_generator

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
 def train_generator(x, y, batch_size, shift_fraction=0.):
     train_datagen = ImageDataGenerator(width_shift_range=shift_fraction,
                                        height_shift_range=shift_fraction)  # shift up to 2 pixel for MNIST
     generator = train_datagen.flow(x, y, batch_size=batch_size)
     while 1:
         x_batch, y_batch = generator.next()
         yield ([x_batch, y_batch], [y_batch, x_batch])
Author: lzqkean, Project: deep_learning, Lines: 9, Source: capsulenet.py
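Example 9 wraps flow() in a plain Python generator so that every batch comes out in the ([x, y], [y, x]) input/target layout a CapsNet-style model expects. Here is a standalone sketch of the same pattern, using the builtin next() so it also runs on Python 3; the function name and defaults are assumptions:

from keras.preprocessing.image import ImageDataGenerator

def shifted_pair_generator(x, y, batch_size=32, shift_fraction=0.1):
    """Yield ([x_batch, y_batch], [y_batch, x_batch]) tuples, mirroring Example 9."""
    datagen = ImageDataGenerator(width_shift_range=shift_fraction,
                                 height_shift_range=shift_fraction)
    flow = datagen.flow(x, y, batch_size=batch_size)
    while True:
        x_batch, y_batch = next(flow)
        yield [x_batch, y_batch], [y_batch, x_batch]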

Example 10: fit

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
	def fit(self,x,y,doRTA):
		if doRTA == False:
			self.model.fit({"input":x,"output":y},nb_epoch=self.epochs,batch_size=self.batch_size)
		else:
			datagen = ImageDataGenerator(
			        featurewise_center=True,  # set input mean to 0 over the dataset
			        samplewise_center=False,  # set each sample mean to 0
			        featurewise_std_normalization=True,  # divide inputs by std of the dataset
			        samplewise_std_normalization=False,  # divide each input by its std
			        zca_whitening=False,
			        rotation_range=20,
			        width_shift_range=0.2, 
			        height_shift_range=0.2,
			        horizontal_flip=True, 
			        vertical_flip=False)
			datagen.fit(x)

			for e in range(self.epochs):
			    print('-'*40)
			    print('Epoch', e)
			    print('-'*40)
			    print('Training...')
			    # batch train with realtime data augmentation
			    progbar = generic_utils.Progbar(x.shape[0])
			    for X_batch, Y_batch in datagen.flow(x, y):
			        loss = self.model.train_on_batch({"input":X_batch,"output":Y_batch})
			        progbar.add(X_batch.shape[0], values=[('train loss', loss[0])])
Author: tereka114, Project: MachineLearningCombinator, Lines: 29, Source: keras_nn.py

Example 11: predict_labels

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
def predict_labels(model):
    """writes test image labels and predictions to csv"""
    
    test_datagen = ImageDataGenerator(rescale=1./255)
    test_generator = test_datagen.flow_from_directory(
        test_data_dir,
        target_size=(img_height, img_width),
        batch_size=32,
        shuffle=False,
        class_mode=None)

    base_path = "../data/test/test/"

    with open("prediction.csv", "w") as f:
        p_writer = csv.writer(f, delimiter=',', lineterminator='\n')
        for _, _, imgs in os.walk(base_path):
            for im in imgs:
                pic_id = im.split(".")[0]
                img = load_img(base_path + im)
                img = imresize(img, size=(img_height, img_width))
                test_x = img_to_array(img).reshape(3, img_height, img_width)
                test_x = test_x.reshape((1,) + test_x.shape)
                test_generator = test_datagen.flow(test_x,
                                                   batch_size=1,
                                                   shuffle=False)
                prediction = model.predict_generator(test_generator, 1)[0][0]
                p_writer.writerow([pic_id, prediction])
Author: jannson, Project: Similar, Lines: 29, Source: img_clf.py
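Example 11 also shows the inference side of flow(): when only images are passed (no labels), the iterator yields image batches alone, which is what predict_generator consumes, and shuffle=False keeps predictions in input order. A hedged sketch of that piece in isolation; the image shape is an assumption and the channels-last layout differs from the channels-first arrays used in the example:

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

test_datagen = ImageDataGenerator(rescale=1. / 255)
test_x = np.random.rand(1, 128, 128, 3).astype('float32') * 255  # one image (assumption)

# Without labels, flow() yields image batches only.
single_image_flow = test_datagen.flow(test_x, batch_size=1, shuffle=False)
x_batch = next(single_image_flow)
print(x_batch.shape, x_batch.max())  # (1, 128, 128, 3), values rescaled into [0, 1]
# prediction = model.predict_generator(single_image_flow, 1)  # with a trained `model` in scope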

Example 12: train_model

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
def train_model(model,X_train,y_train):
    
    print("Training Model")
    
    # Image data generator to augment the data
    datagen = ImageDataGenerator(rotation_range = 2,
                             featurewise_center = False,
                             featurewise_std_normalization=False,
                             zoom_range = [0.8, 1],
                             fill_mode = 'constant',
                             cval=0)
    
    # Setup a regression network with adam optimizer
    model.compile(loss='mean_squared_error', optimizer='adam')
    
    # Incrementally save the best model basd on loss value
    chkpnt = ModelCheckpoint('model.h5',monitor='loss',verbose=1,save_best_only=True,mode='min')
    callbacks_list = [chkpnt]

    # Shuffle data
    X_train,y_train = shuffle(X_train,y_train)
    
    # Train the network with a batch size of 64 using the image data generator for a total of 10 epochs
    model.fit_generator(datagen.flow(X_train,y_train,batch_size=64),samples_per_epoch=len(X_train),nb_epoch=10,callbacks=callbacks_list,verbose=1)
    #,save_to_dir='./AugData',save_prefix='aug'
    model.save("model_final.h5")
    return model
Author: wyw636, Project: Deep_Behavioral_Cloning, Lines: 29, Source: model.py

Example 13: train

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
    def train(self,model):

        #training parameters
        batch_size = 128
        maxepoches = 250
        learning_rate = 0.1
        lr_decay = 1e-6

        # The data, shuffled and split between train and test sets:
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train, x_test = self.normalize(x_train, x_test)

        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)

        lrf = learning_rate


        #data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)



        #optimization details
        sgd = optimizers.SGD(lr=lrf, decay=lr_decay, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])


        # training process in a for loop with learning rate drop every 25 epoches.

        for epoch in range(1,maxepoches):

            if epoch%25==0 and epoch>0:
                lrf/=2
                sgd = optimizers.SGD(lr=lrf, decay=lr_decay, momentum=0.9, nesterov=True)
                model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

            historytemp = model.fit_generator(datagen.flow(x_train, y_train,
                                             batch_size=batch_size),
                                steps_per_epoch=x_train.shape[0] // batch_size,
                                epochs=epoch,
                                validation_data=(x_test, y_test),initial_epoch=epoch-1)
        model.save_weights('cifar100vgg.h5')
        return model
Author: BoyuanFeng, Project: Specialization, Lines: 60, Source: half2.py

Example 14: train

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
def train():
    
    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    #checkpointer = ModelCheckpoint(filepath="/Users/quinnjarrell/Desktop/Experiments/keras/saved/", verbose=1, save_best_only=True)
    min_score = 91293921
    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print("Training...")
        # batch train with realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=True,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=True,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        datagen.fit(X_train)
        progbar = generic_utils.Progbar(X_train.shape[0])
        x = 0
        for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size=128):# save_to_dir="/Users/quinnjarrell/datasets/catsvsdogs/train/resized/resized_generated"):
            loss = model.train_on_batch(X_batch, Y_batch)
            x += 1
            check_for_early_shutdown(x)
            progbar.add(X_batch.shape[0], values=[("train loss", loss)])

        print("Testing...")
        # test time!
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test, batch_size=128):
            score = model.test_on_batch(X_batch, Y_batch)
            x += 1
            check_for_early_shutdown(x)
            progbar.add(X_batch.shape[0], values=[("test loss", score)])
        if score < min_score:
            print("New best model with score: %s" % score)
            save_data()
            min_score = score
Author: TheRushingWookie, Project: EC2-Keras, Lines: 46, Source: catsvsdogs.py
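Examples 10 and 14 iterate over datagen.flow(...) directly without a break, which appears to rely on an older Keras where the loop ended after one pass over the data. In current Keras the iterator is endless, so a manual train_on_batch loop needs its own stopping condition. A hedged sketch of the modern equivalent of one such epoch; the model and arrays are assumed to be in scope:

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

# Assumed in scope: a compiled Keras `model` plus X_train / Y_train arrays.
datagen = ImageDataGenerator(rotation_range=20, horizontal_flip=True)
batch_size = 128
steps_per_epoch = int(np.ceil(len(X_train) / batch_size))

for step, (x_batch, y_batch) in enumerate(datagen.flow(X_train, Y_train, batch_size=batch_size)):
    loss = model.train_on_batch(x_batch, y_batch)
    if step + 1 >= steps_per_epoch:  # stop by hand: flow() never raises StopIteration
        break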

Example 15: fit

# Required import: from keras.preprocessing.image import ImageDataGenerator [as alias]
# Or: from keras.preprocessing.image.ImageDataGenerator import flow [as alias]
    def fit(self, train_set, test_set, nb_epoch):
        super(ModelCNNBasic, self).fit(train_set, test_set, nb_epoch)
        # data augmentation
        datagen = ImageDataGenerator(
            zca_whitening=False,
            rotation_range=180,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
            vertical_flip=True,
            shear_range=0.2,
            zoom_range=0.2
        )

        datagen.fit(self.x_train)

        verbose = 1
        if not self.verbose:
            verbose = 0

        if test_set is not None:
            early_stopping = EarlyStopping(monitor='val_loss', patience=20)
            checkpoint_path = 'output/checkpoint/{}'.format(self.name)
            helpers.prepare_dir(checkpoint_path, empty=True)
            checkpoint_path = os.path.join(checkpoint_path, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5')
            checkpoint = ModelCheckpoint(filepath=checkpoint_path, monitor='val_loss', save_best_only=False)
            callbacks = [early_stopping, checkpoint]
            self.model.fit_generator(datagen.flow(self.x_train,
                                                  self.y_train,
                                                  shuffle=True),
                                     samples_per_epoch=self.x_train.shape[0],
                                     nb_epoch=nb_epoch,
                                     validation_data=(self.x_test, self.y_test),
                                     callbacks=callbacks,
                                     verbose=verbose,
                                     )
        else:
            self.model.fit_generator(datagen.flow(self.x_train,
                                                  self.y_train,
                                                  shuffle=True),
                                     samples_per_epoch=self.x_train.shape[0],
                                     nb_epoch=nb_epoch,
                                     verbose=verbose
                                     )
Author: grishasergei, Project: lipnet, Lines: 46, Source: model_cnn.py


Note: The keras.preprocessing.image.ImageDataGenerator.flow examples in this article were compiled by 纯净天空 (vimsky) from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; refer to each project's license before redistributing or reusing the code. Do not reproduce this article without permission.