

Python image.ImageDataGenerator Class Code Examples

This article collects typical usage examples of the keras.preprocessing.image.ImageDataGenerator class in Python. If you have been wondering what ImageDataGenerator is for, how it is used, and what real-world code looks like, the curated class examples below should help.


Fifteen code examples of the ImageDataGenerator class are shown below, sorted by popularity by default.
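
Before diving into the examples, here is a minimal, self-contained sketch of the typical ImageDataGenerator workflow (an illustrative assumption, not taken from any of the projects below): configure the augmentations once, then stream augmented batches with flow() for in-memory arrays or flow_from_directory() for folders of images.

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

# Configure the augmentations once; each yielded batch is transformed on the fly.
datagen = ImageDataGenerator(rescale=1. / 255,      # scale pixel values to [0, 1]
                             rotation_range=15,     # random rotations of up to 15 degrees
                             horizontal_flip=True)  # random left-right flips

x = np.random.rand(8, 32, 32, 3)  # dummy batch of 8 RGB images (hypothetical data)
y = np.arange(8)                  # dummy labels

for x_batch, y_batch in datagen.flow(x, y, batch_size=4):
    print(x_batch.shape, y_batch.shape)  # (4, 32, 32, 3) (4,)
    break  # flow() yields batches indefinitely, so stop after one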

Example 1: _get_data_generators

def _get_data_generators(img_width, img_height, labels):
    train_datagen = ImageDataGenerator(
        fill_mode="nearest",
        horizontal_flip=True,
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.2)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        batch_size=32,
        classes=labels,
        target_size=(img_width, img_height),
        class_mode="categorical")

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        batch_size=32,
        classes=labels,
        target_size=(img_width, img_height),
        class_mode="categorical")

    return train_generator, validation_generator
Author: nirmalyaghosh, Project: mini-projects, Lines: 25, Source: train.py

Example 2: preprocess_data

def preprocess_data(X_train, y_train, X_val, y_val, X_test, y_test):
	print('start preprocess...')

	X_train=scale_data(X_train)
	X_val=scale_data(X_val)
	X_test=scale_data(X_test)

	# subtract the mean, per sample and per color channel
	X_train, X_val, X_test = im.mean2(X_train, X_val, X_test)

	#apply ZCA whitening on each color channel
	#X_train=im.whiten(X_train,epsilon=0.1)
	#X_test=im.whiten(X_test,epsilon=0.1)

	g = ImageDataGenerator(width_shift_range=0.2, height_shift_range=0.2,
	                       horizontal_flip=True, fill_mode='nearest',
	                       dim_ordering='th')
	g.fit(X_train)
	
	y_train = to_categorical(y_train)
	y_val = to_categorical(y_val)
	y_test = to_categorical(y_test)

	print('...done')

	return g, X_train, y_train, X_val, y_val, X_test, y_test
Author: ybenigot, Project: keras, Lines: 25, Source: convnet.py

Example 3: test_image_data_generator_training

def test_image_data_generator_training():
    np.random.seed(1337)
    img_gen = ImageDataGenerator(rescale=1.)  # Dummy ImageDataGenerator
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.Conv2D(filters=8, kernel_size=3,
                      activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3),
                      activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    history = model.fit_generator(img_gen.flow(x_train, y_train, batch_size=16),
                                  epochs=10,
                                  validation_data=img_gen.flow(x_test, y_test,
                                                               batch_size=16),
                                  verbose=0)
    assert history.history['val_acc'][-1] > 0.75
    model.evaluate_generator(img_gen.flow(x_train, y_train, batch_size=16))
Author: TNonet, Project: keras, Lines: 32, Source: test_image_data_tasks.py

Example 4: train_model

def train_model(model,X_train,y_train):
    
    print("Training Model")
    
    # Image data generator to augment the data
    datagen = ImageDataGenerator(rotation_range = 2,
                             featurewise_center = False,
                             featurewise_std_normalization=False,
                             zoom_range = [0.8, 1],
                             fill_mode = 'constant',
                             cval=0)
    
    # Setup a regression network with adam optimizer
    model.compile(loss='mean_squared_error', optimizer='adam')
    
    # Incrementally save the best model based on loss value
    chkpnt = ModelCheckpoint('model.h5',monitor='loss',verbose=1,save_best_only=True,mode='min')
    callbacks_list = [chkpnt]

    # Shuffle data
    X_train,y_train = shuffle(X_train,y_train)
    
    # Train the network with a batch size of 64 using the image data generator for a total of 10 epochs
    model.fit_generator(datagen.flow(X_train,y_train,batch_size=64),samples_per_epoch=len(X_train),nb_epoch=10,callbacks=callbacks_list,verbose=1)
    #,save_to_dir='./AugData',save_prefix='aug'
    model.save("model_final.h5")
    return model
Author: wyw636, Project: Deep_Behavioral_Cloning, Lines: 27, Source: model.py

Example 5: get_generators

def get_generators():
    train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        horizontal_flip=True,
        rotation_range=10.,
        width_shift_range=0.2,
        height_shift_range=0.2)

    test_datagen = ImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
        os.path.join('data', 'train'),
        target_size=(299, 299),
        batch_size=32,
        classes=data.classes,
        class_mode='categorical')

    validation_generator = test_datagen.flow_from_directory(
        os.path.join('data', 'test'),
        target_size=(299, 299),
        batch_size=32,
        classes=data.classes,
        class_mode='categorical')

    return train_generator, validation_generator
Author: genghaijian, Project: five-video-classification-methods, Lines: 26, Source: train_cnn.py

Example 6: data

def data():
    nb_classes = 10
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255

    # this will do preprocessing and realtime data augmentation
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)

    return datagen, X_train, Y_train, X_test, Y_test
Author: ShuaiW, Project: hyperas, Lines: 35, Source: cifar_generator_cnn.py

Example 7: train

def train():
    (X_test, y_test, y_conf) = load.load_test_data()
    Y_test = np_utils.to_categorical(y_test, classes)
    print(X_test.shape[0], 'test samples')
    X_test = X_test.astype("float32")
    X_test /= 255
    datagen = ImageDataGenerator(rotation_range=30,  width_shift_range=0.01,  height_shift_range=0.01,  horizontal_flip=True, vertical_flip=True)
    t0=time.time()
    for e in range(nb_epoch):
        print ("******** Epoch %d ********" % (e+1))
        print ("Epoch Number: " + str(e))
        for X_batch, y_batch, class_weight in BatchGenerator():
            datagen.fit(X_batch)
            model.fit_generator(datagen.flow(X_batch, y_batch, batch_size=18, shuffle=True),
                                callbacks=[lh, checkpointer],
                                samples_per_epoch=split_size,
                                nb_epoch=nb_epoch_per,
                                validation_data=(X_test, Y_test),
                                class_weight=class_weight)
            y_pred = model.predict_classes(X_test, batch_size=20)
        (accuracy, correct)=PredictionMatrix()
        #model.save_weights((direct + '/weights/' + save_name[:-5] + 'E-%d.hdf5' )  % (e+1), overwrite=True)
        #print ("Weights saved to " + direct + '/weights/' + save_name[:-5] + 'E-%d.hdf5' % (e+1))
    t1=time.time()
    tyme = t1-t0   
    print("Training completed in %f seconds" % tyme)
    if save_name != '':
        model.save_weights(direct + '/weights/' + save_name, overwrite=True)
        print ("Weights saved to " + save_name)
    print ("Final training weights saved to " + save_name)
    return tyme
Author: HarryPratt, Project: FreshNetwork, Lines: 32, Source: model.py

Example 8: save_bottlebeck_features

def save_bottlebeck_features():
    datagen = ImageDataGenerator(rescale=1. / 255)

    # build the VGG16 network
    model = applications.VGG16(include_top=False, weights='imagenet')

    generator = datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    bottleneck_features_train = model.predict_generator(
        generator, nb_train_samples // batch_size)
    np.save(open('bottleneck_features_train.npy', 'wb'),
            bottleneck_features_train)

    generator = datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    bottleneck_features_validation = model.predict_generator(
        generator, nb_validation_samples // batch_size)
    np.save(open('bottleneck_features_validation.npy', 'wb'),
            bottleneck_features_validation)
Author: maranemil, Project: howto, Lines: 27, Source: Image+Classifier+-+Keras.ipynb.py

Example 9: load_data_generator

def load_data_generator(train_folderpath, mask_folderpath, img_size = (768, 768), mask_size=(768,768), batch_size=32):
    """
    Returns a data generator with masks and training data specified by the directory paths given.
    """
    data_gen_args = dict(
                        width_shift_range=0.2,
                        height_shift_range=0.2,
                        horizontal_flip=True,
                        rotation_range=10,
                        zoom_range=0.2,
                        fill_mode="constant", 
                        cval=0       
                        )

    image_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)
        
    seed = 42
    
    image_generator = image_datagen.flow_from_directory(train_folderpath, class_mode=None,
        target_size = img_size, seed=seed, color_mode = 'rgb', batch_size=batch_size)
    mask_generator = mask_datagen.flow_from_directory(mask_folderpath, class_mode=None, 
        target_size = mask_size,seed=seed, color_mode='grayscale', batch_size=batch_size)

    return zip(image_generator, mask_generator)
Author: Ozzyz, Project: ship_detection, Lines: 25, Source: unet.py

Example 10: train

    def train(self):
        # load data
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)
        
        x_train, x_test = self.color_preprocessing(x_train, x_test)

        # build network
        model = self.build_model()
        model.summary()

        # Save the best model during each training checkpoint
        checkpoint = ModelCheckpoint(self.model_filename,
                                    monitor='val_loss', 
                                    verbose=0,
                                    save_best_only= True,
                                    mode='auto')
        plot_callback = PlotLearning()
        tb_cb = TensorBoard(log_dir=self.log_filepath, histogram_freq=0)

        cbks = [checkpoint, plot_callback, tb_cb]

        # set data augmentation
        print('Using real-time data augmentation.')
        datagen = ImageDataGenerator(horizontal_flip=True,width_shift_range=0.125,height_shift_range=0.125,fill_mode='constant',cval=0.)
        datagen.fit(x_train)

        # start training
        model.fit_generator(datagen.flow(x_train, y_train,batch_size=self.batch_size),steps_per_epoch=self.iterations,epochs=self.epochs,callbacks=cbks,validation_data=(x_test, y_test))
        
        model.save(self.model_filename)

        self._model = model
Author: AhlamMD, Project: Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials, Lines: 34, Source: network_in_network.py

Example 11: augment_data

def augment_data(train_data):
    augmented_data_generator = ImageDataGenerator(
        rotation_range=20,
        horizontal_flip=True
    )
    augmented_data_generator.fit(train_data)
    return augmented_data_generator
Author: bio-ontology-research-group, Project: neural-network-plant-trait-classification, Lines: 7, Source: playground.py

Example 12: augmentation

def augmentation(scans,masks,n):
    datagen = ImageDataGenerator(
        featurewise_center=False,   
        samplewise_center=False,  
        featurewise_std_normalization=False,  
        samplewise_std_normalization=False,  
        zca_whitening=False,  
        rotation_range=25,   
        width_shift_range=0.3,  
        height_shift_range=0.3,   
        horizontal_flip=True,   
        vertical_flip=True,  
        zoom_range=False)
    i=0
    for batch in datagen.flow(scans, batch_size=1, seed=1000): 
        scans=np.vstack([scans,batch])
        i += 1
        if i > n:
            break
    i=0
    for batch in datagen.flow(masks, batch_size=1, seed=1000): 
        masks=np.vstack([masks,batch])
        i += 1
        if i > n:
            break
    return((scans,masks))
Author: ericsolo, Project: python, Lines: 26, Source: unet_utils.py

Example 13: main

def main():
    model = Model()
    if (sys.argv[1] == "test"):
        global nb_epoch
        nb_epoch = 0
        global WEIGHTS_FILE
        WEIGHTS_FILE = sys.argv[2]

    elif(sys.argv[1] == "add"):
        global X_train, Y_train, X_val1, Y_val1
        X_train = np.concatenate((X_train, X_val1), axis=0)
        Y_train = np.concatenate((Y_train, Y_val1), axis=0)

    adam = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam)

    datagen = ImageDataGenerator(
        featurewise_center=False,
        featurewise_std_normalization=False,
        rotation_range=15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=False)

    datagen.fit(X_train)
    callbacks = [ModelCheckpoint(WEIGHTS_FILE, monitor='val_loss', verbose=1, save_best_only=True, mode='auto'),
                 EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')]
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                        samples_per_epoch=len(X_train), nb_epoch=nb_epoch, validation_data=(X_val1, Y_val1),
                        show_accuracy=True, callbacks=callbacks)

    model.load_weights(WEIGHTS_FILE)
    predict_test(model)
Author: Zvikush90, Project: BIU-evaluationary-Algo-Ex1, Lines: 34, Source: mnist_cnn.py

Example 14: fit

	def fit(self,x,y,doRTA):
		if doRTA == False:
			self.model.fit({"input":x,"output":y},nb_epoch=self.epochs,batch_size=self.batch_size)
		else:
			datagen = ImageDataGenerator(
			        featurewise_center=True,  # set input mean to 0 over the dataset
			        samplewise_center=False,  # set each sample mean to 0
			        featurewise_std_normalization=True,  # divide inputs by std of the dataset
			        samplewise_std_normalization=False,  # divide each input by its std
			        zca_whitening=False,
			        rotation_range=20,
			        width_shift_range=0.2, 
			        height_shift_range=0.2,
			        horizontal_flip=True, 
			        vertical_flip=False)
			datagen.fit(x)

			for e in range(self.epochs):
			    print('-'*40)
			    print('Epoch', e)
			    print('-'*40)
			    print('Training...')
			    # batch train with realtime data augmentation
			    progbar = generic_utils.Progbar(x.shape[0])
			    for X_batch, Y_batch in datagen.flow(x, y):
			        loss = self.model.train_on_batch({"input":X_batch,"output":Y_batch})
			        progbar.add(X_batch.shape[0], values=[('train loss', loss[0])])
Author: tereka114, Project: MachineLearningCombinator, Lines: 27, Source: keras_nn.py

Example 15: predict_labels

def predict_labels(model):
    """writes test image labels and predictions to csv"""
    
    test_datagen = ImageDataGenerator(rescale=1./255)
    test_generator = test_datagen.flow_from_directory(
        test_data_dir,
        target_size=(img_height, img_width),
        batch_size=32,
        shuffle=False,
        class_mode=None)

    base_path = "../data/test/test/"

    with open("prediction.csv", "w") as f:
        p_writer = csv.writer(f, delimiter=',', lineterminator='\n')
        for _, _, imgs in os.walk(base_path):
            for im in imgs:
                pic_id = im.split(".")[0]
                img = load_img(base_path + im)
                img = imresize(img, size=(img_height, img_width))
                test_x = img_to_array(img).reshape(3, img_height, img_width)
                test_x = test_x.reshape((1,) + test_x.shape)
                test_generator = test_datagen.flow(test_x,
                                                   batch_size=1,
                                                   shuffle=False)
                prediction = model.predict_generator(test_generator, 1)[0][0]
                p_writer.writerow([pic_id, prediction])
Author: jannson, Project: Similar, Lines: 27, Source: img_clf.py


Note: The keras.preprocessing.image.ImageDataGenerator class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Refer to each project's license before redistributing or reusing the code; please do not republish without permission.