

Python image.ImageDataGenerator Method Code Examples

This article collects typical usage examples of the Python method keras.preprocessing.image.ImageDataGenerator. If you are wondering how exactly to use image.ImageDataGenerator, or what it is good for, the curated code examples below may help. You can also explore further usage examples from keras.preprocessing.image, the module this method belongs to.


The sections below present 15 code examples of image.ImageDataGenerator, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
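
Before diving into the individual examples, here is a minimal, self-contained sketch of the typical ImageDataGenerator workflow: configure augmentation options, then feed in-memory arrays with flow() or read images from a directory tree with flow_from_directory(). The augmentation values, array shapes, and the 'data/train' path are illustrative placeholders, not taken from any of the examples that follow.

from keras.preprocessing.image import ImageDataGenerator
import numpy as np

# Augmentation settings; every argument shown is a standard Keras option.
datagen = ImageDataGenerator(
    rescale=1. / 255,        # scale pixel values to [0, 1]
    rotation_range=15,       # random rotations up to 15 degrees
    width_shift_range=0.1,   # random horizontal shift (fraction of width)
    height_shift_range=0.1,  # random vertical shift (fraction of height)
    horizontal_flip=True)

# Option 1: augment in-memory arrays (x: images, y: integer labels).
x = np.random.random((8, 64, 64, 3)).astype('float32')
y = np.arange(8)
batches = datagen.flow(x, y, batch_size=4)
x_batch, y_batch = next(batches)

# Option 2: read and augment images from a directory tree
# ('data/train/<class_name>/*.jpg' is a placeholder path).
# flow_gen = datagen.flow_from_directory(
#     'data/train', target_size=(64, 64), batch_size=4, class_mode='sparse')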

Example 1: get_image_data_augmentor_from_dataset

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def get_image_data_augmentor_from_dataset(dataset):
    from keras.preprocessing.image import ImageDataGenerator
    dataset_config = dataset['config']

    augShearRange = float(get_option(dataset_config, 'augShearRange', 0.1))
    augZoomRange = float(get_option(dataset_config, 'augZoomRange', 0.1))
    augHorizontalFlip = bool(get_option(dataset_config, 'augHorizontalFlip', False))
    augVerticalFlip = bool(get_option(dataset_config, 'augVerticalFlip', False))
    augRotationRange = float(get_option(dataset_config, 'augRotationRange', 0.2))

    return ImageDataGenerator(
        rotation_range=augRotationRange,
        shear_range=augShearRange,
        zoom_range=augZoomRange,
        horizontal_flip=augHorizontalFlip,
        vertical_flip=augVerticalFlip
    ) 
Author: aetros, Project: aetros-cli, Lines of code: 19, Source file: auto_dataset.py
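
The snippet above depends on a get_option helper from the aetros-cli project that is not shown here. A plausible stand-in, assuming it simply reads a key from the config dict with a fallback default (the real helper may do more, e.g. type handling):

def get_option(config, key, default=None):
    # Hypothetical stand-in for aetros-cli's get_option: return the config
    # value if present and not None, otherwise fall back to the default.
    value = config.get(key) if isinstance(config, dict) else None
    return default if value is None else value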

Example 2: evaluate_test_dataset

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def evaluate_test_dataset():
    # Test
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    test_generator = test_datagen.flow_from_directory(
        dataset_test_path,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='sparse',                                                                           # Binary to Multi classification changes
        save_to_dir=None,
        shuffle=False)

    scores = model.evaluate_generator(test_generator, nb_test_samples // batch_size)

    logging.debug('model.metrics_names {}'.format(model.metrics_names))
    logging.debug('scores {}'.format(scores)) 
Author: abhishekrana, Project: DeepFashion, Lines of code: 18, Source file: test_multi.py

Example 3: pre_processing

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def pre_processing(img):
    # Randomly scale saturation (S) and value/exposure (V) by a factor in [0.9, 1.1]
    rand_s = random.uniform(0.9, 1.1)
    rand_v = random.uniform(0.9, 1.1)

    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)

    tmp = np.ones_like(img[:, :, 1]) * 255
    img[:, :, 1] = np.where(img[:, :, 1] * rand_s > 255, tmp, img[:, :, 1] * rand_s)
    img[:, :, 2] = np.where(img[:, :, 2] * rand_v > 255, tmp, img[:, :, 2] * rand_v)

    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)

    # Center and scale pixel values to the range [-1, 1]
    return img / 127.5 - 1


# Get ImageDataGenerator arguments (options) depending on mode: train, val, or test (a hedged sketch of such a helper follows below)
Author: dhkim0225, Project: keras-image-segmentation, Lines of code: 20, Source file: generator.py
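
The trailing comment above refers to a helper, not included in the snippet, that builds ImageDataGenerator keyword arguments per mode. A hedged sketch of what such a helper might look like; the augmentation values are illustrative choices, not the keras-image-segmentation project's actual settings:

def get_data_gen_args(mode):
    # Hypothetical helper: return ImageDataGenerator kwargs per mode.
    # Training gets geometric augmentation plus the custom pre_processing above;
    # validation and test only get the preprocessing.
    if mode == 'train':
        return dict(preprocessing_function=pre_processing,
                    rotation_range=10,
                    width_shift_range=0.1,
                    height_shift_range=0.1,
                    horizontal_flip=True,
                    fill_mode='nearest')
    if mode in ('val', 'test'):
        return dict(preprocessing_function=pre_processing)
    raise ValueError("mode must be 'train', 'val' or 'test', got {}".format(mode))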

Example 4: __init__

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def __init__(
			self, train_path="../data_set/train", label_path="../data_set/label", merge_path="../data_set/merge",
			aug_merge_path="../data_set/aug_merge", aug_train_path="../data_set/aug_train",
			aug_label_path="../data_set/aug_label", img_type="tif"
	):

		# Use glob to collect all *.img_type files from each path
		self.train_imgs = glob.glob(train_path + "/*." + img_type)  # training set images
		self.label_imgs = glob.glob(label_path + "/*." + img_type)  # label images
		self.train_path = train_path
		self.label_path = label_path
		self.merge_path = merge_path
		self.img_type = img_type
		self.aug_merge_path = aug_merge_path
		self.aug_train_path = aug_train_path
		self.aug_label_path = aug_label_path
		self.slices = len(self.train_imgs)
		self.datagen = ImageDataGenerator(
			rotation_range=0.2,
			width_shift_range=0.05,
			height_shift_range=0.05,
			shear_range=0.05,
			zoom_range=0.05,
			horizontal_flip=True,
			fill_mode='nearest') 
Author: DuFanXin, Project: U-net, Lines of code: 27, Source file: data_Keras.py

Example 5: train_model

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def train_model(model, X, X_test, Y, Y_test):
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/./logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    # Data augmentation: generate augmented batches on the fly.
    # It improves generalization, at the cost of longer training.

    # If you don't want augmentation, train directly on the arrays instead:
    # model.fit(X, Y, batch_size=10, epochs=25, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)

    from keras.preprocessing.image import ImageDataGenerator
    generated_data = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=0,  width_shift_range=0.1, height_shift_range=0.1, horizontal_flip = True, vertical_flip = False)
    generated_data.fit(X)
    model.fit_generator(generated_data.flow(X, Y, batch_size=8), steps_per_epoch=X.shape[0]//8, epochs=25, validation_data=(X_test, Y_test), callbacks=checkpoints)

    return model 
Author: ardamavi, Project: Dog-Cat-Classifier, Lines of code: 22, Source file: train.py

Example 6: test_image_data_generator_with_validation_split

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def test_image_data_generator_with_validation_split(self):
        for test_images in self.all_test_images:
            img_list = []
            for im in test_images:
                img_list.append(image.img_to_array(im)[None, ...])

            images = np.vstack(img_list)
            generator = image.ImageDataGenerator(validation_split=0.5)
            seq = generator.flow(images, np.arange(images.shape[0]),
                                 shuffle=False, batch_size=3,
                                 subset='validation')
            x, y = seq[0]
            assert list(y) == [0, 1, 2]
            seq = generator.flow(images, np.arange(images.shape[0]),
                                 shuffle=False, batch_size=3,
                                 subset='training')
            x2, y2 = seq[0]
            assert list(y2) == [4, 5, 6]

            with pytest.raises(ValueError):
                generator.flow(images, np.arange(images.shape[0]),
                               shuffle=False, batch_size=3,
                               subset='foo') 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 25, Source file: image_test.py

Example 7: test_image_data_generator_invalid_data

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def test_image_data_generator_invalid_data(self):
        generator = image.ImageDataGenerator(
            featurewise_center=True,
            samplewise_center=True,
            featurewise_std_normalization=True,
            samplewise_std_normalization=True,
            zca_whitening=True,
            data_format='channels_last')
        # Test fit with invalid data
        with pytest.raises(ValueError):
            x = np.random.random((3, 10, 10))
            generator.fit(x)

        # Test flow with invalid data
        with pytest.raises(ValueError):
            x = np.random.random((32, 10, 10))
            generator.flow(np.arange(x.shape[0])) 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 19, Source file: image_test.py

Example 8: test_directory_iterator_class_mode_input

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def test_directory_iterator_class_mode_input(self, tmpdir):
        tmpdir.join('class-1').mkdir()

        # save the images in the paths
        count = 0
        for test_images in self.all_test_images:
            for im in test_images:
                filename = str(tmpdir / 'class-1' / 'image-{}.jpg'.format(count))
                im.save(filename)
                count += 1

        # create iterator
        generator = image.ImageDataGenerator()
        dir_iterator = generator.flow_from_directory(str(tmpdir), class_mode='input')
        batch = next(dir_iterator)

        # check if input and output have the same shape
        assert(batch[0].shape == batch[1].shape)
        # check if the input and output images are not the same numpy array
        input_img = batch[0][0]
        output_img = batch[1][0]
        output_img[0][0][0] += 1
        assert(input_img[0][0][0] != output_img[0][0][0]) 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 25, Source file: image_test.py

Example 9: data_gen_mnist

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def data_gen_mnist(X_train):
    datagen = ImageDataGenerator()

    datagen.fit(X_train)
    return datagen 
Author: sunblaze-ucb, Project: blackbox-attacks, Lines of code: 7, Source file: mnist.py

Example 10: learn

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def learn():
    (train_x, train_y, sample_weight), (test_x, test_y) = load_data()
    datagen = ImageDataGenerator(horizontal_flip=True,
                                 vertical_flip=True)
    train_generator = datagen.flow(train_x, train_y, sample_weight=sample_weight)
    base = VGG16(weights='imagenet', include_top=False, input_shape=(None, None, 3))
    for layer in base.layers[:-4]:
        layer.trainable = False
    model = models.Sequential([
        base,
        layers.BatchNormalization(),
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.BatchNormalization(),
        layers.Dense(64, activation='relu'),
        layers.BatchNormalization(),
        layers.Dropout(0.20),
        layers.Dense(80, activation='softmax')
    ])
    model.compile(optimizer=optimizers.RMSprop(lr=1e-5),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    reduce_lr = ReduceLROnPlateau(verbose=1)
    model.fit_generator(train_generator, epochs=400,
                        steps_per_epoch=100,
                        validation_data=(test_x[:800], test_y[:800]),
                        callbacks=[reduce_lr])
    result = model.evaluate(test_x, test_y)
    print(result)
    model.save('12306.image.model.h5', include_optimizer=False) 
Author: testerSunshine, Project: 12306, Lines of code: 33, Source file: mlearn_for_image.py

Example 11: predict_image_dir

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def predict_image_dir():
    # Predict
    # TODO: Hardcoding
    # Put all images in sample_images/test folder
    dataset_predict_path='sample_images'
    #dataset_predict_path='temp'
    logging.debug('dataset_predict_path {}'.format(dataset_predict_path))

    predict_datagen = ImageDataGenerator(rescale=1. / 255)

    predict_generator = predict_datagen.flow_from_directory(
        dataset_predict_path,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='sparse',                                                                           # Binary to Multi classification changes
        save_to_dir=None,
        shuffle=False)

    nb_predict_samples = get_images_count_recursive(dataset_predict_path)
    logging.debug('nb_predict_samples {}'.format(nb_predict_samples))

    prediction = model.predict_generator(predict_generator, nb_predict_samples // batch_size, verbose=1)
    logging.debug('\n\nprediction \n{}'.format(prediction))


    # Display predictions
    matches=[]
    for root, dirnames, filenames in os.walk(os.path.join(dataset_predict_path,'test')):
        for filename in fnmatch.filter(filenames, '*.jpg'):
            matches.append(os.path.join(root, filename))

    for index,preds in enumerate(prediction):
        logging.debug('\n{}'.format((matches[index])))
        for index2, pred in enumerate(preds):
            logging.debug('class_names {}'.format(class_names[index2]))
            logging.debug('pred {0:6f}'.format(float(pred))) 
Author: abhishekrana, Project: DeepFashion, Lines of code: 38, Source file: test_multi.py

Example 12: save_bottlebeck_features_btl

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def save_bottlebeck_features_btl():

    dataset_btl_path = 'dataset_btl/train'
    batch_size = 1

    datagen = ImageDataGenerator(rescale=1. / 255)

    # build the VGG16 network
    model = applications.VGG16(include_top=False, weights='imagenet')                               # exclude 3 FC layers on top of network

    score_iou_btl_g, nb_btl_samples = get_images_count_recursive(dataset_btl_path)
    logging.debug('score_iou_btl_g {}'.format(score_iou_btl_g))
    logging.debug('nb_btl_samples {}'.format(nb_btl_samples))


    ## Train
    generator = datagen.flow_from_directory(
        dataset_btl_path,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        classes=None,                                                                               #  the order of the classes, which will map to the label indices, will be alphanumeric
        class_mode=None,                                                                            # "categorical": 2D one-hot encoded labels; "None": yield batches of data, no labels; "sparse" will be 1D integer labels.
        save_to_dir='temp',
        shuffle=False)                                                                              # Don't shuffle else [class index = alphabetical folder order] logic used below might become wrong; first 1000 images will be cats, then 1000 dogs
    logging.info('generator.class_indices {}'.format(generator.class_indices))
                                                                                                    # classes: If not given, the order of the classes, which will map to the label indices, will be alphanumeric
    bottleneck_features_btl = model.predict_generator(
        generator, nb_btl_samples // batch_size)
    logging.debug('bottleneck_features_btl {}'.format(bottleneck_features_btl.shape))           # e.g. (10534, 4, 4, 512), where the train images are Blazer+Jeans = 5408+5126 = 10534 images

    # save the output as a Numpy array
    logging.debug('Saving bottleneck_features_btl...')
    np.save('output/bottleneck_features_btl.npy', bottleneck_features_btl)
Author: abhishekrana, Project: DeepFashion, Lines of code: 36, Source file: train_multi_v2.py
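
The saved bottleneck features are typically consumed by a small fully connected "top" model trained on the frozen VGG16 output. A minimal sketch of that follow-up step, assuming integer labels are available in the same (unshuffled, alphanumeric) order as the directory iterator above; names such as top_model, num_classes and train_labels are illustrative, not taken from the DeepFashion code:

import numpy as np
from keras.models import Sequential
from keras.layers import Flatten, Dense, Dropout

num_classes = 10  # placeholder: set to the number of clothing categories

# Load the features produced by save_bottlebeck_features_btl().
train_data = np.load('output/bottleneck_features_btl.npy')
# Placeholder labels: one integer class per image, in generator order.
train_labels = np.random.randint(0, num_classes, len(train_data))

top_model = Sequential([
    Flatten(input_shape=train_data.shape[1:]),  # (4, 4, 512) -> 8192
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
top_model.compile(optimizer='rmsprop',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
top_model.fit(train_data, train_labels, epochs=10, batch_size=32)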

Example 13: load_and_preprocess_data_generator

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def load_and_preprocess_data_generator():

        # TBD
        train_data_dir = "dataset2/train"
        validation_data_dir = "dataset2/validation"

        # Initialize the train and test generators with data augmentation
        train_datagen = ImageDataGenerator(rescale = 1./255, shear_range=0.2, horizontal_flip = True, fill_mode = "nearest",
                                           zoom_range = 0.3, width_shift_range = 0.3, height_shift_range=0.3,
                                           rotation_range=30)

        test_datagen = ImageDataGenerator(rescale = 1./255, shear_range=0.2, horizontal_flip = True, fill_mode = "nearest",
                                          zoom_range = 0.3, width_shift_range = 0.3, height_shift_range=0.3,
                                          rotation_range=30)

        train_generator = train_datagen.flow_from_directory(train_data_dir, target_size = (img_height, img_width),
                                                            batch_size = batch_size, class_mode = "categorical")

        validation_generator = test_datagen.flow_from_directory(validation_data_dir, target_size = (img_height, img_width),
                                                                class_mode = "categorical")

        # HARDCODING
        input_shape = (img_width, img_height, img_channels)
        logging.debug('input_shape {}'.format(input_shape))

        return train_generator, validation_generator, input_shape 
Author: abhishekrana, Project: DeepFashion, Lines of code: 28, Source file: cnn.py

Example 14: get_batch_predictions

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def get_batch_predictions(path, batch_size=32):
    """
        Path: path to the image directory
        batch_size: default batch size is 32

        Return: batches and vector representation of each images
    """
    model = VGGFace(include_top=False, input_shape=(3, 224, 224), pooling='max')
    gen = image.ImageDataGenerator(rescale=1./255)

    _batches = gen.flow_from_directory(path, target_size=(224, 224), batch_size=batch_size, shuffle=False)
    _predictions = model.predict_generator(_batches, val_samples=_batches.n)
    return _batches, _predictions 
Author: lehgtrung, Project: face-search, Lines of code: 15, Source file: face.py

Example 15: build_data_loader

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import ImageDataGenerator [as alias]
def build_data_loader(X, Y):

    datagen = ImageDataGenerator()
    generator = datagen.flow(
        X, Y, batch_size=BATCH_SIZE)

    return generator 
Author: bolunwang, Project: backdoor, Lines of code: 9, Source file: gtsrb_visualize_example.py


Note: The keras.preprocessing.image.ImageDataGenerator examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce this compilation without permission.