本文整理汇总了Python中keras.preprocessing.image.ImageDataGenerator.standardize方法的典型用法代码示例。如果您正苦于以下问题：Python ImageDataGenerator.standardize方法的具体用法？Python ImageDataGenerator.standardize怎么用？Python ImageDataGenerator.standardize使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.preprocessing.image.ImageDataGenerator的用法示例。
在下文中一共展示了ImageDataGenerator.standardize方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# Required import: from keras.preprocessing.image import ImageDataGenerator
# Method used: keras.preprocessing.image.ImageDataGenerator.standardize
# ......... part of the code is omitted here .........
# NOTE(review): truncated extract from inside a larger `main()`; the original
# indentation has been lost and names such as `da`, `X_train`, `X_test`,
# `Y_train`, `Y_test`, `model`, `batch_size`, `nb_epoch`, `model_chk_path`,
# `history_cb`, `callbacks`, `time` and `np` are defined in the omitted part.
else:
hsv_augmentation = None
# This will do preprocessing and realtime data augmentation:
# NOTE(review): `hsv_augmentation` is not a parameter of stock Keras
# ImageDataGenerator -- presumably a patched/forked Keras build; confirm.
datagen = ImageDataGenerator(
# set input mean to 0 over the dataset
featurewise_center=da['featurewise_center'],
# set each sample mean to 0
samplewise_center=da['samplewise_center'],
# divide inputs by std of the dataset
featurewise_std_normalization=False,
# divide each input by its std
samplewise_std_normalization=da['samplewise_std_normalization'],
zca_whitening=da['zca_whitening'],
# randomly rotate images in the range (degrees, 0 to 180)
rotation_range=da['rotation_range'],
# randomly shift images horizontally (fraction of total width)
width_shift_range=da['width_shift_range'],
# randomly shift images vertically (fraction of total height)
height_shift_range=da['height_shift_range'],
horizontal_flip=da['horizontal_flip'],
vertical_flip=da['vertical_flip'],
hsv_augmentation=hsv_augmentation,
zoom_range=da['zoom_range'],
shear_range=da['shear_range'],
channel_shift_range=da['channel_shift_range'])
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
# NOTE(review): `seed=0` is passed by keyword and fixes the randomness of
# the fit (e.g. ZCA sampling) -- matches ImageDataGenerator.fit's signature.
datagen.fit(X_train, seed=0)
# Apply normalization to test data, sample by sample, using the
# statistics fitted on X_train above.
for i in range(len(X_test)):
X_test[i] = datagen.standardize(X_test[i])
# Fit the model on the batches generated by datagen.flow().
steps_per_epoch = X_train.shape[0] // batch_size
# Save an initial pre-training checkpoint; the '.00.' -> '.00.a.' rename
# distinguishes it from the epoch-numbered checkpoints written later.
model.save(model_chk_path.format(epoch=0).replace('.00.',
'.00.a.'))
t0 = time.time()
# NOTE(review): `fit_generator` is the legacy Keras API (deprecated in
# favor of `fit` in later Keras versions) -- fine for the Keras vintage
# this snippet targets.
model.fit_generator(datagen.flow(X_train, Y_train,
batch_size=batch_size),
steps_per_epoch=steps_per_epoch,
epochs=nb_epoch,
validation_data=(X_test, Y_test),
callbacks=callbacks)
t1 = time.time()
# Train one epoch without augmentation to make sure data distribution
# is fit well.  `initial_epoch=len(loss_history)` resumes the epoch
# counter where the augmented phase stopped.
loss_history = history_cb.history["loss"]
epochs_augmented_training = len(loss_history)
model.fit(X_train, Y_train,
batch_size=batch_size,
epochs=nb_epoch,
validation_data=(X_test, Y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=len(loss_history))
t2 = time.time()
# Collect the training curves for later serialization/plotting.
loss_history = history_cb.history["loss"]
acc_history = history_cb.history["acc"]
val_acc_history = history_cb.history["val_acc"]
np_loss_history = np.array(loss_history)
np_acc_history = np.array(acc_history)
np_val_acc_history = np.array(val_acc_history)
# (extract is cut off mid-statement below)
history_data = zip(list(range(1, len(np_loss_history) + 1)),
示例2: range
# Required import: from keras.preprocessing.image import ImageDataGenerator
# Method used: keras.preprocessing.image.ImageDataGenerator.standardize
# NOTE(review): Python 2 fragment (bare `print` statement below); the
# original indentation has been lost.  `log`, `load_imgs`, `labels`,
# `N_TRAIN`, `N_TRAIN_TEST`, `datagen`, `np`, `Classifier`, `Convolution`,
# `Native`, `lasagne` and `nl` are defined in the omitted part of the file.
# Read the held-out test images: files N_TRAIN+1 .. N_TRAIN+N_TRAIN_TEST
# taken from the train/ directory.
log.info('Loading Testing images.')
test_images_full_info = []
for i in range(N_TRAIN+1, N_TRAIN+N_TRAIN_TEST+1):
#print '\rLoading:',i-(N_TRAIN),'/',N_TRAIN_TEST,'.',
test_images_full_info.append(load_imgs('train/' + str(i) + '.png'))
print '\n'
# Preprocess the test images: pack them into a (N, 1, 32, 32) array and
# apply the same per-sample standardization the generator was fitted with.
log.info('Preprosseing Test images.')
ZZ = np.array(test_images_full_info)
Z = np.zeros((len(ZZ),1,32,32))
for i in range(len(ZZ)):
Z[i] = datagen.standardize(ZZ[i])
# Labels for the same index range as the test images loaded above.
y_test = np.array(labels[N_TRAIN+1:N_TRAIN+1+N_TRAIN_TEST])
log.info('Starting Classifier.')
# Build the CNN -- NOTE(review): presumably sknn.mlp.Classifier wrapping
# lasagne layers; confirm.  The constructor call is truncated below
# (extract is cut off mid-expression).
nn = Classifier(
layers=[
Convolution("Rectifier",channels=64,kernel_shape=(3,3)),
Native(lasagne.Conv2DLayer, num_filters=32,filter_size=(3,3), nonlinearity=nl.leaky_rectify),
Native(lasagne.MaxPool2DLayer, pool_size=(2,2)),
Native(lasagne.DropoutLayer, p=0.25),
Native(lasagne.Conv2DLayer, num_filters=64,filter_size=(3,3), nonlinearity=nl.leaky_rectify),
Native(lasagne.Conv2DLayer, num_filters=64,filter_size=(3,3), nonlinearity=nl.leaky_rectify),
Native(lasagne.MaxPool2DLayer, pool_size=(2,2)),
#Convolution("Rectifier",channels=100,kernel_shape=(30,30)),