本文整理汇总了Python中keras.preprocessing.image.ImageDataGenerator.fit方法的典型用法代码示例。如果您正苦于以下问题:Python ImageDataGenerator.fit方法的具体用法?Python ImageDataGenerator.fit怎么用?Python ImageDataGenerator.fit使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.preprocessing.image.ImageDataGenerator
的用法示例。
在下文中一共展示了ImageDataGenerator.fit方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: augment_data
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def augment_data(train_data):
    """Create an augmentation generator (rotation + horizontal flip) fitted on train_data."""
    generator = ImageDataGenerator(rotation_range=20, horizontal_flip=True)
    # fit() computes any data-dependent statistics before flow() is used.
    generator.fit(train_data)
    return generator
开发者ID:bio-ontology-research-group,项目名称:neural-network-plant-trait-classification,代码行数:9,代码来源:playground.py
示例2: data
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def data():
    """Load CIFAR-10, scale pixels to [0, 1], one-hot encode the labels, and
    build an augmentation generator fitted on the training images.

    Returns:
        (datagen, X_train, Y_train, X_test, Y_test)
    """
    n_classes = 10
    # CIFAR-10 arrives pre-shuffled and pre-split into train/test.
    (x_tr, labels_tr), (x_te, labels_te) = cifar10.load_data()
    print('X_train shape:', x_tr.shape)
    print(x_tr.shape[0], 'train samples')
    print(x_te.shape[0], 'test samples')
    # Integer class labels -> one-hot matrices.
    y_tr = np_utils.to_categorical(labels_tr, n_classes)
    y_te = np_utils.to_categorical(labels_te, n_classes)
    # Byte images -> floats in [0, 1].
    x_tr = x_tr.astype('float32') / 255
    x_te = x_te.astype('float32') / 255
    # Preprocessing + realtime augmentation configuration.
    augmenter = ImageDataGenerator(
        featurewise_center=False,             # no dataset-mean centering
        samplewise_center=False,              # no per-sample centering
        featurewise_std_normalization=False,  # no dataset-std scaling
        samplewise_std_normalization=False,   # no per-sample std scaling
        zca_whitening=False,                  # no ZCA whitening
        rotation_range=0,                     # no random rotation
        width_shift_range=0.1,                # random horizontal shift (fraction of width)
        height_shift_range=0.1,               # random vertical shift (fraction of height)
        horizontal_flip=True,                 # random left-right flips
        vertical_flip=False)                  # no up-down flips
    # fit() would compute mean/std/ZCA components; effectively a no-op here
    # since every featurewise option is disabled, but kept for API parity.
    augmenter.fit(x_tr)
    return augmenter, x_tr, y_tr, x_te, y_te
示例3: main
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def main():
    """Build, train, and evaluate the model based on the command-line mode.

    Modes (sys.argv[1]):
      "test" - skip training (nb_epoch = 0) and load weights from sys.argv[2].
      "add"  - fold the first validation split into the training set.

    NOTE(review): relies on module-level globals (nb_epoch, WEIGHTS_FILE, lr,
    batch_size, X_train, Y_train, X_val1, Y_val1, Model, predict_test) that
    are defined elsewhere in the file — confirm against the full module.
    """
    model = Model()
    if (sys.argv[1] == "test"):
        # Evaluation-only run: zero training epochs, weights come from a file.
        global nb_epoch
        nb_epoch = 0
        global WEIGHTS_FILE
        WEIGHTS_FILE = sys.argv[2]
    elif(sys.argv[1] == "add"):
        # Train on the union of the training set and the first validation split.
        # (X_val1/Y_val1 are still used as validation data below.)
        global X_train, Y_train, X_val1, Y_val1
        X_train = np.concatenate((X_train, X_val1), axis=0)
        Y_train = np.concatenate((Y_train, Y_val1), axis=0)
    adam = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam)
    # Real-time augmentation: mild rotations and shifts, no flips.
    datagen = ImageDataGenerator(
        featurewise_center=False,
        featurewise_std_normalization=False,
        rotation_range=15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=False)
    datagen.fit(X_train)
    # Keep only the best weights (by val_loss); stop after 10 stagnant epochs.
    callbacks = [ModelCheckpoint(WEIGHTS_FILE, monitor='val_loss', verbose=1, save_best_only=True, mode='auto'),
                 EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')]
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                        samples_per_epoch=len(X_train), nb_epoch=nb_epoch, validation_data=(X_val1, Y_val1),
                        show_accuracy=True, callbacks=callbacks)
    # Restore the best checkpoint before scoring the test set.
    model.load_weights(WEIGHTS_FILE)
    predict_test(model)
示例4: fit
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def fit(self, x, y, doRTA):
    """Train self.model on (x, y); when doRTA is truthy, use real-time augmentation."""
    if doRTA == False:
        # Plain training straight on the arrays.
        self.model.fit({"input": x, "output": y}, nb_epoch=self.epochs, batch_size=self.batch_size)
        return
    # Augmentation pipeline: featurewise mean/std normalization plus random
    # rotations, shifts, and horizontal flips.
    augmenter = ImageDataGenerator(
        featurewise_center=True,
        samplewise_center=False,
        featurewise_std_normalization=True,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True,
        vertical_flip=False)
    # Learn the dataset mean/std used by the featurewise options above.
    augmenter.fit(x)
    for epoch in range(self.epochs):
        print('-' * 40)
        print('Epoch', epoch)
        print('-' * 40)
        print('Training...')
        # Manual batch loop with a progress bar over augmented batches.
        progress = generic_utils.Progbar(x.shape[0])
        for batch_x, batch_y in augmenter.flow(x, y):
            loss = self.model.train_on_batch({"input": batch_x, "output": batch_y})
            progress.add(batch_x.shape[0], values=[('train loss', loss[0])])
示例5: train
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def train(self):
    """Train the network on CIFAR-10 with shift/flip augmentation and save it."""
    # Data: load, one-hot encode labels, color-preprocess the images.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    y_train = keras.utils.to_categorical(y_train, self.num_classes)
    y_test = keras.utils.to_categorical(y_test, self.num_classes)
    x_train, x_test = self.color_preprocessing(x_train, x_test)

    # Network.
    model = self.build_model()
    model.summary()

    # Callbacks: best-by-val_loss checkpointing, live plotting, TensorBoard.
    checkpoint = ModelCheckpoint(self.model_filename,
                                 monitor='val_loss',
                                 verbose=0,
                                 save_best_only=True,
                                 mode='auto')
    plot_callback = PlotLearning()
    tb_cb = TensorBoard(log_dir=self.log_filepath, histogram_freq=0)
    cbks = [checkpoint, plot_callback, tb_cb]

    # Augmentation: horizontal flips plus 12.5% shifts, padding with zeros.
    print('Using real-time data augmentation.')
    datagen = ImageDataGenerator(horizontal_flip=True,
                                 width_shift_range=0.125,
                                 height_shift_range=0.125,
                                 fill_mode='constant',
                                 cval=0.)
    datagen.fit(x_train)

    # Train and persist the final model.
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=self.batch_size),
                        steps_per_epoch=self.iterations,
                        epochs=self.epochs,
                        callbacks=cbks,
                        validation_data=(x_test, y_test))
    model.save(self.model_filename)
    self._model = model
开发者ID:AhlamMD,项目名称:Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials,代码行数:36,代码来源:network_in_network.py
示例6: preprocess_data
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def preprocess_data(X_train, y_train, X_val, y_val, X_test, y_test):
    """Scale and mean-center the image splits, one-hot encode the labels, and
    return a fitted shift/flip augmentation generator together with the data."""
    print('start preprocess...')
    X_train = scale_data(X_train)
    X_val = scale_data(X_val)
    X_test = scale_data(X_test)
    # Subtract the mean, per sample and per color channel.
    X_train, X_val, X_test = im.mean2(X_train, X_val, X_test)
    # Optional ZCA whitening per channel (disabled):
    # X_train = im.whiten(X_train, epsilon=0.1)
    # X_test = im.whiten(X_test, epsilon=0.1)
    generator = ImageDataGenerator(width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest',
                                   dim_ordering='th')
    generator.fit(X_train)
    # Labels -> one-hot matrices.
    y_train = to_categorical(y_train)
    y_val = to_categorical(y_val)
    y_test = to_categorical(y_test)
    print('...done')
    return generator, X_train, y_train, X_val, y_val, X_test, y_test
示例7: Machine_Generator
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
class Machine_Generator(Machine_cnn_lenet):
    """LeNet-style machine whose training runs through an ImageDataGenerator."""

    def __init__(self, X, y, nb_classes=2, steps_per_epoch=10, fig=True,
                 gen_param_dict=None):
        super().__init__(X, y, nb_classes=nb_classes, fig=fig)
        self.set_generator(steps_per_epoch=steps_per_epoch, gen_param_dict=gen_param_dict)

    def set_generator(self, steps_per_epoch=10, gen_param_dict=None):
        """Create the augmentation generator (a default, no-op one when no
        params are given) and fit it on the training images."""
        if gen_param_dict is None:
            self.generator = ImageDataGenerator()
        else:
            self.generator = ImageDataGenerator(**gen_param_dict)
        print(self.data.X_train.shape)
        # Fixed seed so augmentation statistics are reproducible.
        self.generator.fit(self.data.X_train, seed=0)
        self.steps_per_epoch = steps_per_epoch

    def fit(self, nb_epoch=10, batch_size=64, verbose=1):
        """Run generator-based training and return the Keras History object."""
        data = self.data
        flow = self.generator.flow(data.X_train, data.Y_train, batch_size=batch_size)
        history = self.model.fit_generator(flow,
                                           epochs=nb_epoch,
                                           steps_per_epoch=self.steps_per_epoch,
                                           validation_data=(data.X_test, data.Y_test))
        return history
示例8: train
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def train():
    """Train VGG-16 on the Yelp photo dataset with SGD + momentum, optionally
    using real-time data augmentation."""
    model_ = 'VGG_16'
    batch_size = 8
    nb_classes = 5
    nb_epoch = 200
    data_augmentation = True

    # Input image dimensions depend on which architecture is selected.
    if model_ in MODELS[0:2]:
        img_rows, img_cols = 224, 224
    # NOTE(review): `model_ in MODELS[3]` is a substring test against one
    # entry — possibly intended as `model_ == MODELS[3]`; confirm upstream.
    if model_ in MODELS[3]:
        img_rows, img_cols = 299, 299
    img_channels = 3  # Yelp images are RGB

    # Data, already shuffled and split between train and test sets.
    (X_train, y_train), (X_test, y_test) = yelp_data(dtype=np.float32, grayscale=False, pixels=img_rows, batches=3,
                                                     model='VGG_16', data_dir='/home/rcamachobarranco/datasets')
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    model = VGG_16(img_rows, img_cols, img_channels, nb_classes)
    # SGD with momentum and a small weight decay on the learning rate.
    sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(X_train, y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, show_accuracy=True,
                  validation_data=(X_test, y_test), shuffle=True)
    else:
        print('Using real-time data augmentation.')
        # Shifts plus horizontal flips; all normalization options disabled.
        datagen = ImageDataGenerator(
            featurewise_center=False,
            samplewise_center=False,
            featurewise_std_normalization=False,
            samplewise_std_normalization=False,
            zca_whitening=False,
            rotation_range=0,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
            vertical_flip=False)
        # No-op statistics pass (all featurewise options are off).
        datagen.fit(X_train)
        model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=nb_epoch, show_accuracy=True,
                            validation_data=(X_test, y_test),
                            nb_worker=1)
示例9: train
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def train():
    """Outer epoch loop that trains on generator-produced chunks and evaluates
    on a fixed test set each epoch; returns elapsed training time in seconds.

    NOTE(review): relies on module-level globals defined elsewhere in the
    file (model, classes, nb_epoch, nb_epoch_per, split_size, BatchGenerator,
    PredictionMatrix, lh, checkpointer, direct, save_name) — confirm against
    the full module.
    """
    (X_test, y_test, y_conf) = load.load_test_data()
    Y_test = np_utils.to_categorical(y_test, classes)
    print(X_test.shape[0], 'test samples')
    # Scale test pixels to [0, 1] to match the training preprocessing.
    X_test = X_test.astype("float32")
    X_test /= 255
    datagen = ImageDataGenerator(rotation_range=30, width_shift_range=0.01, height_shift_range=0.01, horizontal_flip=True, vertical_flip=True)
    t0=time.time()
    for e in range(nb_epoch):
        print ("******** Epoch %d ********" % (e+1))
        print ("Epoch Number: " + str(e))
        for X_batch, y_batch, class_weight in BatchGenerator():
            # Re-fit the generator statistics on each chunk before training on it.
            datagen.fit(X_batch)
            model.fit_generator(datagen.flow(X_batch, y_batch, batch_size=18, shuffle=True),
                                callbacks=[lh,checkpointer],
                                samples_per_epoch=split_size,
                                nb_epoch=nb_epoch_per,
                                validation_data=(X_test,Y_test)
                                ,class_weight=class_weight
                                )
        # Per-epoch evaluation on the held-out test set.
        y_pred = model.predict_classes(X_test, batch_size=20)
        (accuracy, correct)=PredictionMatrix()
        #model.save_weights((direct + '/weights/' + save_name[:-5] + 'E-%d.hdf5' ) % (e+1), overwrite=True)
        #print ("Weights saved to " + direct + '/weights/' + save_name[:-5] + 'E-%d.hdf5' % (e+1))
    t1=time.time()
    tyme = t1-t0
    print("Training completed in %f seconds" % tyme)
    # Persist the final weights when a save name was configured.
    if save_name != '':
        model.save_weights(direct + '/weights/' + save_name, overwrite=True)
        print ("Weights saved to " + save_name)
        print ("Final training weights saved to " + save_name)
    return tyme
示例10: train
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def train(self, model):
    """Train `model` on CIFAR-100 with augmentation and a halving learning-rate
    schedule; saves weights to 'cifar100vgg.h5' and returns the trained model."""
    # Training hyper-parameters.
    batch_size = 128
    maxepoches = 250
    learning_rate = 0.1
    lr_decay = 1e-6

    # Data: load, normalize, one-hot encode.
    (x_train, y_train), (x_test, y_test) = cifar100.load_data()
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train, x_test = self.normalize(x_train, x_test)
    y_train = keras.utils.to_categorical(y_train, self.num_classes)
    y_test = keras.utils.to_categorical(y_test, self.num_classes)

    lrf = learning_rate

    # Augmentation: mild rotations/shifts plus horizontal flips; all
    # normalization/whitening options disabled.
    datagen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=15,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=False)
    # Statistics pass (std/mean/ZCA would be computed here if enabled).
    datagen.fit(x_train)

    # SGD with momentum; rebuilt (and the model recompiled) at each LR drop.
    sgd = optimizers.SGD(lr=lrf, decay=lr_decay, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    # One fit_generator call per epoch so the LR can halve every 25 epochs.
    for epoch in range(1, maxepoches):
        if epoch % 25 == 0 and epoch > 0:
            lrf /= 2
            sgd = optimizers.SGD(lr=lrf, decay=lr_decay, momentum=0.9, nesterov=True)
            model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
        historytemp = model.fit_generator(
            datagen.flow(x_train, y_train, batch_size=batch_size),
            steps_per_epoch=x_train.shape[0] // batch_size,
            epochs=epoch,
            validation_data=(x_test, y_test),
            initial_epoch=epoch - 1)

    model.save_weights('cifar100vgg.h5')
    return model
示例11: train
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def train(self, save_model_to_file=True, rotation_range=20, width_shift_range=0.5, height_shift_range=0.2):
    """ Trains the model using the dataset in letters_folder """
    # Load grayscale letter images; the label is encoded in the filename
    # prefix before the first underscore.
    data = []
    labels = []
    for imgName in listdir(self.letters_folder):
        img = cv2.imread(self.letters_folder + "/" + imgName, cv2.IMREAD_GRAYSCALE)
        data.append(img)
        labels.append(self.letters.index(imgName.split('_')[0]))
    data = np.array(data)
    labels = np.array(labels)

    # Hold out a third of the samples for validation.
    X_train, X_test, y_train, y_test = train_test_split(
        data, labels, test_size=0.33, random_state=42)

    # Shape to (n, 1, rows, cols) single-channel tensors scaled to [0, 1].
    X_train = X_train.reshape(X_train.shape[0], 1, self.img_rows, self.img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, self.img_rows, self.img_cols)
    X_train = X_train.astype('float32') / 255
    X_test = X_test.astype('float32') / 255

    # Integer labels -> one-hot matrices.
    Y_train = np_utils.to_categorical(y_train, self.nb_classes)
    Y_test = np_utils.to_categorical(y_test, self.nb_classes)

    # Real-time augmentation: random rotations plus horizontal/vertical shifts.
    datagen = ImageDataGenerator(rotation_range=rotation_range,
                                 width_shift_range=width_shift_range,
                                 height_shift_range=height_shift_range)
    datagen.fit(X_train)

    # Fit the model on batches produced by datagen.flow().
    history = self.model.fit_generator(datagen.flow(X_train, Y_train, batch_size=self.batch_size),
                                       samples_per_epoch=X_train.shape[0],
                                       nb_epoch=self.nb_epoch,
                                       validation_data=(X_test, Y_test))

    # Plot train vs. validation accuracy over epochs.
    plt.figure(figsize=(10, 10))
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()

    if save_model_to_file:
        self.model.save_weights(self.weights_path, overwrite=True)
示例12: hard_train
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def hard_train(data_prefix, prefix, seed, col):
    """Train a systole/diastole model (chosen by the parity of `col`) with
    augmentation, checkpointing both latest and best weights, and write the
    best validation loss to `prefix + 'val_loss.txt'`."""
    what = ['systole', 'diastole'][col % 2]
    print('We are going to train hard {} {}'.format(what, col))
    print('Loading training data...')
    X, y = load_train_data(data_prefix, seed)
    X_train, y_train, X_test, y_test = split_data(X, y, split_ratio=0.2)

    model = get_model()

    nb_iter = 200
    epochs_per_iter = 1  # NOTE(review): unused below — kept for parity
    batch_size = 32
    min_val = sys.float_info.max  # NOTE(review): unused below — kept for parity

    # Augmentation: rotations, shifts, and flips in both directions.
    datagen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=15,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=True)

    print('-' * 50)
    print('Training...')
    print('-' * 50)
    # Statistics pass (no-op here: all featurewise options are disabled).
    datagen.fit(X_train)

    # Two checkpoints: the rolling latest weights and the best-by-val_loss.
    checkpointer_best = ModelCheckpoint(filepath=prefix + "weights_{}_best.hdf5".format(what), verbose=1, save_best_only=True)
    checkpointer = ModelCheckpoint(filepath=prefix + "weights_{}.hdf5".format(what), verbose=1, save_best_only=False)

    hist = model.fit_generator(datagen.flow(X_train, y_train[:, col], batch_size=batch_size),
                               samples_per_epoch=X_train.shape[0],
                               nb_epoch=nb_iter, show_accuracy=False,
                               validation_data=(X_test, y_test[:, col]),
                               callbacks=[checkpointer, checkpointer_best],
                               nb_worker=4)

    loss = hist.history['loss'][-1]
    val_loss = hist.history['val_loss'][-1]
    # Record the best validation loss seen over the whole run.
    with open(prefix + 'val_loss.txt', mode='w+') as f:
        f.write(str(min(hist.history['val_loss'])))
        f.write('\n')
示例13: train_model
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def train_model(model, dataset):
    """
    Train convolutional neural network model.
    Provides the option of using data augmentation to minimize over-fitting.
    Options used currently are:
    rotation_range - rotates the image.
    width_shift_range - shifts the position of the image horizontally.
    height_shift_range - shifts the position of the image vertically.
    horizontal_flip - flips the image horizontally.
    """
    print("\n- TRAINING MODEL -----------------------------------------------")
    if not DATA_AUGMENTATION:
        # Straight training on the raw arrays.
        print('Not using data augmentation.')
        model.fit(dataset.train_data, dataset.train_labels,
                  batch_size=BATCH_SIZE, nb_epoch=NB_EPOCH, shuffle=True,
                  verbose=1, show_accuracy=True,
                  validation_data=(dataset.validate_data,
                                   dataset.validate_labels))
        return model

    print('Using real-time data augmentation.')
    # Preprocessing + realtime augmentation pipeline.
    datagen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=0.1,        # rotate image between 0 and 10 degrees randomly
        width_shift_range=0.1,     # shift image by 1px horizontally randomly
        height_shift_range=0.1,    # shift image by 1px vertically randomly
        horizontal_flip=True,      # flip the image horizontally randomly
        vertical_flip=False)
    # Featurewise statistics pass (no-op: those options are disabled).
    datagen.fit(dataset.train_data)
    # Fit the model on batches produced by datagen.flow().
    model.fit_generator(datagen.flow(dataset.train_data,
                                     dataset.train_labels,
                                     shuffle=True, batch_size=BATCH_SIZE),
                        samples_per_epoch=dataset.train_data.shape[0],
                        nb_epoch=NB_EPOCH, verbose=1, show_accuracy=True,
                        validation_data=(dataset.validate_data,
                                         dataset.validate_labels),
                        nb_worker=1)
    return model
示例14: train
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def train(data, Model, file_name, num_epochs=50, batch_size=128, init=None):
    """Train `Model` on `data` with SGD plus an exponentially decaying learning
    rate and light augmentation (disabled for MNIST); prints test accuracy and
    optionally saves the weights to `file_name`. Returns the trained model."""
    def fn(correct, predicted):
        # Softmax cross-entropy computed straight on the logits.
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted)

    model = Model(None).model
    print(model.summary())

    def get_lr(epoch):
        # Exponential decay: the LR halves ten times over the full run.
        # `base_lr` is read via closure; it is bound below before training.
        return base_lr * (.5 ** (epoch / num_epochs * 10))

    # The scheduler overrides the LR each epoch, so the optimizer starts at 0.
    sgd = SGD(lr=0.00, momentum=0.9, nesterov=False)
    schedule = LearningRateScheduler(get_lr)
    model.compile(loss=fn,
                  optimizer=sgd,
                  metrics=['accuracy'])

    if Model == MNISTModel:
        # MNIST: augmentation effectively disabled.
        datagen = ImageDataGenerator(
            rotation_range=0,
            width_shift_range=0.0,
            height_shift_range=0.0,
            horizontal_flip=False)
    else:
        datagen = ImageDataGenerator(
            rotation_range=10,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True)
    base_lr = 0.1  # same value in both branches of the original

    datagen.fit(data.train_data)
    model.fit_generator(datagen.flow(data.train_data, data.train_labels,
                                     batch_size=batch_size),
                        steps_per_epoch=data.train_data.shape[0] // batch_size,
                        epochs=num_epochs,
                        verbose=1,
                        validation_data=(data.validation_data, data.validation_labels),
                        callbacks=[schedule])
    print('Test accuracy:', np.mean(np.argmax(model.predict(data.test_data), axis=1) == np.argmax(data.test_labels, axis=1)))
    if file_name != None:
        model.save_weights(file_name)
    return model
示例15: get_datagen
# 需要导入模块: from keras.preprocessing.image import ImageDataGenerator [as 别名]
# 或者: from keras.preprocessing.image.ImageDataGenerator import fit [as 别名]
def get_datagen(X):
    """Build a pass-through ImageDataGenerator (all options off) and fit it on
    a random 10,000-row sample of X."""
    datagen = ImageDataGenerator(
        featurewise_center=False,
        featurewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=0,
        width_shift_range=0,
        height_shift_range=0,
        horizontal_flip=False,
        vertical_flip=False)
    # Fit on a random subsample (drawn with replacement) to bound the cost;
    # a no-op here since every featurewise option is disabled.
    sample = X[np.random.choice(X.shape[0], 10000), :]
    datagen.fit(sample)
    return datagen