

Python Sequential.test_on_batch Method Code Examples

This article collects typical code examples of the Python method keras.models.Sequential.test_on_batch. If you are wondering what exactly Sequential.test_on_batch does, how to call it, or where to find working examples, the curated samples below should help. You can also explore further usage examples of keras.models.Sequential itself.


The following shows 15 code examples of the Sequential.test_on_batch method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
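Before the collected examples, here is a minimal, self-contained sketch of the method itself. It assumes a Keras 1.x-era installation, where compile() accepts metrics=['accuracy'] and test_on_batch then returns [loss, accuracy]; several of the examples below instead use the older Keras 0.x keyword accuracy=True. The model and the random X_batch/Y_batch data are illustrative placeholders, not taken from any of the projects cited below.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation

# Build and compile a tiny classifier.
model = Sequential()
model.add(Dense(32, input_dim=20))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd',
              metrics=['accuracy'])

# One batch of random data stands in for a real held-out set.
X_batch = np.random.rand(16, 20).astype('float32')
Y_batch = np.zeros((16, 10), dtype='float32')
Y_batch[np.arange(16), np.random.randint(0, 10, 16)] = 1.0

# test_on_batch computes the metrics on this single batch without
# updating any weights -- the counterpart of train_on_batch, which does.
loss, acc = model.test_on_batch(X_batch, Y_batch)
print('batch loss: %.4f, batch accuracy: %.4f' % (loss, acc))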

Example 1: train_model

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]
def train_model(feature_layers, classification_layers, image_list, nb_epoch, nb_classes, img_rows, img_cols, weights=None): 
    # Create testset data for cross-val
    num_images = len(image_list)
    test_size = int(0.2 * num_images)
    print("Train size: ", num_images-test_size)
    print("Test size: ", test_size)

    model = Sequential()
    for l in feature_layers + classification_layers:
        model.add(l)

    if weights is not None:
        model.set_weights(weights)

    # let's train the model using SGD + momentum (how original).
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    
    print('Using real time data augmentation')
    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print('Training...')
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(num_images-test_size)
        for X_batch, Y_batch in flow(image_list[0:-test_size]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            loss = model.train_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('train loss', loss)])

        print('Testing...')
        # test time!
        progbar = generic_utils.Progbar(test_size)
        for X_batch, Y_batch in flow(image_list[-test_size:]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            score = model.test_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('test loss', score)])
    return model, model.get_weights()
Developer: poojaramesh, Project: capstone, Lines: 43, Source: mitosis_cnn_transfer.py

Example 2: __init__

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]
class LSTM_RNN:

    def __init__(self, look_back, dropout_probability = 0.2, init ='he_uniform', loss='mse', optimizer='rmsprop'):
        self.rnn = Sequential()
        self.look_back = look_back
        self.rnn.add(LSTM(10, stateful = True, batch_input_shape=(1, 1, 1), init=init))
        self.rnn.add(Dropout(dropout_probability))
        self.rnn.add(Dense(1, init=init))
        self.rnn.compile(loss=loss, optimizer=optimizer)

    def batch_train_test(self, trainX, trainY, testX, testY, nb_epoch=150):
        print('Training LSTM-RNN...')
        for epoch in range(nb_epoch):
            print('Epoch '+ str(epoch+1) +'/{}'.format(nb_epoch))
            training_losses = []
            testing_losses = []
            for i in range(len(trainX)):
                y_actual = trainY[i]
                for j in range(self.look_back):
                    training_loss = self.rnn.train_on_batch(np.expand_dims(np.expand_dims(trainX[i][j], axis=1), axis=1),
                                                       np.array([y_actual]))
                    training_losses.append(training_loss)
                self.rnn.reset_states()

            print('Mean training loss = {}'.format(np.mean(training_losses)))

            mean_testing_loss = []
            for i in range(len(testX)):
                for j in range(self.look_back):
                    testing_loss = self.rnn.test_on_batch(np.expand_dims(np.expand_dims(testX[i][j], axis=1), axis=1),
                                                          np.array([testY[i]]))
                    testing_losses.append(testing_loss)
                self.rnn.reset_states()

                # step through the sequence to obtain predictions (y_pred is not used further in this excerpt)
                for j in range(self.look_back):
                    y_pred = self.rnn.predict_on_batch(np.expand_dims(np.expand_dims(testX[i][j], axis=1), axis=1))
                self.rnn.reset_states()

            mean_testing_loss = np.mean(testing_losses)
            print('Mean testing loss = {}'.format(mean_testing_loss))
        return mean_testing_loss
Developer: EAboelhamd, Project: deeplearning-timeseries, Lines: 43, Source: lstm_model.py

Example 3: print

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]
    print('Iteration', iteration)

    print("Training")
    progbar = generic_utils.Progbar(train_num_samples)
    gen = samples_generator(train_sequences, batch_size,
                            num_samples=train_num_samples)
    for X, y in gen:
        loss, accuracy = model.train_on_batch(X, y, accuracy=True)
        progbar.add(batch_size, values=[("train loss", loss),
                    ("train acc", accuracy)])
    print()

    print("Validating")
    progbar = generic_utils.Progbar(valid_num_samples)
    gen = samples_generator(valid_sequences, batch_size,
                            num_samples=valid_num_samples)
    valid_loss = 0
    for X, y in gen:
        loss, accuracy = model.test_on_batch(X, y, accuracy=True)
        progbar.add(batch_size, values=[("valid loss", loss),
                    ("valid acc", accuracy)])
        valid_loss += loss
    print()
    valid_loss /= float(valid_num_samples)

    print("Valid Loss: {}, Best Loss: {}".format(valid_loss, best_loss))
    if valid_loss < best_loss:
        print("Saving model")
        save_model(model, "sentence_model")
        best_loss = valid_loss
Developer: mynameisfiber, Project: nanogenmo2015, Lines: 32, Source: sentence_model.py

Example 4: run

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]
def run():
    datadir = "/reg/d/ana01/temp/davidsch/ImgMLearnFull"
    h5files = glob(os.path.join(datadir, "amo86815_mlearn-r070*.h5"))
    h5files.extend(glob(os.path.join(datadir, "amo86815_mlearn-r071*.h5")))
#    h5files = ["/reg/d/ana01/temp/davidsch/ImgMLearnFull/amo86815_mlearn-r071-c0000.h5",
#               "/reg/d/ana01/temp/davidsch/ImgMLearnFull/amo86815_mlearn-r071-c0001.h5",
#               "/reg/d/ana01/temp/davidsch/ImgMLearnFull/amo86815_mlearn-r071-c0002.h5"]
    assert len(h5files)>0
    
    datareader = H5MiniBatchReader(h5files=h5files,
                                   minibatch_size=32,
                                   validation_size=64,
                                   feature_dataset='xtcavimg',
                                   label_dataset='acq.peaksLabel',
                                   return_as_one_hot=True,
                                   feature_preprocess=['log','mean'],
                                   number_of_batches=None,
                                   class_labels_max_imbalance_ratio=1.0,
                                   add_channel_to_2D='channel_row_column',
                                   max_mb_to_preload_all=None,
                                   random_seed=None,
                                   verbose=True)  

    validation_features, validation_labels = datareader.get_validation_set()

    print("starting to build and compile keras/theano model...")
    sys.stdout.flush()
    t0 = time.time()
    model = Sequential()

    ## layer 1
    kern01_W_init = (0.06/2.0)*scipy.stats.truncnorm.rvs(-2.0, 2.0, size=(8,1,8,8)).astype(np.float32)
    kern01_B_init = np.zeros(8,dtype=np.float32)
    model.add(Convolution2D(8,8,8, border_mode='same', weights=[kern01_W_init, kern01_B_init],
                            input_shape=datareader.features_placeholder_shape()[1:]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(10,10), strides=(13,13)))
    
    ## layer 2
    kern02_W_init = (0.06/2.0)*scipy.stats.truncnorm.rvs(-2.0, 2.0, size=(8,8,6,6)).astype(np.float32)
    kern02_B_init = np.zeros(8,dtype=np.float32)
    model.add(Convolution2D(8,6,6, border_mode='same', weights=[kern02_W_init, kern02_B_init]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(10,10), strides=(13,13)))
    
    model.add(Flatten())
    
    ## layer 3
    H03_W_init =  (0.06/2.0)*scipy.stats.truncnorm.rvs(-2.0, 2.0, size=(96,16)).astype(np.float32)
    H03_B_init = np.zeros(16,dtype=np.float32)
    model.add(Dense(16, weights=[H03_W_init, H03_B_init]))
    model.add(Activation('relu'))
    
    ## layer 4
    H04_W_init =  (0.06/2.0)*scipy.stats.truncnorm.rvs(-2.0, 2.0, size=(16, datareader.num_outputs())).astype(np.float32)
    H04_B_init = np.zeros(datareader.num_outputs(),dtype=np.float32)
    model.add(Dense(datareader.num_outputs(), weights=[H04_W_init, H04_B_init]))
    model.add(Activation('softmax'))

    sgd = SGD(lr=0.01, decay=0.0004, momentum=0.96)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    print("building/compiling theano model took %.2f sec" % (time.time()-t0),)
    sys.stdout.flush()

    for step_number in range(3000):
        t0 = time.time()
        train_features, train_labels = datareader.get_next_minibatch()
        model.train_on_batch(train_features, train_labels)
        print("step %3d took %.2f sec." % (step_number, time.time()-t0))
        sys.stdout.flush()

    print("Starting evaluation.")
    t0 = time.time()
    loss, validation_accuracy = model.test_on_batch(validation_features, validation_labels, accuracy=True, sample_weight=None)
    print("validation accuracy: %.2f%%" % (100.0*validation_accuracy,))
    print("evaluation took %.2f sec" % (time.time()-t0,))
Developer: davidslac, Project: xtcav-mlearn-doc, Lines: 78, Source: keras_simple.py

Example 5: EarlyStopping

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]
model.compile(loss=qri.mae_clip, optimizer=sgd)

# Use early stopping and saving as callbacks
early_stop = EarlyStopping(monitor='val_loss', patience=10)
save_best = ModelCheckpoint("models/%s.mdl" % MDL_NAME, save_best_only=True)
callbacks = [early_stop, save_best]

# Train model
t0 = time.time()
hist = model.fit(train_set[0], train_set[1], validation_data=valid_set,
                 verbose=2, callbacks=callbacks, nb_epoch=1000, batch_size=20)
time_elapsed = time.time() - t0

# Load best model
model.load_weights("models/%s.mdl" % MDL_NAME)

# Print time elapsed and loss on testing dataset
test_set_loss = model.test_on_batch(test_set[0], test_set[1])
print "\nTime elapsed: %f s" % time_elapsed
print "Testing set loss: %f" % test_set_loss

# Save results
qri.save_results("results/%s.out" % MDL_NAME, time_elapsed, test_set_loss)
qri.save_history("models/%s.hist" % MDL_NAME, hist.history)

# Plot training and validation loss
qri.plot_train_valid_loss(hist.history)

# Make predictions
qri.plot_test_predictions(model, train_set)
Developer: akashlevy, Project: Deep-Learn-Oil, Lines: 32, Source: fcn.py

Example 6: print

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]
        inputs /= 255
        loss, acc = model.train_on_batch(inputs, targets)
        train_loss += loss
        train_acc += acc
        train_batches += 1

    # And a full pass over the validation data:
    val_loss = 0
    val_acc = 0
    val_batches = 0
    val_str.reset() # re-start streaming from the top of the validation set
    for inputs, targets, pad in val_str:
        if pad: # not full batch
            break
        inputs /= 255
        loss, acc = model.test_on_batch(inputs, targets)
        val_loss += loss
        val_acc += acc
        val_batches += 1

    # Then we print the results for this epoch:
    print("Epoch {}/{} time {:.3f}s; train loss: {:.6f} acc:{:.6f}; val loss: {:.6f} acc:{:.2f}".format(
        epoch + 1, nb_epoch, time.time() - start_time,
        train_loss/train_batches, train_acc/train_batches,
        val_loss/val_batches, val_acc/val_batches))

# After training, we compute and print the test error:
test_loss = 0
test_acc = 0
test_batches = 0
for inputs, targets, pad in test_str:
Developer: Jameskry, Project: picpac, Lines: 33, Source: cifar10_cnn.py

Example 7: rnnmlp

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]
class rnnmlp():
    def __init__(self, r=(21, 109), dt=0.3):
        self.r = r
        self.dt = dt
        self.rnnModel = Sequential()
        self.maxFeatures = r[1] - r[0] + 1

    def SimpleRNNModel(self, nHidden=120, lr=0.01):
        '''simple RNN model'''
        self.rnnModel.add(SimpleRNN(nHidden, input_shape=(None, self.maxFeatures), activation='sigmoid', return_sequences=True))
        self.rnnModel.add(TimeDistributedDense(self.maxFeatures))
        self.rnnModel.add(Activation('softmax'))
        rmsprop = RMSprop(lr=lr, rho=0.9, epsilon=1e-06)
        self.rnnModel.compile(loss='categorical_crossentropy', optimizer=rmsprop)

    def LSTMModel(self, nHidden=150, lr=0.01):
        '''LSTM model (note: this variant currently uses a GRU layer; the LSTM line is kept commented out)'''
        # print('nHidden: %i\tlr: %.3f' % (nHidden, lr))
        self.rnnModel.add(GRU(nHidden, activation='sigmoid', input_shape=(None, self.maxFeatures), return_sequences=True))
        # self.rnnModel.add(LSTM(nHidden, activation='sigmoid', input_shape=(None, nHidden), return_sequences=True))
        self.rnnModel.add(TimeDistributedDense(nHidden))
        self.rnnModel.add(Activation('relu'))
        self.rnnModel.add(TimeDistributedDense(self.maxFeatures))
        self.rnnModel.add(Activation('softmax'))
        rmsprop = RMSprop(lr=lr, rho=0.9, epsilon=1e-06)
        self.rnnModel.compile(loss='categorical_crossentropy', optimizer=rmsprop)

    def train(self, file_name, weight_save_file, batch_size=1, num_epoch=200):
        '''
        train module:
        train the model;
        file_name: the name of the train or test file
        weight_save_file: where to save the model parameters
        '''
        print('load data ---------------')

        file_train = os.path.join(os.path.split(os.path.dirname(__file__))[0],
                                  'data', file_name, 'train', '*.mid')
        dataset = [midiread(f, self.r, self.dt).piano_roll.astype(theano.config.floatX) for f in glob.glob(file_train)]

        file_test = os.path.join(os.path.split(os.path.dirname(__file__))[0],
                                 'data', file_name, 'test', '*.mid')
        testdataset = [midiread(f, self.r, self.dt).piano_roll.astype(theano.config.floatX) for f in glob.glob(file_test)]
        print('load done --------------')
        try:
            for epoch in range(num_epoch):
                t0 = time.time()
                numpy.random.shuffle(dataset)
                costs = []
                accuracys = []
                for s, sequence in enumerate(dataset):
                    y = numpy.hstack((sequence, numpy.zeros((sequence.shape[0], 1))))
                    x = numpy.roll(y, 1, axis=0)
                    x[0, :] = 0
                    x[0, self.maxFeatures - 1] = 1
                    cost, accuracy = self.rnnModel.train_on_batch(numpy.array([x]), numpy.array([y]), accuracy=True)
                    costs.append(cost)
                    accuracys.append(accuracy)

                print('epoch: %i/%i\tcost: %.5f\taccu: %.5f\ttime: %.4f s' % (epoch + 1, num_epoch, numpy.mean(costs), numpy.mean(accuracys), time.time() - t0))
                sys.stdout.flush()
                test_accu = self.evaluate(testdataset)
                print('test_accu: %.5f' % (numpy.mean(test_accu)))
            self.rnnModel.save_weights(weight_save_file)
        except KeyboardInterrupt:
            print('interrupt by user !')

    def evaluate(self, test_dataset):
        '''
        evaluate module:
        evaluate the model on test data, computing cost and accuracy
        '''
        test_accuracy = []
        for s, sequence in enumerate(test_dataset):
            test_y = numpy.hstack((sequence, numpy.zeros((sequence.shape[0], 1))))
            test_x = numpy.roll(test_y, 1, axis=0)
            test_x[0, :] = 0
            test_x[0, self.maxFeatures - 1] = 1
            cost, accu = self.rnnModel.test_on_batch(numpy.array([test_x]), numpy.array([test_y]), accuracy=True)
            test_accuracy.append(accu)
        return test_accuracy

    def generate(self, init_chord, file_name, LS=False, chord_name=None, chord_file=None, state_file=None, n_steps=80, r=(21, 109)):
        '''
        generate function:
        generate music or chords;
        init_chord: the first note of the generated sequence
        file_name: file in which to save the generated note sequence
        LS: if True, add an L-system and generate chords;
            if False, no L-system, generate music notes
        chord_name: chord name (when LS is True)
        chord_file: notes of all chord types, loaded from file
        state_file: L-system model parameters, loaded from file
        n_steps: the length of the generated sequence
        r: range of notes that count
        '''
#......... (some code omitted here) .........
Developer: chengjunwen, Project: music_rnn, Lines: 103, Source: rnnmlp.py

Example 8: range

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]
        horizontal_flip=False,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)

    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print("Training...")

        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train):
            # note: this test script only evaluates on the training batches; no train_on_batch call is made
            score, acc = model.test_on_batch(X_batch, Y_batch, accuracy=True)
            progbar.add(X_batch.shape[0], values=[("train accuracy", acc)])

        print("Testing...")

        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            score, acc = model.test_on_batch(X_batch, Y_batch, accuracy=True)
            progbar.add(X_batch.shape[0], values=[("test accuracy", acc)])

        # test time!
        for X_batch, Y_batch in datagen.flow(X_te_orig, np.ones((1,X_te_orig.shape[0])), batch_size = X_te_orig.shape[0]):
            y_te = model.predict_classes(X_batch)

        save_out(y_te,labels_string,sorted_files_te,submission_fname)
Developer: naveensr89, Project: chars74k_nsr, Lines: 32, Source: cnn_test.py

Example 9: get_data

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]
                X, y = get_data(files[n:n+batch_size], n)
            else:
                X, y = get_data(files[n:], n)

            X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
            X_train = np.array(X_train)
            X_test = np.array(X_test)
            y_train = np.array(y_train)
            y_test = np.array(y_test)

            # convert class vectors to binary class matrices
            Y_train = np_utils.to_categorical(y_train, nb_classes)
            Y_test = np_utils.to_categorical(y_test, nb_classes)

            model.train_on_batch(X_train, Y_train)
            l, a = model.test_on_batch(X_test, Y_test, accuracy=True)

            acc.append(a)
            loss.append(l)
        print ''
        print "Val_loss", (sum(loss) / len(loss))
        print "Val_acc", (sum(acc) / len(acc))

    # with random batch draws....
    # -------
    # Epoch 1
    # Val_loss 0.889370705837
    # Val_acc 0.479363625794

    # -------
    # Epoch 2
Developer: gravity226, Project: forex_net, Lines: 33, Source: forex_net.py

Example 10: print

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print('Training...')
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train,batch_size=128):
            loss = modelCascade.train_on_batch(X_batch, Y_batch,accuracy=True)
            progbar.add(X_batch.shape[0], values=[('train loss', loss[0]),('train accuracy',loss[1])])      
        print('Testing...')
    # test time!
        accuracyArray = list()
        lossArray = list()
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            score = modelCascade.test_on_batch(X_batch, Y_batch,accuracy=True)
            lossArray.append(score[0])
            accuracyArray.append(score[1])
            progbar.add(X_batch.shape[0], values=[('test loss', score[0]),('test accuracy',score[1])])
        lossIteration1.append(np.mean(lossArray))
        accuracyIteration1.append(np.mean(accuracyArray))            

    weightsLayer1 = modelCascade.layers[0].get_weights()

    print('SECOND ITERATION STARTING')
    #####SECOND ITERATION
    #MODEL THAT IS USED TO GENERATE INPUT HAT
    modelIH = Sequential()
    modelIH.add(Convolution2D(32, 3, 3, border_mode='same',
                        input_shape=(img_channels, img_rows, img_cols),weights=weightsLayer1))
    modelIH.add(Activation('relu'))
Developer: EnriqueSMarquez, Project: CNNs_RelatedProjects, Lines: 33, Source: CNN_CIFAR_10_4.py

Example 11: main

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]

#......... (some code omitted here) .........
        # model.add(Activation('relu'))
        # model.add(MaxPooling2D(poolsize=(nb_pool[0], nb_pool[0])))
        # model.add(Dropout(0.25))
        #
        # model.add(Convolution2D(nb_filters[1], nb_filters[0], nb_conv[0], nb_conv[0], border_mode='full'))
        # model.add(Activation('relu'))
        # model.add(Convolution2D(nb_filters[1], nb_filters[1], nb_conv[1], nb_conv[1]))
        # model.add(Activation('relu'))
        # model.add(MaxPooling2D(poolsize=(nb_pool[1], nb_pool[1])))
        # model.add(Dropout(0.25))
        #
        # model.add(Flatten())
        # # the image dimensions are the original dimensions divided by any pooling
        # # each pixel has a number of filters, determined by the last Convolution2D layer
        # model.add(Dense(nb_filters[-1] * (shapex / nb_pool[0] / nb_pool[1]) * (shapey / nb_pool[0] / nb_pool[1]), 1024))
        # # model.add(BatchNormalization([1024]))
        # model.add(Activation('relu'))
        # model.add(Dropout(0.5))
        # model.add(Dense(1024, nb_classes))
        # model.add(Activation('softmax'))

        # let's train the model using SGD + momentum (how original).
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd)

        if not data_augmentation:
            print("Not using data augmentation or normalization")

            X_train = X_train.astype("float32", casting='unsafe')
            X_test = X_test.astype("float32", casting='unsafe')
            X_train /= 255.
            X_test /= 255.
            model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True)
            score, accu = model.evaluate(X_test, Y_test, batch_size=batch_size, show_accuracy=True)
            print('Test accuracy:', accu)

        else:
            print("Using real time data augmentation")
            # X_train = (X_train - np.mean(X_train, axis=0)) / np.std(X_train.flatten(), axis=0)
            # X_valid = (X_valid - np.mean(X_valid, axis=0)) / np.std(X_valid.flatten(), axis=0)
            # X_test = (X_test - np.mean(X_test, axis=0)) / np.std(X_test.flatten(), axis=0)
            # X_train = (X_train - np.mean(X_train, axis=0))
            # X_valid = (X_valid - np.mean(X_train, axis=0))
            # X_test = (X_test - np.mean(X_train, axis=0))
            # this will do preprocessing and realtime data augmentation
            datagen = ImageDataGenerator(
                featurewise_center=True,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,  # divide each input by its std
                zca_whitening=False,  # apply ZCA whitening
                rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
                width_shift_range=0,  # randomly shift images horizontally (fraction of total width)
                height_shift_range=0,  # randomly shift images vertically (fraction of total height)
                horizontal_flip=False,  # randomly flip images
                vertical_flip=False)  # randomly flip images

            # compute quantities required for featurewise normalization
            # (std, mean, and principal components if ZCA whitening is applied)
            datagen.fit(X_train)
            best_validation_accu = 0

            for e in range(nb_epoch):

                print('-'*40)
                print('Epoch', e)
                print('-'*40)
                print("Training...")
                # batch train with realtime data augmentation
                progbar = generic_utils.Progbar(X_train.shape[0])
                for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size=batch_size):
                    score, trainAccu = model.train_on_batch(X_batch, Y_batch, accuracy=True)
                    progbar.add(X_batch.shape[0], values=[("train accuracy", trainAccu)])

                print("Validating...")
                # Validation time!
                progbar = generic_utils.Progbar(X_valid.shape[0])
                epochValidAccu = []
                for X_batch, Y_batch in datagen.flow(X_valid, Y_valid, batch_size=batch_size):
                    score, validAccu = model.test_on_batch(X_batch, Y_batch, accuracy=True)
                    epochValidAccu.append(validAccu)
                    progbar.add(X_batch.shape[0], values=[("validation accuracy", validAccu)])
                meanValidAccu = np.mean(epochValidAccu)
                if meanValidAccu > best_validation_accu:
                    best_validation_accu = meanValidAccu
                    best_iter = e
                    print("Testing...")
                    # test time!
                    progbar = generic_utils.Progbar(X_test.shape[0])
                    epochTestAccu = []
                    for X_batch, Y_batch in datagen.flow(X_test, Y_test, batch_size=batch_size):
                        score, testAccu = model.test_on_batch(X_batch, Y_batch, accuracy=True)
                        epochTestAccu.append(testAccu)
                        progbar.add(X_batch.shape[0], values=[("test accuracy", testAccu)])
                    model.save_weights('weigths_{0}'.format(foldNum), overwrite=True)
            validScores.append(best_validation_accu)
            testScores.append(np.mean(epochTestAccu))
    scipy.io.savemat('cnn_results', {'validAccu': validScores, 'testAccu': testScores})
    print ('Average valid accuracies: {0}'.format(np.mean(validScores)))
    print ('Average test accuracies: {0}'.format(np.mean(testScores)))
Developer: cyente, Project: EEG_CNN, Lines: 104, Source: eeg_cnn_3.py

Example 12: print

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print("Training...")
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train):
            
            loss = model.train_on_batch(X_batch, X_batch.reshape(X_batch.shape[0],X_train.shape[2]**2*3))
            progbar.add(X_batch.shape[0], values=[("train loss", loss)])

        print("Testing...")
        # test time!
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, X_test):
            score = model.test_on_batch(X_batch, X_batch.reshape(X_batch.shape[0],X_train.shape[2]**2*3))
            progbar.add(X_batch.shape[0], values=[("test loss", score)])
            
    model2 = Sequential()
    model2.add(encoder)
    codes = []
    targets = []
    model2.compile(loss = "mean_squared_error", optimizer = "sgd")
    for X_batch, Y_batch in datagen.flow(X_train, Y_train):
        codes.append(model2.predict(X_batch))
        targets.append(np.argmax(Y_batch))
        
    print('stack it...')
    codes = np.vstack(codes)
    targets = np.vstack(targets)
    print(codes.shape,'code shape')
Developer: labelforce, Project: python-backend, Lines: 33, Source: cifar_conv_autoencoder.py

Example 13: cnn

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]

#......... (some code omitted here) .........
            nb_epoch=nb_epoch,
            show_accuracy=True,
            verbose=verbose,
            validation_data=(X_test, Y_test)
        )
        results = fitlog.history

    else:
        # turn on data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,
            samplewise_center=True,
            featurewise_std_normalization=False,
            samplewise_std_normalization=False,
            zca_whitening=False,
            rotation_range=0,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
            vertical_flip=True
        )
        datagen.fit(X_train)

        for e in range(nb_epoch):
            if verbose:
                print "epoch:", e

            tmp_train_acc = []
            tmp_train_loss = []
            tmp_test_acc = []
            tmp_test_loss = []
            train_batch_counter = 0
            test_batch_counter = 0

            for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size):
                train_loss, train_accuracy = model.train_on_batch(
                    X_batch,
                    Y_batch,
                    accuracy=True
                )

                tmp_train_acc.append(train_accuracy)
                tmp_train_loss.append(train_loss)
                train_batch_counter += 1

            for X_batch, Y_batch in datagen.flow(X_test, Y_test, batch_size):
                valid_loss, valid_accuracy = model.test_on_batch(
                    X_batch,
                    Y_batch,
                    accuracy=True
                )
                tmp_test_acc.append(valid_accuracy)
                tmp_test_loss.append(valid_loss)
                test_batch_counter += 1

            epoch_train_acc = sum(tmp_train_acc) / float(train_batch_counter)
            epoch_train_loss = sum(tmp_train_loss) / float(train_batch_counter)
            epoch_test_acc = sum(tmp_test_acc) / float(test_batch_counter)
            epoch_test_loss = sum(tmp_test_loss) / float(test_batch_counter)

            results["acc"].append(epoch_train_acc)
            results["loss"].append(epoch_train_loss)
            results["val_acc"].append(epoch_test_acc)
            results["val_loss"].append(epoch_test_loss)

            if verbose:
                print "acc: {0}".format(epoch_train_acc),
                print "loss: {0}".format(epoch_train_loss),
                print "val_acc: {0}".format(epoch_test_acc),
                print "val_loss: {0}".format(epoch_test_loss)

    # save model
    if model_file:
        model_data = model.to_json()
        model_file = open(model_file, "w")
        model_file.write(json.dumps(model_data))
        model_file.close()

    # save model weights
    if weights_file:
        model.save_weights(weights_file, overwrite=True)

    # save results
    if results_file:
        results["nb_epoch"] = nb_epoch
        results["batch_size"] = batch_size
        rf = open(results_file, "w")
        rf.write(json.dumps(results))
        rf.close()

    # evaluate
    score = model.evaluate(
        X_test,
        Y_test,
        show_accuracy=True,
        verbose=verbose,
        batch_size=batch_size
    )

    return results, score
Developer: deerishi, Project: genetic-algorithm-for-cnn, Lines: 104, Source: cnn.py

Example 14: train_whale_data

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]

#......... (some code omitted here) .........
    )
    model.add(Activation('relu'))
    model.add(Dropout(0.4))

    # layer 2
    model.add(
        Convolution2D(
            nb_filters,
            nb_conv,
            nb_conv,
            input_shape=(1, img_rows, img_cols),
        )
    )
    model.add(Activation('relu'))
    model.add(Dropout(0.4))

    # layer 3
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.4))

    # layer 4
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    # compile, fit and evaluate model
    print("--> compiling CNN functions")
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd'
    )

    # fit model
    print("--> fitting CNN")
    if data_augmentation is False:
        print "--> fitting data"
        fitlog = model.fit(
            X_train,
            Y_train,
            batch_size=batch_size,
            nb_epoch=nb_epoch,
            show_accuracy=True,
            verbose=1,
            validation_data=(X_test, Y_test)
        )
        results = fitlog.history

    else:
        # turn on data augmentation
        print "--> augmenting data"
        datagen = ImageDataGenerator(
            featurewise_center=False,
            featurewise_std_normalization=False,
            rotation_range=20,
            width_shift_range=0.2,
            height_shift_range=0.2,
            horizontal_flip=True,
        )
        datagen.fit(X_train)

        print "--> fitting data"
        for e in range(nb_epoch):
            print "epoch:", e
            for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size):
                train_loss, train_accuracy = model.train_on_batch(
                    X_batch,
                    Y_batch,
                    accuracy=True
                )
                valid_loss, valid_accuracy = model.test_on_batch(
                    X_test,
                    Y_test,
                    accuracy=True
                )

            results["acc"].append(float(train_accuracy))
            results["val_acc"].append(float(valid_accuracy))
            results["loss"].append(float(train_loss))
            results["val_loss"].append(float(valid_loss))

            print "acc: {0}".format(train_accuracy),
            print "val_acc: {0}".format(valid_accuracy),
            print "acc_loss: {0}".format(train_loss),
            print "val_loss: {0}".format(valid_loss)

    # save model
    model_data = model.to_json()
    model_file = open(model_file, "w")
    model_file.write(json.dumps(model_data))
    model_file.close()

    # save model weights
    model.save_weights(weights_file, overwrite=True)

    # save results
    results["nb_epoch"] = nb_epoch
    results["batch_size"] = batch_size
    rf = open(results_file, "w")
    rf.write(json.dumps(results))
    rf.close()
Developer: deerishi, Project: genetic-algorithm-for-cnn, Lines: 104, Source: cnn.py

Example 15: range

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import test_on_batch [as alias]
for e in range(epoch_num):

    print 'Epoch #{}/{}'.format(e+1,epoch_num)
    sys.stdout.flush()

    shuffle(tr_it)


    for u in tqdm(tr_it):
        l,a=model.train_on_batch(tr_in[u],tr_out[u])
        tr_hist.r.addLA(l,a,tr_out[u].shape[0])
    # clear_output()
    tr_hist.log()

    for u in range(dev_in.shape[0]):
        l,a=model.test_on_batch(dev_in[u],dev_out[u])
        dev_hist.r.addLA(l,a,dev_out[u].shape[0])
    dev_hist.log()


    # for u in range(tst_in.shape[0]):
    #     l,a=model.test_on_batch(tst_in[u],tst_out[u])
    #     tst_hist.r.addLA(l,a,tst_out[u].shape[0])
    # tst_hist.log()


pickle.dump(model, open('models/classifier_enc.pkl','wb'))
pickle.dump(dev_hist, open('models/testHist_enc.pkl','wb'))
pickle.dump(tr_hist, open('models/trainHist_enc.pkl','wb'))

Developer: evankos, Project: ASRDemos, Lines: 31, Source: mlp.py


Note: The keras.models.Sequential.test_on_batch method examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.