

Python Sequential.fit_generator Method Code Examples

This article collects typical usage examples of the Python method keras.models.Sequential.fit_generator. If you are wondering what Sequential.fit_generator does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of keras.models.Sequential, the class this method belongs to.


The sections below show 15 code examples of Sequential.fit_generator, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
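Before the examples, here is a minimal, self-contained sketch of the calling pattern they all share: fit_generator consumes a Python generator that yields (inputs, targets) batches indefinitely, together with a per-epoch length. The toy data, generator, and layer sizes below are assumptions made purely for illustration. Note the API difference visible across the examples: Keras 1.x uses samples_per_epoch and nb_epoch, while Keras 2 renamed them to steps_per_epoch and epochs (used here).

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

def batch_generator(x, y, batch_size=32):
    # Yield (inputs, targets) batches forever, as fit_generator expects.
    steps = len(x) // batch_size
    i = 0
    while True:
        sl = slice(i * batch_size, (i + 1) * batch_size)
        yield x[sl], y[sl]
        i = (i + 1) % steps

# Toy data and model, purely illustrative.
x = np.random.rand(320, 10)
y = np.random.randint(0, 2, (320, 1))

model = Sequential()
model.add(Dense(16, input_dim=10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop')

# Keras 2 signature; the Keras 1 equivalent would be
# model.fit_generator(batch_generator(x, y), samples_per_epoch=320, nb_epoch=3).
model.fit_generator(batch_generator(x, y), steps_per_epoch=10, epochs=3)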

Example 1: train_CAE

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def train_CAE():

    encoder = containers.Sequential()
    encoder.add(Permute((3,1,2),input_shape=(h,w,ch))) # reorder input to ch, h, w (no sample axis)
    encoder.add(GaussianNoise(0.05)) # corrupt inputs slightly
    encoder.add(Convolution2D(16,3,3,init='glorot_uniform',border_mode='same'))
    encoder.add(MaxPooling2D((2,2)))
    encoder.add(Activation('tanh'))
    encoder.add(Convolution2D(32,3,3,init='glorot_uniform',border_mode='same'))
    encoder.add(MaxPooling2D((2,2)))
    encoder.add(Activation('tanh'))
    decoder = containers.Sequential()
    decoder.add(UpSampling2D((2,2),input_shape=(32,32,32)))
    decoder.add(Convolution2D(3,3,3,init='glorot_uniform',border_mode='same'))
    decoder.add(Activation('tanh'))
    decoder.add(UpSampling2D((2,2),input_shape=(16,64,64)))
    decoder.add(Convolution2D(3,3,3,init='glorot_uniform',border_mode='same'))
    decoder.add(Activation('tanh'))
    decoder.add(Permute((2,3,1)))
    autoencoder = AutoEncoder(encoder,decoder)

    model = Sequential()
    model.add(autoencoder)
    model.compile(optimizer='rmsprop', loss='mae')
    # if shapes don't match, check the output_shape of encoder/decoder
    genr = image_generator(biz_id_train['photo_id'], batch_size)
    model.fit_generator(genr, samples_per_epoch=len(biz_id_train), nb_epoch=10)
Author: wongjingping, Project: yelp, Lines: 29, Source: CAE.py
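Example 1 relies on an image_generator helper and a biz_id_train table that are not shown. As a rough, hypothetical sketch (the photo directory layout, the global h and w image dimensions, and the use of PIL for resizing are assumptions, not taken from the yelp project), a generator feeding a convolutional autoencoder would yield each image batch as both input and target:

import numpy as np
from PIL import Image

def image_generator(photo_ids, batch_size, photo_dir='photos'):
    # Yield (x, x) batches forever so the autoencoder learns to reconstruct its input.
    ids = list(photo_ids)
    while True:
        np.random.shuffle(ids)
        for start in range(0, len(ids) - batch_size + 1, batch_size):
            batch = ids[start:start + batch_size]
            imgs = [np.asarray(Image.open('%s/%s.jpg' % (photo_dir, pid))
                               .resize((w, h)), dtype='float32') / 255.0
                    for pid in batch]
            x = np.stack(imgs)  # shape (batch_size, h, w, ch)
            yield x, x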

Example 2: train

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def train(dataReader, oneHot, oneHotAveraged, contextHashes):
	n = (Epochs + 1) * SamplesPerEpoch  # TODO + 1 should not be needed

	tokeniser = Tokenizer(nb_words=MaxWords)
	tokeniser.fit_on_texts((row[0] for row in dataReader.trainingData(n)))

	# `word_index` maps each word to its unique index
	dictionarySize = len(tokeniser.word_index) + 1

	oneHotDimension        = (1 if oneHotAveraged else SequenceLength) * dictionarySize if oneHot else 0
	contextHashesDimension = dictionarySize * 2 if contextHashes else 0

	model = Sequential()
	model.add(Dense(EmbeddingDim, input_dim=(oneHotDimension + contextHashesDimension)))
	model.add(Dense(Labels, activation='softmax'))
	model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

	trainingGenerator   = mapGenerator(dataReader.trainingData(n),   tokeniser, dictionarySize, oneHot, oneHotAveraged, contextHashes)
	validationGenerator = mapGenerator(dataReader.validationData(n), tokeniser, dictionarySize, oneHot, oneHotAveraged, contextHashes)

	model.fit_generator(trainingGenerator,
		nb_epoch=Epochs,
		samples_per_epoch=SamplesPerEpoch,
		validation_data=validationGenerator,
		nb_val_samples=SamplesPerEpoch)

	model2 = Sequential()
	model2.add(Dense(EmbeddingDim, input_dim=(oneHotDimension + contextHashesDimension), weights=model.layers[0].get_weights()))

	return model, model2, tokeniser, dictionarySize
Author: poliglot, Project: fasttext, Lines: 32, Source: model.py

Example 3: test_multiprocessing_fit_error

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def test_multiprocessing_fit_error():
    batch_size = 10
    good_batches = 3

    def custom_generator():
        """Raises an exception after a few good batches"""
        for i in range(good_batches):
            yield (np.random.randint(batch_size, 256, (50, 2)),
                   np.random.randint(batch_size, 2, 50))
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')

    samples = batch_size * (good_batches + 1)

    with pytest.raises(StopIteration):
        model.fit_generator(
            custom_generator(), samples, 1,
            workers=4, use_multiprocessing=True,
        )

    with pytest.raises(StopIteration):
        model.fit_generator(
            custom_generator(), samples, 1,
            use_multiprocessing=False,
        )
Author: dansbecker, Project: keras, Lines: 30, Source: test_multiprocessing.py

Example 4: main

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def main():
    ext = extension_from_parameters()

    out_dim = 1
    loss = 'mse'
    metrics = None
    #metrics = ['accuracy'] if CATEGORICAL else None

    datagen = RegressionDataGenerator()
    train_gen = datagen.flow(batch_size=BATCH_SIZE)
    val_gen = datagen.flow(val=True, batch_size=BATCH_SIZE)
    val_gen2 = datagen.flow(val=True, batch_size=BATCH_SIZE)

    model = Sequential()
    for layer in LAYERS:
        if layer:
            model.add(Dense(layer, input_dim=datagen.input_dim, activation=ACTIVATION))
            if DROP:
                model.add(Dropout(DROP))
    model.add(Dense(out_dim))

    model.summary()
    model.compile(loss=loss, optimizer='rmsprop', metrics=metrics)

    train_samples = int(datagen.n_train/BATCH_SIZE) * BATCH_SIZE
    val_samples = int(datagen.n_val/BATCH_SIZE) * BATCH_SIZE

    history = BestLossHistory(val_gen2, val_samples, ext)
    checkpointer = ModelCheckpoint(filepath='model'+ext+'.h5', save_best_only=True)

    model.fit_generator(train_gen, train_samples,
                        nb_epoch = NB_EPOCH,
                        validation_data = val_gen,
                        nb_val_samples = val_samples,
                        callbacks=[history, checkpointer])
Author: carrondt, Project: Benchmarks, Lines: 37, Source: p1b3_mlp_v3.py

Example 5: CNN

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
class CNN(object):
    def __init__(self):
        self.model = Sequential([
            Conv2D(50, (3, 3), input_shape=(28, 28, 1), padding='same', activation='relu'),
            Conv2D(50, (3, 3), input_shape=(28, 28, 1), padding='same', activation='relu'),
            MaxPool2D(pool_size=(4, 4), strides=(3, 3), padding='same'),

            Conv2D(32, (3, 3), padding='same', activation='relu' ),
            Conv2D(32, (3, 3), padding='same', activation='relu' ),
            MaxPool2D(pool_size=(7, 7), strides=(3, 3), padding='same'),

            Flatten(),
            Dropout(0.5),
            Dense(64, activation='relu'),
            Dense(10, activation='softmax')
        ])

        self.model.compile(loss=categorical_crossentropy, optimizer=Adam(), metrics=['accuracy'])

    def train(self):
        mnist = input_data.read_data_sets("../MNIST_data/", one_hot=True)

        train_datagen = ImageDataGenerator(rotation_range=20, width_shift_range=0.2, height_shift_range=0.2)
        train_datagen.fit(mnist.train.images.reshape(-1, 28, 28, 1))

        x_test, y_test = mnist.test.images.reshape(-1, 28, 28, 1), mnist.test.labels
        self.model.fit_generator(train_datagen.flow(mnist.train.images.reshape(-1, 28, 28, 1), mnist.train.labels),
                       #batch_size=128,
                       epochs=20,
                       verbose=1,
                       validation_data=(x_test, y_test),
                       callbacks=[TrainValTensorBoard(log_dir='./logs/cnn4', histogram_freq=1, write_grads=True)])

        score = self.model.evaluate(x_test, y_test, verbose=0)
        print('Loss', score[0], 'acc', score[1])
Author: tracholar, Project: wiki, Lines: 37, Source: cnn4.py

Example 6: test_multiprocessing_fit_error

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def test_multiprocessing_fit_error():

    batch_size = 32
    good_batches = 5

    def myGenerator():
        """Raises an exception after a few good batches"""
        for i in range(good_batches):
            yield (np.random.randint(batch_size, 256, (500, 2)),
                   np.random.randint(batch_size, 2, 500))
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')

    samples = batch_size * (good_batches + 1)

    with pytest.raises(Exception):
        model.fit_generator(
            myGenerator(), samples, 1,
            nb_worker=4, pickle_safe=True,
        )

    with pytest.raises(Exception):
        model.fit_generator(
            myGenerator(), samples, 1,
            pickle_safe=False,
        )
Author: alfredplpl, Project: keras, Lines: 31, Source: test_multiprocessing.py

Example 7: MLP

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
class MLP(BaseEstimator):
    def __init__(self, verbose=0, model=None, final_activation='sigmoid'):
        self.verbose = verbose
        self.model = model
        self.final_activation = final_activation

    def fit(self, X, y):
        if not self.model:
            self.model = Sequential()
            self.model.add(Dense(1000, input_dim=X.shape[1]))
            self.model.add(Activation('relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(y.shape[1]))
            self.model.add(Activation(self.final_activation))
            self.model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01))
        self.model.fit_generator(generator=_batch_generator(X, y, 256, True),
                                 samples_per_epoch=X.shape[0], nb_epoch=20, verbose=self.verbose)

    def predict(self, X):
        pred = self.predict_proba(X)
        return sparse.csr_matrix(pred > 0.2)

    def predict_proba(self, X):
        pred = self.model.predict_generator(generator=_batch_generatorp(X, 512), val_samples=X.shape[0])
        return pred
Author: quadflor, Project: Quadflor, Lines: 27, Source: neural_net.py

Example 8: test_CallbackValData

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def test_CallbackValData():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)

    cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    train_generator = data_generator(X_train, y_train, batch_size)
    model.fit_generator(train_generator, len(X_train), epochs=1,
                        validation_data=(X_test, y_test),
                        callbacks=[cbk2])

    # callback validation data should always have x, y, and sample weights
    assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
    assert cbk.validation_data[0] is cbk2.validation_data[0]
    assert cbk.validation_data[1] is cbk2.validation_data[1]
    assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape
Author: ZhangXinNan, Project: keras, Lines: 29, Source: test_callbacks.py

Example 9: CNN

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def CNN(trainDir, validationDir, classNum):
    model = Sequential()
    model.add(Convolution2D(4, 3, 3, input_shape=(img_width, img_height, 1)))
    model.add(Activation('relu'))
    model.add(Convolution2D(4, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer
    model.add(Convolution2D(8, 3, 3))
    model.add(Activation('relu'))
    model.add(Convolution2D(8, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Convolution2D(16, 3, 3))
    # model.add(Activation('relu'))
    # model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(16))
    model.add(Activation('relu'))
    model.add(Dropout(0.6))
    model.add(Dense(classNum))
    model.add(Activation('softmax'))
    # test
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # this is the augmentation configuration we will use for training
    train_datagen = ImageDataGenerator(
            rescale=1./255,
            shear_range=0.2,
            zca_whitening=True,
            zoom_range=0.2,
            horizontal_flip=False)
    # this is the augmentation configuration we will use for testing:
    # only rescaling
    test_datagen = ImageDataGenerator(rescale=1./255, zca_whitening=True)
    train_generator = train_datagen.flow_from_directory(
            trainDir,
            target_size=(img_width, img_height),
            batch_size=32,
            color_mode='grayscale',
            class_mode='categorical')
    validation_generator = test_datagen.flow_from_directory(
            validationDir,
            target_size=(img_width, img_height),
            batch_size=32,
            color_mode='grayscale',
            class_mode='categorical')
    model.fit_generator(
            train_generator,
            samples_per_epoch=nb_train_samples,
            nb_epoch=nb_epoch,
            validation_data=validation_generator,
            nb_val_samples=nb_validation_samples)
    return model
Author: Harlaus, Project: zhihu, Lines: 61, Source: SVGG.py

Example 10: try_params

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def try_params(n_iterations, params, data=None, datamode='memory'):

    print "iterations:", n_iterations
    print_params(params)

    batchsize = 100
    if datamode == 'memory':
        X_train, Y_train = data['train']
        X_valid, Y_valid = data['valid']
        inputshape = X_train.shape[1:]
    else:
        train_generator = data['train']['gen_func'](batchsize, data['train']['path'])
        valid_generator = data['valid']['gen_func'](batchsize, data['valid']['path'])
        train_epoch_step = data['train']['n_sample'] / batchsize
        valid_epoch_step = data['valid']['n_sample'] / batchsize
        inputshape = data['train']['gen_func'](batchsize, data['train']['path']).next()[0].shape[1:]

    model = Sequential()
    model.add(Conv2D(128, (1, 24), padding='same', input_shape=inputshape, activation='relu'))
    model.add(GlobalMaxPooling2D())

    model.add(Dense(32, activation='relu'))
    model.add(Dropout(params['DROPOUT']))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    optim = Adadelta
    myoptimizer = optim(epsilon=params['DELTA'], rho=params['MOMENT'])
    mylossfunc = 'categorical_crossentropy'
    model.compile(loss=mylossfunc, optimizer=myoptimizer, metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=0)

    if datamode == 'memory':
        model.fit(
            X_train,
            Y_train,
            batch_size=batchsize,
            epochs=int(round(n_iterations)),
            validation_data=(X_valid, Y_valid),
            callbacks=[early_stopping])
        score, acc = model.evaluate(X_valid, Y_valid)
    else:
        model.fit_generator(
            train_generator,
            steps_per_epoch=train_epoch_step,
            epochs=int(round(n_iterations)),
            validation_data=valid_generator,
            validation_steps=valid_epoch_step,
            callbacks=[early_stopping])
        score, acc = model.evaluate_generator(valid_generator, steps=valid_epoch_step)

    return {'loss': score, 'model': (model.to_json(), optim, myoptimizer.get_config(), mylossfunc)}
Author: zhouyu, Project: Keras-genomics, Lines: 55, Source: model.py

Example 11: lm

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def lm():
    maxlen=10
    cfig = getattr(config, 'get_config_morph')('cs')
    batch_size, nb_epoch = cfig['batch_size'], 200
    X_train, y_train = getTextFile(cfig['train_file'], cfig['train_dic'], cfig)
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen, padding='post')
    y_train = sequence.pad_sequences(y_train, maxlen=maxlen, padding='post')
    #X_train, y_train = X_train[:10050], y_train[:10050]
    print X_train.shape, y_train.shape
    #X_test, y_test = getTextFile(cfig['test_file'], cfig['train_dic'], cfig)
    #X_test = sequence.pad_sequences(X_test, maxlen=10, padding='post')
    #y_test = sequence.pad_sequences(y_test, maxlen=10, padding='post')
    '''
    y_train_tensor3 = np.zeros((len(X_train), maxlen, cfig['vocab_size']), dtype=np.bool)
    i, t = 0, 0
    for sentence in y_train:
        t = 0
        for v in sentence:
            y_train_tensor3[i][t][v] = True
            t += 1
        i += 1
    k = 0
    for i , j in generate_data(X_train, y_train, 200, cfig['vocab_size']):
        print i.shape , j.shape
        if k > 20:
            break
        k += 1
    exit(0)
    '''
    print 'Build model...'
    model = Sequential()
    model.add(Embedding(cfig['vocab_size'], 128, dropout=0.2))
    model.add(LSTM(128, return_sequences=True)) #- original
    model.add(TimeDistributedDense(cfig['vocab_size']))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.summary()

    print 'Train...'
    model.fit_generator(generate_data(X_train, y_train, batch_size, cfig['vocab_size']),
                        #samples_per_epoch=len(X_train)/batch_size,
                        samples_per_epoch=1000,
                        nb_epoch=nb_epoch)
    #model.fit(X_train, y_train_tensor3)
    exit(0)
    cnt = 0
    for i , j in generate_data(X_train, y_train, 200, cfig['vocab_size']):
        #model.train_on_batch(i, j)
        history= model.fit(i, j, batch_size=10, nb_epoch=1,verbose=0)
        if cnt >= 3:
            break
        cnt += 1
Author: gumaojie, Project: morphlm, Lines: 54, Source: keras_lm.py

Example 12: train_model

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def train_model(genre, dir_model, MP):
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) #check gpu is being used
    
    batch_size = MP['bs']
    lstm_size = MP['lstm_size']
    seq_length = MP['seq_length']
    drop = MP['dropout']
    lr = MP['lr']
    epochs = MP['epochs']
    
    text_to_int, int_to_text, n_chars = np.load('playlists/%s/ancillary_char.npy'%genre)
    vocab_size = len(text_to_int)
    X = np.load('playlists/%s/X_sl%d_char.npy'%(genre, seq_length))
    y = np.load('playlists/%s/y_sl%d_char.npy'%(genre, seq_length))

    # randomly shuffle samples before test/valid split
    np.random.seed(40)
    ran = [i for i in range(len(X))]
    np.random.shuffle(ran)
    
    X_train, X_valid, y_train, y_valid = train_test_split(X[ran], y[ran], test_size=0.2, random_state=42)

    try:
        model = load_model(dir_model)
        print("successfully loaded previous model, continuing to train")
    except:
        print("generating new model")
        model = Sequential()
        model.add(GRU(lstm_size, dropout=drop, recurrent_dropout=drop, return_sequences=True,
                      input_shape=(seq_length, vocab_size)))
        for i in range(MP['n_layers'] - 1):
            model.add(GRU(lstm_size, dropout=drop, recurrent_dropout=drop, return_sequences=True))
        model.add(TimeDistributed(Dense(vocab_size, activation='softmax'))) #output shape=(bs, sl, vocab)

        decay = 0.5*lr/epochs
        optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay, clipvalue=1)
        #optimizer = RMSprop(lr=lr, decay=decay)
        model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['categorical_accuracy'])
    print(model.summary())

    # callbacks
    checkpoint = ModelCheckpoint(dir_model, monitor='loss', save_best_only=True, mode='min')
    #earlystop = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=3)
    callbacks_list = [checkpoint]

    # train
    model.fit_generator(one_hot_gen(X_train, y_train, vocab_size, seq_length, batch_size),
                        steps_per_epoch=len(X_train)/batch_size, epochs=epochs, callbacks=callbacks_list,
                        validation_data=one_hot_gen(X_valid, y_valid, vocab_size, seq_length, batch_size),
                        validation_steps=len(X_valid)/batch_size)
    model.save(dir_model)
Author: silburt, Project: MachineLearning, Lines: 53, Source: train_lstm_char.py

Example 13: test_sequential_fit_generator

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def test_sequential_fit_generator():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit_generator(data_generator(True), len(X_train), nb_epoch, show_accuracy=False)
    model.fit_generator(data_generator(True), len(X_train), nb_epoch, show_accuracy=True)
    model.fit_generator(data_generator(True), len(X_train), nb_epoch, show_accuracy=False, validation_data=(X_test, y_test))
    model.fit_generator(data_generator(True), len(X_train), nb_epoch, show_accuracy=True, validation_data=(X_test, y_test))

    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.9)
Author: MingChaoSun, Project: keras, Lines: 33, Source: test_models.py

Example 14: test_sequential_fit_generator

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def test_sequential_fit_generator():
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    def data_generator(train):
        if train:
            max_batch_index = len(x_train) // batch_size
        else:
            max_batch_index = len(x_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (x_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (x_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_class))
    model.pop()
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit_generator(data_generator(True), 5, epochs)
    model.fit_generator(data_generator(True), 5, epochs,
                        validation_data=(x_test, y_test))
    model.fit_generator(data_generator(True), 5, epochs,
                        validation_data=data_generator(False),
                        validation_steps=3)
    model.fit_generator(data_generator(True), 5, epochs, max_queue_size=2)
    model.evaluate(x_train, y_train)
Author: 5ke, Project: keras, Lines: 36, Source: test_sequential_model.py

Example 15: model

# Required imports: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import fit_generator [as alias]
def model(datagen, X_train, Y_train, X_test, Y_test):
    batch_size = 32
    nb_epoch = 200

    # input image dimensions
    img_rows, img_cols = 32, 32
    # the CIFAR10 images are RGB
    img_channels = 3

    model = Sequential()

    model.add(Convolution2D(32, 3, 3, border_mode='same',
                            input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    # let's train the model using SGD + momentum (how original).
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    # fit the model on the batches generated by datagen.flow()
    model.fit_generator(datagen.flow(X_train, Y_train,
                        batch_size=batch_size),
                        samples_per_epoch=X_train.shape[0],
                        nb_epoch=nb_epoch,
                        validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Author: ShuaiW, Project: hyperas, Lines: 51, Source: cifar_generator_cnn.py


Note: The keras.models.Sequential.fit_generator examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with its original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.