

Python Sequential.predict_generator Method Code Examples

This article collects typical usage examples of the keras.models.Sequential.predict_generator method in Python. If you are wondering what Sequential.predict_generator does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of keras.models.Sequential, the class this method belongs to.


The following presents 15 code examples of the Sequential.predict_generator method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
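Before the examples, here is a minimal, self-contained usage sketch, assuming the Keras 2.x generator API in which predict_generator accepts steps, max_queue_size, workers and use_multiprocessing (older releases used val_samples, max_q_size, nb_worker and pickle_safe instead, as several of the examples below show). The generator yields input batches only, and predict_generator stacks the per-batch predictions; the generator and variable names here are purely illustrative:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

def batch_generator(data, batch_size=10):
    # Yield input batches indefinitely, as predict_generator expects.
    while True:
        start = np.random.randint(0, len(data) - batch_size)
        yield data[start:start + batch_size]

data = np.random.rand(100, 2)

model = Sequential()
model.add(Dense(1, input_shape=(2,)))
model.compile(loss='mse', optimizer='adadelta')

# Draw 5 batches from the generator and return the stacked predictions.
predictions = model.predict_generator(batch_generator(data), steps=5)
print(predictions.shape)  # (50, 1): 5 steps of 10 samples, one output each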

Example 1: test_multiprocessing_predict_error

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
def test_multiprocessing_predict_error():
    good_batches = 3
    workers = 4

    def custom_generator():
        """Raises an exception after a few good batches"""
        for i in range(good_batches):
            yield (np.random.randint(1, 256, size=(2, 5)),
                   np.random.randint(1, 256, size=(2, 5)))
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(5,)))
    model.compile(loss='mse', optimizer='adadelta')

    with pytest.raises(StopIteration):
        model.predict_generator(
            custom_generator(), good_batches * workers + 1, 1,
            workers=workers, use_multiprocessing=True,
        )

    with pytest.raises(StopIteration):
        model.predict_generator(
            custom_generator(), good_batches + 1, 1,
            use_multiprocessing=False,
        )
Author: dansbecker, Project: keras, Lines: 28, Source: test_multiprocessing.py

Example 2: test_multiprocessing_predicting

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
def test_multiprocessing_predicting():

    reached_end = False

    arr_data = np.random.randint(0, 256, (500, 2))

    def myGenerator():

        batch_size = 32
        n_samples = 500

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            yield X

    # Build a NN
    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')
    model.predict_generator(myGenerator(),
                            val_samples=320,
                            max_q_size=10,
                            nb_worker=2,
                            pickle_safe=True)
    model.predict_generator(myGenerator(),
                            val_samples=320,
                            max_q_size=10,
                            pickle_safe=False)
    reached_end = True

    assert reached_end
Author: alfredplpl, Project: keras, Lines: 36, Source: test_multiprocessing.py

Example 3: test_multiprocessing_predict_error

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
def test_multiprocessing_predict_error():

    batch_size = 32
    good_batches = 5

    def myGenerator():
        """Raises an exception after a few good batches"""
        for i in range(good_batches):
            yield (np.random.randint(batch_size, 256, (500, 2)),
                   np.random.randint(batch_size, 2, 500))
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')

    samples = batch_size * (good_batches + 1)

    with pytest.raises(Exception):
        model.predict_generator(
            myGenerator(), samples, 1,
            nb_worker=4, pickle_safe=True,
        )

    with pytest.raises(Exception):
        model.predict_generator(
            myGenerator(), samples, 1,
            pickle_safe=False,
        )
Author: alfredplpl, Project: keras, Lines: 31, Source: test_multiprocessing.py

Example 4: MLP

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
class MLP(BaseEstimator):
    def __init__(self, verbose=0, model=None, final_activation='sigmoid'):
        self.verbose = verbose
        self.model = model
        self.final_activation = final_activation

    def fit(self, X, y):
        if not self.model:
            self.model = Sequential()
            self.model.add(Dense(1000, input_dim=X.shape[1]))
            self.model.add(Activation('relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(y.shape[1]))
            self.model.add(Activation(self.final_activation))
            self.model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01))
        self.model.fit_generator(generator=_batch_generator(X, y, 256, True),
                                 samples_per_epoch=X.shape[0], nb_epoch=20, verbose=self.verbose)

    def predict(self, X):
        pred = self.predict_proba(X)
        return sparse.csr_matrix(pred > 0.2)

    def predict_proba(self, X):
        pred = self.model.predict_generator(generator=_batch_generatorp(X, 512), val_samples=X.shape[0])
        return pred
Author: quadflor, Project: Quadflor, Lines: 27, Source: neural_net.py

Example 5: test_multiprocessing_predicting

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
def test_multiprocessing_predicting():
    arr_data = np.random.randint(0, 256, (50, 2))

    def custom_generator():
        batch_size = 10
        n_samples = 50

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            yield X

    # Build a NN
    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')
    model.predict_generator(custom_generator(),
                            steps=5,
                            max_queue_size=10,
                            workers=2,
                            use_multiprocessing=True)
    model.predict_generator(custom_generator(),
                            steps=5,
                            max_queue_size=10,
                            use_multiprocessing=False)
    model.predict_generator(custom_generator(),
                            steps=5,
                            max_queue_size=10,
                            workers=0)
Author: dansbecker, Project: keras, Lines: 33, Source: test_multiprocessing.py

Example 6: test_sequential_fit_generator_finite_length

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
def test_sequential_fit_generator_finite_length():
    (X_train, y_train), (X_test, y_test) = _get_test_data(1000,200)

    def data_generator(train, nbatches):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        for i in range(nbatches):
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,), activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nsamples = (len(X_train) // batch_size) * batch_size
    model.fit_generator(data_generator(True, nsamples//batch_size), nsamples, nb_epoch)

    loss = model.evaluate(X_train, y_train)
    assert(loss < 3.0)

    eval_results = model.evaluate_generator(data_generator(True, nsamples//batch_size), nsamples, nb_epoch)
    assert(eval_results < 3.0)

    predict_results = model.predict_generator(data_generator(True, nsamples//batch_size), nsamples, nb_epoch)
    assert(predict_results.shape == (nsamples, 4))

    # should fail because not enough samples
    try:
        model.fit_generator(data_generator(True, nsamples//batch_size), nsamples+1, nb_epoch)
        assert(False)
    except:
        pass

    # should fail because generator throws exception
    def bad_generator(gen):
        for i in range(0,20):
            yield next(gen)
        raise Exception("Generator raised an exception")

    try:
        model.fit_generator(bad_generator(data_generator(True, nsamples//batch_size)), nsamples+1, nb_epoch)
        assert(False)
    except:
        pass
Author: sehugg, Project: keras, Lines: 51, Source: test_sequential_model.py

Example 7: test_multithreading_predict_error

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
def test_multithreading_predict_error():
    arr_data = np.random.randint(0, 256, (50, 2))
    good_batches = 3

    @threadsafe_generator
    def custom_generator():
        """Raises an exception after a few good batches"""
        batch_size = 10
        n_samples = 50

        for i in range(good_batches):
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            yield X
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # - Produce data on 4 worker threads, consume on main thread:
    #   - All worker threads share the SAME generator
    #   - Make sure `RuntimeError` exception bubbles up
    with pytest.raises(RuntimeError):
        model.predict_generator(custom_generator(),
                                steps=good_batches * WORKERS + 1,
                                max_queue_size=10,
                                workers=WORKERS,
                                use_multiprocessing=False)
    # - Produce data on 1 worker thread, consume on main thread:
    #   - Worker thread is the only thread running the generator
    #   - Make sure `RuntimeError` exception bubbles up
    with pytest.raises(RuntimeError):
        model.predict_generator(custom_generator(),
                                steps=good_batches + 1,
                                max_queue_size=10,
                                workers=1,
                                use_multiprocessing=False)

    # - Produce and consume data without a queue on main thread
    #   - Make sure the value of `use_multiprocessing` is ignored
    #   - Make sure `RuntimeError` exception bubbles up
    with pytest.raises(RuntimeError):
        model.predict_generator(custom_generator(),
                                steps=good_batches + 1,
                                max_queue_size=10,
                                workers=0,
                                use_multiprocessing=False)
Author: ZhangXinNan, Project: keras, Lines: 52, Source: test_multiprocessing.py

Example 8: test_multithreading_predicting

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
def test_multithreading_predicting():
    arr_data = np.random.randint(0, 256, (50, 2))

    @threadsafe_generator
    def custom_generator():
        batch_size = 10
        n_samples = 50

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            yield X

    # Build a NN
    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # - Produce data on 4 worker threads, consume on main thread:
    #   - All worker threads share the SAME generator
    model.predict_generator(custom_generator(),
                            steps=STEPS,
                            max_queue_size=10,
                            workers=WORKERS,
                            use_multiprocessing=False)

    # - Produce data on 1 worker thread, consume on main thread:
    #   - Worker thread is the only thread running the generator
    model.predict_generator(custom_generator(),
                            steps=STEPS,
                            max_queue_size=10,
                            workers=1,
                            use_multiprocessing=False)

    # - Main thread runs the generator without a queue
    #   - Make sure the value of `use_multiprocessing` is ignored
    model.predict_generator(custom_generator(),
                            steps=STEPS,
                            max_queue_size=10,
                            workers=0,
                            use_multiprocessing=False)
Author: ZhangXinNan, Project: keras, Lines: 45, Source: test_multiprocessing.py

Example 9: main

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
def main(hdf5_paths, iptagger, n_train, n_test, n_validate):
    '''
    '''

    train_paths = [f for f in hdf5_paths if 'train' in f]
    test_paths = [f for f in hdf5_paths if 'test' in f]
    validate_paths = [f for f in hdf5_paths if 'validate' in f]

    def batch(paths, iptagger, batch_size, random=True):
        while True:
            if random:
                np.random.shuffle(paths)
            for fp in paths:
                d = io.load(fp)
                X = np.concatenate([d['X'], d[iptagger + '_vars']], axis=1)
                le = LabelEncoder()
                y = le.fit_transform(d['y'])
                w = d['w']
                if random:
                    ix = range(X.shape[0])
                    np.random.shuffle(ix)
                    X, y, w = X[ix], y[ix], w[ix]
                for i in xrange(int(np.ceil(X.shape[0] / float(batch_size)))):
                    yield X[(i * batch_size):((i+1)*batch_size)], y[(i * batch_size):((i+1)*batch_size)], w[(i * batch_size):((i+1)*batch_size)]

    def get_n_vars(train_paths, iptagger):
        # with open(train_paths[0], 'rb') as buf:
        #     d = io.load(buf)
        d = io.load(train_paths[0])
        return np.concatenate([d['X'], d[iptagger + '_vars']], axis=1).shape[1]

    net = Sequential()
    net.add(Dense(50, input_shape=(get_n_vars(train_paths, iptagger), ), activation='relu'))
    net.add(Dropout(0.3))
    net.add(Dense(40, activation='relu'))
    net.add(Dropout(0.2))
    net.add(Dense(16, activation='relu'))
    net.add(Dropout(0.1))
    net.add(Dense(16, activation='relu'))
    net.add(Dropout(0.1))
    net.add(Dense(4, activation='softmax'))

    net.summary()
    net.compile('adam', 'sparse_categorical_crossentropy')

    weights_path = './' + iptagger + '-' + MODEL_NAME + '-progress.h5'
    try:
        print 'Trying to load weights from ' + weights_path
        net.load_weights(weights_path)
        print 'Weights found and loaded from ' + weights_path
    except IOError:
        print 'Could not find weight in ' + weights_path

    # -- train 
    try:
        net.fit_generator(batch(train_paths, iptagger, 256, random=True),
        samples_per_epoch = n_train,
        verbose=True, 
        #batch_size=64, 
        #sample_weight=train['w'],
        callbacks = [
            EarlyStopping(verbose=True, patience=100, monitor='val_loss'),
            ModelCheckpoint(weights_path, monitor='val_loss', verbose=True, save_best_only=True)
        ],
        nb_epoch=200, 
        validation_data=batch(validate_paths, iptagger, 64, random=False),
        nb_val_samples=n_validate
        ) 
    except KeyboardInterrupt:
        print '\n Stopping early.'

    # -- load in best network
    print 'Loading best network...'
    net.load_weights(weights_path)

    print 'Extracting...'
    # # -- save the predicions
    #np.save('yhat-{}-{}.npy'.format(iptagger, MODEL_NAME), yhat)

    # from joblib import Parallel, delayed
    # test = Parallel(n_jobs=1, verbose=5, backend="threading")(
    #     delayed(extract)(filepath, ['pt', 'y', 'mv2c10']) for filepath in test_paths
    # )
    
    test = [extract(filepath, ['pt', 'y', 'mv2c10']) for filepath in test_paths]

    # -- test
    print 'Testing...'
    yhat = net.predict_generator(batch(test_paths, iptagger, 2048, random=False), val_samples=n_test)

    def dict_reduce(x, y):
        return {
            k: np.concatenate((v, y[k]))
            for k, v in x.iteritems()
        }
    test = reduce(dict_reduce, test)

    print 'Plotting...'
    _ = performance(yhat, test['y'], test['mv2c10'], iptagger)

#.........Part of the code is omitted here.........
Author: mickypaganini, Project: IPNN, Lines: 103, Source: train_DL1_generator.py

Example 10: fit

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
def fit():
	batch_size = 128
	nb_epoch = 1
	chunk_size = 15000

	# input image dimensions
	img_rows, img_cols = 28, 28
	# number of convolutional filters to use
	nb_filters = 32
	# size of pooling area for max pooling
	nb_pool = 2
	# convolution kernel size
	nb_conv = 3

	#load all the labels for the train and test sets
	y_train = np.loadtxt('labels_train.csv')
	y_test = np.loadtxt('labels_test.csv')
	
	fnames_train = ['train/train'+str(i)+'.png' for i in xrange(len(y_train))]
	fnames_test = ['test/test'+str(i)+'.png' for i in xrange(len(y_test))]
	
	nb_classes = len(np.unique(y_train))

	# convert class vectors to binary class matrices
	Y_train = np_utils.to_categorical(y_train.astype(int), nb_classes)
	Y_test = np_utils.to_categorical(y_test.astype(int), nb_classes)

	model = Sequential()

	model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
							border_mode='valid',
							input_shape=(1, img_rows, img_cols)))
	model.add(Activation('relu'))
	model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
	model.add(Dropout(0.25))

	model.add(Flatten())
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))
	model.add(Dense(nb_classes))
	model.add(Activation('softmax'))

	model.compile(loss='categorical_crossentropy',
				  optimizer='adadelta',
				  metrics=['accuracy'])

	#model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
	#          verbose=1, validation_data=(X_test, Y_test))

	model.fit_generator(myGenerator(Y_train, chunk_size, batch_size, fnames_train), samples_per_epoch = y_train.shape[0], nb_epoch = nb_epoch, verbose=2,callbacks=[], validation_data=None, class_weight=None) # show_accuracy=True, nb_worker=1 
		  
	'''	  	
	i = 0	
	pred = np.zeros((len(fnames_test), Y_train.shape[1]))
	for X, y in myGenerator(Y_test, chunk_size, batch_size, fnames_test):	
		print('chunk '+str(i))  
		pred[i*chunk_size:(i+1)*chunk_size, :] = model.predict(X, samples_per_epoch = y_train.shape[0], nb_epoch = nb_epoch, verbose=2,callbacks=[], validation_data=None, class_weight=None) # show_accuracy=True, nb_worker=1 
		i += 1
		print(pred[0:10])
	'''	

	pred = model.predict_generator(myGenerator(None, chunk_size, 100, fnames_test), len(fnames_test)) # show_accuracy=True, nb_worker=1 

	#score = model.evaluate(X_test, Y_test, verbose=0)
	#print('Test score:', score[0])
	#print('Test accuracy:', score[1])	
	print( 'Test accuracy:', np.mean(np.argmax(pred, axis=1) == np.argmax(Y_test, axis=1)) )
	
	return pred, Y_test	
Author: SplashDance, Project: kaggle_art, Lines: 74, Source: mnist_cnn_generator.py

Example 11: train_test_split

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
    # Training
    logger.info("Training model, {}".format(model.to_json()))
    X_train, X_val, y_train, y_val = \
        train_test_split(Xtrain, dummy_y, test_size=0.02, random_state=42)

    num_epoch = 15
    batch_gen = batch_generator(X_train, y_train, 32, True)
    fit = model.fit_generator(generator=batch_gen,
                              nb_epoch=num_epoch,
                              samples_per_epoch=69984,
                              validation_data=(X_val.todense(), y_val),
                              verbose=2)

    # Evaluate the model
    scores_val = model.predict_generator(
        generator=batch_predict_generator(X_val, 32, False),
        val_samples=X_val.shape[0])
    scores = model.predict_generator(
        generator=batch_predict_generator(Xtest, 32, False),
        val_samples=Xtest.shape[0])
    logger.info("logloss val {}".format(log_loss(y_val, scores_val)))

    # Get the predicted_probabilities and prepare file for submission
    pred = pd.DataFrame(scores, index = test.index, columns=y_enc.classes_)
    pred = pd.DataFrame(pred, index = test.index, columns=y_enc.classes_)
    ts = time.strftime("%a_%d%b%Y_%H%M%S")
    name_prefix = "sparse_keras_v2_{}epoch_".format(num_epoch)
    file_path = os.path.join("submissions", "%s%s.csv" % (name_prefix, ts))
    pred.to_csv(file_path, index=True)
    u.gzip_file(file_path)
Author: nirmalyaghosh, Project: kaggle, Lines: 32, Source: td3.py

Example 12: open

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
label_list_path = 'datasets/cifar-10-batches-py/batches.meta'


keras_dir = os.path.expanduser(os.path.join('~', '.keras'))
datadir_base = os.path.expanduser(keras_dir)
if not os.access(datadir_base, os.W_OK):
    datadir_base = os.path.join('/tmp', '.keras')
label_list_path = os.path.join(datadir_base, label_list_path)

with open(label_list_path, mode='rb') as f:
    labels = pickle.load(f)

# Evaluate model with test data set and share sample prediction results
evaluation = model.evaluate_generator(datagen.flow(x_test, y_test,
                                      batch_size=batch_size),
                                      steps=x_test.shape[0] // batch_size)

print('Model Accuracy = %.2f' % (evaluation[1]))

predict_gen = model.predict_generator(datagen.flow(x_test, y_test,
                                      batch_size=batch_size),
                                      steps=x_test.shape[0] // batch_size)

for predict_index, predicted_y in enumerate(predict_gen):
    actual_label = labels['label_names'][np.argmax(y_test[predict_index])]
    predicted_label = labels['label_names'][np.argmax(predicted_y)]
    print('Actual Label = %s vs. Predicted Label = %s' % (actual_label,
                                                          predicted_label))
    if predict_index == num_predictions:
        break
Author: AlexeySorokin, Project: keras, Lines: 32, Source: cifar10_cnn.py

Example 13: open

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
keras_dir = os.path.expanduser(os.path.join('~', '.keras'))
datadir_base = os.path.expanduser(keras_dir)
if not os.access(datadir_base, os.W_OK):
    datadir_base = os.path.join('/tmp', '.keras')
label_list_path = os.path.join(datadir_base, label_list_path)

with open(label_list_path, mode='rb') as f:
    labels = pickle.load(f)

# Evaluate model with test data set and share sample prediction results
evaluation = model.evaluate_generator(datagen.flow(x_test, y_test,
                                                   batch_size=batch_size,
                                                   shuffle=False),
                                      steps=x_test.shape[0] // batch_size,
                                      workers=4)
print('Model Accuracy = %.2f' % (evaluation[1]))

predict_gen = model.predict_generator(datagen.flow(x_test, y_test,
                                                   batch_size=batch_size,
                                                   shuffle=False),
                                      steps=x_test.shape[0] // batch_size,
                                      workers=4)

for predict_index, predicted_y in enumerate(predict_gen):
    actual_label = labels['label_names'][np.argmax(y_test[predict_index])]
    predicted_label = labels['label_names'][np.argmax(predicted_y)]
    print('Actual Label = %s vs. Predicted Label = %s' % (actual_label,
                                                          predicted_label))
    if predict_index == num_predictions:
        break
Author: cbentes, Project: keras, Lines: 32, Source: cifar10_cnn.py

Example 14: test_multiprocessing_predict_error

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
def test_multiprocessing_predict_error():
    arr_data = np.random.randint(0, 256, (50, 2))
    good_batches = 3

    @threadsafe_generator
    def custom_generator():
        """Raises an exception after a few good batches"""
        batch_size = 10
        n_samples = 50

        for i in range(good_batches):
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            yield X
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # - Produce data on 4 worker processes, consume on main process:
    #   - Each worker process runs OWN copy of generator
    #   - BUT on Windows, `multiprocessing` won't marshall generators across
    #     process boundaries -> make sure `predict_generator()` raises ValueError
    #     exception and does not attempt to run the generator.
    #   - On other platforms, make sure `RuntimeError` exception bubbles up
    if os.name == 'nt':
        with pytest.raises(StopIteration):
            model.predict_generator(custom_generator(),
                                    steps=good_batches * WORKERS + 1,
                                    max_queue_size=10,
                                    workers=WORKERS,
                                    use_multiprocessing=True)
    else:
        with pytest.raises(RuntimeError):
            model.predict_generator(custom_generator(),
                                    steps=good_batches * WORKERS + 1,
                                    max_queue_size=10,
                                    workers=WORKERS,
                                    use_multiprocessing=True)

    # - Produce data on 1 worker process, consume on main process:
    #   - Worker process runs generator
    #   - BUT on Windows, `multiprocessing` won't marshall generators across
    #     process boundaries -> make sure `predict_generator()` raises ValueError
    #     exception and does not attempt to run the generator.
    #   - On other platforms, make sure `RuntimeError` exception bubbles up
    if os.name == 'nt':
        with pytest.raises(RuntimeError):
            model.predict_generator(custom_generator(),
                                    steps=good_batches + 1,
                                    max_queue_size=10,
                                    workers=1,
                                    use_multiprocessing=True)
    else:
        with pytest.raises(RuntimeError):
            model.predict_generator(custom_generator(),
                                    steps=good_batches + 1,
                                    max_queue_size=10,
                                    workers=1,
                                    use_multiprocessing=True)

    # - Produce and consume data without a queue on main thread
    #   - Make sure the value of `use_multiprocessing` is ignored
    #   - Make sure `RuntimeError` exception bubbles up
    with pytest.raises(RuntimeError):
        model.predict_generator(custom_generator(),
                                steps=good_batches + 1,
                                max_queue_size=10,
                                workers=0,
                                use_multiprocessing=True)
Author: ZhangXinNan, Project: keras, Lines: 75, Source: test_multiprocessing.py

Example 15: test_sequential

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import predict_generator [as alias]
def test_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(x, y, batch_size=50):
        index_array = np.arange(len(x))
        while 1:
            batches = make_batches(len(X_test), batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                x_batch = x[batch_ids]
                y_batch = y[batch_ids]
                yield (x_batch, y_batch)

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation("relu"))
    model.add(Dense(nb_class))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test)

    prediction = model.predict_generator(data_generator(X_test, y_test), X_test.shape[0], max_q_size=2)
    gen_loss = model.evaluate_generator(data_generator(X_test, y_test, 50), X_test.shape[0], max_q_size=2)
    pred_loss = K.eval(K.mean(objectives.get(model.loss)(K.variable(y_test), K.variable(prediction))))

    assert np.isclose(pred_loss, loss)
    assert np.isclose(gen_loss, loss)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation("relu"))
    model.add(Dense(nb_class))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
Author: CheRaissi, Project: keras, Lines: 67, Source: test_sequential_model.py


Note: The keras.models.Sequential.predict_generator method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and copyright in the source code remains with those authors; please consult each project's license before distributing or using the code. Do not reproduce this article without permission.