当前位置: 首页>>代码示例>>Python>>正文


Python Sequential.evaluate_generator方法代码示例

本文整理汇总了Python中keras.models.Sequential.evaluate_generator方法的典型用法代码示例。如果您正苦于以下问题:Python Sequential.evaluate_generator方法的具体用法?Python Sequential.evaluate_generator怎么用?Python Sequential.evaluate_generator使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在keras.models.Sequential的用法示例。


在下文中一共展示了Sequential.evaluate_generator方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_multiprocessing_evaluate_error

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
def test_multiprocessing_evaluate_error():
    """A failing generator must surface as StopIteration from evaluate_generator."""
    batch_size = 10
    good_batches = 3

    def custom_generator():
        """Yield a few valid batches, then blow up."""
        produced = 0
        # NOTE(review): low=batch_size (10) >= high=2 in the second randint
        # makes np.random.randint raise ValueError on the first batch —
        # presumably unintended, but the test only cares that *some*
        # exception escapes the generator; confirm upstream.
        while produced < good_batches:
            yield (np.random.randint(batch_size, 256, (50, 2)),
                   np.random.randint(batch_size, 2, 50))
            produced += 1
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # Multi-process consumption: the worker failure propagates to the caller.
    with pytest.raises(StopIteration):
        model.evaluate_generator(custom_generator(), good_batches + 1, 1,
                                 workers=4, use_multiprocessing=True)

    # Plain in-process consumption behaves the same way.
    with pytest.raises(StopIteration):
        model.evaluate_generator(custom_generator(), good_batches + 1, 1,
                                 use_multiprocessing=False)
开发者ID:dansbecker,项目名称:keras,代码行数:28,代码来源:test_multiprocessing.py

示例2: test_image_data_generator_training

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
def test_image_data_generator_training():
    """End-to-end fit/evaluate of a tiny conv net through ImageDataGenerator."""
    np.random.seed(1337)
    img_gen = ImageDataGenerator(rescale=1.)  # identity rescale: dummy augmenter
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(
        num_train=500,
        num_test=200,
        input_shape=input_shape,
        classification=True,
        num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # Same architecture as the list-constructor form, built incrementally.
    model = Sequential()
    model.add(layers.Conv2D(filters=8, kernel_size=3,
                            activation='relu',
                            input_shape=input_shape))
    model.add(layers.MaxPooling2D(pool_size=2))
    model.add(layers.Conv2D(filters=4, kernel_size=(3, 3),
                            activation='relu', padding='same'))
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(y_test.shape[-1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    history = model.fit_generator(
        img_gen.flow(x_train, y_train, batch_size=16),
        epochs=10,
        validation_data=img_gen.flow(x_test, y_test, batch_size=16),
        verbose=0)
    # The toy problem should be comfortably learnable.
    assert history.history['val_acc'][-1] > 0.75
    model.evaluate_generator(img_gen.flow(x_train, y_train, batch_size=16))
开发者ID:TNonet,项目名称:keras,代码行数:34,代码来源:test_image_data_tasks.py

示例3: test_multiprocessing_evaluate_error

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
def test_multiprocessing_evaluate_error():
    """Any exception raised inside the generator must abort evaluate_generator."""
    batch_size = 32
    good_batches = 5

    def myGenerator():
        """Yield a handful of batches, then fail."""
        produced = 0
        while produced < good_batches:
            yield (np.random.randint(batch_size, 256, (500, 2)),
                   np.random.randint(batch_size, 2, 500))
            produced += 1
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # Ask for one more batch than the generator can supply.
    samples = batch_size * (good_batches + 1)

    # Multiprocessing workers (legacy Keras 1.x keyword names).
    with pytest.raises(Exception):
        model.evaluate_generator(myGenerator(), samples, 1,
                                 nb_worker=4, pickle_safe=True)

    # Single-process path.
    with pytest.raises(Exception):
        model.evaluate_generator(myGenerator(), samples, 1,
                                 pickle_safe=False)
开发者ID:alfredplpl,项目名称:keras,代码行数:31,代码来源:test_multiprocessing.py

示例4: try_params

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
def try_params(n_iterations, params, data=None, datamode='memory'):
    """Build, train and score one hyper-parameter configuration.

    Fixes over the original: Python 2 ``print`` statements and mixed
    tab/space indentation (a SyntaxError under Python 3) are replaced with
    Python 3 equivalents; ``.next()`` becomes ``next(...)``; step counts use
    integer division so they stay whole numbers.

    Args:
        n_iterations: (possibly fractional) number of epochs to train for.
        params: dict with keys 'DROPOUT', 'DELTA' and 'MOMENT'.
        data: dataset container; layout depends on `datamode`.
        datamode: 'memory' -> data['train']/data['valid'] hold (X, Y) arrays;
            anything else -> they hold generator factories, paths and
            sample counts.

    Returns:
        dict with the validation loss under 'loss' and a serialized model
        description (json, optimizer class, optimizer config, loss name)
        under 'model'.
    """
    print("iterations:", n_iterations)
    print_params(params)

    batchsize = 100
    if datamode == 'memory':
        X_train, Y_train = data['train']
        X_valid, Y_valid = data['valid']
        inputshape = X_train.shape[1:]
    else:
        train_generator = data['train']['gen_func'](batchsize, data['train']['path'])
        valid_generator = data['valid']['gen_func'](batchsize, data['valid']['path'])
        # Integer division keeps the per-epoch step counts whole.
        train_epoch_step = data['train']['n_sample'] // batchsize
        valid_epoch_step = data['valid']['n_sample'] // batchsize
        # Peek at one batch (from a fresh generator) to discover input shape.
        inputshape = next(data['train']['gen_func'](batchsize, data['train']['path']))[0].shape[1:]

    model = Sequential()
    model.add(Conv2D(128, (1, 24), padding='same', input_shape=inputshape,
                     activation='relu'))
    model.add(GlobalMaxPooling2D())

    model.add(Dense(32, activation='relu'))
    model.add(Dropout(params['DROPOUT']))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    optim = Adadelta
    myoptimizer = optim(epsilon=params['DELTA'], rho=params['MOMENT'])
    mylossfunc = 'categorical_crossentropy'
    model.compile(loss=mylossfunc, optimizer=myoptimizer, metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=0)

    if datamode == 'memory':
        model.fit(
            X_train,
            Y_train,
            batch_size=batchsize,
            epochs=int(round(n_iterations)),
            validation_data=(X_valid, Y_valid),
            callbacks=[early_stopping])
        score, acc = model.evaluate(X_valid, Y_valid)
    else:
        model.fit_generator(
            train_generator,
            steps_per_epoch=train_epoch_step,
            epochs=int(round(n_iterations)),
            validation_data=valid_generator,
            validation_steps=valid_epoch_step,
            callbacks=[early_stopping])
        score, acc = model.evaluate_generator(valid_generator, steps=valid_epoch_step)

    return {'loss': score,
            'model': (model.to_json(), optim, myoptimizer.get_config(), mylossfunc)}
开发者ID:zhouyu,项目名称:Keras-genomics,代码行数:55,代码来源:model.py

示例5: test_multiprocessing_evaluating

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
def test_multiprocessing_evaluating():
    """evaluate_generator should run with processes, threads, and no workers."""
    arr_data = np.random.randint(0, 256, (50, 2))
    arr_labels = np.random.randint(0, 2, 50)

    def custom_generator():
        """Yield random contiguous slices of the fixture arrays forever."""
        batch_size = 10
        n_samples = 50
        while True:
            start = np.random.randint(0, n_samples - batch_size)
            stop = start + batch_size
            yield arr_data[start:stop], arr_labels[start:stop]

    # Tiny linear regression model.
    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # Two multiprocessing workers.
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             workers=2,
                             use_multiprocessing=True)
    # Default single worker thread, no multiprocessing.
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False)
    # No worker pool at all (main-thread consumption).
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False,
                             workers=0)

示例6: test_multiprocessing_evaluating

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
def test_multiprocessing_evaluating():
    """Evaluate with and without multiprocessing workers (legacy 1.x API)."""
    reached_end = False

    arr_data = np.random.randint(0, 256, (500, 200))
    arr_labels = np.random.randint(0, 2, 500)

    def myGenerator():
        """Yield random contiguous slices of the fixture arrays forever."""
        batch_size = 32
        n_samples = 500
        while True:
            start = np.random.randint(0, n_samples - batch_size)
            stop = start + batch_size
            yield arr_data[start:stop], arr_labels[start:stop]

    # Small regression net.
    model = Sequential()
    model.add(Dense(10, input_shape=(200,)))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('linear'))
    model.compile(loss='mse', optimizer='adadelta')

    # Two picklable worker processes.
    model.evaluate_generator(myGenerator(),
                             val_samples=320,
                             max_q_size=10,
                             nb_worker=2,
                             pickle_safe=True)
    # Single-process path.
    model.evaluate_generator(myGenerator(),
                             val_samples=320,
                             max_q_size=10,
                             pickle_safe=False)
    reached_end = True

    assert reached_end

示例7: test_sequential_fit_generator_finite_length

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
def test_sequential_fit_generator_finite_length():
    """fit/evaluate/predict from a finite generator, plus failure modes.

    Bug fixed: the two "should fail" checks used ``try: ...; assert(False)
    except: pass`` — the bare ``except:`` also caught the AssertionError, so
    they could never fail. They now record whether an exception was raised
    and assert on that flag, which cannot mask the failure.
    """
    (X_train, y_train), (X_test, y_test) = _get_test_data(1000, 200)

    def data_generator(train, nbatches):
        """Yield `nbatches` consecutive (X, y) batches from train or test split."""
        # (the original computed an unused `max_batch_index`; removed)
        for i in range(nbatches):
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size],
                       y_test[i * batch_size: (i + 1) * batch_size])

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,), activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Largest multiple of batch_size that fits in the training set.
    nsamples = (len(X_train) // batch_size) * batch_size
    model.fit_generator(data_generator(True, nsamples // batch_size), nsamples, nb_epoch)

    loss = model.evaluate(X_train, y_train)
    assert loss < 3.0

    eval_results = model.evaluate_generator(
        data_generator(True, nsamples // batch_size), nsamples, nb_epoch)
    assert eval_results < 3.0

    predict_results = model.predict_generator(
        data_generator(True, nsamples // batch_size), nsamples, nb_epoch)
    assert predict_results.shape == (nsamples, 4)

    # should fail because not enough samples
    raised = False
    try:
        model.fit_generator(data_generator(True, nsamples // batch_size),
                            nsamples + 1, nb_epoch)
    except Exception:
        raised = True
    assert raised, 'fit_generator should fail when the generator runs dry'

    # should fail because generator throws exception
    def bad_generator(gen):
        """Pass through 20 batches, then raise."""
        for i in range(0, 20):
            yield next(gen)
        raise Exception("Generator raised an exception")

    raised = False
    try:
        model.fit_generator(bad_generator(data_generator(True, nsamples // batch_size)),
                            nsamples + 1, nb_epoch)
    except Exception:
        raised = True
    assert raised, 'fit_generator should propagate generator exceptions'

示例8: test_multithreading_evaluate_error

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
def test_multithreading_evaluate_error():
    """A RuntimeError in the generator must bubble up through threaded eval."""
    arr_data = np.random.randint(0, 256, (50, 2))
    arr_labels = np.random.randint(0, 2, 50)
    batch_size = 10
    n_samples = 50
    good_batches = 3

    @threadsafe_generator
    def custom_generator():
        """Yield a few good batches, then raise."""
        for _ in range(good_batches):
            start = np.random.randint(0, n_samples - batch_size)
            stop = start + batch_size
            yield arr_data[start:stop], arr_labels[start:stop]
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # Three consumption modes, all expected to surface RuntimeError:
    #   - WORKERS threads sharing the SAME generator,
    #   - a single worker thread running the generator,
    #   - queue-less main-thread consumption (workers=0; the
    #     use_multiprocessing flag is ignored there).
    for n_workers, n_steps in ((WORKERS, good_batches * WORKERS + 1),
                               (1, good_batches + 1),
                               (0, good_batches + 1)):
        with pytest.raises(RuntimeError):
            model.evaluate_generator(custom_generator(),
                                     steps=n_steps,
                                     max_queue_size=10,
                                     workers=n_workers,
                                     use_multiprocessing=False)

示例9: test_multithreading_evaluating

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
def test_multithreading_evaluating():
    """evaluate_generator over threads: shared workers, one worker, none."""
    arr_data = np.random.randint(0, 256, (50, 2))
    arr_labels = np.random.randint(0, 2, 50)

    @threadsafe_generator
    def custom_generator():
        """Yield random contiguous slices of the fixture arrays forever."""
        batch_size = 10
        n_samples = 50
        while True:
            start = np.random.randint(0, n_samples - batch_size)
            stop = start + batch_size
            yield arr_data[start:stop], arr_labels[start:stop]

    # Tiny linear regression model.
    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # Consumption modes exercised:
    #   - WORKERS threads all sharing the SAME generator,
    #   - a single worker thread,
    #   - workers=0: queue-less main-thread consumption (the
    #     use_multiprocessing flag is ignored in that case).
    for n_workers in (WORKERS, 1, 0):
        model.evaluate_generator(custom_generator(),
                                 steps=STEPS,
                                 max_queue_size=10,
                                 workers=n_workers,
                                 use_multiprocessing=False)

示例10: test_multiprocessing_evaluating

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
def test_multiprocessing_evaluating():
    """Evaluate with multiprocessing workers; expect ValueError on Windows.

    On Windows `multiprocessing` cannot marshal generators across process
    boundaries, so `evaluate_generator` must raise ValueError rather than
    attempt to run the generator in a child process.

    Bug fixed: the platform check used ``os.name is 'nt'`` — identity
    comparison against a string literal relies on interpreter-specific
    string interning (and is a SyntaxWarning on Python 3.8+). It is now the
    equality test ``os.name == 'nt'``.
    """
    arr_data = np.random.randint(0, 256, (50, 2))
    arr_labels = np.random.randint(0, 2, 50)

    @threadsafe_generator
    def custom_generator():
        """Yield random contiguous slices of the fixture arrays forever."""
        batch_size = 10
        n_samples = 50

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            y = arr_labels[start: end]
            yield X, y

    # Build a NN
    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # - Produce data on WORKERS processes, then on a single worker process,
    #   consuming on the main process each time:
    #   - Each worker process runs its OWN copy of the generator
    #   - BUT on Windows, `multiprocessing` won't marshal generators across
    #     process boundaries -> make sure `evaluate_generator()` raises a
    #     ValueError and does not attempt to run the generator.
    for n_workers in (WORKERS, 1):
        if os.name == 'nt':
            with pytest.raises(ValueError):
                model.evaluate_generator(custom_generator(),
                                         steps=STEPS,
                                         max_queue_size=10,
                                         workers=n_workers,
                                         use_multiprocessing=True)
        else:
            model.evaluate_generator(custom_generator(),
                                     steps=STEPS,
                                     max_queue_size=10,
                                     workers=n_workers,
                                     use_multiprocessing=True)

    # - Produce and consume data without a queue on main thread
    #   - Make sure the value of `use_multiprocessing` is ignored
    model.evaluate_generator(custom_generator(),
                             steps=STEPS,
                             max_queue_size=10,
                             workers=0,
                             use_multiprocessing=True)

示例11: test_sequential

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
def test_sequential():
    """Smoke-test of the legacy (Keras ~0.x/1.x) Sequential API:
    fit/evaluate/predict variants, generator evaluation, weight
    persistence, and JSON/YAML (de)serialization.
    """
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(train):
        # Endlessly cycle over consecutive mini-batches of the chosen split.
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    # Small dense softmax classifier.
    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.summary()

    # Exercise the fit() keyword combinations of the legacy API
    # (show_accuracy / nb_epoch are pre-Keras-2 arguments).
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    # Evaluation through a generator must reach a reasonable loss.
    gen_loss = model.evaluate_generator(data_generator(True), 256, verbose=0)
    assert(gen_loss < 0.8)

    loss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss < 0.8)

    # Prediction entry points (predict_classes/predict_proba are legacy API).
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    # Round-trip the weights through HDF5 into a freshly built clone and
    # check that evaluation is bit-identical.
    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss == nloss)

    # test json serialization
    json_data = model.to_json()
    model = model_from_json(json_data)

    # test yaml serialization
    yaml_data = model.to_yaml()
    model = model_from_yaml(yaml_data)

示例12: model_vt

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]

#.........这里部分代码省略.........
                            b_regularizer=l2(0.001),
                            ))
        logging.debug("Layer8:Dense shape={0}".format(self._mdl.output_shape))

        #Activation Softmax
        self._mdl.add(Activation("softmax"))

        # compile model
        self._mdl.compile(loss='categorical_crossentropy', optimizer=self._optimizer, metrics=["accuracy"])
        logging.info("Model compiled!")

    def fit(self, generator, samples_per_epoch,
            nb_epoch, valid_generator, nb_valid_samples, verbosity):
        """Train the wrapped model from a generator, then save its weights
        to a timestamped HDF5 file.

        Args:
            generator: training sample generator from loader.train_generator
            samples_per_epoch: number of train samples per epoch from loader.return_train_samples
            nb_epoch: number of epochs to repeat training on the full set
            valid_generator: validation sample generator from loader.valid_generator, or None
            nb_valid_samples: number of validation samples per epoch from loader.return_valid_samples
            verbosity: 0 (no output), 1 (full output), 2 (output only after each epoch)
        """
        logging.info("Start training")
        self._mdl.fit_generator(generator=generator,
                                samples_per_epoch=samples_per_epoch,
                                nb_epoch=nb_epoch,
                                verbose=verbosity,
                                callbacks=[ #self._lr_schedule,
                                        self._mdl_checkpoint,],
                                validation_data=valid_generator,
                                nb_val_samples=nb_valid_samples,
                                )

        # Timestamped filename so successive runs do not overwrite each other.
        time_now = datetime.datetime.now()
        time_now = "_{0}_{1}_{2}_{3}_{4}_{5}".format(time_now.year, time_now.month, time_now.day,
                                                     time_now.hour, time_now.minute, time_now.second)
        logging.info("save model Voxnet weights as weights_{0}.h5".format(time_now))
        self._mdl.save_weights("weights_{0}.h5".format(time_now), False)

    def continue_fit(self, weights_file, generator, samples_per_epoch,
                     nb_epoch, valid_generator, nb_valid_samples, verbosity):
        """Resume training from a previously saved weights file.

        Unlike `fit`, this does not save the weights afterwards.

        Args:
            weights_file: filename/path of the weights file (.hdf5)
            generator: training sample generator from loader.train_generator
            samples_per_epoch: number of train samples per epoch from loader.return_train_samples
            nb_epoch: number of epochs to repeat training on the full set
            valid_generator: validation sample generator from loader.valid_generator, or None
            nb_valid_samples: number of validation samples per epoch from loader.return_valid_samples
            verbosity: 0 (no output), 1 (full output), 2 (output only after each epoch)
        """
        self.load_weights(weights_file)
        self._mdl.fit_generator(generator=generator,
                            samples_per_epoch=samples_per_epoch,
                            nb_epoch=nb_epoch,
                            verbose=verbosity,
                            callbacks=[ #self._lr_schedule,
                                    self._mdl_checkpoint,],
                            validation_data=valid_generator,
                            nb_val_samples=nb_valid_samples,
                            )

    def evaluate(self, evaluation_generator, num_eval_samples):
        """Score the model on an evaluation generator, store the result in
        `self._score`, and print it.

        Args:
            evaluation_generator: evaluation sample generator from loader.eval_generator
            num_eval_samples: number of evaluation samples from loader.return_eval_samples
        """
        # `val_samples` is the legacy (Keras 1.x) name for the sample count.
        self._score = self._mdl.evaluate_generator(
            generator=evaluation_generator,
            val_samples=num_eval_samples)
        print("Test score:", self._score)

    def load_weights(self, file):
        """Restore model weights from disk into the wrapped model.

        Args:
            file: filename and address of the weights file (.hdf5)
        """
        weights_path = file
        logging.info("Loading model weights from file '{0}'".format(weights_path))
        self._mdl.load_weights(weights_path)

    def predict(self, X_predict):
        """Predict class probabilities for a batch of samples.

        Args:
            X_predict: features to predict labels for, numpy ndarray of
                shape [~, 1, 32, 32, 32]

        Returns:
            Probability for every label.
        """
        probabilities = self._mdl.predict_proba(X_predict, verbose=0)
        return probabilities
开发者ID:tobiagru,项目名称:Deep-3D-Obj-Recognition,代码行数:104,代码来源:model_keras.py

示例13: open

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
# Load label names to use in prediction results
label_list_path = 'datasets/cifar-10-batches-py/batches.meta'


# Resolve the Keras data directory; fall back to /tmp/.keras if the
# default ~/.keras is not writable.
keras_dir = os.path.expanduser(os.path.join('~', '.keras'))
# NOTE(review): keras_dir is already expanded, so this second expanduser
# is a harmless no-op.
datadir_base = os.path.expanduser(keras_dir)
if not os.access(datadir_base, os.W_OK):
    datadir_base = os.path.join('/tmp', '.keras')
label_list_path = os.path.join(datadir_base, label_list_path)

# batches.meta is a pickled dict; 'label_names' maps class index -> name.
with open(label_list_path, mode='rb') as f:
    labels = pickle.load(f)

# Evaluate model with test data set and share sample prediction results.
# (`model`, `datagen`, `x_test`, `y_test`, `batch_size` and
# `num_predictions` are presumably defined earlier in the full script —
# not visible in this excerpt.)
evaluation = model.evaluate_generator(datagen.flow(x_test, y_test,
                                      batch_size=batch_size),
                                      steps=x_test.shape[0] // batch_size)

print('Model Accuracy = %.2f' % (evaluation[1]))

predict_gen = model.predict_generator(datagen.flow(x_test, y_test,
                                      batch_size=batch_size),
                                      steps=x_test.shape[0] // batch_size)

# Print actual vs. predicted label up to `num_predictions` samples.
# NOTE(review): `datagen.flow` shuffles by default here, so the predicted
# rows may not line up with `y_test` order — confirm against the full
# script (the later variant passes shuffle=False for this reason).
for predict_index, predicted_y in enumerate(predict_gen):
    actual_label = labels['label_names'][np.argmax(y_test[predict_index])]
    predicted_label = labels['label_names'][np.argmax(predicted_y)]
    print('Actual Label = %s vs. Predicted Label = %s' % (actual_label,
                                                          predicted_label))
    if predict_index == num_predictions:
        break
开发者ID:AlexeySorokin,项目名称:keras,代码行数:33,代码来源:cifar10_cnn.py

示例14: open

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
# Relative path of the pickled CIFAR-10 metadata (label names).
label_list_path = 'datasets/cifar-10-batches-py/batches.meta'


# Resolve the Keras data directory; fall back to /tmp/.keras if the
# default ~/.keras is not writable.
keras_dir = os.path.expanduser(os.path.join('~', '.keras'))
# NOTE(review): keras_dir is already expanded, so this second expanduser
# is a harmless no-op.
datadir_base = os.path.expanduser(keras_dir)
if not os.access(datadir_base, os.W_OK):
    datadir_base = os.path.join('/tmp', '.keras')
label_list_path = os.path.join(datadir_base, label_list_path)

# batches.meta is a pickled dict; 'label_names' maps class index -> name.
with open(label_list_path, mode='rb') as f:
    labels = pickle.load(f)

# Evaluate model with test data set and share sample prediction results.
# shuffle=False keeps the generator's order aligned with y_test so the
# per-sample comparison below is meaningful.
evaluation = model.evaluate_generator(datagen.flow(x_test, y_test,
                                                   batch_size=batch_size,
                                                   shuffle=False),
                                      steps=x_test.shape[0] // batch_size,
                                      workers=4)
print('Model Accuracy = %.2f' % (evaluation[1]))

predict_gen = model.predict_generator(datagen.flow(x_test, y_test,
                                                   batch_size=batch_size,
                                                   shuffle=False),
                                      steps=x_test.shape[0] // batch_size,
                                      workers=4)

# Print actual vs. predicted label for every returned prediction row.
# (`model`, `datagen`, `x_test`, `y_test`, `batch_size` are presumably
# defined earlier in the full script — not visible in this excerpt.)
for predict_index, predicted_y in enumerate(predict_gen):
    actual_label = labels['label_names'][np.argmax(y_test[predict_index])]
    predicted_label = labels['label_names'][np.argmax(predicted_y)]
    print('Actual Label = %s vs. Predicted Label = %s' % (actual_label,
                                                          predicted_label))
开发者ID:cbentes,项目名称:keras,代码行数:33,代码来源:cifar10_cnn.py

示例15: train_neural_network

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import evaluate_generator [as 别名]
    def train_neural_network(self):
        """Build, train and evaluate a card-classification CNN, then persist
        the class mapping, model architecture (JSON) and weights (HDF5).
        """
        train_generator, validation_generator, test_datagen = self.prepare_data()
        # 53 classes: presumably 52 cards plus one extra class — confirm
        # against prepare_data / the dataset layout.
        num_classes = 53
        input_shape = (50, 15, 3)
        epochs = 17

        # VGG-style stack: three conv blocks (64 -> 128 -> 256 filters),
        # each with dropout and 2x2 max-pooling, then two dense layers.
        model = Sequential()
        model.add(Conv2D(64, (3, 3), input_shape=input_shape, activation='relu', padding='same'))
        model.add(Dropout(0.2))
        model.add(Conv2D(64, (2, 2), activation='relu', padding='same'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
        model.add(Dropout(0.2))
        model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
        model.add(Dropout(0.2))
        model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dropout(0.2))
        # maxnorm(3) caps dense-kernel norms as regularization.
        model.add(Dense(2048, activation='relu', kernel_constraint=maxnorm(3)))
        model.add(Dropout(0.2))
        model.add(Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))
        model.add(Dropout(0.2))
        model.add(Dense(num_classes, activation='softmax'))

        # sparse_categorical_crossentropy: labels are integer class ids,
        # not one-hot vectors.
        model.compile(loss=keras.losses.sparse_categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(),
                      metrics=['accuracy'])

        early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                   min_delta=0,
                                                   patience=1,
                                                   verbose=1, mode='auto')
        # NOTE(review): hard-coded Windows log path; tb is created but never
        # passed to the callbacks list below — confirm whether TensorBoard
        # logging was intentionally disabled.
        tb = TensorBoard(log_dir='c:/tensorboard/pb',
                         histogram_freq=1,
                         write_graph=True,
                         write_images=True,
                         embeddings_freq=1,
                         embeddings_layer_names=False,
                         embeddings_metadata=False)

        model.fit_generator(train_generator,
                            steps_per_epoch=num_classes,
                            epochs=epochs,
                            verbose=1,
                            validation_data=validation_generator,
                            validation_steps=100,
                            callbacks=[early_stop])
        score = model.evaluate_generator(test_datagen, steps=52)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])

        class_mapping = train_generator.class_indices

        # serialize model to JSON
        # Invert name->index into index->name so predictions can be decoded.
        class_mapping = dict((v, k) for k, v in class_mapping.items())
        with open(dir_path + "/model_classes.json", "w") as json_file:
            json.dump(class_mapping, json_file)
        model_json = model.to_json()
        with open(dir_path + "/model.json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model.save_weights(dir_path + "/model.h5")
        print("Saved model to disk")


注:本文中的keras.models.Sequential.evaluate_generator方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。