

Python Callbacks.add_serialize_callback Method Code Examples

This article collects typical usage examples of the Python method neon.callbacks.callbacks.Callbacks.add_serialize_callback. If you are wondering what Callbacks.add_serialize_callback does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also browse further usage examples of the containing class, neon.callbacks.callbacks.Callbacks.


Five code examples of the Callbacks.add_serialize_callback method are shown below, ordered by popularity.
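
Before the full examples, here is a minimal sketch of the typical call pattern. The names mlp, train, opt and cost are placeholders assumed to have been built elsewhere with the neon API; this is an illustrative sketch, not a complete program.

# Minimal usage sketch; mlp, train, opt and cost are assumed to exist already
from neon.callbacks.callbacks import Callbacks

callbacks = Callbacks(mlp, train)
# checkpoint the model every epoch, keeping only the two most recent files
callbacks.add_serialize_callback(1, './checkpoint.pkl', history=2)
mlp.fit(train, optimizer=opt, num_epochs=10, cost=cost, callbacks=callbacks)

As the examples below show, the schedule argument can be either an integer interval or an explicit list of epochs (see Example 3), and history controls how many checkpoint files are kept on disk.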

Example 1: main

# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_serialize_callback [as alias]
def main():
    # set up the model and run for num_epochs, saving only the last state
    # this is done first so that the backend (be) is generated
    mlp = gen_model(args.backend)

    # setup data iterators
    (X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
    if args.backend == 'nervanacpu' or args.backend == 'cpu':
        # limit data since cpu backend runs slower
        train = DataIterator(X_train[:1000], y_train[:1000], nclass=nclass, lshape=(1, 28, 28))
        valid = DataIterator(X_test[:1000], y_test[:1000], nclass=nclass, lshape=(1, 28, 28))
    else:
        train = DataIterator(X_train, y_train, nclass=nclass, lshape=(1, 28, 28))
        valid = DataIterator(X_test, y_test, nclass=nclass, lshape=(1, 28, 28))

    # serialization related
    cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    opt_gdm = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)

    checkpoint_model_path = os.path.join('./', 'test_oneshot.pkl')
    checkpoint_schedule = 1  # save at every step

    callbacks = Callbacks(mlp, train)
    callbacks.add_serialize_callback(checkpoint_schedule, checkpoint_model_path, history=2)

    # run the fit all the way through, saving a checkpoint at every epoch
    mlp.fit(train, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)

    # setup model with same random seed run epoch by epoch
    # serializing and deserializing at each step
    mlp = gen_model(args.backend)
    cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    opt_gdm = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)

    # reset data iterators
    train.reset()
    valid.reset()

    checkpoint_model_path = os.path.join('./', 'test_manyshot.pkl')
    checkpoint_schedule = 1  # save at every step
    callbacks = Callbacks(mlp, train)
    callbacks.add_serialize_callback(checkpoint_schedule,
                                     checkpoint_model_path,
                                     history=num_epochs)
    for epoch in range(num_epochs):
        # _0 points to state at end of epoch 0
        mlp.fit(train, optimizer=opt_gdm, num_epochs=epoch+1, cost=cost, callbacks=callbacks)

        # load saved file
        prts = os.path.splitext(checkpoint_model_path)
        fn = prts[0] + '_%d' % epoch + prts[1]
        mlp.load_weights(fn)  # load the saved weights

    # compare test_oneshot_<num_epochs-1>.pkl to test_manyshot_<num_epochs-1>.pkl
    try:
        compare_model_pickles('test_oneshot_%d.pkl' % (num_epochs-1),
                              'test_manyshot_%d.pkl' % (num_epochs-1))
    except Exception:
        print('test failed....')
        sys.exit(1)
Developer: ferenckulcsar, Project: neon, Lines: 62, Source file: serialization_check.py

Example 2: train

# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_serialize_callback [as alias]
    def train(self, dataset, model=None):
        """Trains the passed model on the given dataset. If no model is passed, `generate_default_model` is used."""
        print "Starting training..."
        start = time.time()

        # The training will be run on the CPU. If a GPU is available it should be used instead.
        backend = gen_backend(backend='cpu',
                              batch_size=self.batch_size,
                              rng_seed=self.random_seed,
                              stochastic_round=False)

        cost = GeneralizedCost(
            name='cost',
            costfunc=CrossEntropyMulti())

        optimizer = GradientDescentMomentum(
            learning_rate=self.lrate,
            momentum_coef=0.9)

        # set up the model and experiment
        if not model:
            model = self.generate_default_model(dataset.num_labels)

        callbacks = Callbacks(model, dataset.train(),
                              output_file=os.path.join(self.root_path, self.Callback_Store_Filename),
                              progress_bar=True,
                              valid_set=dataset.test(),
                              valid_freq=1)

        # add a callback that saves the best model state
        callbacks.add_save_best_state_callback(self.model_path)
        callbacks.add_serialize_callback(
            serialize_schedule=1,
            save_path=os.path.join(self.root_path, self.Intermediate_Model_Filename),
            history=100)

        # Uncomment line below to run on GPU using cudanet backend
        # backend = gen_backend(rng_seed=0, gpu='cudanet')
        model.fit(
            dataset.train(),
            optimizer=optimizer,
            num_epochs=self.max_epochs,
            cost=cost,
            callbacks=callbacks)

        print('Misclassification error = %.1f%%'
              % (model.eval(dataset.test(), metric=Misclassification()) * 100))
        print "Finished training!"
        end = time.time()
        print "Duration", end - start, "seconds"

        return model
Developer: datastax-demos, Project: Muvr-Analytics, Lines: 54, Source file: mlp_model.py

Example 3: Callbacks

# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_serialize_callback [as alias]
if args.model_file:
    import os
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    mlp.load_weights(args.model_file)

# configure callbacks
callbacks = Callbacks(mlp, train, output_file=args.output_file)

if args.validation_freq:
    class TopKMetrics(Callback):
        def __init__(self, valid_set, epoch_freq=args.validation_freq):
            super(TopKMetrics, self).__init__(epoch_freq=epoch_freq)
            self.valid_set = valid_set

        def on_epoch_end(self, epoch):
            self.valid_set.reset()
            allmetrics = TopKMisclassification(k=5)
            stats = mlp.eval(self.valid_set, metric=allmetrics)
            print ", ".join(allmetrics.metric_names) + ": " + ", ".join(map(str, stats.flatten()))

    callbacks.add_callback(TopKMetrics(test))

if args.save_path:
    checkpoint_schedule = range(1, args.epochs)
    callbacks.add_serialize_callback(checkpoint_schedule, args.save_path, history=2)

mlp.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

test.exit_batch_provider()
train.exit_batch_provider()
Developer: sunclx, Project: neon, Lines: 32, Source file: alexnet.py

Example 4: RBM

# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_serialize_callback [as alias]
             'step_config': 1,
             'learning_rate': 0.1,
             'weight_decay': 0}

# initialize model object
rbm = RBM(layers=layers)

if args.model_file:
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    logger.info('loading initial model state from %s' % args.model_file)
    rbm.load_weights(args.model_file)

# setup standard fit callbacks
callbacks = Callbacks(rbm, train_set, output_file=args.output_file,
                      progress_bar=args.progress_bar)

# add a callback to calculate

if args.serialize > 0:
    # add a callback for saving a checkpoint file
    # every args.serialize epochs
    checkpoint_schedule = args.serialize
    checkpoint_model_path = args.save_path
    callbacks.add_serialize_callback(checkpoint_schedule, checkpoint_model_path)

rbm.fit(train_set, optimizer=optimizer, num_epochs=num_epochs, callbacks=callbacks)

for mb_idx, (x_val, y_val) in enumerate(valid_set):
    hidden = rbm.fprop(x_val)
    break
Developer: yeahrmek, Project: 3dShapeNets, Lines: 32, Source file: mnist_multilayer.py

Example 5: LSTM

# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_serialize_callback [as alias]
layers = [
    LSTM(hidden_size, init, Logistic(), Tanh()),
    Affine(len(train_set.vocab), init, bias=init, activation=Softmax())
]
model = Model(layers=layers)

cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))

optimizer = RMSProp(clip_gradients=clip_gradients, stochastic_round=args.rounding)

# configure callbacks
callbacks = Callbacks(model, train_set, output_file=args.output_file,
                      progress_bar=args.progress_bar,
                      valid_set=valid_set, valid_freq=1,
                      )
callbacks.add_serialize_callback(1, args.save_path)

# fit and validate
model.fit(train_set, optimizer=optimizer, num_epochs=num_epochs, cost=cost, callbacks=callbacks)


def sample(prob):
    """
    Sample index from probability distribution
    """
    prob = prob / (prob.sum() + 1e-6)
    return np.argmax(np.random.multinomial(1, prob, 1))

# Set batch size and time_steps to 1 for generation and reset buffers
be.bsz = 1
time_steps = 1
Developer: ZebTech, Project: neon, Lines: 33, Source file: text_generation_lstm.py


Note: The neon.callbacks.callbacks.Callbacks.add_serialize_callback examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from community-contributed open-source projects; copyright remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.