

Python extensions.PrintReport Method Code Examples

This article collects typical usage examples of the chainer.training.extensions.PrintReport method in Python. If you are wondering what extensions.PrintReport does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from its parent module, chainer.training.extensions.


The sections below present 15 code examples of the extensions.PrintReport method, sorted by popularity by default.
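Before the full examples, here is a minimal sketch of the pattern they all share. The helper name add_reporting is only for illustration, and trainer is assumed to be an already-constructed chainer.training.Trainer: a LogReport extension aggregates the values reported during training, and PrintReport prints the selected entries to stdout each time the log is updated.

from chainer.training import extensions

def add_reporting(trainer):
    # Aggregate reported values such as 'main/loss' once per epoch (LogReport's default trigger).
    trainer.extend(extensions.LogReport())
    # Print the selected log entries as a table on stdout whenever the log is updated.
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss', 'elapsed_time']))
    return trainer

The entry names follow Chainer's reporter convention: 'main/...' values come from the link optimized by the 'main' optimizer, and 'validation/...' values come from the Evaluator extension, as the full examples below demonstrate.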

Example 1: _setup

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def _setup(self, stream=None, delete_flush=False):
        self.logreport = mock.MagicMock(spec=extensions.LogReport(
            ['epoch'], trigger=(1, 'iteration'), log_name=None))
        if stream is None:
            self.stream = mock.MagicMock()
            if delete_flush:
                del self.stream.flush
        else:
            self.stream = stream
        self.report = extensions.PrintReport(
            ['epoch'], log_report=self.logreport, out=self.stream)

        self.trainer = testing.get_trainer_with_mock_updater(
            stop_trigger=(1, 'iteration'))
        self.trainer.extend(self.logreport)
        self.trainer.extend(self.report)
        self.logreport.log = [{'epoch': 0}] 
Developer: chainer, Project: chainer, Lines: 19, Source: test_print_report.py

Example 2: main

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def main(model):
    train = read_data(fold=BUZZER_TRAIN_FOLD)
    valid = read_data(fold=BUZZER_DEV_FOLD)
    print('# train data: {}'.format(len(train)))
    print('# valid data: {}'.format(len(valid)))

    train_iter = chainer.iterators.SerialIterator(train, 64)
    valid_iter = chainer.iterators.SerialIterator(valid, 64, repeat=False, shuffle=False)

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(1e-4))

    updater = training.updaters.StandardUpdater(train_iter, optimizer, converter=convert_seq, device=0)
    trainer = training.Trainer(updater, (20, 'epoch'), out=model.model_dir)

    trainer.extend(extensions.Evaluator(valid_iter, model, converter=convert_seq, device=0))

    record_trigger = training.triggers.MaxValueTrigger('validation/main/accuracy', (1, 'epoch'))
    trainer.extend(extensions.snapshot_object(model, 'buzzer.npz'), trigger=record_trigger)

    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.PrintReport([
        'epoch', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy', 'elapsed_time'
    ]))

    if not os.path.isdir(model.model_dir):
        os.mkdir(model.model_dir)

    trainer.run() 
Developer: Pinafore, Project: qb, Lines: 34, Source: train.py

Example 3: train

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def train(args):
    nz = args.nz
    batch_size = args.batch_size
    epochs = args.epochs
    gpu = args.gpu

    # CIFAR-10 images in range [-1, 1] (tanh generator outputs)
    train, _ = datasets.get_cifar10(withlabel=False, ndim=3, scale=2)
    train -= 1.0
    train_iter = iterators.SerialIterator(train, batch_size)

    z_iter = RandomNoiseIterator(GaussianNoiseGenerator(0, 1, args.nz),
                                 batch_size)

    optimizer_generator = optimizers.RMSprop(lr=0.00005)
    optimizer_critic = optimizers.RMSprop(lr=0.00005)
    optimizer_generator.setup(Generator())
    optimizer_critic.setup(Critic())

    updater = WassersteinGANUpdater(
        iterator=train_iter,
        noise_iterator=z_iter,
        optimizer_generator=optimizer_generator,
        optimizer_critic=optimizer_critic,
        device=gpu)

    trainer = training.Trainer(updater, stop_trigger=(epochs, 'epoch'))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.LogReport(trigger=(1, 'iteration')))
    trainer.extend(GeneratorSample(), trigger=(1, 'epoch'))
    trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'critic/loss',
            'critic/loss/real', 'critic/loss/fake', 'generator/loss']))
    trainer.run() 
Developer: hvy, Project: chainer-wasserstein-gan, Lines: 35, Source: train.py

Example 4: check_train

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def check_train(self, gpu):
        outdir = tempfile.mkdtemp()
        print("outdir: {}".format(outdir))

        n_classes = 2
        batch_size = 32

        devices = {'main': gpu}

        A = np.array([
            [0, 1, 1, 0],
            [1, 0, 0, 1],
            [1, 0, 0, 0],
            [0, 1, 0, 0],
        ]).astype(np.float32)
        model = graph_cnn.GraphCNN(A, n_out=n_classes)

        optimizer = optimizers.Adam(alpha=1e-4)
        optimizer.setup(model)
        train_dataset = EasyDataset(train=True, n_classes=n_classes)
        train_iter = chainer.iterators.MultiprocessIterator(
            train_dataset, batch_size)
        updater = ParallelUpdater(train_iter, optimizer, devices=devices)
        trainer = chainer.training.Trainer(updater, (10, 'epoch'), out=outdir)
        trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
        trainer.extend(extensions.PrintReport(
            ['epoch', 'iteration', 'main/loss', 'main/accuracy']))
        trainer.extend(extensions.ProgressBar())
        trainer.run() 
Developer: pfnet-research, Project: chainer-graph-cnn, Lines: 31, Source: test_graph_cnn.py

Example 5: _train_trainer

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def _train_trainer(self, examples):
        """Training with chainer trainer module"""
        train_iter = SerialIterator(examples, args.batch_size)
        optimizer = optimizers.Adam(alpha=args.lr)
        optimizer.setup(self.nnet)

        def loss_func(boards, target_pis, target_vs):
            out_pi, out_v = self.nnet(boards)
            l_pi = self.loss_pi(target_pis, out_pi)
            l_v = self.loss_v(target_vs, out_v)
            total_loss = l_pi + l_v
            chainer.reporter.report({
                'loss': total_loss,
                'loss_pi': l_pi,
                'loss_v': l_v,
            }, observer=self.nnet)
            return total_loss

        updater = training.StandardUpdater(
            train_iter, optimizer, device=args.device, loss_func=loss_func, converter=converter)
        # Set up the trainer.
        trainer = training.Trainer(updater, (args.epochs, 'epoch'), out=args.out)
        # trainer.extend(extensions.snapshot(), trigger=(args.epochs, 'epoch'))
        trainer.extend(extensions.LogReport())
        trainer.extend(extensions.PrintReport([
            'epoch', 'main/loss', 'main/loss_pi', 'main/loss_v', 'elapsed_time']))
        trainer.extend(extensions.ProgressBar(update_interval=10))
        trainer.run() 
Developer: suragnair, Project: alpha-zero-general, Lines: 30, Source: NNet.py

Example 6: main

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def main():
  # Training settings
  args = get_args()

  # Set up a neural network to train
  model = L.Classifier(Net())

  if args.gpu >= 0:
    # Make a specified GPU current
    chainer.backends.cuda.get_device_from_id(args.gpu).use()
    model.to_gpu() # Copy the model to the GPU

  # Setup an optimizer
  optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=args.momentum)
  optimizer.setup(model)

  # Load the MNIST dataset
  train, test = chainer.datasets.get_mnist(ndim=3)
  train_iter = chainer.iterators.SerialIterator(train, args.batch_size)
  test_iter = chainer.iterators.SerialIterator(test, args.test_batch_size,
                                               repeat=False, shuffle=False)

  # Set up a trainer
  updater = training.updaters.StandardUpdater(
      train_iter, optimizer, device=args.gpu)
  trainer = training.Trainer(updater, (args.epochs, 'epoch'))

  # Evaluate the model with the test dataset for each epoch
  trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

  # Write a log of evaluation statistics for each epoch
  trainer.extend(extensions.LogReport())

  # Print selected entries of the log to stdout
  trainer.extend(extensions.PrintReport(
      ['epoch', 'main/loss', 'validation/main/loss',
       'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

  # Send selected entries of the log to CMLE HP tuning system
  trainer.extend(
    HpReport(hp_metric_val='validation/main/loss', hp_metric_tag='my_loss'))

  if args.resume:
    # Resume from a snapshot
    tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
    if not os.path.exists(tmp_model_file):
      subprocess.check_call([
        'gsutil', 'cp', os.path.join(args.model_dir, MODEL_FILE_NAME),
        tmp_model_file])
    if os.path.exists(tmp_model_file):
      chainer.serializers.load_npz(tmp_model_file, trainer)
  
  trainer.run()

  if args.model_dir:
    tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
    serializers.save_npz(tmp_model_file, model)
    subprocess.check_call([
        'gsutil', 'cp', tmp_model_file,
        os.path.join(args.model_dir, MODEL_FILE_NAME)]) 
Developer: GoogleCloudPlatform, Project: cloudml-samples, Lines: 62, Source: mnist.py

Example 7: run_training

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def run_training(args, model):
    trainer = create_trainer(args, model)

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(extensions.dump_graph('main/loss'))

    # Take a snapshot for each specified epoch
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Save two plot images to the result dir
    if args.plot and extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch', file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch', file_name='accuracy.png'))

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run() 
Developer: pfnet-research, Project: chainer-compiler, Lines: 44, Source: gen_resnet50.py

Example 8: main_impl

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def main_impl(args):
    # Set up a neural network to train
    # Classifier reports softmax cross entropy loss and accuracy at every
    # iteration, which will be used by the PrintReport extension below.
    model = MLP(args.unit, 10)
    # classifier = L.Classifier(model)
    classifier = MyClassifier(model, compute_accuracy=args.run_training)
    model = classifier

    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    if args.run_training:
        run_training(args, model)
        return

    out_dir = 'out/backprop_test_mnist_mlp'

    x = np.random.random((args.batchsize, 784)).astype(np.float32)
    y = (np.random.random(args.batchsize) * 10).astype(np.int32)
    onehot = np.eye(10, dtype=x.dtype)[y]
    x = chainer.Variable(x, name='input')
    onehot = chainer.Variable(onehot, name='onehot')

    chainer.disable_experimental_feature_warning = True
    shutil.rmtree(out_dir, ignore_errors=True)
    onnx_chainer.export_testcase(model,
                                 (x, onehot),
                                 out_dir,
                                 output_grad=True,
                                 output_names='loss')

    # Revive this code if we need to test parameter update.
    #
    # trainer = create_trainer(args, model)
    # for step in range(2):
    #     trainer.updater.update()
    #     npz_filename = '%s/params_%d.npz' % (out_dir, step)
    #     params_dir = '%s/params_%d' % (out_dir, step)
    #     chainer.serializers.save_npz(npz_filename, model)
    #     makedirs(params_dir)
    #     npz_to_onnx.npz_to_onnx(npz_filename, os.path.join(params_dir, 'param')) 
Developer: pfnet-research, Project: chainer-compiler, Lines: 46, Source: gen_mnist_mlp.py

Example 9: prepare_trainer

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def prepare_trainer(net,
                    optimizer_name,
                    lr,
                    momentum,
                    num_epochs,
                    train_data,
                    val_data,
                    logging_dir_path,
                    use_gpus):
    if optimizer_name == "sgd":
        optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    elif optimizer_name == "nag":
        optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    else:
        raise Exception("Unsupported optimizer: {}".format(optimizer_name))
    optimizer.setup(net)

    # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, )
    devices = (0,) if use_gpus else (-1,)

    updater = training.updaters.StandardUpdater(
        iterator=train_data["iterator"],
        optimizer=optimizer,
        device=devices[0])
    trainer = training.Trainer(
        updater=updater,
        stop_trigger=(num_epochs, "epoch"),
        out=logging_dir_path)

    val_interval = 100000, "iteration"
    log_interval = 1000, "iteration"

    trainer.extend(
        extension=extensions.Evaluator(
            iterator=val_data["iterator"],
            target=net,
            device=devices[0]),
        trigger=val_interval)
    trainer.extend(extensions.dump_graph("main/loss"))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(
        extensions.snapshot_object(
            net,
            "model_iter_{.updater.iteration}"),
        trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(
        extensions.PrintReport([
            "epoch", "iteration", "main/loss", "validation/main/loss", "main/accuracy", "validation/main/accuracy",
            "lr"]),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    return trainer 
Developer: osmr, Project: imgclsmob, Lines: 57, Source: train_ch.py

Example 10: prepare_trainer

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def prepare_trainer(net,
                    optimizer_name,
                    lr,
                    momentum,
                    num_epochs,
                    train_iter,
                    val_iter,
                    logging_dir_path,
                    num_gpus=0):
    if optimizer_name == "sgd":
        optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    elif optimizer_name == "nag":
        optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    else:
        raise Exception('Unsupported optimizer: {}'.format(optimizer_name))
    optimizer.setup(net)

    # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, )
    devices = (0,) if num_gpus > 0 else (-1,)

    updater = training.updaters.StandardUpdater(
        iterator=train_iter,
        optimizer=optimizer,
        device=devices[0])
    trainer = training.Trainer(
        updater=updater,
        stop_trigger=(num_epochs, 'epoch'),
        out=logging_dir_path)

    val_interval = 100000, 'iteration'
    log_interval = 1000, 'iteration'

    trainer.extend(
        extension=extensions.Evaluator(
            val_iter,
            net,
            device=devices[0]),
        trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(
        extensions.snapshot_object(
            net,
            'model_iter_{.updater.iteration}'),
        trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'iteration', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy',
            'lr']),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    return trainer 
Developer: osmr, Project: imgclsmob, Lines: 57, Source: train_ch_cifar.py

Example 11: train

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def train(train_data_path, test_data_path, args):
    device = chainer.get_device(args.device)
    device.use()

    vocab = collections.defaultdict(lambda: len(vocab))
    vocab['<unk>'] = 0

    train_data = babi.read_data(vocab, train_data_path)
    test_data = babi.read_data(vocab, test_data_path)
    print('Training data: %s: %d' % (train_data_path, len(train_data)))
    print('Test data: %s: %d' % (test_data_path, len(test_data)))

    train_data = memnn.convert_data(train_data, args.max_memory)
    test_data = memnn.convert_data(test_data, args.max_memory)

    encoder = memnn.make_encoder(args.sentence_repr)
    network = memnn.MemNN(
        args.unit, len(vocab), encoder, args.max_memory, args.hop)
    model = chainer.links.Classifier(network, label_key='answer')
    opt = chainer.optimizers.Adam()

    model.to_device(device)

    opt.setup(model)

    train_iter = chainer.iterators.SerialIterator(
        train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(
        test_data, args.batchsize, repeat=False, shuffle=False)
    updater = chainer.training.StandardUpdater(train_iter, opt, device=device)
    trainer = chainer.training.Trainer(updater, (args.epoch, 'epoch'))

    @chainer.training.make_extension()
    def fix_ignore_label(trainer):
        network.fix_ignore_label()

    trainer.extend(fix_ignore_label)
    trainer.extend(extensions.Evaluator(test_iter, model, device=device))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()

    if args.model:
        memnn.save_model(args.model, model, vocab) 
Developer: chainer, Project: chainer, Lines: 49, Source: train_memnn.py

Example 12: get_trainer

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def get_trainer(optimizer, iter_train, iter_valid, iter_valid_raw,
                class_names, args):
    model = optimizer.target

    updater = chainer.training.StandardUpdater(
        iter_train, optimizer, device=args.gpu)

    trainer = chainer.training.Trainer(
        updater, (args.max_iteration, 'iteration'), out=args.out)

    trainer.extend(fcn.extensions.ParamsReport(args.__dict__))

    trainer.extend(extensions.ProgressBar(update_interval=5))

    trainer.extend(extensions.LogReport(
        trigger=(args.interval_print, 'iteration')))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'iteration', 'elapsed_time',
         'main/loss', 'validation/main/miou']))

    def pred_func(x):
        model(x)
        return model.score

    trainer.extend(
        fcn.extensions.SemanticSegmentationVisReport(
            pred_func, iter_valid_raw,
            transform=fcn.datasets.transform_lsvrc2012_vgg16,
            class_names=class_names, device=args.gpu, shape=(4, 2)),
        trigger=(args.interval_eval, 'iteration'))

    trainer.extend(
        chainercv.extensions.SemanticSegmentationEvaluator(
            iter_valid, model, label_names=class_names),
        trigger=(args.interval_eval, 'iteration'))

    trainer.extend(extensions.snapshot_object(
        target=model, filename='model_best.npz'),
        trigger=chainer.training.triggers.MaxValueTrigger(
            key='validation/main/miou',
            trigger=(args.interval_eval, 'iteration')))

    assert extensions.PlotReport.available()
    trainer.extend(extensions.PlotReport(
        y_keys=['main/loss'], x_key='iteration',
        file_name='loss.png', trigger=(args.interval_print, 'iteration')))
    trainer.extend(extensions.PlotReport(
        y_keys=['validation/main/miou'], x_key='iteration',
        file_name='miou.png', trigger=(args.interval_print, 'iteration')))

    return trainer 
Developer: wkentaro, Project: fcn, Lines: 53, Source: train_fcn32s.py

Example 13: train

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def train(train_data_path, test_data_path, args):
    vocab = collections.defaultdict(lambda: len(vocab))
    vocab['<unk>'] = 0

    train_data = babi.read_data(vocab, train_data_path)
    test_data = babi.read_data(vocab, test_data_path)
    print('Training data: %s: %d' % (train_data_path, len(train_data)))
    print('Test data: %s: %d' % (test_data_path, len(test_data)))

    train_data = memnn.convert_data(train_data, args.max_memory)
    test_data = memnn.convert_data(test_data, args.max_memory)

    encoder = memnn.make_encoder(args.sentence_repr)
    network = memnn.MemNN(
        args.unit, len(vocab), encoder, args.max_memory, args.hop)
    model = chainer.links.Classifier(network, label_key='answer')
    opt = chainer.optimizers.Adam()

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    opt.setup(model)

    train_iter = chainer.iterators.SerialIterator(
        train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(
        test_data, args.batchsize, repeat=False, shuffle=False)
    updater = chainer.training.StandardUpdater(
        train_iter, opt, device=args.gpu)
    trainer = chainer.training.Trainer(updater, (args.epoch, 'epoch'))

    @chainer.training.make_extension()
    def fix_ignore_label(trainer):
        network.fix_ignore_label()

    trainer.extend(fix_ignore_label)
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()

    if args.model:
        memnn.save_model(args.model, model, vocab) 
Developer: pfnet, Project: pfio, Lines: 49, Source: train_memnn.py

Example 14: get_trainer

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def get_trainer(net, updater, log_dir, print_fields, curriculum=None, extra_extensions=(), epochs=10, snapshot_interval=20000, print_interval=100, postprocess=None, do_logging=True, model_files=()):
    if curriculum is None:
        trainer = chainer.training.Trainer(
            updater,
            (epochs, 'epoch'),
            out=log_dir,
        )
    else:
        trainer = chainer.training.Trainer(
            updater,
            EarlyStopIntervalTrigger(epochs, 'epoch', curriculum),
            out=log_dir,
        )

    # dump computational graph
    trainer.extend(extensions.dump_graph('main/loss'))

    # also observe learning rate
    observe_lr_extension = chainer.training.extensions.observe_lr()
    observe_lr_extension.trigger = (print_interval, 'iteration')
    trainer.extend(observe_lr_extension)

    # Take snapshots
    trainer.extend(
        extensions.snapshot(filename="trainer_snapshot"),
        trigger=lambda trainer:
        trainer.updater.is_new_epoch or
        (trainer.updater.iteration > 0 and trainer.updater.iteration % snapshot_interval == 0)
    )

    if do_logging:
        # write all statistics to a file
        trainer.extend(Logger(model_files, log_dir, keys=print_fields, trigger=(print_interval, 'iteration'), postprocess=postprocess))

        # print some interesting statistics
        trainer.extend(extensions.PrintReport(
            print_fields,
            log_report='Logger',
        ))

    # Progressbar!!
    trainer.extend(extensions.ProgressBar(update_interval=1))

    for extra_extension, trigger in extra_extensions:
        trainer.extend(extra_extension, trigger=trigger)

    return trainer 
Developer: Bartzi, Project: see, Lines: 49, Source: train_utils.py

Example 15: train_one_epoch

# Required import: from chainer.training import extensions [as alias]
# Or: from chainer.training.extensions import PrintReport [as alias]
def train_one_epoch(model, train_data, lr, gpu, batchsize, out):
    train_model = PixelwiseSoftmaxClassifier(model)
    if gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(gpu).use()
        train_model.to_gpu()  # Copy the model to the GPU
    log_trigger = (0.1, 'epoch')
    validation_trigger = (1, 'epoch')
    end_trigger = (1, 'epoch')

    train_data = TransformDataset(
        train_data, ('img', 'label_map'), SimpleDoesItTransform(model.mean))
    val = VOCSemanticSegmentationWithBboxDataset(
        split='val').slice[:, ['img', 'label_map']]

    # Iterator
    train_iter = iterators.MultiprocessIterator(train_data, batchsize)
    val_iter = iterators.MultiprocessIterator(
        val, 1, shuffle=False, repeat=False, shared_mem=100000000)

    # Optimizer
    optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    optimizer.setup(train_model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(rate=0.0001))

    # Updater
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, device=gpu)

    # Trainer
    trainer = training.Trainer(updater, end_trigger, out=out)

    trainer.extend(extensions.LogReport(trigger=log_trigger))
    trainer.extend(extensions.observe_lr(), trigger=log_trigger)
    trainer.extend(extensions.dump_graph('main/loss'))

    if extensions.PlotReport.available():
        trainer.extend(extensions.PlotReport(
            ['main/loss'], x_key='iteration',
            file_name='loss.png'))
        trainer.extend(extensions.PlotReport(
            ['validation/main/miou'], x_key='iteration',
            file_name='miou.png'))

    trainer.extend(extensions.snapshot_object(
        model, filename='snapshot.npy'),
        trigger=end_trigger)
    trainer.extend(extensions.PrintReport(
        ['epoch', 'iteration', 'elapsed_time', 'lr',
         'main/loss', 'validation/main/miou',
         'validation/main/mean_class_accuracy',
         'validation/main/pixel_accuracy']),
        trigger=log_trigger)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.extend(
        SemanticSegmentationEvaluator(
            val_iter, model,
            voc_semantic_segmentation_label_names),
        trigger=validation_trigger)
    trainer.run() 
Developer: chainer, Project: models, Lines: 63, Source: train.py


Note: The chainer.training.extensions.PrintReport examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.