

Python autograd.record Method Code Examples

This article collects typical usage examples of the Python method mxnet.autograd.record. If you are unsure what autograd.record does, how to call it, or what it looks like in practice, the curated examples below may help. You can also explore the other members of the mxnet.autograd module.


Fifteen code examples of autograd.record are presented below, sorted by popularity by default. Upvote the examples you find useful; your votes help the system surface better Python code samples.
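
Before diving into the examples, here is a minimal sketch of the core pattern (written for this article, not taken from any of the projects below): attach a gradient buffer to an input, record the forward computation, then call backward.

import mxnet as mx
from mxnet import autograd

x = mx.nd.array([1.0, 2.0, 3.0])
x.attach_grad()               # allocate a buffer for dL/dx

with autograd.record():       # record operations for differentiation
    y = (x * x).sum()         # y = sum(x^2)

y.backward()                  # populate x.grad
print(x.grad)                 # [2. 4. 6.], i.e. 2*x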

Example 1: train

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
          weight_decay, batch_size):
    """Trains the model."""
    dataset_train = gluon.data.ArrayDataset(X_train, y_train)
    data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
                                            shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate,
                             'wd': weight_decay})
    net.initialize(force_reinit=True)
    for epoch in range(epochs):
        for data, label in data_iter_train:
            with autograd.record():
                output = net(data)
                loss = square_loss(output, label)
            loss.backward()
            trainer.step(batch_size)
            avg_loss = get_rmse_log(net, X_train, y_train)
        if epoch > verbose_epoch:
            print("Epoch %d, train loss: %f" % (epoch, avg_loss))
    return avg_loss 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 23, Source: kaggle_k_fold_cross_validation.py
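
The helpers square_loss and get_rmse_log are defined elsewhere in kaggle_k_fold_cross_validation.py; below is a plausible reconstruction, hedged, based on the usual Kaggle house-price setup. Note also that get_rmse_log is re-evaluated after every batch in the loop above, which is costly; only the last value per epoch is printed.

import numpy as np
from mxnet import gluon, nd

square_loss = gluon.loss.L2Loss()   # 0.5 * (pred - label)^2 per sample

def get_rmse_log(net, X_train, y_train):
    # RMSE between log-predictions and log-labels; predictions are clipped
    # to [1, inf) so the log is defined. The factor 2 undoes L2Loss's 0.5.
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    num_train = X_train.shape[0]
    return np.sqrt(2 * nd.sum(square_loss(
        nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)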

Example 2: train

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def train(net, epoch, ctx_list):
    net.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx_list)
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})
    metric = mx.metric.Accuracy()
    loss = gluon.loss.SoftmaxCrossEntropyLoss()

    for i in range(epoch):
        train_data.reset()
        for batch in train_data:
            datas = gluon.utils.split_and_load(batch.data[0], ctx_list, batch_axis=0)
            labels = gluon.utils.split_and_load(batch.label[0], ctx_list, batch_axis=0)
            outputs = []
            with autograd.record():
                for x, y in zip(datas, labels):
                    z = net(x)
                    L = loss(z, y)
                    L.backward()
                    outputs.append(z)
            trainer.step(batch.data[0].shape[0])
            metric.update(labels, outputs)
        name, acc = metric.get()
        metric.reset()
        print('training acc at epoch %d: %s=%f'%(i, name, acc)) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 25, Source: test_autograd.py
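
A detail worth noting: each shard's L.backward() is called inside the record scope, which autograd permits, and trainer.step is then passed the total batch size (batch.data[0].shape[0]) so the gradients accumulated across all devices are normalized correctly.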

Example 3: test_cross_device_autograd

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def test_cross_device_autograd():
    x = mx.nd.random.uniform(shape=(10,))
    x.attach_grad()

    with mx.autograd.record():
        y = mx.nd.tanh(x)
        y = y.copyto(mx.gpu(0))
        y = mx.nd.tanh(y)
        y = y.copyto(mx.cpu(0))
        y = mx.nd.tanh(y)
        y = y.copyto(mx.gpu(0))
        y = y.copyto(mx.gpu(0))

        y.backward()

    dx = x.grad.asnumpy()
    x.grad[:] = 0

    with mx.autograd.record():
        y = x
        for i in range(3):
            y = mx.nd.tanh(y)
        y.backward()

    assert_almost_equal(dx, x.grad.asnumpy()) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 27, Source: test_operator_gpu.py
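
The test verifies that gradients flow transparently through copyto moves between devices: the first recorded graph applies tanh three times with CPU/GPU hops in between (plus a redundant same-device copy), the second applies tanh three times on one device, and the two gradients must agree.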

Example 4: test_batchnorm_backwards_notrain

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def test_batchnorm_backwards_notrain():
    for ctx in [mx.cpu(0), mx.gpu(0)]:
        for cudnn_o in [False, True]:
            B, C, H, W = 4, 3, 2, 2
            x = mx.nd.random.poisson(1, shape=(B, C, H, W)).as_in_context(ctx)
            gamma = mx.nd.random.normal(shape=(C,)).as_in_context(ctx)
            beta = mx.nd.random.normal(shape=(C,)).as_in_context(ctx)
            mean = mx.nd.random.normal(shape=(C,)).as_in_context(ctx)
            std = mx.nd.random.normal(shape=(C,)).as_in_context(ctx)
            x.attach_grad()

            with autograd.record(False):
                y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
                                         fix_gamma=False, cudnn_off=cudnn_o)
                loss = y.square().sum()
            loss.backward(train_mode=False)
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 18, Source: test_operator_gpu.py
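
autograd.record(False) records with train_mode=False, so layers behave as they would at inference time while still allowing a backward pass; backward(train_mode=False) keeps the gradient computation in the same mode. A minimal sketch of the idea (my own, using Dropout, which becomes the identity in predict mode):

import mxnet as mx
from mxnet import autograd

x = mx.nd.ones((2, 4))
x.attach_grad()
with autograd.record(train_mode=False):
    y = mx.nd.Dropout(x, p=0.5)    # identity in predict mode
    z = y.sum()
z.backward(train_mode=False)
print(x.grad)                      # all ones, confirming Dropout was a no-op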

Example 5: test_softmax_activation

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def test_softmax_activation():
    gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
                         [2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
    cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
                         [2., -.4, 7., 3., 0.2]], ctx=mx.cpu())

    cpu_a.attach_grad()
    gpu_a.attach_grad()
    with mx.autograd.record():
        gpu_y = mx.nd.SoftmaxActivation(data=gpu_a)
        cpu_y = mx.nd.SoftmaxActivation(data=cpu_a)
        assert_almost_equal(cpu_y.asnumpy(), gpu_y.asnumpy(), atol=1e-3, rtol=1e-3)

        gpu_y.backward()
        cpu_y.backward()
        assert_almost_equal(cpu_a.grad.asnumpy(), gpu_a.grad.asnumpy(),
                            atol=1e-3, rtol=1e-3)
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 19, Source: test_operator_gpu.py
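
SoftmaxActivation is a legacy operator, deprecated in MXNet in favor of mx.nd.softmax; it is presumably kept in this test for backward-compatibility coverage.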

Example 6: training

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def training(self, epoch):
    tbar = tqdm(self.train_data)
    train_loss = 0.0
    for i, (data, target) in enumerate(tbar):
        with autograd.record(True):
            outputs = self.net(data.astype(args.dtype, copy=False))
            losses = self.criterion(outputs, target)
            mx.nd.waitall()
            autograd.backward(losses)
        self.optimizer.step(self.args.batch_size)
        for loss in losses:
            train_loss += np.mean(loss.asnumpy()) / len(losses)
        tbar.set_description('Epoch %d, training loss %.3f' %
                             (epoch, train_loss / (i + 1)))
        if i != 0 and i % self.args.log_interval == 0:
            self.logger.info('Epoch %d iteration %04d/%04d: training loss %.3f' %
                             (epoch, i, len(self.train_data), train_loss / (i + 1)))
        mx.nd.waitall()

    # save every epoch
    if self.args.no_val:
        save_checkpoint(self.net.module, self.args, epoch, 0, False)
Developer: dmlc, Project: gluon-cv, Lines: 24, Source: train.py
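
This training method (a class method; self.net, self.criterion, and so on are set up elsewhere in the trainer class) passes a list of losses to autograd.backward, which back-propagates from several heads at once and accumulates their gradients. A minimal standalone sketch of that call:

import mxnet as mx
from mxnet import autograd

x = mx.nd.ones((3,))
x.attach_grad()
with autograd.record():
    heads = [(2 * x).sum(), (3 * x).sum()]
autograd.backward(heads)   # gradients from both heads are summed
print(x.grad)              # [5. 5. 5.]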

Example 7: training

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def training(self, epoch):
    tbar = tqdm(self.train_data)
    train_loss = 0.0
    for i, (data, target) in enumerate(tbar):
        with autograd.record(True):
            outputs = self.net(data.astype(args.dtype, copy=False))
            losses = self.criterion(outputs, target)
            mx.nd.waitall()
            autograd.backward(losses)
        self.optimizer.step(self.args.batch_size)
        for loss in losses:
            train_loss += loss.asnumpy()[0] / len(losses)
        tbar.set_description('Epoch {}, training loss {}'.format(epoch, train_loss / (i + 1)))
        mx.nd.waitall()

    # save every epoch
    save_checkpoint(self.net.module, self.args, False)
Developer: osmr, Project: imgclsmob, Lines: 19, Source: train_gl_seg.py

Example 8: train

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def train(net, train_dataloader, epochs, batch_size, is_many_to_one, model_dir):
    loss_fn = RMSE_many_to_one if is_many_to_one else RMSE_many_to_many
    INPUT_SCALER = 300

    # 'trainer' and 'ctx' are assumed to come from the enclosing script scope.
    for e in range(epochs):
        loss_avg = 0
        for i, ((data, data_lengths), label) in enumerate(train_dataloader):
            data = data.as_in_context(ctx).astype('float32')
            label = label.as_in_context(ctx).astype('float32')
            data_lengths = data_lengths.as_in_context(ctx).astype('float32')
            with autograd.record():
                pred = net(data)
                loss, loss_no_weight = loss_fn(pred, label / INPUT_SCALER, data_lengths)
                loss = loss.mean()
            loss.backward()
            trainer.step(data.shape[0])
            loss_avg += loss_no_weight.mean().sqrt().asnumpy()
        logging.info("Epoch {}: Average RMSE {}".format(e, INPUT_SCALER * loss_avg / (i + 1)))

    save_model(net, model_dir)
    logging.info("Saved model params")
    logging.info("End of training")
Developer: awslabs, Project: predictive-maintenance-using-machine-learning, Lines: 24, Source: sagemaker_predictive_maintenance_entry_point.py

Example 9: training

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def training(self, epoch):
    tbar = tqdm(self.train_data)
    train_loss = 0.0
    alpha = 0.2
    for i, (data, target) in enumerate(tbar):
        with autograd.record(True):
            outputs = self.net(data.astype(args.dtype, copy=False))
            losses = self.criterion(outputs, target)
            mx.nd.waitall()
            autograd.backward(losses)
        self.optimizer.step(self.args.batch_size)
        for loss in losses:
            train_loss += loss.asnumpy()[0] / len(losses)
        tbar.set_description('Epoch %d, training loss %.3f' %
                             (epoch, train_loss / (i + 1)))
        mx.nd.waitall()

    # save every epoch
    save_checkpoint(self.net.module, self.args, False)
Developer: Angzz, Project: panoptic-fpn-gluon, Lines: 21, Source: train.py

Example 10: train

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def train(net, epochs, ctx, train_data, test_data,
          margin_loss, reconstructions_loss,
          batch_size, scale_factor):
    num_classes = 10
    trainer = gluon.Trainer(
        net.collect_params(), 'sgd', {'learning_rate': 0.05, 'wd': 5e-4})

    for epoch in range(epochs):
        train_loss = 0.0
        for batch_idx, (data, label) in tqdm(enumerate(train_data), total=len(train_data),
                                             ncols=70, leave=False, unit='b'):
            label = label.as_in_context(ctx)
            data = data.as_in_context(ctx)
            with autograd.record():
                prob, X_l2norm, reconstructions = net(data, label)
                loss1 = margin_loss(data, num_classes, label, X_l2norm)
                loss2 = reconstructions_loss(reconstructions, data)
                loss = loss1 + scale_factor * loss2
                loss.backward()
            trainer.step(batch_size)
            train_loss += nd.mean(loss).asscalar()
        test_acc = test(test_data, net, ctx)
        print('Epoch:{}, TrainLoss:{:.5f}, TestAcc:{}'.format(
            epoch, train_loss / len(train_data), test_acc))
Developer: tonysy, Project: CapsuleNet-Gluon, Lines: 24, Source: main.py

Example 11: training

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def training(self, epoch):
    tbar = tqdm(self.train_data)
    train_loss = 0.0
    for i, (data, target) in enumerate(tbar):
        self.lr_scheduler.update(i, epoch)
        with autograd.record(True):
            outputs = self.net(data)
            losses = self.criterion(outputs, target)
            mx.nd.waitall()
            autograd.backward(losses)
        self.optimizer.step(self.args.batch_size)
        for loss in losses:
            train_loss += loss.asnumpy()[0] / len(losses)
        tbar.set_description('Epoch %d, training loss %.3f' %
                             (epoch, train_loss / (i + 1)))
        mx.nd.waitall()

    # save every epoch
    save_checkpoint(self.net.module, self.args, False)
Developer: zzdang, Project: cascade_rcnn_gluon, Lines: 21, Source: train.py

Example 12: train

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def train(train_iter, test_iter, net, loss, trainer, ctx, num_epochs):
    """Train and evaluate a model."""
    print('training on', ctx)
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, m, start = 0.0, 0.0, 0, 0, time.time()
        for i, batch in enumerate(train_iter):
            Xs, ys, batch_size = _get_batch(batch, ctx)   
            with autograd.record():
                y_hats = [net(X) for X in Xs]
                ls = [loss(y_hat, y) for y_hat, y in zip(y_hats, ys)]
            for l in ls:
                l.backward()
            trainer.step(batch_size)
            train_l_sum += sum([l.sum().asscalar() for l in ls])
            n += sum([l.size for l in ls])
            train_acc_sum += sum([(y_hat.argmax(axis=1) == y).sum().asscalar()
                                 for y_hat, y in zip(y_hats, ys)])
            m += sum([y.size for y in ys])
        test_acc = evaluate_accuracy(test_iter, net, ctx)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
              'time %.1f sec'
              % (epoch + 1, train_l_sum / n, train_acc_sum / m, test_acc,
                 time.time() - start)) 
Developer: d2l-ai, Project: d2l-zh, Lines: 27, Source: utils.py
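
_get_batch is defined elsewhere in the same utils.py; a plausible reconstruction, hedged, that splits a batch across the given contexts:

from mxnet import gluon

def _get_batch(batch, ctx):
    """Split the features and labels of a batch across the contexts in ctx."""
    features, labels = batch
    if labels.dtype != features.dtype:
        labels = labels.astype(features.dtype)
    return (gluon.utils.split_and_load(features, ctx),
            gluon.utils.split_and_load(labels, ctx),
            features.shape[0])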

Example 13: train_ch3

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, trainer=None):
    """Train and evaluate a model with CPU."""
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            if trainer is None:
                sgd(params, lr, batch_size)
            else:
                trainer.step(batch_size)
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc)) 
Developer: d2l-ai, Project: d2l-zh, Lines: 23, Source: utils.py
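
When no Trainer is supplied, the loop falls back to an sgd helper defined elsewhere in utils.py; its usual d2l-zh form is reconstructed below (not part of this snippet):

def sgd(params, lr, batch_size):
    """Mini-batch stochastic gradient descent, updating params in place."""
    for param in params:
        param[:] = param - lr * param.grad / batch_size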

Example 14: train_ch5

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx,
              num_epochs):
    """Train and evaluate a model with CPU or GPU."""
    print('training on', ctx)
    loss = gloss.SoftmaxCrossEntropyLoss()
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        for X, y in train_iter:
            X, y = X.as_in_context(ctx), y.as_in_context(ctx)
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            trainer.step(batch_size)
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net, ctx)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
              'time %.1f sec'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc,
                 time.time() - start)) 
Developer: d2l-ai, Project: d2l-zh, Lines: 25, Source: utils.py

Example 15: forward_backward

# Required module: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import record [as alias]
def forward_backward(net, data, label):

    # Ask autograd to remember the forward pass
    with autograd.record():
        # Compute the loss on all GPUs
        losses = [loss(net(X), Y) for X, Y in zip(data, label)]

    # Run the backward pass (calculate gradients) on all GPUs
    for l in losses:
        l.backward()

# Train a batch using multiple GPUs 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 14, Source: cifar10_dist.py
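
The snippet is cut off just as the next function begins ("Train a batch using multiple GPUs"), and the loss object used inside forward_backward is defined elsewhere in cifar10_dist.py. A hedged sketch of how forward_backward is typically driven in such a multi-GPU loop; the names train_batch, ctx_list, and trainer are illustrative, not from the source:

from mxnet import gluon

def train_batch(batch, ctx_list, net, trainer):
    # Split the batch across devices, run forward/backward on each shard,
    # then take one optimizer step normalized by the total batch size.
    data = gluon.utils.split_and_load(batch.data[0], ctx_list)
    label = gluon.utils.split_and_load(batch.label[0], ctx_list)
    forward_backward(net, data, label)
    trainer.step(batch.data[0].shape[0])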


Note: The mxnet.autograd.record examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects by their respective contributors, and copyright remains with the original authors; consult each project's license before distributing or reusing the code, and do not republish without permission.