

Python autograd.backward Method Code Examples

This article collects typical usage examples of the Python method mxnet.autograd.backward. If you are wondering what exactly autograd.backward does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the enclosing mxnet.autograd module.


Fourteen code examples of autograd.backward are presented below, ordered by popularity by default.
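Before the examples, here is a minimal, self-contained sketch of the basic call pattern (a toy computation with illustrative values, using the classic MXNet NDArray API):

from mxnet import autograd, nd

x = nd.array([[1.0, 2.0], [3.0, 4.0]])
x.attach_grad()              # allocate a gradient buffer for x
with autograd.record():      # record operations into a computation graph
    y = 2 * x * x
autograd.backward(y)         # equivalent to y.backward(); head gradient defaults to ones
print(x.grad)                # dy/dx = 4 * x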

Example 1: training

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def training(self, epoch):
        tbar = tqdm(self.train_data)
        train_loss = 0.0
        for i, (data, target) in enumerate(tbar):
            with autograd.record(True):
                outputs = self.net(data.astype(args.dtype, copy=False))
                losses = self.criterion(outputs, target)
                mx.nd.waitall()
                autograd.backward(losses)
            self.optimizer.step(self.args.batch_size)
            for loss in losses:
                train_loss += np.mean(loss.asnumpy()) / len(losses)
            tbar.set_description('Epoch %d, training loss %.3f' % \
                (epoch, train_loss/(i+1)))
            if i != 0 and i % self.args.log_interval == 0:
                self.logger.info('Epoch %d iteration %04d/%04d: training loss %.3f' % \
                    (epoch, i, len(self.train_data), train_loss/(i+1)))
            mx.nd.waitall()

        # save every epoch
        if self.args.no_val:
            save_checkpoint(self.net.module, self.args, epoch, 0, False) 
Developer: dmlc, Project: gluon-cv, Lines: 24, Source: train.py

Example 2: training

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def training(self, epoch):
        tbar = tqdm(self.train_data)
        train_loss = 0.0
        for i, (data, target) in enumerate(tbar):
            with autograd.record(True):
                outputs = self.net(data.astype(args.dtype, copy=False))
                losses = self.criterion(outputs, target)
                mx.nd.waitall()
                autograd.backward(losses)
            self.optimizer.step(self.args.batch_size)
            for loss in losses:
                train_loss += loss.asnumpy()[0] / len(losses)
            tbar.set_description('Epoch {}, training loss {}'.format(epoch, train_loss / (i + 1)))
            mx.nd.waitall()

        # save every epoch
        save_checkpoint(self.net.module, self.args, False) 
Developer: osmr, Project: imgclsmob, Lines: 19, Source: train_gl_seg.py

Example 3: training

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def training(self, epoch):
        tbar = tqdm(self.train_data)
        train_loss = 0.0
        alpha = 0.2
        for i, (data, target) in enumerate(tbar):
            with autograd.record(True):
                outputs = self.net(data.astype(args.dtype, copy=False))
                losses = self.criterion(outputs, target)
                mx.nd.waitall()
                autograd.backward(losses)
            self.optimizer.step(self.args.batch_size)
            for loss in losses:
                train_loss += loss.asnumpy()[0] / len(losses)
            tbar.set_description('Epoch %d, training loss %.3f'%\
                (epoch, train_loss/(i+1)))
            mx.nd.waitall()

        # save every epoch
        save_checkpoint(self.net.module, self.args, False) 
Developer: Angzz, Project: panoptic-fpn-gluon, Lines: 21, Source: train.py

Example 4: training

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def training(self, epoch):
        tbar = tqdm(self.train_data)
        train_loss = 0.0
        for i, (data, target) in enumerate(tbar):
            self.lr_scheduler.update(i, epoch)
            with autograd.record(True):
                outputs = self.net(data)
                losses = self.criterion(outputs, target)
                mx.nd.waitall()
                autograd.backward(losses)
            self.optimizer.step(self.args.batch_size)
            for loss in losses:
                train_loss += loss.asnumpy()[0] / len(losses)
            tbar.set_description('Epoch %d, training loss %.3f'%\
                (epoch, train_loss/(i+1)))
            mx.nd.waitall()

        # save every epoch
        save_checkpoint(self.net.module, self.args, False) 
Developer: zzdang, Project: cascade_rcnn_gluon, Lines: 21, Source: train.py

Example 5: parallel_backward

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def parallel_backward(losses, sync=True):
    """Parallel Backward for CustomOp"""

    def _worker(loss):
        autograd.backward(loss)

    threads = [threading.Thread(target=_worker, args=(loss,)) for loss in losses]
    if sync:
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        for loss in losses:
            loss.backward() 
Developer: dmlc, Project: gluon-cv, Lines: 17, Source: parallel.py
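A hypothetical call site for the helper above (a sketch only: net, criterion, and the per-device data_list/target_list are assumed to exist, e.g. produced by gluon.utils.split_and_load):

with autograd.record():
    losses = [criterion(net(x), t) for x, t in zip(data_list, target_list)]
parallel_backward(losses, sync=True)   # one backward thread per per-device loss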

Example 6: test_data_parallel

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def test_data_parallel():
    # test gluon.contrib.parallel.DataParallelModel
    net = nn.HybridSequential()
    with net.name_scope():
        net.add(nn.Conv2D(in_channels=1, channels=20, kernel_size=5))
        net.add(nn.Activation('relu'))
        net.add(nn.MaxPool2D(pool_size=2, strides=2))
        net.add(nn.Conv2D(in_channels=20, channels=50, kernel_size=5))
        net.add(nn.Activation('relu'))
        net.add(nn.MaxPool2D(pool_size=2, strides=2))
        # The Flatten layer collapses all axis, except the first one, into one axis.
        net.add(nn.Flatten())
        net.add(nn.Dense(512, in_units=800))
        net.add(nn.Activation('relu'))
        net.add(nn.Dense(10, in_units=512))

    net.collect_params().initialize()
    criterion = gluon.loss.SoftmaxCELoss(axis=1)

    def test_net_sync(net, criterion, sync, nDevices):
        ctx_list = [mx.cpu(0) for i in range(nDevices)]
        net = DataParallelModel(net, ctx_list, sync=sync)
        criterion = DataParallelCriterion(criterion, ctx_list, sync=sync)
        iters = 100
        # train mode
        for i in range(iters):
            x = mx.random.uniform(shape=(8, 1, 28, 28))
            t = nd.ones(shape=(8))
            with autograd.record():
                y = net(x)
                loss = criterion(y, t)
                autograd.backward(loss)
        # evaluation mode
        for i in range(iters):
            x = mx.random.uniform(shape=(8, 1, 28, 28))
            y = net(x)

    test_net_sync(net, criterion, True, 1)
    test_net_sync(net, criterion, True, 2)
    test_net_sync(net, criterion, False, 1)
    test_net_sync(net, criterion, False, 2) 
Developer: Angzz, Project: panoptic-fpn-gluon, Lines: 43, Source: test_utils_parallel.py
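For orientation, DataParallelModel and DataParallelCriterion above come from gluoncv.utils.parallel. A minimal manual equivalent of the synchronous pattern, using gluon.utils.split_and_load (a sketch with illustrative shapes, reusing net and criterion from the test; this is not the wrappers' actual implementation):

import mxnet as mx
from mxnet import autograd, gluon, nd

ctx_list = [mx.cpu(0), mx.cpu(0)]        # two slices, both on CPU for illustration
x = mx.random.uniform(shape=(8, 1, 28, 28))
t = nd.ones(shape=(8,))
data_list = gluon.utils.split_and_load(x, ctx_list)
label_list = gluon.utils.split_and_load(t, ctx_list)
with autograd.record():
    losses = [criterion(net(xs), ys) for xs, ys in zip(data_list, label_list)]
autograd.backward(losses)                # backward accepts a list of heads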

Example 7: parallel_backward

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def parallel_backward(losses, sync=True):
    """Parallel Backward for CustomOp"""
    def _worker(loss):
        autograd.backward(loss)
    threads = [threading.Thread(target=_worker, args=(loss,)) for loss in losses]
    if sync:
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        for loss in losses:
            loss.backward() 
Developer: Angzz, Project: panoptic-fpn-gluon, Lines: 15, Source: parallel.py

Example 8: forward_backward

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def forward_backward(self, x):
        data, label, rpn_cls_targets, rpn_box_targets, rpn_box_masks = x
        with autograd.record():
            gt_label = label[:, :, 4:5]
            gt_box = label[:, :, :4]
            cls_pred, box_pred, roi, samples, matches, rpn_score, rpn_box, anchors, cls_targets, \
                box_targets, box_masks, _ = net(data, gt_box, gt_label)
            # losses of rpn
            rpn_score = rpn_score.squeeze(axis=-1)
            num_rpn_pos = (rpn_cls_targets >= 0).sum()
            rpn_loss1 = self.rpn_cls_loss(rpn_score, rpn_cls_targets,
                                          rpn_cls_targets >= 0) * rpn_cls_targets.size / num_rpn_pos
            rpn_loss2 = self.rpn_box_loss(rpn_box, rpn_box_targets,
                                          rpn_box_masks) * rpn_box.size / num_rpn_pos
            # rpn overall loss, use sum rather than average
            rpn_loss = rpn_loss1 + rpn_loss2
            # losses of rcnn
            num_rcnn_pos = (cls_targets >= 0).sum()
            rcnn_loss1 = self.rcnn_cls_loss(cls_pred, cls_targets,
                                            cls_targets.expand_dims(-1) >= 0) * cls_targets.size / \
                         num_rcnn_pos
            rcnn_loss2 = self.rcnn_box_loss(box_pred, box_targets, box_masks) * box_pred.size / \
                         num_rcnn_pos
            rcnn_loss = rcnn_loss1 + rcnn_loss2
            # overall losses
            total_loss = rpn_loss.sum() * self.mix_ratio + rcnn_loss.sum() * self.mix_ratio

            rpn_loss1_metric = rpn_loss1.mean() * self.mix_ratio
            rpn_loss2_metric = rpn_loss2.mean() * self.mix_ratio
            rcnn_loss1_metric = rcnn_loss1.mean() * self.mix_ratio
            rcnn_loss2_metric = rcnn_loss2.mean() * self.mix_ratio
            rpn_acc_metric = [[rpn_cls_targets, rpn_cls_targets >= 0], [rpn_score]]
            rpn_l1_loss_metric = [[rpn_box_targets, rpn_box_masks], [rpn_box]]
            rcnn_acc_metric = [[cls_targets], [cls_pred]]
            rcnn_l1_loss_metric = [[box_targets, box_masks], [box_pred]]

            if args.amp:
                with amp.scale_loss(total_loss, self._optimizer) as scaled_losses:
                    autograd.backward(scaled_losses)
            else:
                total_loss.backward()

        return rpn_loss1_metric, rpn_loss2_metric, rcnn_loss1_metric, rcnn_loss2_metric, \
               rpn_acc_metric, rpn_l1_loss_metric, rcnn_acc_metric, rcnn_l1_loss_metric 
Developer: dmlc, Project: dgl, Lines: 46, Source: train_faster_rcnn.py
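The args.amp branch above presupposes AMP initialization earlier in the script. A minimal sketch of that setup with mxnet.contrib.amp (net, criterion, x, y, and batch_size are illustrative placeholders):

from mxnet import autograd, gluon
from mxnet.contrib import amp

amp.init()                               # must be called before the network is constructed
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})
amp.init_trainer(trainer)                # enable dynamic loss scaling for this trainer
with autograd.record():
    loss = criterion(net(x), y)
    with amp.scale_loss(loss, trainer) as scaled_loss:
        autograd.backward(scaled_loss)   # backward on the scaled loss
trainer.step(batch_size)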

Example 9: test_data_parallel

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def test_data_parallel():
    # test gluon.contrib.parallel.DataParallelModel
    net = nn.HybridSequential()
    with net.name_scope():
        net.add(nn.Conv2D(in_channels=1, channels=5, kernel_size=5))
        net.add(nn.Activation('relu'))
        net.add(nn.MaxPool2D(pool_size=2, strides=2))
        net.add(nn.Conv2D(in_channels=5, channels=5, kernel_size=5))
        net.add(nn.Activation('relu'))
        net.add(nn.MaxPool2D(pool_size=2, strides=2))
        # The Flatten layer collapses all axis, except the first one, into one axis.
        net.add(nn.Flatten())
        net.add(nn.Dense(8, in_units=80))
        net.add(nn.Activation('relu'))
        net.add(nn.Dense(10, in_units=8))

    net.collect_params().initialize()
    criterion = gluon.loss.SoftmaxCELoss(axis=1)

    def test_net_sync(net, criterion, sync, nDevices):
        ctx_list = [mx.cpu(0) for i in range(nDevices)]
        net = DataParallelModel(net, ctx_list, sync=sync)
        criterion = DataParallelCriterion(criterion, ctx_list, sync=sync)
        iters = 10
        bs = 2
        # train mode
        for i in range(iters):
            x = mx.random.uniform(shape=(bs, 1, 28, 28))
            t = nd.ones(shape=(bs))
            with autograd.record():
                y = net(x)
                loss = criterion(y, t)
                autograd.backward(loss)
        # evaluation mode
        for i in range(iters):
            x = mx.random.uniform(shape=(bs, 1, 28, 28))
            y = net(x)
        nd.waitall()

    # test_net_sync(net, criterion, True, 1)
    test_net_sync(net, criterion, True, 2)
    # test_net_sync(net, criterion, False, 1)
    test_net_sync(net, criterion, False, 2) 
Developer: dmlc, Project: gluon-cv, Lines: 45, Source: test_utils_parallel.py

Example 10: train

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def train(ctx):
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    if opt.use_pretrained_base:
        if model_name.startswith('simple'):
            net.deconv_layers.initialize(ctx=ctx)
            net.final_layer.initialize(ctx=ctx)
        elif model_name.startswith('mobile'):
            net.upsampling.initialize(ctx=ctx)
    else:
        net.initialize(mx.init.MSRAPrelu(), ctx=ctx)

    trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)

    L = gluon.loss.L2Loss()
    metric = HeatmapAccuracy()

    best_val_score = 1

    if opt.mode == 'hybrid':
        net.hybridize(static_alloc=True, static_shape=True)

    for epoch in range(opt.num_epochs):
        loss_val = 0
        tic = time.time()
        btic = time.time()
        metric.reset()

        for i, batch in enumerate(train_data):
            data, label, weight, imgid = train_batch_fn(batch, ctx)

            with ag.record():
                outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
                loss = [nd.cast(L(nd.cast(yhat, 'float32'), y, w), opt.dtype)
                        for yhat, y, w in zip(outputs, label, weight)]
            ag.backward(loss)
            trainer.step(batch_size)

            metric.update(label, outputs)

            loss_val += sum([l.mean().asscalar() for l in loss]) / num_gpus
            if opt.log_interval and not (i+1)%opt.log_interval:
                metric_name, metric_score = metric.get()
                logger.info('Epoch[%d] Batch [%d]\tSpeed: %f samples/sec\tloss=%f\tlr=%f\t%s=%.3f'%(
                             epoch, i, batch_size*opt.log_interval/(time.time()-btic),
                             loss_val / (i+1), trainer.learning_rate, metric_name, metric_score))
                btic = time.time()

        time_elapsed = time.time() - tic
        logger.info('Epoch[%d]\t\tSpeed: %d samples/sec over %d secs\tloss=%f\n'%(
                     epoch, int(i*batch_size / time_elapsed), int(time_elapsed), loss_val / (i+1)))
        if save_frequency and save_dir and (epoch + 1) % save_frequency == 0:
            net.save_parameters('%s/%s-%d.params'%(save_dir, model_name, epoch))
            trainer.save_states('%s/%s-%d.states'%(save_dir, model_name, epoch))

    if save_frequency and save_dir:
        net.save_parameters('%s/%s-%d.params'%(save_dir, model_name, opt.num_epochs-1))
        trainer.save_states('%s/%s-%d.states'%(save_dir, model_name, opt.num_epochs-1))

    return net 
Developer: dmlc, Project: gluon-cv, Lines: 62, Source: train_simple_pose.py

Example 11: train

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def train(opt, net, train_loader, criterion, trainer, batch_size, logger):
    """train model"""
    for epoch in range(opt.start_epoch, opt.epochs):
        loss_total_val = 0
        loss_loc_val = 0
        loss_cls_val = 0
        batch_time = time.time()
        for i, data in enumerate(train_loader):
            template, search, label_cls, label_loc, label_loc_weight = train_batch_fn(data, opt)
            cls_losses = []
            loc_losses = []
            total_losses = []
            with autograd.record():
                for j in range(len(opt.ctx)):
                    cls, loc = net(template[j], search[j])
                    label_cls_temp = label_cls[j].reshape(-1).asnumpy()
                    pos_index = np.argwhere(label_cls_temp == 1).reshape(-1)
                    neg_index = np.argwhere(label_cls_temp == 0).reshape(-1)
                    if len(pos_index):
                        pos_index = nd.array(pos_index, ctx=opt.ctx[j])
                    else:
                        pos_index = nd.array(np.array([]), ctx=opt.ctx[j])
                    if len(neg_index):
                        neg_index = nd.array(neg_index, ctx=opt.ctx[j])
                    else:
                        neg_index = nd.array(np.array([]), ctx=opt.ctx[j])
                    cls_loss, loc_loss = criterion(cls, loc, label_cls[j], pos_index, neg_index,
                                                   label_loc[j], label_loc_weight[j])
                    total_loss = opt.cls_weight*cls_loss+opt.loc_weight*loc_loss
                    cls_losses.append(cls_loss)
                    loc_losses.append(loc_loss)
                    total_losses.append(total_loss)

                mx.nd.waitall()
                if opt.use_amp:
                    with amp.scale_loss(total_losses, trainer) as scaled_loss:
                        autograd.backward(scaled_loss)
                else:
                    autograd.backward(total_losses)
            trainer.step(batch_size)
            loss_total_val += sum([l.mean().asscalar() for l in total_losses]) / len(total_losses)
            loss_loc_val += sum([l.mean().asscalar() for l in loc_losses]) / len(loc_losses)
            loss_cls_val += sum([l.mean().asscalar() for l in cls_losses]) / len(cls_losses)
            if i%(opt.log_interval) == 0:
                logger.info('Epoch %d iteration %04d/%04d: loc loss %.3f, cls loss %.3f, \
                             training loss %.3f, batch time %.3f'% \
                            (epoch, i, len(train_loader), loss_loc_val/(i+1), loss_cls_val/(i+1),
                             loss_total_val/(i+1), time.time()-batch_time))
                batch_time = time.time()
            mx.nd.waitall()
        # save every epoch
        if opt.no_val:
            save_checkpoint(net, opt, epoch, False) 
Developer: dmlc, Project: gluon-cv, Lines: 55, Source: train.py

Example 12: forward_backward

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def forward_backward(self, x):
        data, label, rpn_cls_targets, rpn_box_targets, rpn_box_masks = x
        with autograd.record():
            gt_label = label[:, :, 4:5]
            gt_box = label[:, :, :4]
            cls_pred, box_pred, _, _, _, rpn_score, rpn_box, _, cls_targets, \
                box_targets, box_masks, _ = self.net(data, gt_box, gt_label)
            # losses of rpn
            rpn_score = rpn_score.squeeze(axis=-1)
            num_rpn_pos = (rpn_cls_targets >= 0).sum()
            rpn_loss1 = self.rpn_cls_loss(rpn_score, rpn_cls_targets,
                                          rpn_cls_targets >= 0) * rpn_cls_targets.size / num_rpn_pos
            rpn_loss2 = self.rpn_box_loss(rpn_box, rpn_box_targets,
                                          rpn_box_masks) * rpn_box.size / num_rpn_pos
            # rpn overall loss, use sum rather than average
            rpn_loss = rpn_loss1 + rpn_loss2
            # losses of rcnn
            num_rcnn_pos = (cls_targets >= 0).sum()
            rcnn_loss1 = self.rcnn_cls_loss(
                cls_pred, cls_targets, cls_targets.expand_dims(-1) >= 0) * cls_targets.size / \
                         num_rcnn_pos
            rcnn_loss2 = self.rcnn_box_loss(box_pred, box_targets, box_masks) * box_pred.size / \
                         num_rcnn_pos
            rcnn_loss = rcnn_loss1 + rcnn_loss2
            # overall losses
            total_loss = rpn_loss.sum() * self.mix_ratio + rcnn_loss.sum() * self.mix_ratio

            rpn_loss1_metric = rpn_loss1.mean() * self.mix_ratio
            rpn_loss2_metric = rpn_loss2.mean() * self.mix_ratio
            rcnn_loss1_metric = rcnn_loss1.mean() * self.mix_ratio
            rcnn_loss2_metric = rcnn_loss2.mean() * self.mix_ratio
            rpn_acc_metric = [[rpn_cls_targets, rpn_cls_targets >= 0], [rpn_score]]
            rpn_l1_loss_metric = [[rpn_box_targets, rpn_box_masks], [rpn_box]]
            rcnn_acc_metric = [[cls_targets], [cls_pred]]
            rcnn_l1_loss_metric = [[box_targets, box_masks], [box_pred]]

            if self.amp_enabled:
                with amp.scale_loss(total_loss, self._optimizer) as scaled_losses:
                    autograd.backward(scaled_losses)
            else:
                total_loss.backward()

        return rpn_loss1_metric, rpn_loss2_metric, rcnn_loss1_metric, rcnn_loss2_metric, \
               rpn_acc_metric, rpn_l1_loss_metric, rcnn_acc_metric, rcnn_l1_loss_metric 
Developer: dmlc, Project: gluon-cv, Lines: 46, Source: data_parallel.py

Example 13: compute_acq_with_gradients

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def compute_acq_with_gradients(
            self, x: np.ndarray,
            model: Optional[SurrogateModel] = None) -> \
            Tuple[np.ndarray, np.ndarray]:
        if model is None:
            model = self.model
        dtype_nd = model.dtype_for_nd()
        dtype_np = x.dtype
        ctx = model.context_for_nd()
        if x.ndim == 1:
            x = x[None, :]
        num_data = x.shape[0]

        # Loop over cases (rows of x), we need the gradients for each case
        # separately
        f_acqu = np.empty((num_data, 1), dtype=dtype_np)
        df_acqu = np.empty_like(x)
        # The current best
        if self._head_needs_current_best():
            current_best = model.current_best().reshape((-1,))
        else:
            current_best = None

        dfdm_nd, dfds_nd, num_samples = None, None, None
        for row in range(num_data):
            x_nd = model.convert_np_to_nd(x[row, None])
            # Compute heads m_nd, s_nd while recording
            x_nd.attach_grad()
            with autograd.record():
                m_nd, s_nd = _reshape_predictions(model.predict_nd(x_nd))
                if dtype_np != dtype_nd:
                    m_nd = m_nd.astype(dtype_np)
                    s_nd = s_nd.astype(dtype_np)

            # Compute head gradients in NumPy
            head_result = self._compute_head(
                m_nd.asnumpy(), s_nd.asnumpy(), current_best)
            f_acqu[row] = np.mean(head_result.hvals)
            if row == 0:
                num_samples = m_nd.size
                dfdm_nd = mx.nd.array(head_result.dh_dmean, ctx=ctx, dtype=dtype_np)
                dfds_nd = mx.nd.array(head_result.dh_dstd, ctx=ctx, dtype=dtype_np)
            else:
                dfdm_nd[:] = head_result.dh_dmean
                dfds_nd[:] = head_result.dh_dstd

            # Backward with specific head gradients
            autograd.backward([m_nd, s_nd], [dfdm_nd, dfds_nd])
            df_acqu[row] = x_nd.grad.asnumpy().astype(
                dtype_np, copy=False) / num_samples
        return f_acqu, df_acqu 
Developer: awslabs, Project: autogluon, Lines: 53, Source: nphead_acqfunc.py
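The key call in this example is autograd.backward(heads, head_grads): instead of the default all-ones head gradient, it seeds backward with explicit gradients and thus computes a vector-Jacobian product. A minimal sketch with toy values:

from mxnet import autograd, nd

x = nd.array([1.0, 2.0, 3.0])
x.attach_grad()
with autograd.record():
    y = x * x
v = nd.array([1.0, 0.0, 0.5])    # explicit head gradient dL/dy
autograd.backward([y], [v])      # x.grad = v * dy/dx = v * 2x
print(x.grad)                    # [2. 0. 3.]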

Example 14: forward_backward

# Required import: from mxnet import autograd
# Or: from mxnet.autograd import backward
def forward_backward(self, x):
        data, label, rpn_cls_targets, rpn_box_targets, rpn_box_masks = x
        with autograd.record():
            gt_label = label[:, :, 4:5]
            gt_box = label[:, :, :4]
            cls_pred, box_pred, roi, samples, matches, rpn_score, rpn_box, anchors, cls_targets, \
                box_targets, box_masks, _ = self.net(data, gt_box, gt_label)
            # losses of rpn
            rpn_score = rpn_score.squeeze(axis=-1)
            num_rpn_pos = (rpn_cls_targets >= 0).sum()
            rpn_loss1 = self.rpn_cls_loss(rpn_score, rpn_cls_targets,
                                          rpn_cls_targets >= 0) * rpn_cls_targets.size / num_rpn_pos
            rpn_loss2 = self.rpn_box_loss(rpn_box, rpn_box_targets,
                                          rpn_box_masks) * rpn_box.size / num_rpn_pos
            # rpn overall loss, use sum rather than average
            rpn_loss = rpn_loss1 + rpn_loss2
            # losses of rcnn
            num_rcnn_pos = (cls_targets >= 0).sum()
            rcnn_loss1 = self.rcnn_cls_loss(cls_pred, cls_targets,
                                            cls_targets.expand_dims(-1) >= 0) * cls_targets.size / \
                num_rcnn_pos
            rcnn_loss2 = self.rcnn_box_loss(box_pred, box_targets, box_masks) * box_pred.size / \
                num_rcnn_pos
            rcnn_loss = rcnn_loss1 + rcnn_loss2
            # overall losses
            total_loss = rpn_loss.sum() * self.mix_ratio + rcnn_loss.sum() * self.mix_ratio

            rpn_loss1_metric = rpn_loss1.mean() * self.mix_ratio
            rpn_loss2_metric = rpn_loss2.mean() * self.mix_ratio
            rcnn_loss1_metric = rcnn_loss1.mean() * self.mix_ratio
            rcnn_loss2_metric = rcnn_loss2.mean() * self.mix_ratio
            rpn_acc_metric = [[rpn_cls_targets, rpn_cls_targets >= 0], [rpn_score]]
            rpn_l1_loss_metric = [[rpn_box_targets, rpn_box_masks], [rpn_box]]
            rcnn_acc_metric = [[cls_targets], [cls_pred]]
            rcnn_l1_loss_metric = [[box_targets, box_masks], [box_pred]]

            if self.amp:
                from mxnet.contrib import amp
                with amp.scale_loss(total_loss, self._optimizer) as scaled_losses:
                    autograd.backward(scaled_losses)
            else:
                total_loss.backward()

        return rpn_loss1_metric, rpn_loss2_metric, rcnn_loss1_metric, rcnn_loss2_metric, \
            rpn_acc_metric, rpn_l1_loss_metric, rcnn_acc_metric, rcnn_l1_loss_metric 
Developer: awslabs, Project: autogluon, Lines: 47, Source: data_parallel.py


Note: The mxnet.autograd.backward examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Consult each project's License before distributing or reusing the code. Do not reproduce this article without permission.