

Python Variable.long Method Code Examples

This article collects typical usage examples of the Python method torch.autograd.Variable.long. If you are unsure how Variable.long is used in practice, or are looking for concrete examples of calling it, the hand-picked code samples below should help. You can also explore further usage examples of the class it belongs to, torch.autograd.Variable.


The following presents 13 code examples of the Variable.long method, sorted by popularity by default.
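
Before the examples, here is a minimal sketch of what Variable.long does (written against the pre-0.4 torch.autograd.Variable API that all of the examples below target): it returns a copy of the wrapped tensor cast to torch.LongTensor, the integer dtype that loss functions such as torch.nn.CrossEntropyLoss expect for class labels. The tensor values are illustrative only.

import torch
from torch.autograd import Variable

# Class labels often arrive as a FloatTensor (e.g. read from a CSV file or a DataLoader)
labels = Variable(torch.Tensor([0, 2, 1]))

# .long() casts the underlying data to a LongTensor without changing the values
labels_long = labels.long()
print(labels_long.data.type())  # torch.LongTensor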

Example 1: eval

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
    def eval(self, epoch, save_score=False, loader_name=['test']):
        self.model.eval()
        self.print_log('Eval epoch: {}'.format(epoch + 1))
        for ln in loader_name:
            loss_value = []
            score_frag = []
            for batch_idx, (data, label) in enumerate(self.data_loader[ln]):
                data = Variable(
                    data.float().cuda(self.output_device),
                    requires_grad=False,
                    volatile=True)
                label = Variable(
                    label.long().cuda(self.output_device),
                    requires_grad=False,
                    volatile=True)
                output = self.model(data)
                loss = self.loss(output, label)
                score_frag.append(output.data.cpu().numpy())
                loss_value.append(loss.data[0])
            score = np.concatenate(score_frag)
            score_dict = dict(
                zip(self.data_loader[ln].dataset.sample_name, score))
            self.print_log('\tMean {} loss of {} batches: {}.'.format(
                ln, len(self.data_loader[ln]), np.mean(loss_value)))
            for k in self.arg.show_topk:
                self.print_log('\tTop{}: {:.2f}%'.format(
                    k, 100 * self.data_loader[ln].dataset.top_k(score, k)))

            if save_score:
                with open('{}/epoch{}_{}_score.pkl'.format(
                        self.arg.work_dir, epoch + 1, ln), 'wb') as f:
                    pickle.dump(score_dict, f)
Developer: sharpstill, Project: st-gcn, Lines of code: 34, Source: main.py
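
Example 1 is written against pre-0.4 PyTorch, where volatile=True disabled autograd at evaluation time and scalar losses were read with loss.data[0]. Purely as a point of reference (this is not part of the st-gcn project), the same data/label preparation in PyTorch 0.4+ would look roughly as follows, using torch.no_grad() and loss.item(); names such as model, loss_fn, output_device and loss_value simply mirror the snippet above.

with torch.no_grad():
    data = data.float().cuda(output_device)    # inputs as a FloatTensor on the target GPU
    label = label.long().cuda(output_device)   # class targets must be a LongTensor
    output = model(data)
    loss = loss_fn(output, label)
loss_value.append(loss.item())                 # .item() replaces loss.data[0]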

Example 2: predict

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
def predict(model, test_loader):
    # switch to evaluate mode
    model.eval()

    ################Note############################## 
    # each sample may have a different number of points
    # so just use batch of size 1
    ##################################################
    debug_here() 
    for i, (points_data, _seg_data, labels) in enumerate(test_loader, 0):
        if i%10 == 0:
            print('{0}/{1}'.format(i, len(test_loader)))
            # print(points_data.size())

        points_data = Variable(points_data, volatile=True)
        points_data = points_data.transpose(2, 1)
        _seg_data = Variable(_seg_data, volatile=True) 

        if opt.cuda:
            points_data = points_data.cuda() 
            _seg_data = _seg_data.long().cuda() # must be long cuda tensor  
        
        # forward, backward optimize 
        pred, _ = model(points_data)
        pred = pred.view(-1, opt.num_seg_classes)
        _seg_data = _seg_data.view(-1, 1)[:, 0]  # min is already 0
        pred_choice = pred.data.max(1)[1]

    print('finished loading')
Developer: ShichaoJin, Project: pointnet2.pytorch, Lines of code: 31, Source: eval_part_seg_folder.py

Example 3: train

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
def train(train_loader, model, criterion, optimizer, epoch, opt):
    """
    train for one epoch on the training set
    """
    batch_time = utils.AverageMeter() 
    losses = utils.AverageMeter()
    top1 = utils.AverageMeter() 

    # training mode
    model.train() 

    end = time.time() 
    for i, (input_points, labels) in enumerate(train_loader):
        # bz x 2048 x 3 
        input_points = Variable(input_points)
        input_points = input_points.transpose(2, 1) 
        labels = Variable(labels[:, 0])

        # print(points.size())
        # print(labels.size())
        # shift data to GPU
        if opt.cuda:
            input_points = input_points.cuda() 
            labels = labels.long().cuda() # must be long cuda tensor  
        
        # forward, backward optimize 
        output, _ = model(input_points)
        # debug_here() 
        loss = criterion(output, labels)
        ##############################
        # measure accuracy
        ##############################
        prec1 = utils.accuracy(output.data, labels.data, topk=(1,))[0]
        losses.update(loss.data[0], input_points.size(0))
        top1.update(prec1[0], input_points.size(0))

        ##############################
        # compute gradient and do sgd 
        ##############################
        optimizer.zero_grad() 
        loss.backward() 
        ##############################
        # gradient clip stuff 
        ##############################
        utils.clip_gradient(optimizer, opt.gradient_clip)
        
        optimizer.step() 

        # measure elapsed time
        batch_time.update(time.time() - end) 
        end = time.time() 
        if i % opt.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                  epoch, i, len(train_loader), batch_time=batch_time,
                  loss=losses, top1=top1)) 
Developer: ShichaoJin, Project: pointnet2.pytorch, Lines of code: 60, Source: main_cls.py

Example 4: train

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
    def train(self, epoch, save_model=False):
        self.model.train()
        self.print_log('Training epoch: {}'.format(epoch + 1))
        loader = self.data_loader['train']
        lr = self.adjust_learning_rate(epoch)
        loss_value = []

        self.record_time()
        timer = dict(dataloader=0.001, model=0.001, statistics=0.001)
        for batch_idx, (data, label) in enumerate(loader):

            # get data
            data = Variable(
                data.float().cuda(self.output_device), requires_grad=False)
            label = Variable(
                label.long().cuda(self.output_device), requires_grad=False)
            timer['dataloader'] += self.split_time()

            # forward
            output = self.model(data)
            loss = self.loss(output, label)

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            loss_value.append(loss.data[0])
            timer['model'] += self.split_time()

            # statistics
            if batch_idx % self.arg.log_interval == 0:
                self.print_log(
                    '\tBatch({}/{}) done. Loss: {:.4f}  lr:{:.6f}'.format(
                        batch_idx, len(loader), loss.data[0], lr))
            timer['statistics'] += self.split_time()

        # statistics of time consumption and loss
        proportion = {
            k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values()))))
            for k, v in timer.items()
        }
        self.print_log(
            '\tMean training loss: {:.4f}.'.format(np.mean(loss_value)))
        self.print_log(
            '\tTime consumption: [Data]{dataloader}, [Network]{model}'.format(
                **proportion))

        if save_model:
            model_path = '{}/epoch{}_model.pt'.format(self.arg.work_dir,
                                                      epoch + 1)
            state_dict = self.model.state_dict()
            weights = OrderedDict([[k.split('module.')[-1],
                                    v.cpu()] for k, v in state_dict.items()])
            torch.save(weights, model_path)
Developer: sharpstill, Project: st-gcn, Lines of code: 56, Source: main.py

Example 5: forward

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
    def forward(self, base_feat, im_info, gt_boxes, num_boxes):

        batch_size = base_feat.size(0)

        # return feature map after convrelu layer
        rpn_conv1 = F.relu(self.RPN_Conv(base_feat), inplace=True)
        # get rpn classification score
        rpn_cls_score = self.RPN_cls_score(rpn_conv1)

        rpn_cls_score_reshape = self.reshape(rpn_cls_score, 2)
        rpn_cls_prob_reshape = F.softmax(rpn_cls_score_reshape, dim=1)
        rpn_cls_prob = self.reshape(rpn_cls_prob_reshape, self.nc_score_out)

        # get rpn offsets to the anchor boxes
        rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv1)

        # proposal layer
        cfg_key = 'TRAIN' if self.training else 'TEST'

        rois = self.RPN_proposal((rpn_cls_prob.data, rpn_bbox_pred.data,
                                 im_info, cfg_key))

        self.rpn_loss_cls = 0
        self.rpn_loss_box = 0

        # generating training labels and build the rpn loss
        if self.training:
            assert gt_boxes is not None

            rpn_data = self.RPN_anchor_target((rpn_cls_score.data, gt_boxes, im_info, num_boxes))

            # compute classification loss
            rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 2)
            rpn_label = rpn_data[0].view(batch_size, -1)

            rpn_keep = Variable(rpn_label.view(-1).ne(-1).nonzero().view(-1))
            rpn_cls_score = torch.index_select(rpn_cls_score.view(-1,2), 0, rpn_keep)
            rpn_label = torch.index_select(rpn_label.view(-1), 0, rpn_keep.data)
            rpn_label = Variable(rpn_label.long())
            self.rpn_loss_cls = F.cross_entropy(rpn_cls_score, rpn_label)
            fg_cnt = torch.sum(rpn_label.data.ne(0))

            rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = rpn_data[1:]

            # compute bbox regression loss
            rpn_bbox_inside_weights = Variable(rpn_bbox_inside_weights)
            rpn_bbox_outside_weights = Variable(rpn_bbox_outside_weights)
            rpn_bbox_targets = Variable(rpn_bbox_targets)

            self.rpn_loss_box = _smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
                                                            rpn_bbox_outside_weights, sigma=3, dim=[1,2,3])

        return rois, self.rpn_loss_cls, self.rpn_loss_box
Developer: lianDaniel, Project: R-FCN.pytorch, Lines of code: 55, Source: rpn.py

Example 6: loss_calc

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
def loss_calc(pred, label, gpu):
    """
    This function returns cross entropy loss for semantic segmentation
    """
    # out shape batch_size x channels x h x w -> batch_size x channels x h x w
    # label shape h x w x 1 x batch_size  -> batch_size x 1 x h x w

    label = Variable(label.long()).cuda(gpu)
    criterion = CrossEntropy2d().cuda(gpu)
    # label = Variable(label.long()).cpu()
    # criterion = CrossEntropy2d().cpu()

    return criterion(pred, label)
Developer: MrtBian, Project: AdvSemiSeg, Lines of code: 15, Source: train.py
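
A hedged usage sketch of loss_calc (the surrounding names net, images and labels are placeholders, not part of the AdvSemiSeg code shown here): pred is the network output of shape batch_size x channels x h x w, label holds per-pixel integer class ids, and the .long() call inside loss_calc guarantees the targets are a LongTensor before CrossEntropy2d is applied.

# Hypothetical call on GPU 0
pred = net(Variable(images).cuda(0))   # batch_size x channels x h x w
loss = loss_calc(pred, labels, 0)      # labels: per-pixel integer class ids
loss.backward()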

Example 7: forward

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
 def forward(self, gtruths, encoder_hidden, encoder_outputs, teacher_forcing_ratio, is_train):
     
     
     if is_train:
         if teacher_forcing_ratio > 0:
             gtruths = Variable(gtruths.long()).cuda()
         decoder_outputs, decoder_hidden, ret_dict = self.dec_rnn(inputs = gtruths,
                                                                  encoder_hidden = encoder_hidden,
                                                                  encoder_outputs = encoder_outputs,
                                                                  teacher_forcing_ratio = teacher_forcing_ratio)
     else:
         decoder_outputs, decoder_hidden, ret_dict = self.beam_dec(inputs = gtruths,
                                                                   encoder_hidden = encoder_hidden,
                                                                   encoder_outputs = encoder_outputs,
                                                                   teacher_forcing_ratio = teacher_forcing_ratio)
         
     return decoder_outputs, decoder_hidden, ret_dict
Developer: xuwenshen, Project: Machine-Translation, Lines of code: 19, Source: net.py

Example 8: _anchor_target_layer

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
  def _anchor_target_layer(self, rpn_cls_score):
    rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = \
      anchor_target_layer(
      rpn_cls_score.data, self._gt_boxes.data.cpu().numpy(), self._im_info, self._feat_stride, self._anchors.data.cpu().numpy(), self._num_anchors)

    rpn_labels = Variable(torch.from_numpy(rpn_labels).float().cuda()) #.set_shape([1, 1, None, None])
    rpn_bbox_targets = Variable(torch.from_numpy(rpn_bbox_targets).float().cuda())#.set_shape([1, None, None, self._num_anchors * 4])
    rpn_bbox_inside_weights = Variable(torch.from_numpy(rpn_bbox_inside_weights).float().cuda())#.set_shape([1, None, None, self._num_anchors * 4])
    rpn_bbox_outside_weights = Variable(torch.from_numpy(rpn_bbox_outside_weights).float().cuda())#.set_shape([1, None, None, self._num_anchors * 4])

    rpn_labels = rpn_labels.long()
    self._anchor_targets['rpn_labels'] = rpn_labels
    self._anchor_targets['rpn_bbox_targets'] = rpn_bbox_targets
    self._anchor_targets['rpn_bbox_inside_weights'] = rpn_bbox_inside_weights
    self._anchor_targets['rpn_bbox_outside_weights'] = rpn_bbox_outside_weights

    for k in self._anchor_targets.keys():
      self._score_summaries[k] = self._anchor_targets[k]

    return rpn_labels
Developer: sunshinezhihuo, Project: AlphaPose, Lines of code: 22, Source: network.py

Example 9: sort_batch_by_length

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
def sort_batch_by_length(tensor: torch.autograd.Variable, sequence_lengths: torch.autograd.Variable):
    """
    Sort a batch first tensor by some specified lengths.

    Parameters
    ----------
    tensor : Variable(torch.FloatTensor), required.
        A batch first Pytorch tensor.
    sequence_lengths : Variable(torch.LongTensor), required.
        A tensor representing the lengths of some dimension of the tensor which
        we want to sort by.

    Returns
    -------
    sorted_tensor : Variable(torch.FloatTensor)
        The original tensor sorted along the batch dimension with respect to sequence_lengths.
    sorted_sequence_lengths : Variable(torch.LongTensor)
        The original sequence_lengths sorted by decreasing size.
    restoration_indices : Variable(torch.LongTensor)
        Indices into the sorted_tensor such that
        ``sorted_tensor.index_select(0, restoration_indices) == original_tensor``
    """

    if not isinstance(tensor, Variable) or not isinstance(sequence_lengths, Variable):
        raise ConfigurationError("Both the tensor and sequence lengths must be torch.autograd.Variables.")

    sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
    sorted_tensor = tensor.index_select(0, permutation_index)

    # This is ugly, but required - we are creating a new variable at runtime, so we
    # must ensure it has the correct CUDA vs non-CUDA type. We do this by cloning and
    # refilling one of the inputs to the function.
    index_range = sequence_lengths.data.clone().copy_(torch.arange(0, len(sequence_lengths)))
    # This is the equivalent of zipping with index, sorting by the original
    # sequence lengths and returning the now sorted indices.
    index_range = Variable(index_range.long())
    _, reverse_mapping = permutation_index.sort(0, descending=False)
    restoration_indices = index_range.index_select(0, reverse_mapping)
    return sorted_tensor, sorted_sequence_lengths, restoration_indices
Developer: cyzhangAThit, Project: GLUE-baselines, Lines of code: 41, Source: util.py
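
A short usage sketch (the shapes and lengths are illustrative, not taken from the GLUE-baselines code): sort a padded batch by sequence length, e.g. before packing it for an RNN, then undo the sort with restoration_indices.

# Hypothetical batch of 3 padded sequences, max length 5, feature size 10
tensor = Variable(torch.randn(3, 5, 10))
lengths = Variable(torch.LongTensor([2, 5, 3]))

sorted_tensor, sorted_lengths, restore = sort_batch_by_length(tensor, lengths)
# sorted_lengths.data is now [5, 3, 2]; recover the original order when needed:
original_order = sorted_tensor.index_select(0, restore)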

Example 10: forward

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
    def forward(self, im_data, im_info, gt_boxes, num_boxes):
        batch_size = im_data.size(0)

        im_info = im_info.data
        gt_boxes = gt_boxes.data
        num_boxes = num_boxes.data

        # feed image data to base model to obtain base feature map
        # Bottom-up
        c1 = self.RCNN_layer0(im_data)
        c2 = self.RCNN_layer1(c1)
        c3 = self.RCNN_layer2(c2)
        c4 = self.RCNN_layer3(c3)
        c5 = self.RCNN_layer4(c4)
        c6 = self.RCNN_layer5(c5)

        # Top-down
        p6 = self.RCNN_toplayer(c6)
        p5 = self.RCNN_latlayer1(c5) + p6
        p4 = self.RCNN_latlayer2(c4) + p5
        p3 = self._upsample_add(p4, self.RCNN_latlayer3(c3))
        p3 = self.RCNN_smooth1(p3)
        p2 = self._upsample_add(p3, self.RCNN_latlayer4(c2))
        p2 = self.RCNN_smooth2(p2)

        rpn_feature_maps = [p2, p3, p4, p5, p6]
        mrcnn_feature_maps = [p2, p3, p4, p5]

        rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(rpn_feature_maps, im_info, gt_boxes, num_boxes)

        # if it is the training phase, then use ground truth bboxes for refining
        if self.training:
            roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)
            rois, rois_label, gt_assign, rois_target, rois_inside_ws, rois_outside_ws = roi_data

            ## NOTE: additionally, normalize proposals to range [0, 1],
            #        this is necessary so that the following roi pooling
            #        is correct on different feature maps
            # rois[:, :, 1::2] /= im_info[0][1]
            # rois[:, :, 2::2] /= im_info[0][0]

            rois = rois.view(-1, 5)
            rois_label = rois_label.view(-1).long()
            gt_assign = gt_assign.view(-1).long()
            pos_id = rois_label.nonzero().squeeze()
            gt_assign_pos = gt_assign[pos_id]
            rois_label_pos = rois_label[pos_id]
            rois_label_pos_ids = pos_id

            rois_pos = Variable(rois[pos_id])
            rois = Variable(rois)
            rois_label = Variable(rois_label)

            rois_target = Variable(rois_target.view(-1, rois_target.size(2)))
            rois_inside_ws = Variable(rois_inside_ws.view(-1, rois_inside_ws.size(2)))
            rois_outside_ws = Variable(rois_outside_ws.view(-1, rois_outside_ws.size(2)))
        else:
            ## NOTE: additionally, normalize proposals to range [0, 1],
            #        this is necessary so that the following roi pooling
            #        is correct on different feature maps
            # rois[:, :, 1::2] /= im_info[0][1]
            # rois[:, :, 2::2] /= im_info[0][0]

            rois_label = None
            gt_assign = None
            rois_target = None
            rois_inside_ws = None
            rois_outside_ws = None
            rpn_loss_cls = 0
            rpn_loss_bbox = 0
            rois = rois.view(-1, 5)
            pos_id = torch.arange(0, rois.size(0)).long().type_as(rois).long()
            rois_label_pos_ids = pos_id
            rois_pos = Variable(rois[pos_id])
            rois = Variable(rois)

        # print('before pooling, cfg', cfg.POOLING_MODE)
        # print('before pooling, get_cfg', get_cfg().POOLING_MODE)
        # pooling features based on rois, output 14x14 map
        roi_pool_feat = self._PyramidRoI_Feat(mrcnn_feature_maps, rois, im_info)

        # feed pooled features to top model
        pooled_feat = self._head_to_tail(roi_pool_feat)


        # compute bbox offset
        bbox_pred = self.RCNN_bbox_pred(pooled_feat)
        if self.training and not self.class_agnostic:
            # select the corresponding columns according to roi labels
            bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4)
            bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.long().view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))
            bbox_pred = bbox_pred_select.squeeze(1)

        # compute object classification probability
        cls_score = self.RCNN_cls_score(pooled_feat)
        cls_prob = F.softmax(cls_score)

        RCNN_loss_cls = 0
        RCNN_loss_bbox = 0

#......... remaining code omitted .........
Developer: UGuess, Project: FaceDetection-DSFD, Lines of code: 103, Source: fpn.py

Example 11: Variable

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
            #print("images")
            images = Variable(images).cuda(0)
            #print(c3d.state_dict())
            #images = Variable(torch.randn(1, 3, 16, 112, 112)).cuda()
            #print("labels")
            labels_ori = labels
            labels = Variable(labels).cuda(0)
            #print(labels)

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = c3d(images)
            #images.register_hook(print)
            #print(outputs.size())
            #print(labels.size())
            loss = criterion(outputs, labels.long())
            #print("before backward")
            loss.backward()
            #print("after backward")
            optimizer.step()

            if (i + 1) % 10 == 0:
                if loss.data[0] < past_loss_save:
                    #print(loss.data[0])
                    past_loss_save = loss.data[0]
                    torch.save(c3d.state_dict(), 'c3d_4_sgd_cla101.pkl')

                print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f Loss x batch : %.4f'
                      % (epoch + 1, num_epochs, i + 1, 9320 // batch_size, loss.data[0], loss.data[0]*images.size(0)))

                _, predicted = torch.max(outputs.data, 1)
Developer: shubhampachori12110095, Project: c3d_pytorch, Lines of code: 33, Source: c3d_main.py

Example 12: train_epoch

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
    def train_epoch(self):
        """
        Function to train the model for one epoch
        """
        self.model.train()
        self.netG.train()
        self.netD.train()

        for batch_idx, (datas, datat) in tqdm.tqdm(
            enumerate(itertools.izip(self.train_loader, self.target_loader)), total=min(len(self.target_loader), len(self.train_loader)),
            desc='Train epoch = %d' % self.epoch, ncols=80, leave=False):

            data_source, labels_source = datas
            data_target, __ = datat
            data_source_forD = torch.zeros((data_source.size()[0], 3, self.image_size_forD[1], self.image_size_forD[0]))            
            data_target_forD = torch.zeros((data_target.size()[0], 3, self.image_size_forD[1], self.image_size_forD[0]))
            
            # We pass the unnormalized data to the discriminator. So, the GANs produce images without data normalization
            for i in range(data_source.size()[0]):
                data_source_forD[i] = self.train_loader.dataset.transform_forD(data_source[i], self.image_size_forD, resize=False, mean_add=True)
                data_target_forD[i] = self.train_loader.dataset.transform_forD(data_target[i], self.image_size_forD, resize=False, mean_add=True)

            iteration = batch_idx + self.epoch * min(len(self.train_loader), len(self.target_loader))
            self.iteration = iteration

            if self.cuda:
                data_source, labels_source = data_source.cuda(), labels_source.cuda()
                data_target = data_target.cuda()
                data_source_forD = data_source_forD.cuda()
                data_target_forD = data_target_forD.cuda()
            
            data_source, labels_source = Variable(data_source), Variable(labels_source)
            data_target = Variable(data_target)
            data_source_forD = Variable(data_source_forD)
            data_target_forD = Variable(data_target_forD)



            # Source domain 
            score, fc7, pool4, pool3 = self.model(data_source)
            outG_src = self.netG(fc7, pool4, pool3)
            outD_src_fake_s, outD_src_fake_c = self.netD(outG_src)
            outD_src_real_s, outD_src_real_c = self.netD(data_source_forD)
            
            # target domain
            tscore, tfc7, tpool4, tpool3= self.model(data_target)
            outG_tgt = self.netG(tfc7, tpool4, tpool3)
            outD_tgt_real_s, outD_tgt_real_c = self.netD(data_target_forD)
            outD_tgt_fake_s, outD_tgt_fake_c = self.netD(outG_tgt)

            # Creating labels for D. We need two sets of labels since our model is an ACGAN-style framework:
            # (1) Labels for the classifier branch. This will be a downsampled version of the original segmentation labels
            # (2) Domain labels for classifying source real, source fake, target real and target fake
            
            # Labels for classifier branch 
            Dout_sz = outD_src_real_s.size()
            label_forD = torch.zeros((outD_tgt_fake_c.size()[0], outD_tgt_fake_c.size()[2], outD_tgt_fake_c.size()[3]))
            for i in range(label_forD.size()[0]):
                label_forD[i] = self.train_loader.dataset.transform_label_forD(labels_source[i], (outD_tgt_fake_c.size()[2], outD_tgt_fake_c.size()[3]))
            if self.cuda:
                label_forD = label_forD.cuda()
            label_forD = Variable(label_forD.long())

            # Domain labels
            domain_labels_src_real = torch.LongTensor(Dout_sz[0],Dout_sz[2],Dout_sz[3]).zero_()
            domain_labels_src_fake = torch.LongTensor(Dout_sz[0],Dout_sz[2],Dout_sz[3]).zero_()+1
            domain_labels_tgt_real = torch.LongTensor(Dout_sz[0],Dout_sz[2],Dout_sz[3]).zero_()+2
            domain_labels_tgt_fake = torch.LongTensor(Dout_sz[0],Dout_sz[2],Dout_sz[3]).zero_()+3

            domain_labels_src_real = Variable(domain_labels_src_real.cuda())
            domain_labels_src_fake = Variable(domain_labels_src_fake.cuda())
            domain_labels_tgt_real = Variable(domain_labels_tgt_real.cuda())
            domain_labels_tgt_fake = Variable(domain_labels_tgt_fake.cuda())

            
            # Updates.
            # There are three sets of updates - (1) Discriminator, (2) Generator and (3) F network
            
            # (1) Discriminator updates
            lossD_src_real_s = cross_entropy2d(outD_src_real_s, domain_labels_src_real, size_average=self.size_average)
            lossD_src_fake_s = cross_entropy2d(outD_src_fake_s, domain_labels_src_fake, size_average=self.size_average)
            lossD_src_real_c = cross_entropy2d(outD_src_real_c, label_forD, size_average=self.size_average)
            lossD_tgt_real = cross_entropy2d(outD_tgt_real_s, domain_labels_tgt_real, size_average=self.size_average)
            lossD_tgt_fake = cross_entropy2d(outD_tgt_fake_s, domain_labels_tgt_fake, size_average=self.size_average)           
            
            self.optimD.zero_grad()            
            lossD = lossD_src_real_s + lossD_src_fake_s + lossD_src_real_c + lossD_tgt_real + lossD_tgt_fake
            lossD /= len(data_source)
            lossD.backward(retain_graph=True)
            self.optimD.step()
        
            
            # (2) Generator updates
            self.optimG.zero_grad()            
            lossG_src_adv_s = cross_entropy2d(outD_src_fake_s, domain_labels_src_real,size_average=self.size_average)
            lossG_src_adv_c = cross_entropy2d(outD_src_fake_c, label_forD,size_average=self.size_average)
            lossG_tgt_adv_s = cross_entropy2d(outD_tgt_fake_s, domain_labels_tgt_real,size_average=self.size_average)
            lossG_src_mse = F.l1_loss(outG_src,data_source_forD)
            lossG_tgt_mse = F.l1_loss(outG_tgt,data_target_forD)

#......... remaining code omitted .........
Developer: Wizaron, Project: LSD-seg, Lines of code: 103, Source: trainer_LSD.py

Example 13: print

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import long [as alias]
# Predicting animal type based on various features
xy = np.loadtxt('data-04-zoo.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]

print(x_data.shape, y_data.shape)

nb_classes = 7  # 0 ~ 6

X = Variable(torch.from_numpy(x_data))
Y = Variable(torch.from_numpy(y_data))

# one hot encoding
Y_one_hot = torch.zeros(Y.size()[0], nb_classes)
Y_one_hot.scatter_(1, Y.long().data, 1)
Y_one_hot = Variable(Y_one_hot)
print("one_hot", Y_one_hot.data)

softmax = torch.nn.Softmax()
model = torch.nn.Linear(16, nb_classes, bias=True)

# Cross entropy cost/loss
criterion = torch.nn.CrossEntropyLoss()    # Softmax is internally computed.
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for step in range(2001):
    optimizer.zero_grad()
    hypothesis = model(X)
    # Label has to be 1D LongTensor
    cost = criterion(hypothesis, Y.long().view(-1))
Developer: Fred159, Project: DeepLearningZeroToAll, Lines of code: 32, Source: lab-06-2-softmax_zoo_classifier.py
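
The comment "Label has to be 1D LongTensor" above is exactly why Y.long().view(-1) is needed: torch.nn.CrossEntropyLoss expects its targets as a 1-D LongTensor of class indices (not one-hot vectors), while y_data is loaded from the CSV as a float array of shape (N, 1). A minimal illustration with made-up values, reusing criterion and nb_classes from the example:

logits = Variable(torch.randn(4, nb_classes))                    # 4 samples, 7 class scores each
targets = Variable(torch.Tensor([[1.0], [6.0], [0.0], [3.0]]))   # float, shape 4 x 1, like y_data
loss = criterion(logits, targets.long().view(-1))                # cast + flatten to a 1-D LongTensor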


Note: the torch.autograd.Variable.long examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright of the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.