

Python torch.size Method Code Examples

This article collects typical usage examples of the torch.size method in Python. If you are wondering what torch.size does, how to call it, or what real-world code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the torch package.


The following presents 15 code examples of the torch.size method, ordered by popularity by default.
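
Strictly speaking, there is no standalone torch.size function; what these examples call is the size() method of torch.Tensor, which returns a torch.Size object (a tuple subclass). A minimal sketch of the basic usage, with arbitrary tensor shapes:

import torch

x = torch.zeros(2, 3, 4)
print(x.size())      # torch.Size([2, 3, 4]), a tuple-like object
print(x.size(0))     # 2, the length of the first dimension
print(x.shape)       # equivalent attribute form of the same information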

Example 1: focal_loss

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def focal_loss(self, inputs, targets):
        '''Focal loss.
        mean of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
        '''
        N = inputs.size(0)
        C = inputs.size(1)
        P = F.softmax(inputs, dim=1)  # class probabilities; dim=1 silences the implicit-dim deprecation warning

        class_mask = inputs.data.new(N, C).fill_(0)
        class_mask = Variable(class_mask)
        ids = targets.view(-1, 1)
        class_mask.scatter_(1, ids.data, 1.)

        if inputs.is_cuda and not self.alpha.is_cuda:
            self.alpha = self.alpha.cuda()
        alpha = self.alpha[ids.data.view(-1)]
        probs = (P*class_mask).sum(1).view(-1,1)
        log_p = probs.log()

        batch_loss = -alpha*(torch.pow((1-probs), self.gamma))*log_p 

        loss = batch_loss.mean()
        return loss 
Author: ShuangXieIrene, Project: ssds.pytorch, Lines of code: 25, Source: focal_loss.py
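
To clarify the shapes Example 1 works with, here is a hypothetical standalone restatement of the same focal-loss formula, FL(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t). The function name and the dummy inputs below are illustrative, not part of the original repository.

import torch
import torch.nn.functional as F

def focal_loss_sketch(inputs, targets, alpha, gamma=2.0):
    # inputs: [N, C] logits, targets: [N] class indices, alpha: [C] per-class weights
    probs = F.softmax(inputs, dim=1)                      # class probabilities
    p_t = probs.gather(1, targets.view(-1, 1)).view(-1)   # probability of the true class
    alpha_t = alpha[targets]                              # per-sample weight
    return (-alpha_t * (1 - p_t) ** gamma * p_t.log()).mean()

inputs = torch.randn(4, 3)                 # 4 samples, 3 classes
targets = torch.tensor([0, 2, 1, 2])
alpha = torch.full((3,), 0.25)
print(focal_loss_sketch(inputs, targets, alpha))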

Example 2: forward

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def forward(self,input_gradients, prev_state):
        if USE_CUDA:
            input_gradients = input_gradients.cuda()
        # torch.nn.LSTM expects input of shape (seq_len, batch, input_dim),
        # so reshape the flat gradient, e.g. gradient.size() == torch.Size([5]) -> [1, 1, 5]
        gradients = input_gradients.unsqueeze(0)
        if self.preprocess_flag:
            gradients = self.LogAndSign_Preprocess_Gradient(gradients)
        update, next_state = self.Output_Gradient_Increment_And_Update_LSTM_Hidden_State(gradients, prev_state)
        # Squeeze back to a flat vector: [1, 1, 5] -> [5]
        update = update.squeeze().squeeze()
       
        return update , next_state
    
#################   Parameters of the optimizer   ##############################
Author: yangsenius, Project: learning-to-learn-by-pytorch, Lines of code: 18, Source: learning_to_learn.py
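
A minimal sketch of the shape bookkeeping that the comments in Example 2 describe; the LSTM itself is omitted and the size 5 is arbitrary.

import torch

g = torch.randn(5)               # gradient.size() == torch.Size([5])
g = g.unsqueeze(0).unsqueeze(0)  # [1, 1, 5]: the (seq_len, batch, input_dim) layout nn.LSTM expects
print(g.size())                  # torch.Size([1, 1, 5])
print(g.squeeze().size())        # torch.Size([5]), back to a flat vector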

Example 3: semantic_segmentation_loss

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def semantic_segmentation_loss(self, segment_data, mask_t, class_t, interpolation_mode='bilinear'):
        # Note num_classes here is without the background class so cfg.num_classes-1
        batch_size, num_classes, mask_h, mask_w = segment_data.size()
        loss_s = 0
        
        for idx in range(batch_size):
            cur_segment = segment_data[idx]
            cur_class_t = class_t[idx]

            with torch.no_grad():
                downsampled_masks = F.interpolate(mask_t[idx].unsqueeze(0), (mask_h, mask_w),
                                                  mode=interpolation_mode, align_corners=False).squeeze(0)
                downsampled_masks = downsampled_masks.gt(0.5).float()
                
                # Construct Semantic Segmentation
                segment_t = torch.zeros_like(cur_segment, requires_grad=False)
                for obj_idx in range(downsampled_masks.size(0)):
                    segment_t[cur_class_t[obj_idx]] = torch.max(segment_t[cur_class_t[obj_idx]], downsampled_masks[obj_idx])
            
            loss_s += F.binary_cross_entropy_with_logits(cur_segment, segment_t, reduction='sum')
        
        return loss_s / mask_h / mask_w * cfg.semantic_segmentation_alpha 
Author: dbolya, Project: yolact, Lines of code: 24, Source: multibox_loss.py
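
The downsampling-and-rebinarizing step in Example 3 can be isolated as follows; the mask sizes here are made up for illustration.

import torch
import torch.nn.functional as F

masks = torch.rand(2, 32, 32)                    # two full-resolution masks
down = F.interpolate(masks.unsqueeze(0), (8, 8),
                     mode='bilinear', align_corners=False).squeeze(0)
down = down.gt(0.5).float()                      # re-binarize after interpolation
print(down.size())                               # torch.Size([2, 8, 8])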

Example 4: coeff_diversity_loss

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def coeff_diversity_loss(self, coeffs, instance_t):
        """
        coeffs     should be size [num_pos, num_coeffs]
        instance_t should be size [num_pos] and be values from 0 to num_instances-1
        """
        num_pos = coeffs.size(0)
        instance_t = instance_t.view(-1) # juuuust to make sure

        coeffs_norm = F.normalize(coeffs, dim=1)
        cos_sim = coeffs_norm @ coeffs_norm.t()

        inst_eq = (instance_t[:, None].expand_as(cos_sim) == instance_t[None, :].expand_as(cos_sim)).float()

        # Rescale to be between 0 and 1
        cos_sim = (cos_sim + 1) / 2

        # If they're the same instance, use cosine distance, else use cosine similarity
        loss = (1 - cos_sim) * inst_eq + cos_sim * (1 - inst_eq)

        # Only divide by num_pos once because we're summing over a num_pos x num_pos tensor
        # and all the losses will be divided by num_pos at the end, so just one extra time.
        return cfg.mask_proto_coeff_diversity_alpha * loss.sum() / num_pos 
Author: dbolya, Project: yolact, Lines of code: 24, Source: multibox_loss.py
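
A minimal sketch of the pairwise cosine-similarity matrix that Example 4 builds, using random coefficients and arbitrary sizes.

import torch
import torch.nn.functional as F

coeffs = torch.randn(6, 32)                  # [num_pos, num_coeffs]
coeffs_norm = F.normalize(coeffs, dim=1)     # unit-length rows
cos_sim = coeffs_norm @ coeffs_norm.t()      # [num_pos, num_pos], values in [-1, 1]
cos_sim = (cos_sim + 1) / 2                  # rescaled to [0, 1] as in the example
print(cos_sim.size(), cos_sim.diag())        # diagonal entries are 1.0 (each vector vs. itself)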

Example 5: forward

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def forward(self, inputs, targets):
        N = inputs.size(0)
        C = inputs.size(1)
        P = F.softmax(inputs, dim=1)
        class_mask = inputs.data.new(N, C).fill_(0)
        class_mask = Variable(class_mask)
        ids = targets.view(-1, 1)
        class_mask.scatter_(1, ids.data, 1.)

        if inputs.is_cuda and not self.alpha.is_cuda:
            self.alpha = self.alpha.cuda()
        alpha = self.alpha[ids.data.view(-1)]

        probs = (P * class_mask).sum(1).view(-1, 1)

        log_p = probs.log()

        batch_loss = -alpha * (torch.pow((1 - probs), self.gamma)) * log_p

        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss.sum()
        return loss 
Author: ouyanghuiyu, Project: RefinedetLite.pytorch, Lines of code: 26, Source: refinedet_multibox_loss.py
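
Both Example 1 and Example 5 rely on scatter_ to build a one-hot mask of the target classes. The step in isolation, with toy sizes:

import torch

N, C = 4, 3
targets = torch.tensor([2, 0, 1, 2])
class_mask = torch.zeros(N, C)
class_mask.scatter_(1, targets.view(-1, 1), 1.)   # place a 1 in each row's target column
print(class_mask)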

Example 6: __init__

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def __init__(self, cfg, num_classes):
        super(HSDMultiBoxLoss, self).__init__()
        self.cfg = cfg
        self.size = cfg.MODEL.SIZE
        if self.size == '300':
            size_cfg = cfg.SMALL
        else:
            size_cfg = cfg.BIG
        self.variance = size_cfg.VARIANCE
        self.num_classes = num_classes
        self.threshold = cfg.TRAIN.OVERLAP
        self.OHEM = cfg.TRAIN.OHEM
        self.negpos_ratio = cfg.TRAIN.NEG_RATIO
        self.object_score = cfg.MODEL.OBJECT_SCORE
        self.variance = size_cfg.VARIANCE
        if cfg.TRAIN.FOCAL_LOSS:
            if cfg.TRAIN.FOCAL_LOSS_TYPE == 'SOFTMAX':
                self.focaloss = FocalLossSoftmax(
                    self.num_classes, gamma=2, size_average=False)
            else:
                self.focaloss = FocalLossSigmoid() 
Author: JialeCao001, Project: HSD, Lines of code: 23, Source: hsd_multibox_loss.py

Example 7: __init__

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def __init__(self, cfg):
        super(MultiBoxLoss, self).__init__()
        self.cfg = cfg
        self.size = cfg.MODEL.SIZE
        if self.size == '300':
            size_cfg = cfg.SMALL
        else:
            size_cfg = cfg.BIG
        self.variance = size_cfg.VARIANCE
        self.num_classes = cfg.MODEL.NUM_CLASSES
        self.threshold = cfg.TRAIN.OVERLAP
        self.OHEM = cfg.TRAIN.OHEM
        self.negpos_ratio = cfg.TRAIN.NEG_RATIO
        self.variance = size_cfg.VARIANCE
        if cfg.TRAIN.FOCAL_LOSS:
            if cfg.TRAIN.FOCAL_LOSS_TYPE == 'SOFTMAX':
                self.focaloss = FocalLossSoftmax(
                    self.num_classes, gamma=2, size_average=False)
            else:
                self.focaloss = FocalLossSigmoid() 
Author: JialeCao001, Project: HSD, Lines of code: 22, Source: multibox_loss.py

Example 8: __init__

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def __init__(self, cfg, num_classes):
        super(RefineMultiBoxLoss, self).__init__()
        self.cfg = cfg
        self.size = cfg.MODEL.SIZE
        if self.size == '300':
            size_cfg = cfg.SMALL
        else:
            size_cfg = cfg.BIG
        self.variance = size_cfg.VARIANCE
        self.num_classes = num_classes
        self.threshold = cfg.TRAIN.OVERLAP
        self.OHEM = cfg.TRAIN.OHEM
        self.negpos_ratio = cfg.TRAIN.NEG_RATIO
        self.object_score = cfg.MODEL.OBJECT_SCORE
        self.variance = size_cfg.VARIANCE
        if cfg.TRAIN.FOCAL_LOSS:
            if cfg.TRAIN.FOCAL_LOSS_TYPE == 'SOFTMAX':
                self.focaloss = FocalLossSoftmax(
                    self.num_classes, gamma=2, size_average=False)
            else:
                self.focaloss = FocalLossSigmoid() 
Author: yqyao, Project: SSD_Pytorch, Lines of code: 23, Source: refine_multibox_loss.py

Example 9: _batch2torch

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def _batch2torch(self, batch, batch_size):
        """ List of transitions -> Batch of transitions -> pytorch tensors.

            Returns:
                states: torch.Size([batch_size, hist_len, w, h])
                a/r/d: torch.Size([batch_size, 1])
        """
        # check-out pytorch dqn tutorial.
        # (t1, t2, ... tn) -> t((s1, s2, ..., sn), (a1, a2, ... an) ...)
        batch = BatchTransition(*zip(*batch))

        # lists to tensors
        state_batch = torch.cat(batch.state, 0).type(self.dtype.FT) / 255
        action_batch = self.dtype.LT(batch.action).unsqueeze(1)
        reward_batch = self.dtype.FT(batch.reward).unsqueeze(1)
        next_state_batch = torch.cat(batch.state_, 0).type(self.dtype.FT) / 255
        # [False, False, True, False] -> [1, 1, 0, 1]::ByteTensor
        mask = 1 - self.dtype.BT(batch.done).unsqueeze(1)

        return [batch_size, state_batch, action_batch, reward_batch,
                next_state_batch, mask] 
Author: floringogianu, Project: categorical-dqn, Lines of code: 23, Source: ntuple_experience_replay.py
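
The `BatchTransition(*zip(*batch))` transposition in Example 9 is easiest to see on a toy batch. The namedtuple definition and the shapes below are assumptions for illustration; the original module defines its own BatchTransition.

from collections import namedtuple
import torch

# Hypothetical transition tuple matching the fields used in Example 9.
BatchTransition = namedtuple('BatchTransition', ['state', 'action', 'reward', 'state_', 'done'])

batch = [BatchTransition(torch.zeros(1, 4, 84, 84), 1, 0.0, torch.zeros(1, 4, 84, 84), False)
         for _ in range(3)]
cols = BatchTransition(*zip(*batch))        # a tuple of states, a tuple of actions, ...
state_batch = torch.cat(cols.state, 0)      # stack along the batch dimension
print(state_batch.size())                   # torch.Size([3, 4, 84, 84])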

Example 10: forward

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def forward(self,input_gradients, prev_state):
        if USE_CUDA:
            input_gradients = input_gradients.cuda()
        # The LSTM input is the gradient; torch.nn.LSTM expects shape (seq_len, batch, input_dim),
        # e.g. the original gradient.size() == torch.Size([5]) -> [1, 1, 5]
        gradients = input_gradients.unsqueeze(0)

        if self.preprocess_flag:
            gradients = self.LogAndSign_Preprocess_Gradient(gradients)
       
        update , next_state = self.Output_Gradient_Increment_And_Update_LSTM_Hidden_State(gradients , prev_state)
        # Squeeze to make it a single batch again.[1,1,5]->[5]
        update = update.squeeze().squeeze()
      
        return update , next_state 
Author: yangsenius, Project: learning-to-learn-by-pytorch, Lines of code: 17, Source: CoordinateWiseLSTM.py

Example 11: _get_value_info

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def _get_value_info(_dict):
    # given a dict value, return information about this dict's value. Return list of str
    strs = []
    for key, value in _dict.items():
        _str = ''
        if isinstance(value, torch.Tensor):
            _str += "\t{}: (1)type:torch.Tensor (2)dtype:{}, (3)shape:{} ".format(key,
                                                                                  value.dtype, value.size())
        elif isinstance(value, np.ndarray):
            _str += "\t{}: (1)type:numpy.ndarray (2)dtype:{}, (3)shape:{} ".format(key,
                                                                                   value.dtype, value.shape)
        else:
            _str += "\t{}: type:{}".format(key, type(value))
        strs.append(_str)
    return strs 
Author: fastnlp, Project: fastNLP, Lines of code: 17, Source: trainer.py
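
A quick check of the kind of per-key summary Example 11 produces, run on a hypothetical mixed dict (the keys and shapes are made up).

import numpy as np
import torch

sample = {'ids': torch.zeros(2, 5, dtype=torch.long),
          'mask': np.ones((2, 5), dtype=np.float32),
          'label': 'positive'}
for key, value in sample.items():
    if isinstance(value, torch.Tensor):
        print(key, value.dtype, value.size())     # tensors report dtype and torch.Size
    elif isinstance(value, np.ndarray):
        print(key, value.dtype, value.shape)      # ndarrays report dtype and shape
    else:
        print(key, type(value))                   # everything else just reports its type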

Example 12: focal_conf_loss

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def focal_conf_loss(self, conf_data, conf_t):
        """
        Focal loss as described in https://arxiv.org/pdf/1708.02002.pdf
        Adapted from https://github.com/clcarwin/focal_loss_pytorch/blob/master/focalloss.py
        Note that this uses softmax and not the original sigmoid from the paper.
        """
        conf_t = conf_t.view(-1) # [batch_size*num_priors]
        conf_data = conf_data.view(-1, conf_data.size(-1)) # [batch_size*num_priors, num_classes]

        # Ignore neutral samples (class < 0)
        keep = (conf_t >= 0).float()
        conf_t[conf_t < 0] = 0 # so that gather doesn't drum up a fuss

        logpt = F.log_softmax(conf_data, dim=-1)
        logpt = logpt.gather(1, conf_t.unsqueeze(-1))
        logpt = logpt.view(-1)
        pt    = logpt.exp()

        # I adapted the alpha_t calculation here from
        # https://github.com/pytorch/pytorch/blob/master/modules/detectron/softmax_focal_loss_op.cu
        # You'd think you want all the alphas to sum to one, but in the original implementation they
        # just give background an alpha of 1-alpha and each foreground an alpha of alpha.
        background = (conf_t == 0).float()
        at = (1 - cfg.focal_loss_alpha) * background + cfg.focal_loss_alpha * (1 - background)

        loss = -at * (1 - pt) ** cfg.focal_loss_gamma * logpt

        # See comment above for keep
        return cfg.conf_alpha * (loss * keep).sum() 
Author: dbolya, Project: yolact, Lines of code: 31, Source: multibox_loss.py
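
The core of Example 12 is looking up the log-probability of the true class for each prior. In isolation, with toy logits:

import torch
import torch.nn.functional as F

conf_data = torch.randn(6, 5)                            # [num_priors, num_classes] logits
conf_t = torch.tensor([0, 3, 1, 0, 4, 2])                # ground-truth class per prior
logpt = F.log_softmax(conf_data, dim=-1)
logpt = logpt.gather(1, conf_t.unsqueeze(-1)).view(-1)   # log-probability of the true class
pt = logpt.exp()
print(pt.size())                                         # torch.Size([6])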

Example 13: focal_conf_sigmoid_loss

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def focal_conf_sigmoid_loss(self, conf_data, conf_t):
        """
        Focal loss but using sigmoid like the original paper.
        Note: To make things mesh easier, the network still predicts 81 class confidences in this mode.
              Because retinanet originally only predicts 80, we simply just don't use conf_data[..., 0]
        """
        num_classes = conf_data.size(-1)

        conf_t = conf_t.view(-1) # [batch_size*num_priors]
        conf_data = conf_data.view(-1, num_classes) # [batch_size*num_priors, num_classes]

        # Ignore neutral samples (class < 0)
        keep = (conf_t >= 0).float()
        conf_t[conf_t < 0] = 0 # can't mask with -1, so filter that out

        # Compute a one-hot embedding of conf_t
        # From https://github.com/kuangliu/pytorch-retinanet/blob/master/utils.py
        conf_one_t = torch.eye(num_classes, device=conf_t.get_device())[conf_t]
        conf_pm_t  = conf_one_t * 2 - 1 # -1 if background, +1 if foreground for the specific class

        logpt = F.logsigmoid(conf_data * conf_pm_t) # note: 1 - sigmoid(x) = sigmoid(-x)
        pt    = logpt.exp()

        at = cfg.focal_loss_alpha * conf_one_t + (1 - cfg.focal_loss_alpha) * (1 - conf_one_t)
        at[..., 0] = 0 # Set alpha for the background class to 0 because sigmoid focal loss doesn't use it

        loss = -at * (1 - pt) ** cfg.focal_loss_gamma * logpt
        loss = keep * loss.sum(dim=-1)

        return cfg.conf_alpha * loss.sum() 
Author: dbolya, Project: yolact, Lines of code: 32, Source: multibox_loss.py
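
The torch.eye-based one-hot encoding in Example 13, shown on the CPU so the .get_device() call is dropped:

import torch

num_classes = 5
conf_t = torch.tensor([0, 3, 1, 4])
conf_one_t = torch.eye(num_classes)[conf_t]   # [4, 5] one-hot rows selected by class index
conf_pm_t = conf_one_t * 2 - 1                # +1 for the true class, -1 everywhere else
print(conf_one_t)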

Example 14: focal_conf_objectness_loss

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def focal_conf_objectness_loss(self, conf_data, conf_t):
        """
        Instead of using softmax, use class[0] to be the objectness score and do sigmoid focal loss on that.
        Then for the rest of the classes, softmax them and apply CE for only the positive examples.

        If class[0] = 1 implies foreground and class[0] = 0 implies background then you achieve something
        similar during test-time to softmax by setting class[1:] = softmax(class[1:]) * class[0] and invert class[0].
        """

        conf_t = conf_t.view(-1) # [batch_size*num_priors]
        conf_data = conf_data.view(-1, conf_data.size(-1)) # [batch_size*num_priors, num_classes]

        # Ignore neutral samples (class < 0)
        keep = (conf_t >= 0).float()
        conf_t[conf_t < 0] = 0 # so that gather doesn't drum up a fuss

        background = (conf_t == 0).float()
        at = (1 - cfg.focal_loss_alpha) * background + cfg.focal_loss_alpha * (1 - background)

        logpt = F.logsigmoid(conf_data[:, 0]) * (1 - background) + F.logsigmoid(-conf_data[:, 0]) * background
        pt    = logpt.exp()

        obj_loss = -at * (1 - pt) ** cfg.focal_loss_gamma * logpt

        # All that was the objectness loss--now time for the class confidence loss
        pos_mask = conf_t > 0
        conf_data_pos = (conf_data[:, 1:])[pos_mask] # Now this has just 80 classes
        conf_t_pos    = conf_t[pos_mask] - 1         # So subtract 1 here

        class_loss = F.cross_entropy(conf_data_pos, conf_t_pos, reduction='sum')

        return cfg.conf_alpha * (class_loss + (obj_loss * keep).sum()) 
Author: dbolya, Project: yolact, Lines of code: 34, Source: multibox_loss.py
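
The test-time recombination hinted at in Example 14's docstring could look like the following; this is an assumption for illustration, not code from the yolact repository.

import torch
import torch.nn.functional as F

conf = torch.randn(6, 81)                    # [num_priors, 1 objectness logit + 80 class logits]
obj = torch.sigmoid(conf[:, :1])             # objectness score in [0, 1]
cls = F.softmax(conf[:, 1:], dim=-1) * obj   # class confidences scaled by objectness
print(cls.size())                            # torch.Size([6, 80])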

Example 15: __init__

# Required import: import torch [as alias]
# Or: from torch import size [as alias]
def __init__(self, num_classes, size, overlap_thresh, prior_for_matching,
                 bkg_label, neg_mining, neg_pos, neg_overlap, encode_target,
                 use_gpu=True):
        super(MultiBoxLoss, self).__init__()
        self.use_gpu = use_gpu
        self.num_classes = num_classes
        self.threshold = overlap_thresh
        self.background_label = bkg_label
        self.encode_target = encode_target
        self.use_prior_for_matching = prior_for_matching
        self.do_neg_mining = neg_mining
        self.negpos_ratio = neg_pos
        self.neg_overlap = neg_overlap
        cfg = v[str(size)]
        self.variance = cfg['variance'] 
Author: qijiezhao, Project: pytorch-ssd, Lines of code: 17, Source: multibox_loss.py


Note: The torch.size method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors. For distribution and use, refer to the corresponding project's license. Do not reproduce without permission.