

Python torch.prod Method Code Examples

This article collects typical usage examples of the torch.prod method in Python. If you have been wondering how exactly torch.prod is used, how to call it, or what real-world torch.prod code looks like, the curated examples below should help. You can also explore further usage examples from the torch module, to which this method belongs.


A total of 15 code examples of the torch.prod method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
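
Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the two ways torch.prod is typically called: reducing all elements to a scalar, or reducing along a single dimension, which is the pattern most of the examples rely on.

import torch

x = torch.tensor([[1., 2., 3.],
                  [4., 5., 6.]])

print(torch.prod(x))                        # product of all elements -> tensor(720.)
print(torch.prod(x, dim=1))                 # row-wise products -> tensor([6., 120.])
print(torch.prod(x, dim=1, keepdim=True))   # same, but keeps shape (2, 1)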

Example 1: batch_iou_pair

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
# Note: this example also uses numpy (import numpy as np)
def batch_iou_pair(yx_min1, yx_max1, yx_min2, yx_max2, min=float(np.finfo(np.float32).eps)):
    """
    Pairwise IoU between two lists (each of size M) of bounding boxes, computed for N independent batches.
    :author 申瑞珉 (Ruimin Shen)
    :param yx_min1: The top left coordinates (y, x) of the first lists (size [N, M, 2]) of bounding boxes.
    :param yx_max1: The bottom right coordinates (y, x) of the first lists (size [N, M, 2]) of bounding boxes.
    :param yx_min2: The top left coordinates (y, x) of the second lists (size [N, M, 2]) of bounding boxes.
    :param yx_max2: The bottom right coordinates (y, x) of the second lists (size [N, M, 2]) of bounding boxes.
    :return: The lists (size [N, M]) of the IoU.
    """
    yx_min = torch.max(yx_min1, yx_min2)
    yx_max = torch.min(yx_max1, yx_max2)
    size = torch.clamp(yx_max - yx_min, min=0)
    intersect_area = torch.prod(size, -1)
    area1 = torch.prod(yx_max1 - yx_min1, -1)
    area2 = torch.prod(yx_max2 - yx_min2, -1)
    union_area = torch.clamp(area1 + area2 - intersect_area, min=min)
    return intersect_area / union_area 
Developer ID: ruiminshen, Project: yolo2-pytorch, Lines of code: 20, Source: torch.py
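
A minimal usage sketch (the tensor shapes below are illustrative and not taken from the original project): build N batches of M box pairs and compute their element-wise IoU.

import torch

N, M = 4, 8
yx_min1 = torch.rand(N, M, 2) * 10
yx_max1 = yx_min1 + torch.rand(N, M, 2) * 5
yx_min2 = torch.rand(N, M, 2) * 10
yx_max2 = yx_min2 + torch.rand(N, M, 2) * 5

iou = batch_iou_pair(yx_min1, yx_max1, yx_min2, yx_max2)
print(iou.shape)  # torch.Size([4, 8]), one IoU per box pair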

Example 2: fit_positive

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
# Note: this example also relies on project-specific helpers (utils.iou.torch.iou_matrix, utils.ensure_device)
def fit_positive(rows, cols, yx_min, yx_max, anchors):
    device_id = anchors.get_device() if torch.cuda.is_available() else None
    batch_size, num, _ = yx_min.size()
    num_anchors, _ = anchors.size()
    valid = torch.prod(yx_min < yx_max, -1)
    center = (yx_min + yx_max) / 2
    ij = torch.floor(center)
    i, j = torch.unbind(ij.long(), -1)
    index = i * cols + j
    anchors2 = anchors / 2
    iou_matrix = utils.iou.torch.iou_matrix((yx_min - center).view(-1, 2), (yx_max - center).view(-1, 2), -anchors2, anchors2).view(batch_size, -1, num_anchors)
    iou, index_anchor = iou_matrix.max(-1)
    _positive = []
    cells = rows * cols
    for valid, index, index_anchor in zip(torch.unbind(valid), torch.unbind(index), torch.unbind(index_anchor)):
        index, index_anchor = (t[valid] for t in (index, index_anchor))
        t = utils.ensure_device(torch.ByteTensor(cells, num_anchors).zero_(), device_id)
        t[index, index_anchor] = 1
        _positive.append(t)
    return torch.stack(_positive) 
Developer ID: ruiminshen, Project: yolo2-pytorch, Lines of code: 22, Source: __init__.py
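
The torch.prod call at the top of fit_positive acts as a per-box validity mask: a box counts as valid only if yx_min is strictly below yx_max on both axes. A small sketch of that pattern (the comparison is cast to long here for portability across PyTorch versions):

import torch

yx_min = torch.tensor([[0., 0.], [2., 5.]])
yx_max = torch.tensor([[3., 4.], [1., 6.]])
valid = torch.prod((yx_min < yx_max).long(), -1)
print(valid)  # tensor([1, 0]) -- the second box is degenerate on the y axis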

Example 3: forward

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
def forward(self, pred, target):
        orig_shape = pred.shape
        pred = pred.view(-1, 4)
        target = target.view(-1, 4)
        # boxes are (cx, cy, w, h); convert to top-left / bottom-right corners of the intersection
        tl = torch.max((pred[:, :2] - pred[:, 2:] / 2),
                       (target[:, :2] - target[:, 2:] / 2))
        br = torch.min((pred[:, :2] + pred[:, 2:] / 2),
                       (target[:, :2] + target[:, 2:] / 2))

        area_p = torch.prod(pred[:, 2:], 1)
        area_g = torch.prod(target[:, 2:], 1)

        # en is 1 only where the boxes actually overlap on both axes
        en = (tl < br).type(tl.type()).prod(dim=1)
        area_i = torch.prod(br - tl, 1) * en
        iou = area_i / (area_p + area_g - area_i + 1e-16)

        loss = 1 - iou ** 2
        if self.reduction == 'mean':
            loss = loss.mean()
        elif self.reduction == 'sum':
            loss = loss.sum()

        return loss 
Developer ID: ruinmessi, Project: ASFF, Lines of code: 25, Source: utils_loss.py
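
The key torch.prod usages here are the per-box areas computed from the width/height columns of (cx, cy, w, h) boxes. A minimal illustration of that piece (the box values are made up):

import torch

boxes = torch.tensor([[0.0, 0.0, 2.0, 3.0],
                      [1.0, 1.0, 4.0, 0.5]])
areas = torch.prod(boxes[:, 2:], 1)  # w * h for each box
print(areas)  # tensor([6., 2.])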

Example 4: forward

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
def forward(
        self,
        unprojected_outs,
        src_tokens=None,
        input_tokens=None,
        possible_translation_tokens=None,
        select_single=None,
    ):
        stacked = (
            torch.stack(unprojected_outs)
            if select_single is None
            else torch.unsqueeze(unprojected_outs[select_single], 0)
        )
        return self.output_projection(
            torch.prod(self.activation(stacked), dim=0),
            src_tokens,
            input_tokens,
            possible_translation_tokens,
        ) 
Developer ID: pytorch, Project: translate, Lines of code: 21, Source: multi_model.py
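
Here torch.prod(..., dim=0) combines the activations of several decoders by element-wise multiplication. A small stand-alone sketch of that combination step, using random tensors instead of real decoder outputs:

import torch

outs = [torch.sigmoid(torch.randn(2, 4)) for _ in range(3)]  # three hypothetical model outputs
stacked = torch.stack(outs)             # shape [3, 2, 4]
combined = torch.prod(stacked, dim=0)   # element-wise product across the 3 models -> [2, 4]
print(combined.shape)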

Example 5: ms_ssim

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
# Note: this example also uses torch.nn.functional as F and torch.autograd.Variable
def ms_ssim(self, img1, img2, levels=5):

        weight = Variable(torch.Tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).cuda())

        msssim = Variable(torch.Tensor(levels,).cuda())
        mcs = Variable(torch.Tensor(levels,).cuda())
        for i in range(levels):
            ssim_map, mcs_map = self._ssim(img1, img2)
            msssim[i] = ssim_map
            mcs[i] = mcs_map
            filtered_im1 = F.avg_pool2d(img1, kernel_size=2, stride=2)
            filtered_im2 = F.avg_pool2d(img2, kernel_size=2, stride=2)
            img1 = filtered_im1
            img2 = filtered_im2

        value = (torch.prod(mcs[0:levels-1]**weight[0:levels-1])*
                                    (msssim[levels-1]**weight[levels-1]))
        return value 
Developer ID: One-sixth, Project: ms_ssim_pytorch, Lines of code: 20, Source: no1_ms_ssim_lizhengwei1992_MS_SSIM_pytorch.py

Example 6: msssim

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
# Note: this example also uses torch.nn.functional as F, torch.autograd.Variable, and the project's t_ssim helper
def msssim(self, img1, img2):
        levels = self.levels
        mssim = []
        mcs = []

        img1, img2, img11, img22, img12 = img1, img2, None, None, None
        for i in range(levels):
            l, cs = \
                    t_ssim(img1, img2, img11, img22, img12, \
                                Variable(getattr(self, "window"), requires_grad=False),\
                                self.channel, size_average=self.size_average, dilation=(1 + int(i ** 1.5)))

            img1 = F.avg_pool2d(img1, (2, 2))
            img2 = F.avg_pool2d(img2, (2, 2))
            mssim.append(l)
            mcs.append(cs)

        mssim = torch.stack(mssim)
        mcs = torch.stack(mcs)

        weights = Variable(self.weights, requires_grad=False)

        return torch.prod(mssim ** weights) 
Developer ID: Legion56, Project: Counting-ICCV-DSSINet, Lines of code: 25, Source: ssim.py
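
The final line, torch.prod(mssim ** weights), is a weighted geometric mean of the per-scale SSIM values. A minimal numeric sketch (the SSIM values are made up; the weights are the commonly used MS-SSIM weights):

import torch

mssim = torch.tensor([0.98, 0.97, 0.95, 0.93, 0.90])
weights = torch.tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
score = torch.prod(mssim ** weights)  # weighted geometric mean across scales
print(score)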

Example 7: fake_cumprod

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
# Note: this example also uses torch.autograd.Variable
def fake_cumprod(vb):
    """
    args:
        vb:  [hei x wid]
          -> NOTE: we are lazy here so now it only supports cumprod along wid
    """
    # real_cumprod = torch.cumprod(vb.data, 1)
    vb = vb.unsqueeze(0)
    mul_mask_vb = Variable(torch.zeros(vb.size(2), vb.size(1), vb.size(2))).type_as(vb)
    for i in range(vb.size(2)):
       mul_mask_vb[i, :, :i+1] = 1
    add_mask_vb = 1 - mul_mask_vb
    vb = vb.expand_as(mul_mask_vb) * mul_mask_vb + add_mask_vb
    # vb = torch.prod(vb, 2).transpose(0, 2)                # 0.1.12
    vb = torch.prod(vb, 2, keepdim=True).transpose(0, 2)    # 0.2.0
    # print(real_cumprod - vb.data) # NOTE: checked, ==0
    return vb 
Developer ID: jingweiz, Project: pytorch-dnc, Lines of code: 19, Source: fake_ops.py
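
A quick sanity check of fake_cumprod against torch.cumprod, assuming a reasonably recent PyTorch where Variable is an alias for Tensor:

import torch
from torch.autograd import Variable

vb = Variable(torch.rand(3, 5))           # [hei x wid]
out = fake_cumprod(vb)                    # [1 x hei x wid]
ref = torch.cumprod(vb, 1).unsqueeze(0)   # cumulative product along wid
print(torch.allclose(out, ref))           # True, up to float tolerance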

Example 8: _access

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
def _access(self, memory_vb): # write
        """
        variables needed:
            wl_curr_vb: [batch_size x num_heads x mem_hei]
            erase_vb:   [batch_size x num_heads x mem_wid]
                     -> /in (0, 1)
            add_vb:     [batch_size x num_heads x mem_wid]
                     -> w/ no restrictions in range
            memory_vb:  [batch_size x mem_hei x mem_wid]
        returns:
            memory_vb:  [batch_size x mem_hei x mem_wid]
        NOTE: IMPORTANT: https://github.com/deepmind/dnc/issues/10
        """

        # first let's do erasion
        weighted_erase_vb = torch.bmm(self.wl_curr_vb.contiguous().view(-1, self.mem_hei, 1),
                                      self.erase_vb.contiguous().view(-1, 1, self.mem_wid)).view(-1, self.num_heads, self.mem_hei, self.mem_wid)
        keep_vb = torch.prod(1. - weighted_erase_vb, dim=1)
        memory_vb = memory_vb * keep_vb
        # finally let's write (do addition)
        return memory_vb + torch.bmm(self.wl_curr_vb.transpose(1, 2), self.add_vb) 
Developer ID: jingweiz, Project: pytorch-dnc, Lines of code: 23, Source: dynamic_write_head.py
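
The torch.prod(1. - weighted_erase_vb, dim=1) call folds the erase vectors of all write heads into a single keep factor per memory cell. A reduced sketch of just that step (the shapes are illustrative):

import torch

batch_size, num_heads, mem_hei, mem_wid = 2, 3, 4, 5
weighted_erase = torch.rand(batch_size, num_heads, mem_hei, mem_wid)  # each entry in (0, 1)
keep = torch.prod(1. - weighted_erase, dim=1)  # combine erase terms across heads
print(keep.shape)  # torch.Size([2, 4, 5])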

Example 9: __p_k

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
# Note: this example also uses pi (from math import pi) and self.eps defined elsewhere in the class
def __p_k(self, x, mu, var):
        """
        Returns a tensor with dimensions (n, k, 1) indicating the likelihood of data belonging to the k-th Gaussian.
        args:
            x:      torch.Tensor (n, k, d)
            mu:     torch.Tensor (1, k, d)
            var:    torch.Tensor (1, k, d)
        returns:
            p_k:    torch.Tensor (n, k, 1)
        """

        # (1, k, d) --> (n, k, d)
        mu = mu.expand(x.size(0), self.n_components, self.n_features)
        var = var.expand(x.size(0), self.n_components, self.n_features)

        # (n, k, d) --> (n, k, 1)
        exponent = torch.exp(-.5 * torch.sum((x - mu) * (x - mu) / var, 2, keepdim=True))
        # (n, k, d) --> (n, k, 1)
        prefactor = torch.rsqrt(((2. * pi) ** self.n_features) * torch.prod(var, dim=2, keepdim=True) + self.eps)

        return prefactor * exponent 
Developer ID: kumar-shridhar, Project: PyTorch-BayesianCNN, Lines of code: 23, Source: gmm.py
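
In the prefactor, torch.prod(var, dim=2, keepdim=True) is the determinant of a diagonal covariance matrix for each Gaussian component. A minimal sketch of that reduction (the shapes are illustrative):

import torch

n_components, n_features = 3, 2
var = torch.rand(1, n_components, n_features) + 0.1
det = torch.prod(var, dim=2, keepdim=True)  # determinant of each diagonal covariance
print(det.shape)  # torch.Size([1, 3, 1])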

Example 10: gaussian_probability

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
# Note: this example also uses the module-level constant ONEOVERSQRT2PI (1 / sqrt(2 * pi))
def gaussian_probability(sigma, mu, target):
    """Returns the probability of `data` given MoG parameters `sigma` and `mu`.
    
    Arguments:
        sigma (BxGxO): The standard deviation of the Gaussians. B is the batch
            size, G is the number of Gaussians, and O is the number of
            dimensions per Gaussian.
        mu (BxGxO): The means of the Gaussians. B is the batch size, G is the
            number of Gaussians, and O is the number of dimensions per Gaussian.
        target (BxI): A batch of target values. B is the batch size and I is the
            number of input dimensions.

    Returns:
        probabilities (BxG): The probability of each point in the probability
            of the distribution in the corresponding sigma/mu index.
    """
    target = target.unsqueeze(1).expand_as(sigma)
    ret = ONEOVERSQRT2PI * torch.exp(-0.5 * ((target - mu) / sigma)**2) / sigma
    return torch.prod(ret, 2) 
Developer ID: sagelywizard, Project: pytorch-mdn, Lines of code: 21, Source: mdn.py
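
A hedged usage sketch. ONEOVERSQRT2PI is defined elsewhere in the original module; it is restated here so the snippet stands alone, and the shapes are illustrative:

import math
import torch

ONEOVERSQRT2PI = 1.0 / math.sqrt(2 * math.pi)

B, G, O = 5, 3, 2
sigma = torch.rand(B, G, O) + 0.1
mu = torch.randn(B, G, O)
target = torch.randn(B, O)

probs = gaussian_probability(sigma, mu, target)
print(probs.shape)  # torch.Size([5, 3]), one density value per Gaussian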

Example 11: init_label_function

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
# Note: this example also relies on project-specific helpers (TensorList, dcf.label_function_spatial)
def init_label_function(self, train_x):
        # Allocate label function
        self.y = TensorList([x.new_zeros(self.params.sample_memory_size, 1, x.shape[2], x.shape[3]) for x in train_x])

        # Output sigma factor
        output_sigma_factor = self.fparams.attribute('output_sigma_factor')
        self.sigma = (self.feature_sz / self.img_support_sz * self.base_target_sz).prod().sqrt() * output_sigma_factor * torch.ones(2)

        # Center pos in normalized coords
        target_center_norm = (self.pos - self.pos.round()) / (self.target_scale * self.img_support_sz)

        # Generate label functions
        for y, sig, sz, ksz, x in zip(self.y, self.sigma, self.feature_sz, self.kernel_size, train_x):
            center_pos = sz * target_center_norm + 0.5 * torch.Tensor([(ksz[0] + 1) % 2, (ksz[1] + 1) % 2])
            for i, T in enumerate(self.transforms[:x.shape[0]]):
                sample_center = center_pos + torch.Tensor(T.shift) / self.img_support_sz * sz
                y[i, 0, ...] = dcf.label_function_spatial(sz, sig, sample_center)

        # Return only the ones to use for initial training
        return TensorList([y[:x.shape[0], ...] for y, x in zip(self.y, train_x)]) 
Developer ID: visionml, Project: pytracking, Lines of code: 22, Source: atom.py

Example 12: forward

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
def forward(
        self,
        state_with_presence: Tuple[torch.Tensor, torch.Tensor],
        candidate_with_presence: Tuple[torch.Tensor, torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # ranked_tgt_out_probs shape: batch_size, tgt_seq_len, candidate_size
        # ranked_tgt_out_idx shape: batch_size, tgt_seq_len
        ranked_tgt_out_probs, ranked_tgt_out_idx = self.seq2slate_with_preprocessor(
            state_with_presence, candidate_with_presence
        )
        # convert to slate-wise probabilities
        # ranked_tgt_out_probs shape: batch_size
        ranked_tgt_out_probs = torch.prod(
            torch.gather(
                ranked_tgt_out_probs, 2, ranked_tgt_out_idx.unsqueeze(-1)
            ).squeeze(),
            -1,
        )
        # -2 to offset padding symbol and decoder start symbol
        ranked_tgt_out_idx -= 2
        return ranked_tgt_out_probs, ranked_tgt_out_idx 
Developer ID: facebookresearch, Project: ReAgent, Lines of code: 23, Source: predictor_wrapper.py
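
The gather-then-prod pattern turns per-step candidate probabilities into a single probability per slate. A reduced sketch with random probabilities (squeeze(-1) is used here instead of squeeze() so a batch of size 1 would also work):

import torch

batch_size, tgt_seq_len, candidate_size = 2, 3, 5
probs = torch.softmax(torch.randn(batch_size, tgt_seq_len, candidate_size), dim=-1)
idx = torch.randint(0, candidate_size, (batch_size, tgt_seq_len))

# probability of the chosen candidate at each step, multiplied over the sequence
slate_prob = torch.prod(torch.gather(probs, 2, idx.unsqueeze(-1)).squeeze(-1), -1)
print(slate_prob.shape)  # torch.Size([2])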

Example 13: hook

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
# Note: this example also uses collections.OrderedDict and the project's compute_flops / compute_madd helpers
def hook(self, module, input, output):
        class_name = str(module.__class__).split(".")[-1].split("'")[0]
        module_idx = len(self.summary)

        m_key = "%s-%i" % (class_name, module_idx + 1)
        self.summary[m_key] = OrderedDict()
        self.summary[m_key]["input_shape"] = list(input[0].size())
        if isinstance(output, (list, tuple)):
            self.summary[m_key]["output_shape"] = [[-1] + list(o.size())[1:] for o in output]
        else:
            self.summary[m_key]["output_shape"] = list(output.size())

        # -------------------------
        # compute module parameters
        # -------------------------
        params = 0
        if hasattr(module, "weight") and hasattr(module.weight, "size"):
            params += torch.prod(torch.LongTensor(list(module.weight.size())))
            self.summary[m_key]["trainable"] = module.weight.requires_grad
        if hasattr(module, "bias") and hasattr(module.bias, "size"):
            params += torch.prod(torch.LongTensor(list(module.bias.size())))
        self.summary[m_key]["nb_params"] = params

        # -------------------------
        # compute module flops
        # -------------------------
        flops = compute_flops(module, input[0], output)
        self.summary[m_key]["flops"] = flops

        # -------------------------
        # compute module madds
        # -------------------------
        madds = compute_madd(module, input[0], output)
        self.summary[m_key]["madds"] = madds 
Developer ID: Tramac, Project: torchscope, Lines of code: 36, Source: scope.py
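
The two torch.prod(torch.LongTensor(...)) calls count the parameters of a layer from its weight and bias shapes. A minimal stand-alone check:

import torch
import torch.nn as nn

layer = nn.Linear(128, 64)
weight_params = torch.prod(torch.LongTensor(list(layer.weight.size())))  # 64 * 128
bias_params = torch.prod(torch.LongTensor(list(layer.bias.size())))      # 64
print(int(weight_params + bias_params))  # 8256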

Example 14: forward

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
def forward(self, inputs, context=None):
        batch_size = inputs.shape[0]
        num_dims = torch.prod(torch.tensor(inputs.shape[1:]), dtype=torch.float)
        outputs = inputs * self._scale + self._shift
        logabsdet = torch.full([batch_size], self._log_scale * num_dims)
        return outputs, logabsdet 
Developer ID: bayesiains, Project: nsf, Lines of code: 8, Source: standard.py

Example 15: inverse

# Required module: import torch [as alias]
# Or: from torch import prod [as alias]
def inverse(self, inputs, context=None):
        batch_size = inputs.shape[0]
        num_dims = torch.prod(torch.tensor(inputs.shape[1:]), dtype=torch.float)
        outputs = (inputs - self._shift) / self._scale
        logabsdet = torch.full([batch_size], -self._log_scale * num_dims)
        return outputs, logabsdet 
Developer ID: bayesiains, Project: nsf, Lines of code: 8, Source: standard.py
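
In both forward and inverse, torch.prod(torch.tensor(inputs.shape[1:]), dtype=torch.float) simply counts the number of elements per sample so the per-element log-determinant can be scaled by it. A minimal sketch:

import torch

inputs = torch.randn(16, 3, 8, 8)
num_dims = torch.prod(torch.tensor(inputs.shape[1:]), dtype=torch.float)
print(num_dims)  # tensor(192.) -> 3 * 8 * 8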


Note: The torch.prod examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.