Python torch.addmm Method Code Examples

This article collects typical usage examples of the Python torch.addmm method. If you are wondering what exactly torch.addmm does, how to use it, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the torch module that this method belongs to.


The following presents 15 code examples of the torch.addmm method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
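
Before the examples, a minimal sketch of the method's semantics may help: torch.addmm(M, mat1, mat2, beta=1, alpha=1) computes beta * M + alpha * (mat1 @ mat2) in a single fused call.

import torch

# Minimal sketch of torch.addmm semantics: one fused "add + matrix multiply".
M = torch.randn(2, 3)          # the added term; broadcastable to (2, 3)
mat1 = torch.randn(2, 4)
mat2 = torch.randn(4, 3)

fused = torch.addmm(M, mat1, mat2)   # beta=1, alpha=1 by default
manual = M + mat1 @ mat2
assert torch.allclose(fused, manual, atol=1e-6)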

Example 1: linear

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def linear(input, weight, bias=None):
    # type: (Tensor, Tensor, Optional[Tensor]) -> Tensor
    r"""
    Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.

    Shape:

        - Input: :math:`(N, *, in\_features)` where `*` means any number of
          additional dimensions
        - Weight: :math:`(out\_features, in\_features)`
        - Bias: :math:`(out\_features)`
        - Output: :math:`(N, *, out\_features)`
    """
    if input.dim() == 2 and bias is not None:
        # fused op is marginally faster
        ret = torch.addmm(torch.jit._unwrap_optional(bias), input, weight.t())
    else:
        output = input.matmul(weight.t())
        if bias is not None:
            output += torch.jit._unwrap_optional(bias)
        ret = output
    return ret 
Author: MagicChuyi | Project: SlowFast-Network-pytorch | Lines: 24 | Source: functional.py
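
A quick usage sketch (shapes assumed for illustration): the 2-D input below takes the fused addmm branch, and the result matches the explicit matmul-plus-bias form.

x = torch.randn(8, 16)        # (N, in_features)
W = torch.randn(32, 16)       # (out_features, in_features)
b = torch.randn(32)

y = linear(x, W, b)           # hits the torch.addmm fast path
assert y.shape == (8, 32)
assert torch.allclose(y, x @ W.t() + b, atol=1e-5)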

Example 2: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(self, input_, hx):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).
        Returns:
            c_1: Tensor containing the next cell state.
        """

        h_0, c_0 = hx
        batch_size = h_0.size(0)
        bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))
        wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
        wi = torch.mm(input_, self.weight_ih)
        f, i, g = torch.split(wh_b + wi, split_size_or_sections=self.hidden_size, dim=1)
        c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)

        return c_1 
Author: fastnlp | Project: fastNLP | Lines: 26 | Source: modules.py
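
The torch.addmm call above fuses the bias addition into the hidden-state matmul. A standalone sketch with assumed shapes (three stacked gates, matching the f, i, g split):

batch, hidden = 4, 8
h_0 = torch.randn(batch, hidden)
weight_hh = torch.randn(hidden, 3 * hidden)
bias = torch.randn(3 * hidden)

bias_batch = bias.unsqueeze(0).expand(batch, 3 * hidden)
fused = torch.addmm(bias_batch, h_0, weight_hh)
assert torch.allclose(fused, bias_batch + h_0 @ weight_hh, atol=1e-5)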

Example 3: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(ctx, input, r_weight, i_weight, j_weight, k_weight, bias=None):
        ctx.save_for_backward(input, r_weight, i_weight, j_weight, k_weight, bias)
        check_input(input)
        cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=0)
        cat_kernels_4_i = torch.cat([i_weight,  r_weight, -k_weight, j_weight], dim=0)
        cat_kernels_4_j = torch.cat([j_weight,  k_weight, r_weight, -i_weight], dim=0)
        cat_kernels_4_k = torch.cat([k_weight,  -j_weight, i_weight, r_weight], dim=0)
        cat_kernels_4_quaternion = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=1)
        if input.dim() == 2:
            if bias is not None:
                return torch.addmm(bias, input, cat_kernels_4_quaternion)
            else:
                return torch.mm(input, cat_kernels_4_quaternion)
        else:
            output = torch.matmul(input, cat_kernels_4_quaternion)
            if bias is not None:
                return output+bias
            else:
                return output

    # This function has only a single output, so it gets only one gradient 
Author: Orkis-Research | Project: Pytorch-Quaternion-Neural-Networks | Lines: 23 | Source: quaternion_ops.py
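
The concatenated kernel encodes quaternion (Hamilton) multiplication as a single real-valued matmul, which is what lets the layer fall through to torch.addmm. A small numeric check of my own (not from the repository), for one scalar quaternion weight:

xr, xi, xj, xk = torch.randn(4).tolist()   # input quaternion components
wr, wi, wj, wk = torch.randn(4).tolist()   # weight quaternion components

# 4x4 kernel laid out exactly as the torch.cat calls above build it
K = torch.tensor([[ wr,  wi,  wj,  wk],
                  [-wi,  wr,  wk, -wj],
                  [-wj, -wk,  wr,  wi],
                  [-wk,  wj, -wi,  wr]])
x = torch.tensor([[xr, xi, xj, xk]])

# Hamilton product w * x, written out component by component
expected = torch.tensor([[wr*xr - wi*xi - wj*xj - wk*xk,
                          wr*xi + wi*xr + wj*xk - wk*xj,
                          wr*xj - wi*xk + wj*xr + wk*xi,
                          wr*xk + wi*xj - wj*xi + wk*xr]])
assert torch.allclose(x @ K, expected, atol=1e-5)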

Example 4: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(
        self,
        x1: torch.Tensor,
        x2: torch.Tensor,
        diag: Optional[bool] = False,
        last_dim_is_batch: Optional[bool] = False,
        **params,
    ) -> torch.Tensor:
        offset = self.offset.view(*self.batch_shape, 1, 1)

        if last_dim_is_batch:
            x1 = x1.transpose(-1, -2).unsqueeze(-1)
            x2 = x2.transpose(-1, -2).unsqueeze(-1)

        if diag:
            return ((x1 * x2).sum(dim=-1) + self.offset).pow(self.power)

        if x1.dim() == 2 and x2.dim() == 2:
            return torch.addmm(offset, x1, x2.transpose(-2, -1)).pow(self.power)
        else:
            return (torch.matmul(x1, x2.transpose(-2, -1)) + offset).pow(self.power) 
Author: cornellius-gp | Project: gpytorch | Lines: 23 | Source: polynomial_kernel.py
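
For 2-D inputs this forward computes the polynomial kernel k(x1, x2) = (x1 · x2 + offset) ** power, with addmm fusing the offset into the Gram-matrix computation. A sketch with assumed values:

x1 = torch.randn(5, 3)
x2 = torch.randn(6, 3)
offset, power = 1.0, 2

fused = torch.addmm(torch.tensor(offset), x1, x2.t()).pow(power)  # scalar offset broadcasts
manual = (x1 @ x2.t() + offset).pow(power)
assert torch.allclose(fused, manual, atol=1e-5)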

Example 5: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(self, input_, hx):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).
        Returns:
            c_1: Tensor containing the next cell state.
        """

        h_0, c_0 = hx
        batch_size = h_0.size(0)
        bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))
        wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
        wi = torch.mm(input_, self.weight_ih)
        f, i, g = torch.split(wh_b + wi, self.hidden_size, dim=1)
        # Cell states 
        c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
        return c_1 
Author: thunlp | Project: Chinese_NRE | Lines: 23 | Source: mglattice.py

Example 6: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(self, input_, hx):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).
        Returns:
            h_1, c_1: Tensors containing the next hidden and cell state.
        """

        h_0, c_0 = hx
        batch_size = h_0.size(0)
        bias_batch = (self.bias.unsqueeze(0)
                      .expand(batch_size, *self.bias.size()))
        wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
        wi = torch.mm(input_, self.weight_ih)
        # split into forget, input, output, and cell gates
        f, i, o, g = torch.split(wh_b + wi, self.hidden_size, dim=1)
        c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
        h_1 = torch.sigmoid(o) * torch.tanh(c_1)
        return h_1, c_1 
Author: gitabcworld | Project: FewShotLearning | Lines: 25 | Source: bnlstm.py
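
The same update as a self-contained sketch (weight layout assumed: the four gates stacked along the last dimension):

batch, input_size, hidden = 2, 5, 4
weight_ih = torch.randn(input_size, 4 * hidden)
weight_hh = torch.randn(hidden, 4 * hidden)
bias = torch.randn(4 * hidden)
x = torch.randn(batch, input_size)
h_0 = torch.zeros(batch, hidden)
c_0 = torch.zeros(batch, hidden)

wh_b = torch.addmm(bias.expand(batch, 4 * hidden), h_0, weight_hh)
f, i, o, g = torch.split(wh_b + x @ weight_ih, hidden, dim=1)
c_1 = torch.sigmoid(f) * c_0 + torch.sigmoid(i) * torch.tanh(g)
h_1 = torch.sigmoid(o) * torch.tanh(c_1)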

Example 7: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(ctx, input, r_weight, i_weight, j_weight, k_weight, bias=None):
        ctx.save_for_backward(input, r_weight, i_weight, j_weight, k_weight, bias)
        check_input(input)
        cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=0)
        cat_kernels_4_i = torch.cat([i_weight,  r_weight, -k_weight, j_weight], dim=0)
        cat_kernels_4_j = torch.cat([j_weight,  k_weight, r_weight, -i_weight], dim=0)
        cat_kernels_4_k = torch.cat([k_weight,  -j_weight, i_weight, r_weight], dim=0)
        cat_kernels_4_quaternion = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=1)
        if input.dim() == 2:
            if bias is not None:
                return torch.addmm(bias, input, cat_kernels_4_quaternion)
            else: 
                return torch.mm(input, cat_kernels_4_quaternion)
        else:
            output = torch.matmul(input, cat_kernels_4_quaternion)
            if bias is not None:
                return output+bias
            else:
                return output

    # This function has only a single output, so it gets only one gradient 
Author: Orkis-Research | Project: Quaternion-Recurrent-Neural-Networks | Lines: 23 | Source: quaternionops.py

Example 8: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(self, input_, hx):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).

        Returns:
            h_1, c_1: Tensors containing the next hidden and cell state.
        """

        h_0, c_0 = hx
        batch_size = h_0.size(0)
        bias_batch = (self.bias.unsqueeze(0)
                      .expand(batch_size, *self.bias.size()))
        wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
        wi = torch.mm(input_, self.weight_ih)
        f, i, o, g = torch.split(wh_b + wi,
                                 self.hidden_size, dim=1)
        c_1 = torch.sigmoid(f) * c_0 + torch.sigmoid(i) * torch.tanh(g)
        h_1 = torch.sigmoid(o) * torch.tanh(c_1)
        return h_1, c_1 
Author: pytorch | Project: benchmark | Lines: 26 | Source: bnlstm.py

Example 9: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(self, x):
        if self.rf == 1:
            size_out = x.size()[:-1] + (self.nf,)
            x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
            x = x.view(*size_out)
        else:
            raise NotImplementedError
        return x 
Author: atcbosselut | Project: comet-commonsense | Lines: 10 | Source: gpt.py
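
With rf == 1 this "Conv1D" is just an affine map over the last dimension; note that w is stored as (in_features, nf), i.e. transposed relative to nn.Linear. A standalone sketch of the reshape-addmm-reshape pattern, with assumed sizes:

batch, seq, d_in, nf = 2, 3, 5, 7
x = torch.randn(batch, seq, d_in)
w = torch.randn(d_in, nf)
b = torch.randn(nf)

size_out = x.size()[:-1] + (nf,)
y = torch.addmm(b, x.view(-1, x.size(-1)), w).view(*size_out)
assert torch.allclose(y, x @ w + b, atol=1e-5)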

Example 10: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(ctx, features, mask, weight, bias, padding=0, stride=1):
        assert mask.dim() == 3 and mask.size(0) == 1
        assert features.dim() == 4 and features.size(0) == 1
        assert features.size()[2:] == mask.size()[1:]
        pad_h, pad_w = _pair(padding)
        stride_h, stride_w = _pair(stride)
        if stride_h != 1 or stride_w != 1:
            raise ValueError(
                'Stride must be 1 in masked_conv2d currently.')
        if not features.is_cuda:
            raise NotImplementedError

        out_channel, in_channel, kernel_h, kernel_w = weight.size()

        batch_size = features.size(0)
        out_h = int(
            math.floor((features.size(2) + 2 * pad_h -
                        (kernel_h - 1) - 1) / stride_h + 1))
        out_w = int(
            math.floor((features.size(3) + 2 * pad_w -
                        (kernel_w - 1) - 1) / stride_w + 1))
        mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False)
        output = features.new_zeros(batch_size, out_channel, out_h, out_w)
        if mask_inds.numel() > 0:
            mask_h_idx = mask_inds[:, 0].contiguous()
            mask_w_idx = mask_inds[:, 1].contiguous()
            data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
                                          mask_inds.size(0))
            masked_conv2d_ext.masked_im2col_forward(features, mask_h_idx,
                                                    mask_w_idx, kernel_h,
                                                    kernel_w, pad_h, pad_w,
                                                    data_col)

            # bias[:, None] broadcasts across positions; beta and alpha default to 1
            masked_output = torch.addmm(bias[:, None],
                                        weight.view(out_channel, -1), data_col)
            masked_conv2d_ext.masked_col2im_forward(masked_output, mask_h_idx,
                                                    mask_w_idx, out_h, out_w,
                                                    out_channel, output)
        return output 
Author: open-mmlab | Project: mmdetection | Lines: 41 | Source: masked_conv.py
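
Here addmm operates on gathered columns rather than on feature maps: bias[:, None] has shape (out_channel, 1) and broadcasts across every sampled position. A shape-level sketch with assumed sizes:

out_channel, k, n_pos = 4, 9, 6        # k = in_channel * kernel_h * kernel_w
bias = torch.randn(out_channel)
w = torch.randn(out_channel, k)
data_col = torch.randn(k, n_pos)       # one column per masked position

out = torch.addmm(bias[:, None], w, data_col)   # (out_channel, n_pos)
assert torch.allclose(out, w @ data_col + bias[:, None], atol=1e-5)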

Example 11: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(ctx, features, mask, weight, bias, padding=0, stride=1):
        assert mask.dim() == 3 and mask.size(0) == 1
        assert features.dim() == 4 and features.size(0) == 1
        assert features.size()[2:] == mask.size()[1:]
        pad_h, pad_w = _pair(padding)
        stride_h, stride_w = _pair(stride)
        if stride_h != 1 or stride_w != 1:
            raise ValueError(
                'Stride must be 1 in masked_conv2d currently.')
        if not features.is_cuda:
            raise NotImplementedError

        out_channel, in_channel, kernel_h, kernel_w = weight.size()

        batch_size = features.size(0)
        out_h = int(
            math.floor((features.size(2) + 2 * pad_h -
                        (kernel_h - 1) - 1) / stride_h + 1))
        out_w = int(
            math.floor((features.size(3) + 2 * pad_w -
                        (kernel_w - 1) - 1) / stride_w + 1))
        mask_inds = torch.nonzero(mask[0] > 0)
        mask_h_idx = mask_inds[:, 0].contiguous()
        mask_w_idx = mask_inds[:, 1].contiguous()
        data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
                                      mask_inds.size(0))
        masked_conv2d_cuda.masked_im2col_forward(features, mask_h_idx,
                                                 mask_w_idx, kernel_h,
                                                 kernel_w, pad_h, pad_w,
                                                 data_col)

        # bias[:, None] broadcasts across positions; beta and alpha default to 1
        masked_output = torch.addmm(bias[:, None],
                                    weight.view(out_channel, -1), data_col)
        output = features.new_zeros(batch_size, out_channel, out_h, out_w)
        masked_conv2d_cuda.masked_col2im_forward(masked_output, mask_h_idx,
                                                 mask_w_idx, out_h, out_w,
                                                 out_channel, output)
        return output 
Author: dingjiansw101 | Project: AerialDetection | Lines: 40 | Source: masked_conv.py

Example 12: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.rf == 1:
            size_out = x.size()[:-1] + (self.nf,)
            x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
            x = x.view(*size_out)
        else:
            raise NotImplementedError
        return x 
Author: DFKI-NLP | Project: DISTRE | Lines: 10 | Source: transformer.py

Example 13: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(*size_out)
        return x 
Author: plkmo | Project: BERT-Relation-Extraction | Lines: 7 | Source: modeling_utils.py

Example 14: forward

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def forward(self, x):
        if self.rf == 1:
            size_out = x.size()[:-1] + (self.nf,)
            x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
            x = x.view(*size_out)
        else:
            raise NotImplementedError
        return x 
Author: 649453932 | Project: Bert-Chinese-Text-Classification-Pytorch | Lines: 10 | Source: modeling_openai.py

Example 15: test_addmm_backward_for_additive_shared_with_autograd

# Required import: import torch [as alias]
# Or: from torch import addmm [as alias]
def test_addmm_backward_for_additive_shared_with_autograd(workers):
    """
    Test .backward() on Additive Shared Tensor for addmm
    """
    bob, alice, james = workers["bob"], workers["alice"], workers["james"]

    a = (
        torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
        .fix_prec()
        .share(alice, bob, crypto_provider=james)
    )
    b = (
        torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)
        .fix_prec()
        .share(alice, bob, crypto_provider=james)
    )
    c = (
        torch.tensor([[2.0, 2], [0, 1]], requires_grad=True)
        .fix_prec()
        .share(alice, bob, crypto_provider=james)
    )

    a = syft.AutogradTensor().on(a)
    b = syft.AutogradTensor().on(b)
    c = syft.AutogradTensor().on(c)

    a_torch = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b_torch = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)
    c_torch = torch.tensor([[2.0, 2], [0, 1]], requires_grad=True)

    r = torch.addmm(c, a, b)
    r_torch = torch.addmm(c_torch, a_torch, b_torch)

    ones = torch.ones(r.shape).fix_prec().share(alice, bob, crypto_provider=james)
    ones = syft.AutogradTensor().on(ones)
    r.backward(ones)
    r_torch.backward(torch.ones(r_torch.shape))

    assert (a.grad.get().float_prec() == a_torch.grad).all()
    assert (b.grad.get().float_prec() == b_torch.grad).all()
    assert (c.grad.get().float_prec() == c_torch.grad).all() 
Author: OpenMined | Project: PySyft | Lines: 43 | Source: test_autograd.py
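
The plain-torch half of that test can be sanity-checked on its own. For r = torch.addmm(c, a, b) with an all-ones upstream gradient, the gradients are ones @ b.T for a, a.T @ ones for b, and ones for c:

a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
c = torch.randn(2, 2, requires_grad=True)

torch.addmm(c, a, b).backward(torch.ones(2, 2))
assert torch.allclose(a.grad, torch.ones(2, 2) @ b.t())
assert torch.allclose(b.grad, a.t() @ torch.ones(2, 2))
assert torch.allclose(c.grad, torch.ones(2, 2))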


Note: The torch.addmm method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and copyright remains with the original authors. For distribution and use, please refer to each project's license; do not reproduce without permission.