Python torch.addmm Code Examples

This article collects typical usage examples of the torch.addmm method in Python. If you are unsure what torch.addmm does or how to call it, the curated examples below should help. You can also browse usage examples for other methods in the torch module.


The following 15 code examples of torch.addmm are shown below, ordered by popularity.
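
Before the examples, a quick refresher: torch.addmm(input, mat1, mat2, *, beta=1, alpha=1) computes beta * input + alpha * (mat1 @ mat2), broadcasting input against the matrix product. A minimal sketch (all names are illustrative):

import torch

bias = torch.zeros(3)             # broadcasts against the (2, 3) product
x = torch.randn(2, 4)
w = torch.randn(4, 3)

# addmm fuses beta * input + alpha * (mat1 @ mat2) into one kernel
out = torch.addmm(bias, x, w)     # beta=1, alpha=1 by default

# equivalent, but as two separate ops
assert torch.allclose(out, bias + x @ w)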

Example 1: linear

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def linear(input, weight, bias=None):
    # type: (Tensor, Tensor, Optional[Tensor]) -> Tensor
    r"""
    Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.

    Shape:

        - Input: :math:`(N, *, in\_features)` where `*` means any number of
          additional dimensions
        - Weight: :math:`(out\_features, in\_features)`
        - Bias: :math:`(out\_features)`
        - Output: :math:`(N, *, out\_features)`
    """
    if input.dim() == 2 and bias is not None:
        # fused op is marginally faster
        ret = torch.addmm(torch.jit._unwrap_optional(bias), input, weight.t())
    else:
        output = input.matmul(weight.t())
        if bias is not None:
            output += torch.jit._unwrap_optional(bias)
        ret = output
    return ret 
Author: MagicChuyi | Project: SlowFast-Network-pytorch | Lines: 24 | Source: functional.py
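
A short usage sketch for the linear above (shapes are illustrative): a 2-D input with a bias takes the fused torch.addmm path, while higher-rank input falls back to matmul plus an add.

import torch

weight = torch.randn(5, 8)        # (out_features, in_features)
bias = torch.randn(5)

x2d = torch.randn(3, 8)           # 2-D input: fused addmm branch
x3d = torch.randn(2, 3, 8)        # 3-D input: matmul + bias branch

y2d = linear(x2d, weight, bias)   # -> shape (3, 5)
y3d = linear(x3d, weight, bias)   # -> shape (2, 3, 5)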

Example 2: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(self, input_, hx):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).
        Returns:
            c_1: Tensor containing the next cell state. (This variant
                has no output gate and returns the cell state only.)
        """

        h_0, c_0 = hx

        batch_size = h_0.size(0)
        bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))
        wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
        wi = torch.mm(input_, self.weight_ih)
        f, i, g = torch.split(wh_b + wi, split_size_or_sections=self.hidden_size, dim=1)
        c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)

        return c_1 
Author: fastnlp | Project: fastNLP | Lines: 26 | Source: modules.py
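
A minimal sketch of the shape contract this cell assumes (all names and sizes are hypothetical): weight_hh and weight_ih need 3 * hidden_size output columns so a single fused addmm/mm produces all three gate pre-activations at once.

import torch

batch, input_size, hidden_size = 4, 10, 6

weight_ih = torch.randn(input_size, 3 * hidden_size)
weight_hh = torch.randn(hidden_size, 3 * hidden_size)
bias = torch.randn(3 * hidden_size)

x = torch.randn(batch, input_size)
h_0 = torch.randn(batch, hidden_size)
c_0 = torch.randn(batch, hidden_size)

# one fused addmm covers bias + h_0 @ weight_hh for every gate
wh_b = torch.addmm(bias.unsqueeze(0).expand(batch, -1), h_0, weight_hh)
f, i, g = torch.split(wh_b + x @ weight_ih, hidden_size, dim=1)
c_1 = torch.sigmoid(f) * c_0 + torch.sigmoid(i) * torch.tanh(g)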

Example 3: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(ctx, input, r_weight, i_weight, j_weight, k_weight, bias=None):
        ctx.save_for_backward(input, r_weight, i_weight, j_weight, k_weight, bias)
        check_input(input)
        cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=0)
        cat_kernels_4_i = torch.cat([i_weight,  r_weight, -k_weight, j_weight], dim=0)
        cat_kernels_4_j = torch.cat([j_weight,  k_weight, r_weight, -i_weight], dim=0)
        cat_kernels_4_k = torch.cat([k_weight,  -j_weight, i_weight, r_weight], dim=0)
        cat_kernels_4_quaternion = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=1)
        if input.dim() == 2:
            if bias is not None:
                return torch.addmm(bias, input, cat_kernels_4_quaternion)
            else:
                return torch.mm(input, cat_kernels_4_quaternion)
        else:
            output = torch.matmul(input, cat_kernels_4_quaternion)
            if bias is not None:
                return output+bias
            else:
                return output

    # This function has only a single output, so it gets only one gradient 
Author: Orkis-Research | Project: Pytorch-Quaternion-Neural-Networks | Lines: 23 | Source: quaternion_ops.py
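
The four torch.cat calls build the real 4x4 block-matrix representation of the quaternion weight, so one fused addmm (or mm) applies the Hamilton product to all four components at once. A small sanity sketch with scalar weights (values are arbitrary):

import torch

r, i, j, k = [torch.tensor([[v]]) for v in (1.0, 2.0, 3.0, 4.0)]

col_r = torch.cat([r, -i, -j, -k], dim=0)
col_i = torch.cat([i,  r, -k,  j], dim=0)
col_j = torch.cat([j,  k,  r, -i], dim=0)
col_k = torch.cat([k, -j,  i,  r], dim=0)
W = torch.cat([col_r, col_i, col_j, col_k], dim=1)  # (4, 4)

x = torch.tensor([[1.0, 0.0, 0.0, 0.0]])  # the identity quaternion
# multiplying the identity quaternion by W recovers (r, i, j, k)
print(torch.mm(x, W))  # tensor([[1., 2., 3., 4.]])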

Example 4: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(
        self,
        x1: torch.Tensor,
        x2: torch.Tensor,
        diag: Optional[bool] = False,
        last_dim_is_batch: Optional[bool] = False,
        **params,
    ) -> torch.Tensor:
        offset = self.offset.view(*self.batch_shape, 1, 1)

        if last_dim_is_batch:
            x1 = x1.transpose(-1, -2).unsqueeze(-1)
            x2 = x2.transpose(-1, -2).unsqueeze(-1)

        if diag:
            return ((x1 * x2).sum(dim=-1) + self.offset).pow(self.power)

        if x1.dim() == 2 and x2.dim() == 2:
            return torch.addmm(offset, x1, x2.transpose(-2, -1)).pow(self.power)
        else:
            return (torch.matmul(x1, x2.transpose(-2, -1)) + offset).pow(self.power) 
Author: cornellius-gp | Project: gpytorch | Lines: 23 | Source: polynomial_kernel.py
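
Setting the diag and batched branches aside, the kernel value is just (x1 @ x2.T + offset) ** power; the 2-D branch merely swaps in the fused addmm. A quick equivalence check (shapes and values are illustrative):

import torch

x1, x2 = torch.randn(5, 3), torch.randn(4, 3)
offset, power = torch.tensor(0.5), 2

fused = torch.addmm(offset, x1, x2.transpose(-2, -1)).pow(power)
plain = (torch.matmul(x1, x2.transpose(-2, -1)) + offset).pow(power)
assert torch.allclose(fused, plain)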

Example 5: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(self, input_, hx):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).
        Returns:
            c_1: Tensor containing the next cell state. (This variant
                has no output gate and returns the cell state only.)
        """

        h_0, c_0 = hx
        batch_size = h_0.size(0)
        bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))
        wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
        wi = torch.mm(input_, self.weight_ih)
        f, i, g = torch.split(wh_b + wi, self.hidden_size, dim=1)
        # Cell states 
        c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
        return c_1 
Author: thunlp | Project: Chinese_NRE | Lines: 23 | Source: mglattice.py

Example 6: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(self, input_, hx):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).
        Returns:
            h_1, c_1: Tensors containing the next hidden and cell state.
        """

        h_0, c_0 = hx
        batch_size = h_0.size(0)
        bias_batch = (self.bias.unsqueeze(0)
                      .expand(batch_size, *self.bias.size()))
        wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
        wi = torch.mm(input_, self.weight_ih)
        # `split_size=` is a long-removed keyword; pass the size positionally
        f, i, o, g = torch.split(wh_b + wi,
                                 self.hidden_size, dim=1)
        c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
        h_1 = torch.sigmoid(o) * torch.tanh(c_1)
        return h_1, c_1 
Author: gitabcworld | Project: FewShotLearning | Lines: 25 | Source: bnlstm.py
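
A hedged usage sketch, assuming the forward above is reachable as a plain function and that the enclosing module sets up the attributes it reads (weight_ih and weight_hh need 4 * hidden_size columns so one fused addmm yields all four gate pre-activations):

import types
import torch

batch, input_size, hidden_size = 2, 7, 5

# stand-in for the module instance whose attributes forward() reads
cell = types.SimpleNamespace(
    hidden_size=hidden_size,
    weight_ih=torch.randn(input_size, 4 * hidden_size),
    weight_hh=torch.randn(hidden_size, 4 * hidden_size),
    bias=torch.randn(4 * hidden_size),
)

x = torch.randn(batch, input_size)
h_0 = torch.zeros(batch, hidden_size)
c_0 = torch.zeros(batch, hidden_size)

h_1, c_1 = forward(cell, x, (h_0, c_0))   # one recurrent step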

Example 7: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(ctx, input, r_weight, i_weight, j_weight, k_weight, bias=None):
        ctx.save_for_backward(input, r_weight, i_weight, j_weight, k_weight, bias)
        check_input(input)
        cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=0)
        cat_kernels_4_i = torch.cat([i_weight,  r_weight, -k_weight, j_weight], dim=0)
        cat_kernels_4_j = torch.cat([j_weight,  k_weight, r_weight, -i_weight], dim=0)
        cat_kernels_4_k = torch.cat([k_weight,  -j_weight, i_weight, r_weight], dim=0)
        cat_kernels_4_quaternion = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=1)
        if input.dim() == 2:
            if bias is not None:
                return torch.addmm(bias, input, cat_kernels_4_quaternion)
            else: 
                return torch.mm(input, cat_kernels_4_quaternion)
        else:
            output = torch.matmul(input, cat_kernels_4_quaternion)
            if bias is not None:
                return output+bias
            else:
                return output

    # This function has only a single output, so it gets only one gradient 
Author: Orkis-Research | Project: Quaternion-Recurrent-Neural-Networks | Lines: 23 | Source: quaternionops.py

Example 8: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(self, input_, hx):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).

        Returns:
            h_1, c_1: Tensors containing the next hidden and cell state.
        """

        h_0, c_0 = hx
        batch_size = h_0.size(0)
        bias_batch = (self.bias.unsqueeze(0)
                      .expand(batch_size, *self.bias.size()))
        wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
        wi = torch.mm(input_, self.weight_ih)
        f, i, o, g = torch.split(wh_b + wi,
                                 self.hidden_size, dim=1)
        c_1 = torch.sigmoid(f) * c_0 + torch.sigmoid(i) * torch.tanh(g)
        h_1 = torch.sigmoid(o) * torch.tanh(c_1)
        return h_1, c_1 
Author: pytorch | Project: benchmark | Lines: 26 | Source: bnlstm.py

Example 9: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(self, x):
        if self.rf == 1:
            size_out = x.size()[:-1] + (self.nf,)
            x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
            x = x.view(*size_out)
        else:
            raise NotImplementedError
        return x 
Author: atcbosselut | Project: comet-commonsense | Lines: 10 | Source: gpt.py
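
The rf == 1 branch is a position-wise linear layer (a 1x1 convolution): fold every leading dimension into one batch axis, run a single fused addmm, and restore the shape. The same pattern in isolation (names and sizes are illustrative):

import torch

nx, nf = 8, 16                     # input / output feature sizes
w = torch.randn(nx, nf)
b = torch.zeros(nf)

x = torch.randn(2, 10, nx)         # (batch, seq, features)
size_out = x.size()[:-1] + (nf,)

y = torch.addmm(b, x.view(-1, x.size(-1)), w).view(*size_out)
assert y.shape == (2, 10, nf)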

Example 10: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(ctx, features, mask, weight, bias, padding=0, stride=1):
        assert mask.dim() == 3 and mask.size(0) == 1
        assert features.dim() == 4 and features.size(0) == 1
        assert features.size()[2:] == mask.size()[1:]
        pad_h, pad_w = _pair(padding)
        stride_h, stride_w = _pair(stride)
        if stride_h != 1 or stride_w != 1:
            raise ValueError(
                'Only stride 1 is currently supported in masked_conv2d.')
        if not features.is_cuda:
            raise NotImplementedError

        out_channel, in_channel, kernel_h, kernel_w = weight.size()

        batch_size = features.size(0)
        out_h = int(
            math.floor((features.size(2) + 2 * pad_h -
                        (kernel_h - 1) - 1) / stride_h + 1))
        out_w = int(
            math.floor((features.size(3) + 2 * pad_w -
                        (kernel_w - 1) - 1) / stride_w + 1))
        mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False)
        output = features.new_zeros(batch_size, out_channel, out_h, out_w)
        if mask_inds.numel() > 0:
            mask_h_idx = mask_inds[:, 0].contiguous()
            mask_w_idx = mask_inds[:, 1].contiguous()
            data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
                                          mask_inds.size(0))
            masked_conv2d_ext.masked_im2col_forward(features, mask_h_idx,
                                                    mask_w_idx, kernel_h,
                                                    kernel_w, pad_h, pad_w,
                                                    data_col)

            # NOTE: the original used the legacy addmm(beta, input, alpha,
            # mat1, mat2) signature; this is the modern equivalent (beta=1, alpha=1)
            masked_output = torch.addmm(bias[:, None],
                                        weight.view(out_channel, -1), data_col)
            masked_conv2d_ext.masked_col2im_forward(masked_output, mask_h_idx,
                                                    mask_w_idx, out_h, out_w,
                                                    out_channel, output)
        return output 
Author: open-mmlab | Project: mmdetection | Lines: 41 | Source: masked_conv.py

Example 11: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(ctx, features, mask, weight, bias, padding=0, stride=1):
        assert mask.dim() == 3 and mask.size(0) == 1
        assert features.dim() == 4 and features.size(0) == 1
        assert features.size()[2:] == mask.size()[1:]
        pad_h, pad_w = _pair(padding)
        stride_h, stride_w = _pair(stride)
        if stride_h != 1 or stride_w != 1:
            raise ValueError(
                'Only stride 1 is currently supported in masked_conv2d.')
        if not features.is_cuda:
            raise NotImplementedError

        out_channel, in_channel, kernel_h, kernel_w = weight.size()

        batch_size = features.size(0)
        out_h = int(
            math.floor((features.size(2) + 2 * pad_h -
                        (kernel_h - 1) - 1) / stride_h + 1))
        out_w = int(
            math.floor((features.size(3) + 2 * pad_w -
                        (kernel_w - 1) - 1) / stride_w + 1))
        mask_inds = torch.nonzero(mask[0] > 0)
        mask_h_idx = mask_inds[:, 0].contiguous()
        mask_w_idx = mask_inds[:, 1].contiguous()
        data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
                                      mask_inds.size(0))
        masked_conv2d_cuda.masked_im2col_forward(features, mask_h_idx,
                                                 mask_w_idx, kernel_h,
                                                 kernel_w, pad_h, pad_w,
                                                 data_col)

        # NOTE: legacy addmm(beta, input, alpha, mat1, mat2) signature in the
        # original; rewritten in the modern form (beta=1, alpha=1 are defaults)
        masked_output = torch.addmm(bias[:, None],
                                    weight.view(out_channel, -1), data_col)
        output = features.new_zeros(batch_size, out_channel, out_h, out_w)
        masked_conv2d_cuda.masked_col2im_forward(masked_output, mask_h_idx,
                                                 mask_w_idx, out_h, out_w,
                                                 out_channel, output)
        return output 
Author: dingjiansw101 | Project: AerialDetection | Lines: 40 | Source: masked_conv.py

Example 12: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.rf == 1:
            size_out = x.size()[:-1] + (self.nf,)
            x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
            x = x.view(*size_out)
        else:
            raise NotImplementedError
        return x 
Author: DFKI-NLP | Project: DISTRE | Lines: 10 | Source: transformer.py

Example 13: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(*size_out)
        return x 
Author: plkmo | Project: BERT-Relation-Extraction | Lines: 7 | Source: modeling_utils.py

Example 14: forward

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def forward(self, x):
        if self.rf == 1:
            size_out = x.size()[:-1] + (self.nf,)
            x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
            x = x.view(*size_out)
        else:
            raise NotImplementedError
        return x 
Author: 649453932 | Project: Bert-Chinese-Text-Classification-Pytorch | Lines: 10 | Source: modeling_openai.py

Example 15: test_addmm_backward_for_additive_shared_with_autograd

# Required import: import torch [as alias]
# Alternatively: from torch import addmm [as alias]
def test_addmm_backward_for_additive_shared_with_autograd(workers):
    """
    Test .backward() on Additive Shared Tensor for addmm
    """
    bob, alice, james = workers["bob"], workers["alice"], workers["james"]

    a = (
        torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
        .fix_prec()
        .share(alice, bob, crypto_provider=james)
    )
    b = (
        torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)
        .fix_prec()
        .share(alice, bob, crypto_provider=james)
    )
    c = (
        torch.tensor([[2.0, 2], [0, 1]], requires_grad=True)
        .fix_prec()
        .share(alice, bob, crypto_provider=james)
    )

    a = syft.AutogradTensor().on(a)
    b = syft.AutogradTensor().on(b)
    c = syft.AutogradTensor().on(c)

    a_torch = torch.tensor([[3.0, 2], [-1, 2]], requires_grad=True)
    b_torch = torch.tensor([[1.0, 2], [3, 2]], requires_grad=True)
    c_torch = torch.tensor([[2.0, 2], [0, 1]], requires_grad=True)

    r = torch.addmm(c, a, b)
    r_torch = torch.addmm(c_torch, a_torch, b_torch)

    ones = torch.ones(r.shape).fix_prec().share(alice, bob, crypto_provider=james)
    ones = syft.AutogradTensor().on(ones)
    r.backward(ones)
    r_torch.backward(torch.ones(r_torch.shape))

    assert (a.grad.get().float_prec() == a_torch.grad).all()
    assert (b.grad.get().float_prec() == b_torch.grad).all()
    assert (c.grad.get().float_prec() == c_torch.grad).all() 
Author: OpenMined | Project: PySyft | Lines: 43 | Source: test_autograd.py


Note: the torch.addmm examples above were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with them; consult each project's license before redistributing or reusing the code. Do not republish without permission.