

Python functional.linear Method Code Examples

This article collects typical usage examples of the Python method torch.nn.functional.linear. If you are wondering how exactly functional.linear works, how to call it, or what real uses of it look like, the curated code examples below should help. You can also explore further usage examples from the containing module, torch.nn.functional.


The following presents 15 code examples of functional.linear, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
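As a quick reminder of what the function itself does: F.linear(input, weight, bias) computes input @ weight.t() + bias, with weight stored as (out_features, in_features). A minimal, self-contained check:

import torch
import torch.nn.functional as F

x = torch.randn(4, 3)   # batch of 4, in_features = 3
w = torch.randn(5, 3)   # weight shape is (out_features, in_features)
b = torch.randn(5)

y = F.linear(x, w, b)   # y = x @ w.t() + b, shape (4, 5)
assert torch.allclose(y, x @ w.t() + b)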

Example 1: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, input, hx, att_score):
        """

        References
        ----------
            https://github.com/pytorch/pytorch/blob/v0.4.1/torch/nn/_functions/rnn.py#L49
        """

        gi = F.linear(input, self.weight_ih, self.bias_ih)
        gh = F.linear(hx, self.weight_hh, self.bias_hh)
        i_r, i_z, i_n = gi.chunk(3, 1)
        h_r, h_z, h_n = gh.chunk(3, 1)

        resetgate = torch.sigmoid(i_r + h_r)
        # updategate = torch.sigmoid(i_z + h_z)
        newgate = torch.tanh(i_n + resetgate * h_n)
        # hy = newgate + updategate * (hx - newgate)

        # The attention score replaces the standard GRU update gate (commented out above).
        att_score = att_score.view(-1, 1)

        hy = (1. - att_score) * hx + att_score * newgate

        return hy 
Author: GitHub-HongweiZhang, Project: prediction-flow, Lines: 25, Source: rnn.py
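The method above belongs to a cell class; to make it runnable in isolation, here is a minimal hypothetical wrapper with the parameter shapes the code implies (three stacked gates, hence the factor of 3). The class name and initialization are assumptions, not the original repo's code.

import torch
import torch.nn as nn
import torch.nn.functional as F

class AGRUCellSketch(nn.Module):
    """Hypothetical stand-in holding the parameters the forward() above expects."""
    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.weight_ih = nn.Parameter(0.1 * torch.randn(3 * hidden_size, input_size))
        self.weight_hh = nn.Parameter(0.1 * torch.randn(3 * hidden_size, hidden_size))
        self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size))
        self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size))

    def forward(self, input, hx, att_score):
        gi = F.linear(input, self.weight_ih, self.bias_ih)
        gh = F.linear(hx, self.weight_hh, self.bias_hh)
        i_r, _, i_n = gi.chunk(3, 1)
        h_r, _, h_n = gh.chunk(3, 1)
        resetgate = torch.sigmoid(i_r + h_r)
        newgate = torch.tanh(i_n + resetgate * h_n)
        att_score = att_score.view(-1, 1)
        return (1. - att_score) * hx + att_score * newgate

cell = AGRUCellSketch(input_size=16, hidden_size=8)
hy = cell(torch.randn(32, 16), torch.zeros(32, 8), torch.rand(32))  # -> (32, 8)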

Example 2: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, x):
        """
        forward pass of the layer
        :param x: input
        :return: y => output
        """
        from torch.nn.functional import linear
        return linear(x, self.weight * self.scale,
                      self.bias if self.use_bias else None)


# -----------------------------------------------------------------------------------
# Pixelwise feature vector normalization.
# reference:
# https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L120
# ----------------------------------------------------------------------------------- 
Author: akanimax, Project: BMSG-GAN, Lines: 18, Source: CustomLayers.py
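The forward() above scales the weight at call time, which is the equalized-learning-rate trick from the referenced progressive-GAN code. A minimal self-contained sketch; the He-style scale sqrt(2 / fan_in) is an assumption about how self.scale is computed, and the original repo may differ:

import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import linear

class EqualizedLinearSketch(nn.Module):
    """Hypothetical self-contained layer around the forward() above."""
    def __init__(self, in_features, out_features, use_bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_features, in_features))
        self.use_bias = use_bias
        self.bias = nn.Parameter(torch.zeros(out_features)) if use_bias else None
        # Assumed He-style runtime scale applied to the raw weight.
        self.scale = np.sqrt(2.0 / in_features)

    def forward(self, x):
        return linear(x, self.weight * self.scale,
                      self.bias if self.use_bias else None)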

Example 3: inverse_no_cache

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def inverse_no_cache(self, inputs):
        """Cost:
            output = O(D^3 + D^2N)
            logabsdet = O(D^3)
        where:
            D = num of features
            N = num of inputs
        """
        batch_size = inputs.shape[0]
        outputs = inputs - self.bias
        outputs, lu = torch.gesv(outputs.t(), self._weight)  # Linear-system solver; removed in modern PyTorch (see the torch.linalg sketch below).
        outputs = outputs.t()
        # The linear-system solver returns the LU decomposition of the weights, which we
        # can use to obtain the log absolute determinant directly.
        logabsdet = -torch.sum(torch.log(torch.abs(torch.diag(lu))))
        logabsdet = logabsdet * torch.ones(batch_size)
        return outputs, logabsdet 
Author: bayesiains, Project: nsf, Lines: 19, Source: linear.py
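torch.gesv was deprecated and later removed from PyTorch. A sketch of the same computation with the modern torch.linalg API (assuming PyTorch >= 1.13); this is a rewrite for reference, not the original code:

import torch

def inverse_no_cache_modern(weight, bias, inputs):
    """Sketch of the method above using torch.linalg instead of torch.gesv."""
    batch_size = inputs.shape[0]
    LU, pivots = torch.linalg.lu_factor(weight)               # P @ L @ U = weight
    outputs = torch.linalg.lu_solve(LU, pivots, (inputs - bias).t()).t()
    # log|det| of the inverse map: minus the sum of log|diag(U)|.
    logabsdet = -torch.sum(torch.log(torch.abs(torch.diag(LU))))
    return outputs, logabsdet * torch.ones(batch_size)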

Example 4: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, inputs, labels):
        cos_th = F.linear(inputs, F.normalize(self.weight))
        cos_th = cos_th.clamp(-1, 1)
        sin_th = torch.sqrt(1.0 - torch.pow(cos_th, 2))
        cos_th_m = cos_th * self.cos_m - sin_th * self.sin_m
        cos_th_m = torch.where(cos_th > self.th, cos_th_m, cos_th - self.mm)

        # This masked assignment duplicates the torch.where fallback above; kept as in the original source.
        cond_v = cos_th - self.th
        cond = cond_v <= 0
        cos_th_m[cond] = (cos_th - self.mm)[cond]

        if labels.dim() == 1:
            labels = labels.unsqueeze(-1)
        onehot = torch.zeros(cos_th.size()).cuda()
        onehot.scatter_(1, labels, 1)
        outputs = onehot * cos_th_m + (1.0 - onehot) * cos_th
        outputs = outputs * self.s
        return outputs 
Author: pudae, Project: kaggle-humpback, Lines: 20, Source: identifier.py
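The constants above (cos_m, sin_m, th, mm) come from the angular margin. A hedged sketch of the usual ArcFace-style __init__ that would precompute them; the original repo's defaults for s and m may differ:

import math
import torch
import torch.nn as nn

class ArcMarginSketch(nn.Module):
    """Assumed constructor for the forward() above (common ArcFace constants)."""
    def __init__(self, in_features, out_features, s=30.0, m=0.5):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(out_features, in_features))
        nn.init.xavier_uniform_(self.weight)
        self.s = s                            # logit scale
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        self.th = math.cos(math.pi - m)       # monotonicity threshold for cos(theta + m)
        self.mm = math.sin(math.pi - m) * m   # linear fallback beyond the threshold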

Example 5: get_loadings

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def get_loadings(self) -> np.ndarray:
        """Extract per-gene weights (for each Z, shape is genes by dim(Z)) in the linear decoder."""
        # This is BW, where B is diag(b) batch norm, W is weight matrix
        if self.use_batch_norm is True:
            w = self.decoder.factor_regressor.fc_layers[0][0].weight
            bn = self.decoder.factor_regressor.fc_layers[0][1]
            sigma = torch.sqrt(bn.running_var + bn.eps)
            gamma = bn.weight
            b = gamma / sigma
            bI = torch.diag(b)
            loadings = torch.matmul(bI, w)
        else:
            loadings = self.decoder.factor_regressor.fc_layers[0][0].weight
        loadings = loadings.detach().cpu().numpy()
        if self.n_batch > 1:
            loadings = loadings[:, : -self.n_batch]

        return loadings 
Author: YosefLab, Project: scVI, Lines: 20, Source: vae.py
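Since multiplying by diag(b) only rescales rows, the same loadings can be computed without materializing the diagonal matrix. A small standalone check of the equivalence:

import torch

w = torch.randn(6, 3)            # (n_output, n_latent) weight matrix
b = torch.rand(6) + 0.5          # per-output batch-norm scale gamma / sigma
assert torch.allclose(torch.matmul(torch.diag(b), w), b.unsqueeze(-1) * w)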

Example 6: LSTMCell

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    """
    A modified LSTM cell with hard sigmoid activation on the input, forget and output gates.
    """
    hx, cx = hidden
    gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)

    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)

    ingate = hard_sigmoid(ingate)
    forgetgate = hard_sigmoid(forgetgate)
    cellgate = torch.tanh(cellgate)  # F.tanh is deprecated; torch.tanh is equivalent
    outgate = hard_sigmoid(outgate)

    cy = (forgetgate * cx) + (ingate * cellgate)
    hy = outgate * torch.tanh(cy)

    return hy, cy 
Author: chenyangh, Project: SemEval2019Task3, Lines: 20, Source: lstm_hard_sigmoid.py
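The helper hard_sigmoid is not included in the excerpt. A common piecewise-linear definition (the Keras-style variant, assumed here rather than taken from the referenced project) is:

import torch

def hard_sigmoid(x):
    """Piecewise-linear sigmoid approximation: clamp(0.2 * x + 0.5, 0, 1)."""
    return torch.clamp(0.2 * x + 0.5, min=0.0, max=1.0)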

Example 7: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, x):
        # Add bias.
        x += self.bias

        # Evaluate activation function.
        if self.act == "linear":
            pass
        elif self.act == 'lrelu':
            x = F.leaky_relu(x, self.alpha, inplace=True)
            x = x * np.sqrt(2)  # original repo def_gain=np.sqrt(2).

        # Scale by gain.
        if self.gain != 1:
            x = x * self.gain

        return x 
Author: tomguluson92, Project: StyleGAN2_PyTorch, Lines: 18, Source: stylegan2.py

Example 8: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, x):
        if self.zero_mean:
            lrt_mean = 0.0
        else:
            lrt_mean = F.linear(x, self.W)
        if self.bias is not None:
            lrt_mean = lrt_mean + self.bias

        # Local reparameterization: sample the pre-activations instead of the weights.
        sigma2 = Variable.exp(self.log_alpha) * self.W * self.W
        if self.permute_sigma:
            perm = torch.randperm(self.in_features * self.out_features).cuda()
            sigma2 = sigma2.view(-1)[perm].view(self.out_features, self.in_features)

        lrt_std = Variable.sqrt(1e-16 + F.linear(x * x, sigma2))
        if self.training:
            eps = Variable(lrt_std.data.new(lrt_std.size()).normal_())
        else:
            eps = 0.0
        return lrt_mean + lrt_std * eps 
Author: da-molchanov, Project: variance-networks, Lines: 20, Source: layers.py

Example 9: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, x):
        """Forward feature from the regression head to get integral result of
        bounding box location.

        Args:
            x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
                n is self.reg_max.

        Returns:
            x (Tensor): Integral result of box locations, i.e., distance
                offsets from the box center in four directions, shape (N, 4).
        """
        x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
        x = F.linear(x, self.project).reshape(-1, 4)
        return x 
Author: open-mmlab, Project: mmdetection, Lines: 17, Source: gfl_head.py
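Here self.project is a fixed vector of the discrete offsets 0..reg_max, registered as a non-learnable buffer, so the F.linear call computes the expectation of the softmax distribution. A sketch consistent with the docstring above (an assumption about the setup, not quoted from the repo):

import torch
import torch.nn as nn

class IntegralSketch(nn.Module):
    """Assumed setup for the forward() above: fixed offsets 0..reg_max."""
    def __init__(self, reg_max=16):
        super().__init__()
        self.reg_max = reg_max
        self.register_buffer(
            'project', torch.linspace(0, self.reg_max, self.reg_max + 1))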

Example 10: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, x, lens, hidden=None):
        emb_x = self.dp1(self.emb(x))
        if not self.training:
            self.rnn.flatten_parameters()
        packed = nn.utils.rnn.pack_padded_sequence(
            emb_x, lens, batch_first=True, enforce_sorted=False)
        # output: (seq_len, batch, hidden)
        outputs, hidden = self.rnn(packed, hidden)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True)
        if self.emb_tying:
            outputs = F.linear(self.dp2(outputs), self.emb.weight)
        else:
            outputs = self.trans(self.dp2(outputs))
        return outputs, hidden 
Author: Alexander-H-Liu, Project: End-to-end-ASR-Pytorch, Lines: 17, Source: lm.py
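When emb_tying is set, the output projection reuses the embedding matrix, which requires the RNN output size to equal the embedding dimension. A small shape demo (names and sizes are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

vocab, emb_dim = 1000, 64
emb = nn.Embedding(vocab, emb_dim)
h = torch.randn(2, 5, emb_dim)        # RNN outputs; last dim must equal emb_dim
logits = F.linear(h, emb.weight)      # tied projection -> (2, 5, vocab)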

Example 11: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, input):
        if self.masked_weight is None:
            return F.linear(input, self.mask * self.weight, self.bias)
        else:
            # ~17% speedup for Prog Sampling.
            return F.linear(input, self.masked_weight, self.bias) 
Author: naru-project, Project: naru, Lines: 8, Source: made.py
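The masked_weight branch caches the elementwise product so inference skips the multiply. A hypothetical helper that would populate the cache; the repo's actual mechanism may differ:

import torch

def cache_masked_weight(layer):
    """Hypothetical: precompute mask * weight once for inference-time forward."""
    with torch.no_grad():
        layer.masked_weight = (layer.mask * layer.weight).clone()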

Example 12: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, x):
        if self.groups == 1:
            out = super(Linear, self).forward(x)
        else:
            x_g = x.chunk(self.groups, dim=-1)
            w_g = self.weight.chunk(self.groups, dim=-1)
            out = torch.cat([F.linear(x_g[i], w_g[i])
                             for i in range(self.groups)], -1)
            if self.bias is not None:
                out += self.bias
        return out 
Author: nadavbh12, Project: Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch, Lines: 13, Source: linear.py

Example 13: forward_no_cache

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward_no_cache(self, inputs):
        """Cost:
            output = O(D^2N)
            logabsdet = O(D)
        where:
            D = num of features
            N = num of inputs
        """
        lower, upper = self._create_lower_upper()
        outputs = F.linear(inputs, upper)
        outputs = F.linear(outputs, lower, self.bias)
        logabsdet = self.logabsdet() * inputs.new_ones(outputs.shape[0])
        return outputs, logabsdet 
Author: bayesiains, Project: nsf, Lines: 15, Source: lu.py
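Because F.linear(x, W) computes x @ W.t(), chaining upper then lower is the same as a single multiply by W = lower @ upper. A quick numerical check:

import torch
import torch.nn.functional as F

D = 4
lower = torch.eye(D) + torch.tril(torch.randn(D, D), diagonal=-1)  # unit diagonal
upper = torch.triu(torch.randn(D, D))
x = torch.randn(8, D)

two_step = F.linear(F.linear(x, upper), lower)
one_step = F.linear(x, lower @ upper)
assert torch.allclose(two_step, one_step, atol=1e-5)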

Example 14: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, inputs, context=None):
        if not self.training and self.using_cache:
            self._check_forward_cache()
            outputs = F.linear(inputs, self.cache.weight, self.bias)
            logabsdet = self.cache.logabsdet * torch.ones(outputs.shape[0])
            return outputs, logabsdet
        else:
            return self.forward_no_cache(inputs) 
Author: bayesiains, Project: nsf, Lines: 10, Source: linear.py

Example 15: inverse

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def inverse(self, inputs, context=None):
        if not self.training and self.using_cache:
            self._check_inverse_cache()
            outputs = F.linear(inputs - self.bias, self.cache.inverse)
            logabsdet = (-self.cache.logabsdet) * torch.ones(outputs.shape[0])
            return outputs, logabsdet
        else:
            return self.inverse_no_cache(inputs) 
Author: bayesiains, Project: nsf, Lines: 10, Source: linear.py


Note: the torch.nn.functional.linear examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Refer to each project's license before distributing or using the code, and please do not republish without permission.