This article collects typical usage examples of torch.nn.functional.linear in Python. If you have been wondering what functional.linear does and how to use it, the curated examples below should help; you can also read further about the enclosing module, torch.nn.functional.

Fifteen code examples of functional.linear are listed below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
Example 1: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, input, hx, att_score):
    """
    References
    ----------
    https://github.com/pytorch/pytorch/blob/v0.4.1/torch/nn/_functions/rnn.py#L49
    """
    gi = F.linear(input, self.weight_ih, self.bias_ih)
    gh = F.linear(hx, self.weight_hh, self.bias_hh)
    i_r, i_z, i_n = gi.chunk(3, 1)
    h_r, h_z, h_n = gh.chunk(3, 1)

    resetgate = torch.sigmoid(i_r + h_r)
    # updategate = torch.sigmoid(i_z + h_z)
    newgate = torch.tanh(i_n + resetgate * h_n)
    # hy = newgate + updategate * (hx - newgate)

    att_score = att_score.view(-1, 1)
    hy = (1. - att_score) * hx + att_score * newgate
    return hy
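For context, this cell swaps the GRU's learned update gate for an externally supplied attention score (the AGRU variant used in DIEN-style interest models). A minimal standalone sketch of the same computation, with hypothetical shapes and randomly initialized parameters:

import torch
import torch.nn.functional as F

batch, input_size, hidden_size = 4, 8, 16
weight_ih = torch.randn(3 * hidden_size, input_size)   # stacked reset/update/new gates
weight_hh = torch.randn(3 * hidden_size, hidden_size)
x = torch.randn(batch, input_size)
hx = torch.zeros(batch, hidden_size)
att_score = torch.rand(batch, 1)                       # one attention weight per row

gi = F.linear(x, weight_ih)
gh = F.linear(hx, weight_hh)
i_r, i_z, i_n = gi.chunk(3, 1)
h_r, h_z, h_n = gh.chunk(3, 1)
resetgate = torch.sigmoid(i_r + h_r)
newgate = torch.tanh(i_n + resetgate * h_n)
hy = (1. - att_score) * hx + att_score * newgate       # att_score replaces the update gate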
Example 2: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, x):
    """
    forward pass of the layer
    :param x: input
    :return: y => output
    """
    from torch.nn.functional import linear
    return linear(x, self.weight * self.scale,
                  self.bias if self.use_bias else None)
# -----------------------------------------------------------------------------------
# Pixelwise feature vector normalization.
# reference:
# https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L120
# -----------------------------------------------------------------------------------
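The weight * self.scale pattern above is the equalized-learning-rate trick from Progressive GAN: weights are stored at unit variance and rescaled by the He-initialization constant on every forward pass. A sketch of how such a layer is typically set up (the scale formula is the usual convention, an assumption here rather than taken from this snippet):

import math
import torch
import torch.nn as nn

class EqualizedLinear(nn.Module):
    def __init__(self, in_features, out_features, use_bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_features, in_features))
        self.bias = nn.Parameter(torch.zeros(out_features)) if use_bias else None
        self.use_bias = use_bias
        self.scale = math.sqrt(2.0 / in_features)  # He constant, applied at runtime

    def forward(self, x):
        from torch.nn.functional import linear
        return linear(x, self.weight * self.scale,
                      self.bias if self.use_bias else None)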
Example 3: inverse_no_cache
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def inverse_no_cache(self, inputs):
    """Cost:
        output = O(D^3 + D^2N)
        logabsdet = O(D^3)
    where:
        D = num of features
        N = num of inputs
    """
    batch_size = inputs.shape[0]
    outputs = inputs - self.bias
    outputs, lu = torch.gesv(outputs.t(), self._weight)  # Linear-system solver.
    outputs = outputs.t()
    # The linear-system solver returns the LU decomposition of the weights, which we
    # can use to obtain the log absolute determinant directly.
    logabsdet = -torch.sum(torch.log(torch.abs(torch.diag(lu))))
    logabsdet = logabsdet * torch.ones(batch_size)
    return outputs, logabsdet
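Note that torch.gesv has since been deprecated and removed from PyTorch; on current versions the same inverse can be written with torch.linalg, using slogdet for the log absolute determinant (a sketch under that assumption, not taken from the repository):

import torch

def inverse_no_cache_modern(inputs, weight, bias):
    outputs = inputs - bias
    outputs = torch.linalg.solve(weight, outputs.t()).t()  # solves W @ X = (inputs - bias)^T
    logabsdet = -torch.linalg.slogdet(weight).logabsdet    # replaces the manual LU diagonal sum
    return outputs, logabsdet * torch.ones(inputs.shape[0])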
Example 4: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, inputs, labels):
    cos_th = F.linear(inputs, F.normalize(self.weight))
    cos_th = cos_th.clamp(-1, 1)
    sin_th = torch.sqrt(1.0 - torch.pow(cos_th, 2))
    cos_th_m = cos_th * self.cos_m - sin_th * self.sin_m
    cos_th_m = torch.where(cos_th > self.th, cos_th_m, cos_th - self.mm)
    # The masked assignment below applies the same correction as the
    # torch.where above; it is kept verbatim from the original source.
    cond_v = cos_th - self.th
    cond = cond_v <= 0
    cos_th_m[cond] = (cos_th - self.mm)[cond]

    if labels.dim() == 1:
        labels = labels.unsqueeze(-1)
    onehot = torch.zeros(cos_th.size()).cuda()  # assumes CUDA tensors
    onehot.scatter_(1, labels, 1)
    outputs = onehot * cos_th_m + (1.0 - onehot) * cos_th
    outputs = outputs * self.s
    return outputs
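This is an ArcFace (additive angular margin) head: with theta the angle between the normalized feature and class weight, it computes cos(theta + m) = cos(theta)*cos(m) - sin(theta)*sin(m), falling back to cos(theta) - m*sin(m) once theta + m would pass pi. The constants referenced above are conventionally prepared as follows (the values are the common ArcFace defaults, assumed here):

import math

s = 64.0                          # feature scale (self.s)
m = 0.50                          # angular margin (self.m)
cos_m, sin_m = math.cos(m), math.sin(m)
th = math.cos(math.pi - m)        # self.th: where theta + m would exceed pi
mm = math.sin(math.pi - m) * m    # self.mm: linear fallback used past the threshold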
Example 5: get_loadings
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def get_loadings(self) -> np.ndarray:
    """Extract per-gene weights (for each Z, shape is genes by dim(Z)) in the linear decoder."""
    # This is BW, where B is diag(b) batch norm, W is weight matrix
    if self.use_batch_norm is True:
        w = self.decoder.factor_regressor.fc_layers[0][0].weight
        bn = self.decoder.factor_regressor.fc_layers[0][1]
        sigma = torch.sqrt(bn.running_var + bn.eps)
        gamma = bn.weight
        b = gamma / sigma
        bI = torch.diag(b)
        loadings = torch.matmul(bI, w)
    else:
        loadings = self.decoder.factor_regressor.fc_layers[0][0].weight
    loadings = loadings.detach().cpu().numpy()
    if self.n_batch > 1:
        loadings = loadings[:, : -self.n_batch]
    return loadings
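The batch-norm branch folds a scale-only normalization into the weights: with b = gamma / sqrt(running_var + eps), the effective loadings are diag(b) @ W. A quick self-contained check of that identity (all names illustrative):

import torch

genes, latent = 5, 3
W = torch.randn(genes, latent)
gamma = torch.rand(genes)                  # bn.weight
var = torch.rand(genes) + 0.1              # bn.running_var
b = gamma / torch.sqrt(var + 1e-5)

folded = torch.matmul(torch.diag(b), W)    # what get_loadings computes
assert torch.allclose(folded, b.unsqueeze(1) * W)  # same as row-wise scaling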
Example 6: LSTMCell
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    """
    A modified LSTM cell with hard sigmoid activation on the input, forget and output gates.
    """
    hx, cx = hidden
    gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)
    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)

    ingate = hard_sigmoid(ingate)
    forgetgate = hard_sigmoid(forgetgate)
    cellgate = torch.tanh(cellgate)  # F.tanh is deprecated; torch.tanh is equivalent
    outgate = hard_sigmoid(outgate)

    cy = (forgetgate * cx) + (ingate * cellgate)
    hy = outgate * torch.tanh(cy)
    return hy, cy
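hard_sigmoid is not defined in the snippet; a common piecewise-linear definition (the Keras convention with slope 0.2, assumed here) would be:

import torch

def hard_sigmoid(x):
    # Piecewise-linear approximation of the sigmoid, cheaper than exp().
    return torch.clamp(0.2 * x + 0.5, min=0.0, max=1.0)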
Example 7: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, x):
    # Add bias.
    x += self.bias
    # Evaluate activation function.
    if self.act == "linear":
        pass
    elif self.act == 'lrelu':
        x = F.leaky_relu(x, self.alpha, inplace=True)
        x = x * np.sqrt(2)  # original repo uses def_gain=np.sqrt(2)
    # Scale by gain.
    if self.gain != 1:
        x = x * self.gain
    return x
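The np.sqrt(2) factor compensates for the variance a leaky ReLU removes from a zero-mean signal, keeping activations at roughly unit scale across layers. A quick empirical check (illustrative only):

import numpy as np
import torch
import torch.nn.functional as F

x = torch.randn(1_000_000)
y = F.leaky_relu(x, 0.2) * np.sqrt(2)
print(x.std().item(), y.std().item())  # both come out close to 1.0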
Example 8: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, x):
    if self.zero_mean:
        lrt_mean = 0.0
    else:
        lrt_mean = F.linear(x, self.W)
    if self.bias is not None:
        lrt_mean = lrt_mean + self.bias

    sigma2 = torch.exp(self.log_alpha) * self.W * self.W  # per-weight variance
    if self.permute_sigma:
        sigma2 = sigma2.view(-1)[torch.randperm(self.in_features * self.out_features).cuda()].view(self.out_features, self.in_features)

    lrt_std = torch.sqrt(1e-16 + F.linear(x * x, sigma2))
    if self.training:
        eps = torch.randn_like(lrt_std)  # replaces the original's deprecated Variable-based sampling
    else:
        eps = 0.0
    return lrt_mean + lrt_std * eps
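The two F.linear calls implement the local reparameterization trick: for y = Wx with elementwise W ~ N(mu, sigma^2), the pre-activation is itself Gaussian with mean mu*x and variance sigma^2*x^2, so one sample per activation suffices instead of sampling a whole weight matrix. A compact sketch of that computation (names illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(2, 4)
mu = torch.randn(3, 4)                    # weight means (self.W)
log_alpha = torch.randn(3, 4)             # log of the variance/mean^2 ratio
sigma2 = torch.exp(log_alpha) * mu * mu   # per-weight variance

mean = F.linear(x, mu)                                # E[Wx]
std = torch.sqrt(1e-16 + F.linear(x * x, sigma2))     # Std[Wx]
sample = mean + std * torch.randn_like(std)           # one draw of the layer output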
Example 9: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, x):
    """Forward feature from the regression head to get integral result of
    bounding box location.

    Args:
        x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
            n is self.reg_max.

    Returns:
        x (Tensor): Integral result of box locations, i.e., distance
            offsets from the box center in four directions, shape (N, 4).
    """
    x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
    x = F.linear(x, self.project).reshape(-1, 4)
    return x
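Here self.project is a fixed integration grid over the discretized offset bins, so F.linear computes the expectation sum_i i * p_i per side. A standalone sketch with an assumed reg_max:

import torch
import torch.nn.functional as F

reg_max = 16
project = torch.linspace(0, reg_max, reg_max + 1)  # grid [0, 1, ..., reg_max]
logits = torch.randn(8, 4 * (reg_max + 1))         # fake head output for 8 boxes
p = F.softmax(logits.reshape(-1, reg_max + 1), dim=1)
offsets = F.linear(p, project).reshape(-1, 4)      # expected offset per side, shape (8, 4)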
Example 10: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, x, lens, hidden=None):
    emb_x = self.dp1(self.emb(x))
    if not self.training:
        self.rnn.flatten_parameters()
    packed = nn.utils.rnn.pack_padded_sequence(
        emb_x, lens, batch_first=True, enforce_sorted=False)
    # outputs: (batch, seq_len, hidden), since batch_first=True
    outputs, hidden = self.rnn(packed, hidden)
    outputs, _ = nn.utils.rnn.pad_packed_sequence(
        outputs, batch_first=True)
    if self.emb_tying:
        outputs = F.linear(self.dp2(outputs), self.emb.weight)
    else:
        outputs = self.trans(self.dp2(outputs))
    return outputs, hidden
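The emb_tying branch reuses the embedding matrix as the output projection (weight tying), which requires the RNN's output size to match the embedding dimension. A minimal comparison of the two paths (hypothetical sizes):

import torch
import torch.nn as nn
import torch.nn.functional as F

vocab, emb_dim, hidden = 100, 32, 32        # tying requires hidden == emb_dim
emb = nn.Embedding(vocab, emb_dim)
outputs = torch.randn(4, 7, hidden)         # (batch, seq_len, hidden)

tied = F.linear(outputs, emb.weight)        # (4, 7, vocab), zero extra parameters
untied = nn.Linear(hidden, vocab)(outputs)  # separate projection, vocab*hidden more weights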
Example 11: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, input):
    if self.masked_weight is None:
        return F.linear(input, self.mask * self.weight, self.bias)
    else:
        # ~17% speedup for Prog Sampling.
        return F.linear(input, self.masked_weight, self.bias)
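Masked linear layers like this underpin autoregressive density models such as MADE: a fixed binary mask zeroes the forbidden connections, and precomputing mask * weight once avoids the elementwise multiply on every call. A minimal sketch:

import torch
import torch.nn.functional as F

out_f, in_f = 3, 4
weight = torch.randn(out_f, in_f)
mask = (torch.rand(out_f, in_f) > 0.5).float()  # illustrative binary mask

masked_weight = mask * weight                   # cache once after masks are frozen
y = F.linear(torch.randn(2, in_f), masked_weight)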
Example 12: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, x):
    if self.groups == 1:
        out = super(Linear, self).forward(x)
    else:
        x_g = x.chunk(self.groups, dim=-1)
        w_g = self.weight.chunk(self.groups, dim=-1)
        out = torch.cat([F.linear(x_g[i], w_g[i])
                         for i in range(self.groups)], -1)
        if self.bias is not None:
            out += self.bias
    return out
Author: nadavbh12 | Project: Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch | Lines: 13 | Source file: linear.py
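The grouped branch is equivalent to one linear layer with a block-diagonal weight matrix, i.e. each group of output features only sees its own slice of the input. A quick equivalence check (bias omitted, shapes illustrative):

import torch
import torch.nn.functional as F

groups, in_per_g, out_per_g = 2, 3, 4
x = torch.randn(5, groups * in_per_g)
w = torch.randn(out_per_g, groups * in_per_g)

x_g = x.chunk(groups, dim=-1)
w_g = w.chunk(groups, dim=-1)
grouped = torch.cat([F.linear(x_g[i], w_g[i]) for i in range(groups)], -1)

block = torch.block_diag(*w_g)                    # same map as one big sparse matrix
assert torch.allclose(grouped, F.linear(x, block), atol=1e-5)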
Example 13: forward_no_cache
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward_no_cache(self, inputs):
    """Cost:
        output = O(D^2N)
        logabsdet = O(D)
    where:
        D = num of features
        N = num of inputs
    """
    lower, upper = self._create_lower_upper()
    outputs = F.linear(inputs, upper)
    outputs = F.linear(outputs, lower, self.bias)
    logabsdet = self.logabsdet() * inputs.new_ones(outputs.shape[0])
    return outputs, logabsdet
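Because lower has a unit diagonal, det(W) = det(L)*det(U) reduces to the product of upper's diagonal, which is why logabsdet costs only O(D). A sketch of that reduction (an assumption about how self.logabsdet() is computed, not taken from the repository):

import torch

def logabsdet_from_upper(upper):
    # |det(LU)| = prod |diag(U)| when L has ones on its diagonal.
    return torch.sum(torch.log(torch.abs(torch.diagonal(upper))))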
Example 14: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def forward(self, inputs, context=None):
    if not self.training and self.using_cache:
        self._check_forward_cache()
        outputs = F.linear(inputs, self.cache.weight, self.bias)
        logabsdet = self.cache.logabsdet * torch.ones(outputs.shape[0])
        return outputs, logabsdet
    else:
        return self.forward_no_cache(inputs)
Example 15: inverse
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import linear [as alias]
def inverse(self, inputs, context=None):
    if not self.training and self.using_cache:
        self._check_inverse_cache()
        outputs = F.linear(inputs - self.bias, self.cache.inverse)
        logabsdet = (-self.cache.logabsdet) * torch.ones(outputs.shape[0])
        return outputs, logabsdet
    else:
        return self.inverse_no_cache(inputs)
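Examples 14 and 15 are the cached counterparts of the no-cache paths above: outside training, the assembled weight, its inverse, and the log-determinant are computed once, after which every call is a single F.linear. A rough sketch of such a cache (field names follow the code; the population logic is an assumption):

import torch

class LinearCache:
    def __init__(self):
        self.weight = None      # assembled W, used by forward
        self.inverse = None     # W^{-1}, used by inverse
        self.logabsdet = None   # log|det W|

def populate(cache, weight):
    # One-off O(D^3) work, amortized over all subsequent eval-time calls.
    cache.weight = weight
    cache.inverse = torch.linalg.inv(weight)
    cache.logabsdet = torch.linalg.slogdet(weight).logabsdet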