This article collects typical usage examples of Python's torch.nn.functional. If you have been wondering how nn.functional is used, what it is for, or where to find working examples, the curated code samples below may help. You can also explore further usage examples from its parent package, torch.nn.
Below are 15 code examples involving nn.functional, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def __init__(self, G, F, K, E=1,
             nonlinearity=nn.functional.relu, concatenate=True):
    # K: Number of filter taps
    # GSOs will be added later.
    # This combines both weight scalars and weight vectors.
    # Initialize parent
    super().__init__()
    # Save parameters:
    self.G = G
    self.F = F
    self.K = K
    self.E = E
    self.S = None  # No GSO assigned yet
    self.nonlinearity = nonlinearity
    self.concatenate = concatenate
    # Create parameters:
    self.mixer = nn.parameter.Parameter(torch.Tensor(K, E, 2*F))
    self.weight = nn.parameter.Parameter(torch.Tensor(K, E, F, G))
    # Initialize parameters
    self.reset_parameters()
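The constructor ends by calling self.reset_parameters(), which is not shown in the snippet. A minimal sketch of what such a method typically does, assuming a common uniform scheme scaled by the input features and filter taps (the source library's exact scheme may differ):

import math

def reset_parameters(self):
    # Assumed scheme: uniform range scaled by input features G and filter taps K.
    stdv = 1.0 / math.sqrt(self.G * self.K)
    self.weight.data.uniform_(-stdv, stdv)
    self.mixer.data.uniform_(-stdv, stdv)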
Example 2: forward
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def forward(self, x, dec_steps, state=None, dec_cps=None):
    # x is just a time-step input [B, F]
    assert len(x.size()) == 2, x.size()
    if state is None:
        state = self.init_hidden(x.size(0))
    # Avoid a mutable default argument: build the checkpoint dict here.
    if dec_cps is None:
        dec_cps = {}
    assert isinstance(dec_cps, dict), type(dec_cps)
    x = x.unsqueeze(1)
    ht = x
    frames = []
    # forward through RNN
    for t in range(dec_steps):
        if t in dec_cps:
            # Replace the input with the forced checkpoint frame at step t.
            ht = dec_cps[t]
            if len(ht.size()) == 2:
                # add time axis
                ht = ht.unsqueeze(1)
        ht, state = self.rnn(ht, state)
        ht = self.out_fc(ht)
        frames.append(ht)
    frames = torch.cat(frames, 1)
    return frames, state
Example 3: get_nonlinearity
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def get_nonlinearity(nonlinearity=None):
    if not nonlinearity:
        pass
    elif callable(nonlinearity):
        # Already a class or function; instantiate LeakyReLU specially.
        if nonlinearity == nn.LeakyReLU:
            nonlinearity = nonlinearity(0.02, inplace=True)
    elif hasattr(nn, nonlinearity):
        # A string naming an nn module: check the name *before* resolving it,
        # otherwise the 'LeakyReLU' comparison can never match.
        if nonlinearity == 'LeakyReLU':
            nonlinearity = getattr(nn, nonlinearity)(0.02, inplace=True)
        else:
            nonlinearity = getattr(nn, nonlinearity)()
    elif hasattr(nn.functional, nonlinearity):
        # A string naming a functional, e.g. 'relu': return it uninstantiated.
        nonlinearity = getattr(nn.functional, nonlinearity)
    else:
        raise ValueError(nonlinearity)
    return nonlinearity
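A quick usage sketch of the fixed helper above; note that the returned object differs by branch (a bare functional, an instantiated module, or the callable passed in):

from torch import nn

act = get_nonlinearity('relu')       # -> torch.nn.functional.relu (bare functional)
act = get_nonlinearity('Tanh')       # -> nn.Tanh() instance
act = get_nonlinearity('LeakyReLU')  # -> nn.LeakyReLU(0.02, inplace=True) instance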
Example 4: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def __init__(self, inp_shape, fc_size, fc_count=1, batch_norm=False,
             activation="relu"):
    super().__init__()
    self.layers = nn.ModuleList()
    sz = np.prod(inp_shape)
    self.flat_size = sz
    # Setup Dense/Linear layers (`linear` is a Linear-layer factory
    # defined elsewhere in the source module).
    for _ in range(fc_count):
        slayer = nn.ModuleList([linear(sz, fc_size)])
        sz = fc_size
        if batch_norm:
            slayer.append(nn.BatchNorm1d(sz))
        self.layers.append(slayer)
    self.out_shape = (sz,)
    self.activation = getattr(nn.functional, activation)
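The corresponding forward pass is not part of the snippet. A hedged sketch of how these layers would plausibly be applied, flattening the input and then running each sub-list (Linear, then optional BatchNorm) followed by the functional activation:

def forward(self, x):
    # Hypothetical forward matching the __init__ above; not from the source.
    x = x.view(-1, self.flat_size)
    for slayer in self.layers:
        for module in slayer:  # Linear, then optional BatchNorm1d
            x = module(x)
        x = self.activation(x)
    return x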
Example 5: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def __init__(self, input_size, output_size, num_layers=1, hidden_size=None,
             activation="Tanh", bias=True, isActivation=True):
    super(FeedForward, self).__init__()
    self.input_size = input_size
    self.output_size = output_size
    self.hidden_size = hidden_size
    self.num_layers = num_layers
    # Prefer an nn.Module activation (e.g. nn.Tanh); otherwise fall back to
    # the functional form, which must not be called without arguments.
    self.activation = getattr(nn, activation, None)
    if self.activation is not None:
        self.activation = self.activation()
    else:
        self.activation = getattr(nn.functional, activation)
    self.isActivation = isActivation
    n_inputs = [input_size] + [hidden_size] * (num_layers - 1)
    n_outputs = [hidden_size] * (num_layers - 1) + [output_size]
    self.linears = nn.ModuleList([nn.Linear(n_in, n_out, bias=bias)
                                  for n_in, n_out in zip(n_inputs, n_outputs)])
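The class's forward is not shown. A hedged sketch consistent with the constructor, under one plausible reading of isActivation (that it gates the activation after the final layer):

def forward(self, x):
    # Hypothetical forward; the isActivation semantics are an assumption.
    for i, layer in enumerate(self.linears):
        x = layer(x)
        if i < len(self.linears) - 1 or self.isActivation:
            x = self.activation(x)
    return x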
Example 6: forward
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def forward(self, x, cat):
    losses = []
    L = self.getGraph(x)
    L = self.getLaplacian(L)
    # (Disabled) one-hot category conditioning concatenated onto x:
    # cat = torch.unsqueeze(cat, 1)
    # cat = torch.zeros(self.batch_size, self.class_size).scatter_(1, cat, 1)
    # cat = torch.unsqueeze(cat, 1)
    # cat = cat.expand(-1, self.vertice, -1).double()
    # x = torch.cat((x, cat), dim=2)
    for i in range(len(self.F)):
        x = getattr(self, 'gcn%d' % i)(x, L)
        losses.append(self.rloss(x.detach(), torch.zeros_like(x)))
    x = x.permute(0, 2, 1)
    x = self.pool(x)
    x.squeeze_(2)
    for i in range(len(self.M)):
        x = getattr(self, 'fc%d' % i)(x)
        # x = self.relu(x)
    return x, losses
Example 7: show
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def show(img, label, target):
    # De-normalize back to [0, 255] pixel values.
    img = np.transpose(img, (1, 2, 0))
    img *= 128.
    img += 127.5
    img = img.astype(np.uint8)
    lb = ""
    for i in label:
        lb += CHARS[i]
    tg = ""
    for j in target.tolist():
        tg += CHARS[int(j)]
    # Flag whether the predicted label matches the target.
    flag = "T" if lb == tg else "F"
    # cv2ImgAddText is used instead of cv2.putText to render non-ASCII labels.
    img = cv2ImgAddText(img, lb, (0, 0))
    cv2.imshow("test", img)
    print("target: ", tg, " ### {} ### ".format(flag), "predict: ", lb)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example 8: lstm
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def lstm(inputs, state, params):
    [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o,
     W_xc, W_hc, b_c, W_hq, b_q] = params
    (H, C) = state
    outputs = []
    for X in inputs:
        # Input, forget, and output gates.
        I = torch.sigmoid(torch.matmul(X, W_xi) + torch.matmul(H, W_hi) + b_i)
        F = torch.sigmoid(torch.matmul(X, W_xf) + torch.matmul(H, W_hf) + b_f)
        O = torch.sigmoid(torch.matmul(X, W_xo) + torch.matmul(H, W_ho) + b_o)
        # Candidate memory cell, then the gated cell and hidden updates.
        C_tilda = torch.tanh(torch.matmul(X, W_xc) + torch.matmul(H, W_hc) + b_c)
        C = F * C + I * C_tilda
        H = O * C.tanh()
        Y = torch.matmul(H, W_hq) + b_q
        outputs.append(Y)
    return outputs, (H, C)
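A minimal, self-contained driver for the scratch LSTM above; all shapes and the initialization scale are assumptions for illustration:

import torch

vocab_size, num_hiddens, batch_size, num_steps = 28, 64, 4, 5

def three(n_in):
    # One gate's parameters: input weights, hidden weights, bias.
    return (torch.randn(n_in, num_hiddens) * 0.01,
            torch.randn(num_hiddens, num_hiddens) * 0.01,
            torch.zeros(num_hiddens))

W_xi, W_hi, b_i = three(vocab_size)
W_xf, W_hf, b_f = three(vocab_size)
W_xo, W_ho, b_o = three(vocab_size)
W_xc, W_hc, b_c = three(vocab_size)
W_hq, b_q = torch.randn(num_hiddens, vocab_size) * 0.01, torch.zeros(vocab_size)
params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o,
          W_xc, W_hc, b_c, W_hq, b_q]

inputs = [torch.randn(batch_size, vocab_size) for _ in range(num_steps)]
state = (torch.zeros(batch_size, num_hiddens), torch.zeros(batch_size, num_hiddens))
outputs, (H, C) = lstm(inputs, state, params)
print(len(outputs), outputs[0].shape)  # 5 torch.Size([4, 28])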
Example 9: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def __init__(self, depth=28, width=2, norm=None):
    super(F, self).__init__()
    self.f = wideresnet.Wide_ResNet(depth, width, norm=norm)
    # A scalar energy head and a 10-way classifier head share the backbone.
    self.energy_output = nn.Linear(self.f.last_dim, 1)
    self.class_output = nn.Linear(self.f.last_dim, 10)
Example 10: calculate_gain
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def calculate_gain(nonlinearity, param=None):
    r"""Return the recommended gain value for the given nonlinearity function.
    The values are as follows:

    ================= ====================================================
    nonlinearity      gain
    ================= ====================================================
    Linear / Identity :math:`1`
    Conv{1,2,3}D      :math:`1`
    Sigmoid           :math:`1`
    Tanh              :math:`\frac{5}{3}`
    ReLU              :math:`\sqrt{2}`
    Leaky ReLU        :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}`
    ================= ====================================================

    Args:
        nonlinearity: the non-linear function (`nn.functional` name)
        param: optional parameter for the non-linear function

    Examples:
        >>> gain = nn.init.calculate_gain('leaky_relu')
    """
    linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d',
                  'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
    if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
        return 1
    elif nonlinearity == 'tanh':
        return 5.0 / 3
    elif nonlinearity == 'relu':
        return math.sqrt(2.0)
    elif nonlinearity == 'leaky_relu':
        if param is None:
            negative_slope = 0.01
        elif (not isinstance(param, bool) and isinstance(param, int)) \
                or isinstance(param, float):
            # True/False are instances of int, hence the explicit bool check.
            negative_slope = param
        else:
            raise ValueError("negative_slope {} not a valid number".format(param))
        return math.sqrt(2.0 / (1 + negative_slope ** 2))
    else:
        raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
Example 11: kaiming_normal_std_
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def kaiming_normal_std_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
    r"""Compute the standard deviation used by the initialization method
    described in "Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification" - He, K. et al. (2015), also
    known as He initialization. Filling the tensor with values sampled from
    :math:`\mathcal{N}(0, \text{std})`, where

    .. math::
        \text{std} = \sqrt{\frac{2}{(1 + a^2) \times \text{fan\_in}}}

    gives the He (Kaiming) normal initialization. Note that, unlike
    `nn.init.kaiming_normal_`, this function only returns the std; it does
    not modify the tensor in place.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        a: the negative slope of the rectifier used after this layer
            (0 for ReLU by default)
        mode: either 'fan_in' (default) or 'fan_out'. Choosing 'fan_in'
            preserves the magnitude of the variance of the weights in the
            forward pass. Choosing 'fan_out' preserves the magnitudes in the
            backwards pass.
        nonlinearity: the non-linear function (`nn.functional` name),
            recommended to use only with 'relu' or 'leaky_relu' (default).

    Examples:
        >>> w = torch.empty(3, 5)
        >>> std = kaiming_normal_std_(w, mode='fan_out', nonlinearity='relu')
    """
    fan = _calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    return std
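Since kaiming_normal_std_ only returns the std, actually initializing a tensor takes one extra step. A small usage sketch, assuming torch is imported and the helpers above are in scope:

w = torch.empty(3, 5)
std = kaiming_normal_std_(w, mode='fan_in', nonlinearity='relu')
with torch.no_grad():
    # Equivalent to nn.init.kaiming_normal_(w, nonlinearity='relu').
    w.normal_(0, std)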
Example 12: gcn_forward
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def gcn_forward(self, g, h, gc_layers, cat=False):
    """
    Run h through the GCN layers on graph g. If cat is set, return the
    concatenation of every layer's embedding; otherwise the last layer's.
    """
    block_readout = []
    for gc_layer in gc_layers[:-1]:
        h = gc_layer(g, h)
        block_readout.append(h)
    h = gc_layers[-1](g, h)
    block_readout.append(h)
    if cat:
        block = torch.cat(block_readout, dim=1)  # N x F, F = F1 + F2 + ...
    else:
        block = h
    return block
Example 13: gcn_forward_tensorized
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def gcn_forward_tensorized(self, h, adj, gc_layers, cat=False):
    block_readout = []
    for gc_layer in gc_layers:
        h = gc_layer(h, adj)
        block_readout.append(h)
    if cat:
        block = torch.cat(block_readout, dim=2)  # N x F, F = F1 + F2 + ...
    else:
        block = h
    return block
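A sketch exercising the tensorized variant with a stand-in layer; DummyGCN is hypothetical and exists only to illustrate the (h, adj) calling convention. Since self is unused, the method can be called unbound:

import torch
from torch import nn

class DummyGCN(nn.Module):
    # Hypothetical stand-in: aggregate neighbors, then transform (A @ H @ W).
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.lin = nn.Linear(in_dim, out_dim)

    def forward(self, h, adj):
        return torch.relu(self.lin(adj @ h))

B, N = 2, 6
h = torch.randn(B, N, 16)
adj = torch.softmax(torch.randn(B, N, N), dim=-1)  # dense, row-normalized
layers = [DummyGCN(16, 32), DummyGCN(32, 32)]
out = gcn_forward_tensorized(None, h, adj, layers, cat=True)
print(out.shape)  # torch.Size([2, 6, 64])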
Example 14: test_hook_module_functional
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def test_hook_module_functional(attr, workers):
    # Check that a hooked functional gives the same result on a remote
    # tensor pointer as the plain functional does locally.
    attr = getattr(F, attr)
    x = torch.Tensor([1, -1, 3, 4])
    expected = attr(x)
    x_ptr = x.send(workers["bob"])
    res_ptr = attr(x_ptr)
    res = res_ptr.get()
    assert (res == expected).all()
Example 15: test_functional_same_in_both_imports
# Required import: from torch import nn [as alias]
# Or: from torch.nn import functional [as alias]
def test_functional_same_in_both_imports(attr):
    """This function tests that the hook modifies the behavior of
    torch.nn.functional regardless of the import namespace.
    """
    fattr = getattr(F, attr)
    tattr = getattr(torch.nn.functional, attr)
    x = torch.Tensor([1, -1, 3, 4])
    assert (fattr(x) == tattr(x)).all()