This article collects typical usage examples of Python's torch.nn.Bilinear. If you have been wondering what exactly nn.Bilinear does, how to use it, or what real examples look like, the curated code samples below may help. You can also browse further usage examples from the enclosing module, torch.nn.
Below are 15 code examples of nn.Bilinear, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
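For orientation before the examples, here is a minimal, self-contained sketch of what nn.Bilinear computes (shapes follow the PyTorch documentation):

import torch
from torch import nn

# nn.Bilinear(in1_features, in2_features, out_features) holds a weight of
# shape (out_features, in1_features, in2_features) plus a bias, and computes
# y_k = x1^T A_k x2 + b_k for each output feature k.
m = nn.Bilinear(20, 30, 40)
x1 = torch.randn(128, 20)
x2 = torch.randn(128, 30)
y = m(x1, x2)
print(y.shape)  # torch.Size([128, 40])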
Example 1: compute_madd
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        return 0
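A brief usage sketch for this dispatcher, assuming the compute_*_madd helpers defined alongside it in the same project are in scope. Note that for nn.Bilinear the input is passed as a pair, since the dispatcher reads inp[0] and inp[1]:

lin = nn.Linear(20, 40)
x = torch.randn(1, 20)
madd_linear = compute_madd(lin, x, lin(x))                # Linear branch

bil = nn.Bilinear(20, 30, 40)
x1, x2 = torch.randn(1, 20), torch.randn(1, 30)
madd_bilinear = compute_madd(bil, (x1, x2), bil(x1, x2))  # Bilinear branch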
Example 2: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def __init__(self, evaluator, inputs, outputs, batch_norm=True):
    """Structural element combining two tensors by bilinear transformation.

    Args:
        evaluator (nn.Module): module taking a combined tensor, and
            performing computation on that tensor.
        inputs (list or tuple): number of input features for each input tensor.
        outputs (int): number of output features.
        batch_norm (bool): perform batch normalization?
    """
    bilinear = nn.Bilinear(*inputs, outputs)
    if batch_norm:
        batch_norm_layer = nn.BatchNorm1d(outputs)
    else:
        batch_norm_layer = None
    super(BilinearCombination, self).__init__(
        lambda input, task: self.compute(input, task),
        evaluator
    )
    self.bilinear = bilinear
    self.batch_norm = batch_norm_layer
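The lambda passed to the parent constructor refers to a compute method that the excerpt does not show; a plausible reconstruction (an assumption, not the source) would apply the bilinear layer and then the optional batch norm:

def compute(self, input, task):
    # Assumed behaviour: fuse the two tensors, then normalize if configured.
    combined = self.bilinear(input, task)
    if self.batch_norm is not None:
        combined = self.batch_norm(combined)
    return combined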
Example 3: compute_madd
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        print("[MAdd]: {} is not supported!".format(type(module).__name__))
        return 0
Example 4: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def __init__(self, args):
    super(ActiveProbCalc, self).__init__()
    self.prod_enc = get_gnn(args)
    if args.tpl_enc == 'deepset':
        self.tpl_enc = DeepsetTempFeaturizer(args)
    elif args.tpl_enc == 'onehot':
        self.tpl_enc = OnehotEmbedder(list_keys=DataInfo.unique_templates,
                                      fn_getkey=lambda x: x,
                                      embed_size=args.embed_dim)
    else:
        raise NotImplementedError
    if args.att_type == 'inner_prod':
        self.att_func = lambda x, y: torch.sum(x * y, dim=1).view(-1)
    elif args.att_type == 'mlp':
        self.pred = MLP(2 * args.embed_dim, [args.mlp_hidden, 1], nonlinearity='relu')
        self.att_func = lambda x, y: self.pred(torch.cat((x, y), dim=1)).view(-1)
    elif args.att_type == 'bilinear':
        self.bilin = nn.Bilinear(args.embed_dim, args.embed_dim, 1)
        self.att_func = lambda x, y: self.bilin(x, y).view(-1)
    else:
        raise NotImplementedError
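To make the 'bilinear' attention branch concrete, a small illustrative check (embed_dim and batch size are arbitrary here): the layer scores each (x, y) pair of embeddings, and .view(-1) flattens the singleton output dimension into a vector of scores.

embed_dim = 16
bilin = nn.Bilinear(embed_dim, embed_dim, 1)
x = torch.randn(8, embed_dim)
y = torch.randn(8, embed_dim)
scores = bilin(x, y).view(-1)
print(scores.shape)  # torch.Size([8])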
Example 5: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def __init__(self,
             inputs_size: int,
             num_layers: int):
    r"""Initialize BilinearNetworkLayer

    Args:
        inputs_size (int): Input size of Bilinear, i.e. size of embedding tensor.
        num_layers (int): Number of layers of Bilinear Network

    Attributes:
        inputs_size (int): Size of inputs, or product of embed_size and num_fields.
        model (torch.nn.ModuleList): Module list of Bilinear layers.
    """
    # Refer to parent class
    super(BilinearNetworkLayer, self).__init__()

    # Bind inputs_size to inputs_size
    self.inputs_size = inputs_size

    # Initialize module list for Bilinear
    self.model = nn.ModuleList()

    # Initialize bilinear layers and add them to module list
    for _ in range(num_layers):
        self.model.append(nn.Bilinear(inputs_size, inputs_size, inputs_size))
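The excerpt shows only the constructor; one plausible forward pass (an assumption in the spirit of cross networks, not taken from the source) feeds the original embeddings into each bilinear layer together with the running output, plus a residual connection:

def forward(self, emb_inputs):
    outputs = emb_inputs
    for layer in self.model:
        # Hypothetical: cross the original inputs with the running outputs.
        outputs = layer(emb_inputs, outputs) + emb_inputs
    return outputs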
Example 6: reset_vgg_parameters
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def reset_vgg_parameters(m, fc_std=0.01, bfc_std=0.001):
    if isinstance(m, nn.Conv2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        m.weight.data.normal_(0, fc_std)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.Bilinear):
        m.weight.data.normal_(0, bfc_std)
        if m.bias is not None:
            m.bias.data.zero_()
    else:
        for sub in m.modules():
            if m != sub:
                reset_vgg_parameters(sub, fc_std=fc_std, bfc_std=bfc_std)
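Usage sketch: since the final else branch recurses into submodules, the function can be called once on a container model (assuming math is imported, as the function above requires). The toy model is purely illustrative:

model = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3),
    nn.BatchNorm2d(16),
    nn.ReLU(),
    nn.Flatten(),
    nn.Linear(16 * 30 * 30, 10),
)
reset_vgg_parameters(model, fc_std=0.01, bfc_std=0.001)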
Example 7: reset_resnet_parameters
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def reset_resnet_parameters(m, fc_std=0.01, bfc_std=0.001):
    if isinstance(m, nn.Conv2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        m.weight.data.normal_(0, fc_std)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.Bilinear):
        m.weight.data.normal_(0, bfc_std)
        if m.bias is not None:
            m.bias.data.zero_()
    else:
        for sub in m.modules():
            if m != sub:
                reset_resnet_parameters(sub, fc_std=fc_std, bfc_std=bfc_std)
Example 8: test_metabilinear
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def test_metabilinear(bias):
    meta_model = MetaBilinear(2, 3, 5, bias=bias)
    model = nn.Bilinear(2, 3, 5, bias=bias)

    assert isinstance(meta_model, MetaModule)
    assert isinstance(meta_model, nn.Bilinear)

    # Set same weights for both models
    weight = torch.randn(5, 2, 3)
    meta_model.weight.data.copy_(weight)
    model.weight.data.copy_(weight)

    if bias:
        bias = torch.randn(5)
        meta_model.bias.data.copy_(bias)
        model.bias.data.copy_(bias)

    inputs1 = torch.randn(7, 2)
    inputs2 = torch.randn(7, 3)

    outputs_torchmeta = meta_model(inputs1, inputs2, params=None)
    outputs_nn = model(inputs1, inputs2)

    np.testing.assert_equal(outputs_torchmeta.detach().numpy(),
                            outputs_nn.detach().numpy())
Example 9: test_metabilinear_params
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def test_metabilinear_params(bias):
    meta_model = MetaBilinear(2, 3, 5, bias=bias)
    model = nn.Bilinear(2, 3, 5, bias=bias)

    params = OrderedDict()
    params['weight'] = torch.randn(5, 2, 3)
    model.weight.data.copy_(params['weight'])

    if bias:
        params['bias'] = torch.randn(5)
        model.bias.data.copy_(params['bias'])

    inputs1 = torch.randn(7, 2)
    inputs2 = torch.randn(7, 3)

    outputs_torchmeta = meta_model(inputs1, inputs2, params=params)
    outputs_nn = model(inputs1, inputs2)

    np.testing.assert_equal(outputs_torchmeta.detach().numpy(),
                            outputs_nn.detach().numpy())
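For reference alongside these tests, nn.Bilinear's forward pass can be reproduced with einsum over the (out_features, in1_features, in2_features) weight, which makes a handy independent check:

bil = nn.Bilinear(2, 3, 5)
x1, x2 = torch.randn(7, 2), torch.randn(7, 3)
expected = torch.einsum('bi,oij,bj->bo', x1, bil.weight, x2) + bil.bias
assert torch.allclose(bil(x1, x2), expected, atol=1e-6)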
Example 10: compute_madd
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        print(f"[MAdd]: {type(module).__name__} is not supported!")
        return 0
Example 11: compute_Bilinear_madd
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def compute_Bilinear_madd(module, inp1, inp2, out):
    assert isinstance(module, nn.Bilinear)
    assert len(inp1.size()) == 2 and len(inp2.size()) == 2 and len(out.size()) == 2

    num_in_features_1 = inp1.size()[1]
    num_in_features_2 = inp2.size()[1]
    num_out_features = out.size()[1]

    # Cost per output feature k: computing x1^T · W_k takes n1*n2 multiplies,
    # and the dot product with x2 takes n2 more; adds are counted one per
    # multiply, minus the final one.
    mul = num_in_features_1 * num_in_features_2 + num_in_features_2
    add = num_in_features_1 * num_in_features_2 + num_in_features_2 - 1
    return num_out_features * (mul + add)
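A quick worked check of the formula: with in1_features=2, in2_features=3 and out_features=5, mul = 2*3 + 3 = 9 and add = 2*3 + 3 - 1 = 8, giving 5 * (9 + 8) = 85 multiply-adds.

bil = nn.Bilinear(2, 3, 5)
x1, x2 = torch.randn(7, 2), torch.randn(7, 3)
print(compute_Bilinear_madd(bil, x1, x2, bil(x1, x2)))  # 85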
Example 12: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def __init__(self, input1_size, input2_size, output_size):
    super().__init__()
    # Why the +1? The biaffine transformation in matrix form is
    #     S = (H_head ⊕ 1) · W · H_dep
    # i.e. (d*d) = (d*(k+1)) * ((k+1)*k) * (k*d)
    self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1, output_size)
    self.W_bilin.weight.data.zero_()
    self.W_bilin.bias.data.zero_()
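A usage sketch of the +1 trick described in the comment: appending a constant 1 feature to each input lets the single bilinear weight also absorb linear terms in either argument. Sizes below are illustrative.

batch, k, d = 32, 100, 40
h_head = torch.randn(batch, k)
h_dep = torch.randn(batch, k)
ones = torch.ones(batch, 1)
scorer = nn.Bilinear(k + 1, k + 1, d)  # mirrors W_bilin above
scores = scorer(torch.cat([h_head, ones], dim=1),
                torch.cat([h_dep, ones], dim=1))
print(scores.shape)  # torch.Size([32, 40])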
Example 13: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def __init__(self,
             input_dims,
             output_dim,
             mm_dim=1600,
             chunks=20,
             shared=False,
             dropout_input=0.,
             dropout_pre_lin=0.,
             dropout_output=0.,
             pos_norm='before_cat'):
    super(BlockTucker, self).__init__()
    self.input_dims = input_dims
    self.output_dim = output_dim
    self.mm_dim = mm_dim
    self.chunks = chunks
    self.shared = shared
    self.dropout_input = dropout_input
    self.dropout_pre_lin = dropout_pre_lin
    self.dropout_output = dropout_output
    assert pos_norm in ['before_cat', 'after_cat']
    self.pos_norm = pos_norm
    # Modules
    self.linear0 = nn.Linear(input_dims[0], mm_dim)
    if self.shared:
        self.linear1 = self.linear0
    else:
        self.linear1 = nn.Linear(input_dims[1], mm_dim)
    self.sizes_list = get_sizes_list(mm_dim, chunks)
    bilinears = []
    for size in self.sizes_list:
        bilinears.append(
            nn.Bilinear(size, size, size)
        )
    self.bilinears = nn.ModuleList(bilinears)
    self.linear_out = nn.Linear(self.mm_dim, self.output_dim)
    self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
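The excerpt ends with the constructor; a hedged sketch of the forward pass these attributes suggest (an assumption rather than the source code, with dropout and the pos_norm normalization omitted): project both inputs to mm_dim, split into chunks, fuse each chunk pair with its own bilinear, concatenate, and project to output_dim.

def forward(self, x):
    x0 = self.linear0(x[0])
    x1 = self.linear1(x[1])
    x0_chunks = torch.split(x0, self.sizes_list, dim=1)
    x1_chunks = torch.split(x1, self.sizes_list, dim=1)
    zs = [bilinear(a, b) for bilinear, a, b
          in zip(self.bilinears, x0_chunks, x1_chunks)]
    z = torch.cat(zs, dim=1)
    return self.linear_out(z)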
Example 14: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def __init__(self, num_inputs1, num_inputs2):
    super().__init__()
    self.network = nn.Bilinear(num_inputs1, num_inputs2, 1)
Example 15: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import Bilinear [as alias]
def __init__(self, in1_features, in2_features, num_label, bias=True):
    r"""
    :param in1_features: dimension of input feature 1
    :param in2_features: dimension of input feature 2
    :param num_label: number of edge label classes
    :param bias: whether to use a bias. Default: ``True``
    """
    super(LabelBilinear, self).__init__()
    self.bilinear = nn.Bilinear(in1_features, in2_features, num_label, bias=bias)
    self.lin = nn.Linear(in1_features + in2_features, num_label, bias=False)
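A hedged sketch of the forward pass this constructor implies (the exact composition is an assumption): each label's score is the bilinear term plus a plain linear term over the concatenated features, i.e. a biaffine scorer.

def forward(self, x1, x2):
    output = self.bilinear(x1, x2)
    output = output + self.lin(torch.cat([x1, x2], dim=-1))
    return output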