This article collects typical usage examples of Python's torch.nn.ParameterList. If you have been wondering what exactly nn.ParameterList does and how to use it in practice, the curated examples below may help; you can also explore the torch.nn module it belongs to for further context.
The following presents 15 code examples of nn.ParameterList, ordered by popularity.
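Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) showing why nn.ParameterList matters: parameters stored in a plain Python list are invisible to Module.parameters(), while a ParameterList registers them with the module.

import torch
from torch import nn

class Demo(nn.Module):
    def __init__(self):
        super().__init__()
        # Registered: these show up in .parameters() and move with .to(device)
        self.weights = nn.ParameterList(
            [nn.Parameter(torch.randn(4, 4)) for _ in range(3)]
        )
        # NOT registered: a plain list hides its parameters from the optimizer
        self.hidden = [nn.Parameter(torch.randn(4, 4))]

demo = Demo()
print(len(list(demo.parameters())))  # 3, not 4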
Example 1: __set_readout
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def __set_readout(self, readout_def, args):
    self.r_definition = readout_def.lower()

    self.r_function = {
        'duvenaud': self.r_duvenaud,
        'ggnn': self.r_ggnn,
        'intnet': self.r_intnet,
        'mpnn': self.r_mpnn
    }.get(self.r_definition, None)

    if self.r_function is None:
        print('WARNING!: Readout Function has not been set correctly\n\tIncorrect definition ' + readout_def)
        quit()

    init_parameters = {
        'duvenaud': self.init_duvenaud,
        'ggnn': self.init_ggnn,
        'intnet': self.init_intnet,
        'mpnn': self.init_mpnn
    }.get(self.r_definition, lambda x: (nn.ParameterList([]), nn.ModuleList([]), {}))
    self.learn_args, self.learn_modules, self.args = init_parameters(args)

# Get the name of the used readout function
Example 2: init_duvenaud
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def init_duvenaud(self, params):
    learn_args = []
    learn_modules = []
    args = {}

    args['out'] = params['out']

    # Define a parameter matrix W for each layer.
    for l in range(params['layers']):
        learn_args.append(nn.Parameter(torch.randn(params['in'][l], params['out'])))

    # learn_modules.append(nn.Linear(params['out'], params['target']))
    learn_modules.append(NNet(n_in=params['out'], n_out=params['target']))
    return nn.ParameterList(learn_args), nn.ModuleList(learn_modules), args

# GG-NN, Li et al.
Example 3: init_ggnn
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def init_ggnn(self, params):
    learn_args = []
    learn_modules = []
    args = {}

    # i (gate network)
    learn_modules.append(NNet(n_in=2 * params['in'], n_out=params['target']))

    # j (transform network)
    learn_modules.append(NNet(n_in=params['in'], n_out=params['target']))

    args['out'] = params['target']
    return nn.ParameterList(learn_args), nn.ModuleList(learn_modules), args

# Battaglia et al. (2016), Interaction Networks
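For context, here is a hypothetical sketch of how the two NNets built in Example 3 are typically combined, following the gated readout of GG-NN (Li et al., 2016). The argument names h (final node states) and x (initial node features), and the exact NNet call signature, are assumptions rather than code from this repository:

def r_ggnn(self, h, x):
    # Gated readout: R = sum_v sigmoid(i([h_v, x_v])) * j(h_v)
    gate = torch.sigmoid(self.learn_modules[0](torch.cat([h, x], dim=1)))
    return torch.sum(gate * self.learn_modules[1](h), dim=0)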
Example 4: init_duvenaud
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def init_duvenaud(self, params):
    learn_args = []
    learn_modules = []
    args = {}

    # Filter out degree 0 (the message will be 0, so there is no update)
    args['deg'] = [i for i in params['deg'] if i != 0]
    args['in'] = params['in']
    args['out'] = params['out']

    # Define a parameter matrix H for each degree.
    learn_args.append(torch.nn.Parameter(torch.randn(len(args['deg']), args['in'], args['out'])))

    return nn.ParameterList(learn_args), nn.ModuleList(learn_modules), args

# GG-NN, Li et al.
Example 5: get_params
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def get_params():
    def _one(shape):
        ts = torch.tensor(np.random.normal(0, 0.01, size=shape), device=device, dtype=torch.float32)
        return torch.nn.Parameter(ts, requires_grad=True)

    def _three():
        return (_one((num_inputs, num_hiddens)),
                _one((num_hiddens, num_hiddens)),
                torch.nn.Parameter(torch.zeros(num_hiddens, device=device, dtype=torch.float32), requires_grad=True))

    W_xz, W_hz, b_z = _three()  # update gate parameters
    W_xr, W_hr, b_r = _three()  # reset gate parameters
    W_xh, W_hh, b_h = _three()  # candidate hidden state parameters

    # Output layer parameters
    W_hq = _one((num_hiddens, num_outputs))
    b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, dtype=torch.float32), requires_grad=True)
    return nn.ParameterList([W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q])
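Because the function returns an nn.ParameterList rather than a plain list, the result can be passed straight to an optimizer and to gradient utilities. A minimal usage sketch (the sizes and device below are hypothetical, and it assumes the get_params above is in scope):

num_inputs, num_hiddens, num_outputs = 1027, 256, 1027  # hypothetical sizes
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

params = get_params()
optimizer = torch.optim.SGD(params, lr=0.01)
# after loss.backward(), gradients can be clipped over the whole list:
torch.nn.utils.clip_grad_norm_(params, max_norm=1.0)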
Example 6: get_params
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def get_params():
    def _one(shape):
        ts = torch.tensor(np.random.normal(0, 0.01, size=shape), device=device, dtype=torch.float32)
        return torch.nn.Parameter(ts, requires_grad=True)

    def _three():
        return (_one((num_inputs, num_hiddens)),
                _one((num_hiddens, num_hiddens)),
                torch.nn.Parameter(torch.zeros(num_hiddens, device=device, dtype=torch.float32), requires_grad=True))

    W_xi, W_hi, b_i = _three()  # input gate parameters
    W_xf, W_hf, b_f = _three()  # forget gate parameters
    W_xo, W_ho, b_o = _three()  # output gate parameters
    W_xc, W_hc, b_c = _three()  # candidate memory cell parameters

    # Output layer parameters
    W_hq = _one((num_hiddens, num_outputs))
    b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, dtype=torch.float32), requires_grad=True)
    return nn.ParameterList([W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q])
Example 7: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def __init__(self, event_type, field_types, hidden_size):
    super(EventTable, self).__init__()

    self.event_type = event_type
    self.field_types = field_types
    self.num_fields = len(field_types)
    self.hidden_size = hidden_size

    self.event_cls = nn.Linear(hidden_size, 2)  # 0: NA, 1: trigger this event
    self.field_cls_list = nn.ModuleList(
        # 0: NA, 1: trigger this field
        [nn.Linear(hidden_size, 2) for _ in range(self.num_fields)]
    )

    # used to aggregate sentence and span embedding
    self.event_query = nn.Parameter(torch.Tensor(1, self.hidden_size))
    # used for fields that do not contain any valid span
    # self.none_span_emb = nn.Parameter(torch.Tensor(1, self.hidden_size))
    # used for aggregating history filled span info
    self.field_queries = nn.ParameterList(
        [nn.Parameter(torch.Tensor(1, self.hidden_size)) for _ in range(self.num_fields)]
    )

    self.reset_parameters()
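The snippet calls self.reset_parameters() but does not show it. A plausible implementation (an assumption, not the original code; it requires import math) would initialize the query vectors uniformly in the usual 1/sqrt(hidden_size) range, since torch.Tensor(...) allocates uninitialized memory:

def reset_parameters(self):  # hypothetical initializer
    stdv = 1.0 / math.sqrt(self.hidden_size)
    self.event_query.data.uniform_(-stdv, stdv)
    for query in self.field_queries:
        query.data.uniform_(-stdv, stdv)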
Example 8: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def __init__(self, inplanes, planes, stride=1, downsample=None, fixup_l=1):
    super(Bottleneck, self).__init__()
    self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
    self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                           padding=1, bias=False)
    self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
    self.relu = nn.ReLU(inplace=True)
    self.downsample = downsample
    self.stride = stride
    self.scale = nn.Parameter(torch.ones(1))
    self.biases = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(6)])

    # Fixup initialization; self.m (the number of layers in the residual branch)
    # is assumed to be defined as a class attribute elsewhere in the source.
    k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1] * self.conv1.out_channels
    self.conv1.weight.data.normal_(0, fixup_l ** (-1 / (2 * self.m - 2)) * math.sqrt(2. / k))
    k = self.conv2.kernel_size[0] * self.conv2.kernel_size[1] * self.conv2.out_channels
    self.conv2.weight.data.normal_(0, fixup_l ** (-1 / (2 * self.m - 2)) * math.sqrt(2. / k))
    self.conv3.weight.data.zero_()

    if downsample is not None:
        k = self.downsample.kernel_size[0] * self.downsample.kernel_size[1] * self.downsample.out_channels
        self.downsample.weight.data.normal_(0, math.sqrt(2. / k))
Example 9: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def __init__(self, in_planes, out_planes, stride, dropout, fixup_l, fixup_coeff):
    super(BasicBlock, self).__init__()
    self._dropout = dropout
    self.relu = nn.ReLU(inplace=True)

    self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
    self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)

    self.equalInOut = in_planes == out_planes
    # 1x1 residual projection, only needed when input and output shapes differ
    conv_res = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False)
    self.conv_res = None if self.equalInOut else conv_res

    self.scale = nn.Parameter(torch.ones(1))
    self.biases = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(4)])

    # Fixup initialization; self.m is assumed to be set as a class attribute.
    k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1] * self.conv1.out_channels
    self.conv1.weight.data.normal_(0, fixup_coeff * fixup_l ** (-1 / (2 * self.m - 2)) * math.sqrt(2. / k))
    self.conv2.weight.data.zero_()

    if self.conv_res is not None:
        k = self.conv_res.kernel_size[0] * self.conv_res.kernel_size[1] * self.conv_res.out_channels
        self.conv_res.weight.data.normal_(0, math.sqrt(2. / k))
Example 10: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def __init__(self, in_planes, planes, stride=1, use_fixup=False, fixup_l=1, fixup_coeff=1):
    super(BasicBlock, self).__init__()
    self._use_fixup = use_fixup
    self._fixup_l = fixup_l
    self._fixup_coeff = fixup_coeff

    self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
    self.bn1 = nn.BatchNorm2d(planes)
    self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
    self.bn2 = nn.BatchNorm2d(planes)

    self.shortcut = nn.Sequential()
    if stride != 1 or in_planes != planes:
        # Identity shortcut: subsample spatially and zero-pad the channels
        self.shortcut = LambdaLayer(lambda x:
            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes // 4, planes // 4), "constant", 0))

    if use_fixup:
        self.scale = nn.Parameter(torch.ones(1))
        self.biases = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(4)])
        # Fixup initialization; self.m is assumed to be set as a class attribute.
        k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1] * self.conv1.out_channels
        self.conv1.weight.data.normal_(0, fixup_coeff * fixup_l ** (-1 / (2 * self.m - 2)) * math.sqrt(2. / k))
        self.conv2.weight.data.zero_()
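For context on Examples 8 through 10, here is a sketch of how Fixup blocks typically consume the scale parameter and the four scalar biases in forward (an assumption based on the Fixup paper, not code from these repositories; F is torch.nn.functional): every convolution and activation is offset by its own bias, and the residual branch is rescaled before the skip connection.

def forward(self, x):  # hypothetical Fixup-style forward
    out = self.conv1(x + self.biases[0])
    out = F.relu(out + self.biases[1])
    out = self.conv2(out + self.biases[2])
    out = out * self.scale + self.biases[3]
    return F.relu(out + self.shortcut(x))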
Example 11: collect_parameters
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def collect_parameters(self):
    nodes = [self.root]
    self.module_list = nn.ModuleList()
    self.param_list = nn.ParameterList()
    while nodes:
        node = nodes.pop(0)
        if node.leaf:
            param = node.param
            self.param_list.append(param)
        else:
            fc = node.fc
            beta = node.beta
            nodes.append(node.right)
            nodes.append(node.left)
            self.param_list.append(beta)
            self.module_list.append(fc)
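This breadth-first traversal assumes a binary-tree Node type that the snippet does not show. A minimal sketch of what it must look like (hypothetical; the attribute names are inferred from the accesses above):

class Node:  # hypothetical, inferred from the traversal above
    def __init__(self, leaf, param=None, fc=None, beta=None, left=None, right=None):
        self.leaf = leaf      # True for leaf nodes carrying a distribution parameter
        self.param = param    # nn.Parameter held at a leaf
        self.fc = fc          # nn.Linear routing module at an inner node
        self.beta = beta      # nn.Parameter (e.g. inverse temperature) at an inner node
        self.left, self.right = left, right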
Example 12: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def __init__(self, in_size, out_size, hidden_size=128, depth=2,
             input_kwargs=None, internal_kwargs=None):
    super().__init__()
    self.depth = depth
    # Guard against the None defaults before unpacking them below.
    input_kwargs = input_kwargs or {}
    internal_kwargs = internal_kwargs or {}
    self.input_blocks = nn.ModuleList([
        self.make_block(in_size, hidden_size, **input_kwargs)
        for idx in range(depth)
    ])
    self.internal_blocks = nn.ModuleList([
        nn.Identity()
    ] + [
        self.make_block(hidden_size, hidden_size, **internal_kwargs)
        for idx in range(depth - 1)
    ])
    self.internal_constants = nn.ParameterList([
        self.make_constant(hidden_size)
        for idx in range(depth)
    ])
    self.output_block = self.make_block(hidden_size, out_size, **internal_kwargs)
    self.output_constant = self.make_constant(out_size)
Example 13: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def __init__(self, in_size, out_size, N=1, index=0,
             activation=func.relu, eps=0,
             normalization=None, **kwargs):
    super(FixUpBlockNd, self).__init__()
    conv = getattr(nn, f"Conv{N}d")
    # Fall back to identity before the wrapper is applied below;
    # otherwise calling a None normalization would fail.
    normalization = normalization or (lambda x: x)
    self.normalization = normalization
    self.convs = nn.ModuleList([
        normalization(conv(in_size, out_size, 3, bias=False, **kwargs)),
        normalization(conv(out_size, out_size, 3, bias=False, **kwargs)),
    ])
    self.project = (lambda x: x) if in_size == out_size else normalization(conv(in_size, out_size, 1, bias=False))
    self.scale = nn.Parameter(torch.tensor(1.0, dtype=torch.float))
    self.biases = nn.ParameterList([
        nn.Parameter(torch.tensor(0.0, dtype=torch.float))
        for _ in range(4)
    ])
    with torch.no_grad():
        self.convs[0].weight.data = self.convs[0].weight.data * (index + 1) ** (-0.5)
        self.convs[1].weight.data.normal_()
        self.convs[1].weight.data = self.convs[1].weight.data * eps
    self.activation = activation
Example 14: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def __init__(self, num_possible_inputs, num_features, eps=1e-5, momentum=0.1, affine=True):
    super(WSBN, self).__init__()
    self.num_possible_inputs = num_possible_inputs
    self.num_features = num_features
    self.eps = eps
    self.momentum = momentum
    self.affine = affine

    if self.affine:
        self.weight = nn.ParameterList(
            [nn.Parameter(torch.Tensor(num_features)) for _ in range(num_possible_inputs)])
        self.bias = nn.ParameterList(
            [nn.Parameter(torch.Tensor(num_features)) for _ in range(num_possible_inputs)])
    else:
        self.register_parameter('weight', None)
        self.register_parameter('bias', None)

    self.register_buffer('running_mean', torch.zeros(num_features))
    self.register_buffer('running_var', torch.ones(num_features))
    self.reset_parameters()
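Again, reset_parameters is called but not shown. A plausible version (an assumption modeled on nn.BatchNorm2d's defaults) resets the running statistics and sets each per-input affine pair to the identity transform:

def reset_parameters(self):  # hypothetical, modeled on nn.BatchNorm2d
    self.running_mean.zero_()
    self.running_var.fill_(1)
    if self.affine:
        for i in range(self.num_possible_inputs):
            self.weight[i].data.fill_(1)
            self.bias[i].data.zero_()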
Example 15: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ParameterList [as alias]
def __init__(self, num_possible_inputs, C_in, C_out, kernel_size, stride, padding, affine=True):
    super(WSSepConv, self).__init__()
    self.num_possible_inputs = num_possible_inputs
    self.C_out = C_out
    self.C_in = C_in
    self.padding = padding

    self.relu1 = nn.ReLU(inplace=INPLACE)
    self.W1_depthwise = nn.ParameterList(
        [nn.Parameter(torch.Tensor(C_in, 1, kernel_size, kernel_size)) for i in range(num_possible_inputs)])
    self.W1_pointwise = nn.ParameterList(
        [nn.Parameter(torch.Tensor(C_out, C_in, 1, 1)) for i in range(num_possible_inputs)])
    self.bn1 = WSBN(num_possible_inputs, C_in, affine=affine)

    self.relu2 = nn.ReLU(inplace=INPLACE)
    self.W2_depthwise = nn.ParameterList(
        [nn.Parameter(torch.Tensor(C_in, 1, kernel_size, kernel_size)) for i in range(num_possible_inputs)])
    self.W2_pointwise = nn.ParameterList(
        [nn.Parameter(torch.Tensor(C_out, C_in, 1, 1)) for i in range(num_possible_inputs)])
    self.bn2 = WSBN(num_possible_inputs, C_in, affine=affine)
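The point of keeping one weight tensor per possible input is that forward can select the weights by the index of the incoming connection. A sketch of such a forward (hypothetical: the name x_id, the assumption that WSBN.forward also takes x_id, and that C_in == C_out as the bn1 feature count suggests, are all inferences, not code from the source):

def forward(self, x, x_id):  # hypothetical weight-sharing forward
    x = self.relu1(x)
    # depthwise then pointwise convolution, picking the weights for this input
    x = F.conv2d(x, self.W1_depthwise[x_id], padding=self.padding, groups=self.C_in)
    x = F.conv2d(x, self.W1_pointwise[x_id])
    x = self.bn1(x, x_id)
    x = self.relu2(x)
    x = F.conv2d(x, self.W2_depthwise[x_id], padding=self.padding, groups=self.C_in)
    x = F.conv2d(x, self.W2_pointwise[x_id])
    x = self.bn2(x, x_id)
    return x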