This article collects typical usage examples of Python's torch.nn.ParameterDict. If you are wondering what nn.ParameterDict does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples for its containing module, torch.nn.
The following shows 15 code examples of nn.ParameterDict drawn from open-source projects, sorted by popularity by default.
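Before the project examples, here is a minimal self-contained sketch of the API (not taken from any of the projects below; the TinyBank class is hypothetical): an nn.ParameterDict registers each tensor as a proper sub-parameter of its owning module, so entries show up in model.parameters() and move with model.to(device).

import torch
from torch import nn

class TinyBank(nn.Module):  # hypothetical class, for illustration only
    def __init__(self):
        super().__init__()
        # Keys are strings; values must be nn.Parameter.
        self.table = nn.ParameterDict({
            'user': nn.Parameter(torch.randn(10, 8)),
            'item': nn.Parameter(torch.randn(20, 8)),
        })

    def forward(self, key):
        # Entries are looked up by string key, like a dict.
        return self.table[key]

m = TinyBank()
# Both parameters are registered and visible to optimizers.
print([name for name, _ in m.named_parameters()])  # ['table.user', 'table.item']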
Example 1: __init__
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def __init__(self,
             g,
             embed_size,
             embed_name='embed',
             activation=None,
             dropout=0.0):
    super(RelGraphEmbed, self).__init__()
    self.g = g
    self.embed_size = embed_size
    self.embed_name = embed_name
    self.activation = activation
    self.dropout = nn.Dropout(dropout)

    # Create a trainable embedding matrix for every node type.
    self.embeds = nn.ParameterDict()
    for ntype in g.ntypes:
        embed = nn.Parameter(th.Tensor(g.number_of_nodes(ntype), self.embed_size))
        nn.init.xavier_uniform_(embed, gain=nn.init.calculate_gain('relu'))
        self.embeds[ntype] = embed
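The snippet ends at __init__. In the DGL example this class comes from, the forward pass simply hands the whole dict of trainable embeddings to the downstream R-GCN layers; a hedged sketch of such a forward (the block argument is an assumption based on DGL's mini-batch API, not part of the excerpt above):

def forward(self, block=None):
    # The ParameterDict itself acts as the featureless "input layer":
    # one trainable embedding matrix per node type.
    return self.embeds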
Example 2: param_dict
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def param_dict(self) -> ModuleDict:
    p = ModuleDict()
    for process_name, process in self.processes.items():
        p[f"process:{process_name}"] = process.param_dict()
    p['measure_cov'] = self.measure_covariance.param_dict()
    p['measure_var_nn'] = self._measure_var_nn
    p['init_state'] = ParameterDict([('mean', self.init_mean_params)])
    p['init_state'].update(self.init_covariance.param_dict().items())
    p['process_cov'] = self.process_covariance.param_dict()
    p['process_var_nn'] = self._process_var_nn
    return p
Example 3: __init__
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
             dropout, num_nodes_dict, x_types, edge_types):
    super(RGCN, self).__init__()

    node_types = list(num_nodes_dict.keys())

    # Trainable input embeddings for node types without input features.
    self.embs = ParameterDict({
        key: Parameter(torch.Tensor(num_nodes_dict[key], in_channels))
        for key in set(node_types).difference(set(x_types))
    })

    self.convs = ModuleList()
    self.convs.append(
        RGCNConv(in_channels, hidden_channels, node_types, edge_types))
    for _ in range(num_layers - 2):
        self.convs.append(
            RGCNConv(hidden_channels, hidden_channels, node_types,
                     edge_types))
    self.convs.append(
        RGCNConv(hidden_channels, out_channels, node_types, edge_types))

    self.dropout = dropout
    self.reset_parameters()
Example 4: __init__
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def __init__(self):
    super(MyDictDense, self).__init__()
    self.params = nn.ParameterDict({
        'linear1': nn.Parameter(torch.randn(4, 4)),
        'linear2': nn.Parameter(torch.randn(4, 1))
    })
    # Add one more entry after construction.
    self.params.update({
        'linear3': nn.Parameter(torch.randn(4, 2))
    })
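This example only builds the dict; a minimal sketch of a matching forward, where the key picks which weight matrix to use (the choice argument is an illustration, not part of the original excerpt):

def forward(self, x, choice='linear1'):
    # Look the weight up by string key; different keys yield
    # differently shaped outputs (4x4, 4x1, or 4x2).
    return torch.mm(x, self.params[choice])

With x of shape (1, 4), calling the module as net(x, 'linear3') would then return a tensor of shape (1, 2).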
Example 5: __init__
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def __init__(self, G, in_size, hidden_size, out_size):
    super(HeteroRGCN, self).__init__()
    # Use trainable node embeddings as featureless inputs.
    embed_dict = {ntype: nn.Parameter(torch.Tensor(G.number_of_nodes(ntype), in_size))
                  for ntype in G.ntypes}
    for key, embed in embed_dict.items():
        nn.init.xavier_uniform_(embed)
    self.embed = nn.ParameterDict(embed_dict)
    # Create layers.
    self.layer1 = HeteroRGCNLayer(in_size, hidden_size, G.etypes)
    self.layer2 = HeteroRGCNLayer(hidden_size, out_size, G.etypes)
Example 6: param_dict
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def param_dict(self) -> ParameterDict:
    raise NotImplementedError
Example 7: __init__
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def __init__(self, rank: int):
    super().__init__(rank=rank)
    num_upper_tri = int(rank * (rank - 1) / 2)
    self._param_dict = ParameterDict()
    self._param_dict['cholesky_log_diag'] = Parameter(data=.01 * torch.randn(rank))
    self._param_dict['cholesky_off_diag'] = Parameter(data=.01 * torch.randn(num_upper_tri))
Example 8: param_dict
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def param_dict(self) -> ParameterDict:
    p = ParameterDict()
    if self.decay is not None:
        p['decay'] = self.decay.parameter
    return p
Example 9: param_dict
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def param_dict(self) -> ParameterDict:
    p = ParameterDict()
    for k in ('position', 'velocity'):
        if k in self.decayed_transitions:
            p[k] = self.decayed_transitions[k].parameter
    return p
Example 10: __init__
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def __init__(self, models, target='weight'):
    super(LinearRegressionModel, self).__init__()
    self.models = models
    self.alphas = nn.ParameterDict()
    self.target = target
    # Copy all parameters of the source models into a non-differentiable
    # store on this model.
    self.model_params = {}
    for model in models:
        for name, p in model.named_parameters():
            if name not in self.model_params.keys():
                self.model_params[name] = [p.data.detach().clone()]
            else:
                self.model_params[name].append(p.data.detach().clone())
    # Replace '.' with '-' in the keys (see the note below).
    for name, params in self.model_params.items():
        self.alphas.update({name.replace('.', '-'): nn.Parameter(torch.ones(len(params), dtype=torch.float32))})
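The name.replace('.', '-') above is not cosmetic: a ParameterDict registers each entry under its key, and nn.Module rejects '.' in parameter names. A quick self-contained check (behavior as in the PyTorch versions these examples target; the exact error text may vary across releases):

import torch
from torch import nn

pd = nn.ParameterDict()
pd['fc1-weight'] = nn.Parameter(torch.zeros(3))      # fine
try:
    pd['fc1.weight'] = nn.Parameter(torch.zeros(3))  # dotted key
except KeyError as e:
    print(e)  # parameter name can't contain "."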
Example 11: __init__
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def __init__(self, lstm_dim=512, lstm_depth=1, sample_embedding_dim=4,
             address_embedding_dim=64, distribution_type_embedding_dim=8,
             proposal_mixture_components=10, *args, **kwargs):
    super().__init__(network_type='InferenceNetworkLSTM', *args, **kwargs)
    self._layers_proposal = nn.ModuleDict()
    self._layers_sample_embedding = nn.ModuleDict()
    self._layers_address_embedding = nn.ParameterDict()
    self._layers_distribution_type_embedding = nn.ParameterDict()
    self._layers_lstm = None
    self._lstm_input_dim = None
    self._lstm_dim = lstm_dim
    self._lstm_depth = lstm_depth
    self._infer_lstm_state = None
    self._sample_embedding_dim = sample_embedding_dim
    self._address_embedding_dim = address_embedding_dim
    self._distribution_type_embedding_dim = distribution_type_embedding_dim
    self._proposal_mixture_components = proposal_mixture_components
Example 12: __init__
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def __init__(self, model, lstm_size=64, lstm_num_layers=1, tanh_constant=1.5,
             cell_exit_extra_step=False, skip_target=0.4, temperature=None,
             branch_bias=0.25, entropy_reduction="sum"):
    super().__init__(model)
    self.lstm_size = lstm_size
    self.lstm_num_layers = lstm_num_layers
    self.tanh_constant = tanh_constant
    self.temperature = temperature
    self.cell_exit_extra_step = cell_exit_extra_step
    self.skip_target = skip_target
    self.branch_bias = branch_bias

    self.lstm = StackedLSTMCell(self.lstm_num_layers, self.lstm_size, False)
    self.attn_anchor = nn.Linear(self.lstm_size, self.lstm_size, bias=False)
    self.attn_query = nn.Linear(self.lstm_size, self.lstm_size, bias=False)
    self.v_attn = nn.Linear(self.lstm_size, 1, bias=False)
    self.g_emb = nn.Parameter(torch.randn(1, self.lstm_size) * 0.1)
    self.skip_targets = nn.Parameter(torch.tensor([1.0 - self.skip_target, self.skip_target]),
                                     requires_grad=False)  # pylint: disable=not-callable
    assert entropy_reduction in ["sum", "mean"], "Entropy reduction must be one of sum and mean."
    self.entropy_reduction = torch.sum if entropy_reduction == "sum" else torch.mean
    self.cross_entropy_loss = nn.CrossEntropyLoss(reduction="none")

    self.bias_dict = nn.ParameterDict()
    self.max_layer_choice = 0
    for mutable in self.mutables:
        if isinstance(mutable, LayerChoice):
            if self.max_layer_choice == 0:
                self.max_layer_choice = len(mutable)
            assert self.max_layer_choice == len(mutable), \
                "ENAS mutator requires all layer choices to have the same number of candidates."
            # Biases are added to layer choices based on keys and module types. Needs refactoring.
            if "reduce" in mutable.key:
                def is_conv(choice):
                    return "conv" in str(type(choice)).lower()
                bias = torch.tensor([self.branch_bias if is_conv(choice) else -self.branch_bias
                                     for choice in mutable])  # pylint: disable=not-callable
                self.bias_dict[mutable.key] = nn.Parameter(bias, requires_grad=False)

    self.embedding = nn.Embedding(self.max_layer_choice + 1, self.lstm_size)
    self.soft = nn.Linear(self.lstm_size, self.max_layer_choice, bias=False)
Example 13: __init__
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def __init__(self, model):
    super().__init__(model)
    self.choices = nn.ParameterDict()
    for mutable in self.mutables:
        if isinstance(mutable, LayerChoice):
            self.choices[mutable.key] = nn.Parameter(1.0E-3 * torch.randn(mutable.length + 1))
Example 14: __init__
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def __init__(self, model, weights):
    super(PretrainWrapper, self).__init__()
    self.model = model
    self.w_parameters = nn.ParameterDict({k: nn.Parameter(w) for k, w in weights.items()})
    self.feature_space_size = model.feature_space_size
Example 15: __init__
# Required import: from torch import nn
# Alternatively: from torch.nn import ParameterDict
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
             dropout, num_nodes_dict, x_types, num_edge_types):
    super(RGCN, self).__init__()

    self.in_channels = in_channels
    self.hidden_channels = hidden_channels
    self.out_channels = out_channels
    self.num_layers = num_layers
    self.dropout = dropout

    node_types = list(num_nodes_dict.keys())
    num_node_types = len(node_types)
    self.num_node_types = num_node_types
    self.num_edge_types = num_edge_types

    # Create embeddings for all node types that do not come with features.
    self.emb_dict = ParameterDict({
        f'{key}': Parameter(torch.Tensor(num_nodes_dict[key], in_channels))
        for key in set(node_types).difference(set(x_types))
    })

    I, H, O = in_channels, hidden_channels, out_channels  # noqa

    # Create `num_layers` message-passing layers.
    self.convs = ModuleList()
    self.convs.append(RGCNConv(I, H, num_node_types, num_edge_types))
    for _ in range(num_layers - 2):
        self.convs.append(RGCNConv(H, H, num_node_types, num_edge_types))
    self.convs.append(RGCNConv(H, O, self.num_node_types, num_edge_types))

    self.reset_parameters()