

Python nn.Parameter Code Examples

This article collects typical usage examples of the Python method torch.nn.Parameter. If you have been wondering what nn.Parameter does, how to use it, or what it looks like in practice, the curated examples below may help. You can also browse further usage examples from the torch.nn module.


Fifteen code examples of nn.Parameter are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
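Before diving into the examples, here is a minimal standalone sketch (not taken from any of the projects below) of the core behavior: wrapping a tensor in nn.Parameter and assigning it to a module attribute registers it with the module, so it appears in .parameters(), is saved in .state_dict(), and is updated by the optimizer.

import torch
from torch import nn

class ScaledLinear(nn.Module):
    """Toy module: a linear layer followed by a learnable per-output scale."""
    def __init__(self, in_features, out_features):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features)
        # nn.Parameter registers the tensor: it shows up in .parameters(),
        # .state_dict(), and moves with .to()/.cuda() alongside the module.
        self.scale = nn.Parameter(torch.ones(out_features))

    def forward(self, x):
        return self.linear(x) * self.scale

model = ScaledLinear(4, 2)
print(sorted(name for name, _ in model.named_parameters()))
# ['linear.bias', 'linear.weight', 'scale']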

Example 1: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def __init__(self, input_size, hidden_size, correlation_func=1, do_similarity=False):
        super(AttentionScore, self).__init__()
        self.correlation_func = correlation_func
        self.hidden_size = hidden_size

        if correlation_func == 2 or correlation_func == 3:
            self.linear = nn.Linear(input_size, hidden_size, bias=False)
            if do_similarity:
                self.diagonal = Parameter(torch.ones(1, 1, 1) / (hidden_size ** 0.5), requires_grad=False)
            else:
                self.diagonal = Parameter(torch.ones(1, 1, hidden_size), requires_grad=True)

        if correlation_func == 4:
            self.linear = nn.Linear(input_size, input_size, bias=False)

        if correlation_func == 5:
            self.linear = nn.Linear(input_size, hidden_size, bias=False) 
Developer: Nrgeup, Project: controllable-text-attribute-transfer, Lines: 19, Source: model2.py

Example 2: init_duvenaud

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def init_duvenaud(self, params):
        learn_args = []
        learn_modules = []
        args = {}

        args['out'] = params['out']

        # Define a parameter matrix W for each layer.
        for l in range(params['layers']):
            learn_args.append(nn.Parameter(torch.randn(params['in'][l], params['out'])))

        # learn_modules.append(nn.Linear(params['out'], params['target']))

        learn_modules.append(NNet(n_in=params['out'], n_out=params['target']))
        return nn.ParameterList(learn_args), nn.ModuleList(learn_modules), args

    # GG-NN, Li et al. 
Developer: priba, Project: nmp_qc, Lines: 19, Source: ReadoutFunction.py
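A general PyTorch point that this example relies on (a minimal sketch, not from nmp_qc): nn.ParameterList is needed because parameters kept in a plain Python list are never registered with the module, so they are invisible to .parameters() and to the optimizer.

import torch
from torch import nn

class Bad(nn.Module):
    def __init__(self):
        super().__init__()
        self.ws = [nn.Parameter(torch.randn(3, 3))]  # plain list: NOT registered

class Good(nn.Module):
    def __init__(self):
        super().__init__()
        self.ws = nn.ParameterList([nn.Parameter(torch.randn(3, 3))])  # registered

print(len(list(Bad().parameters())), len(list(Good().parameters())))  # 0 1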

Example 3: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def __init__(self, **kwargs):
        """
        kwargs:
            target_size: int, target size
            device: str, device
        """
        super(CRF, self).__init__()
        for k in kwargs:
            self.__setattr__(k, kwargs[k])
        device = self.device

        # init transitions
        self.START_TAG, self.STOP_TAG = -2, -1
        init_transitions = torch.zeros(self.target_size + 2, self.target_size + 2, device=device)
        init_transitions[:, self.START_TAG] = -10000.0
        init_transitions[self.STOP_TAG, :] = -10000.0
        self.transitions = nn.Parameter(init_transitions) 
Developer: bamtercelboo, Project: pytorch_NER_BiLSTM_CNN_CRF, Lines: 19, Source: CRF.py
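The -10000.0 entries are worth a note: they serve as an effectively minus-infinity log-score, so the CRF's forward algorithm and Viterbi decoding assign near-zero probability to transitions into START or out of STOP. A quick illustration of the trick (general CRF practice, not code from the project):

import torch

scores = torch.tensor([-10000.0, 0.0])
print(scores.softmax(dim=0))  # tensor([0., 1.]): the forbidden transition vanishes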

Example 4: load_pretrained_imagenet_weights

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def load_pretrained_imagenet_weights(model, state_dict):
    own_state = model.state_dict()
    for name, param in state_dict.items():
        if ('layer4' in name) or ('layer5' in name) or ('fc' in name):
            continue
        if (name in own_state):
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            try:
                own_state[name].copy_(param)
            except Exception:
                raise RuntimeError('While copying the parameter named {}, '
                                   'whose dimensions in the model are {} and '
                                   'whose dimensions in the checkpoint are {}.'
                                   .format(name, own_state[name].size(), param.size()))
        else:
            raise KeyError('unexpected key "{}" in state_dict'
                           .format(name)) 
Developer: guoruoqian, Project: cascade-rcnn_Pytorch, Lines: 21, Source: detnet_backbone.py

Example 5: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def __init__(self, num_features, eps=1e-5, momentum=0.9, affine=True):
        super(_SwitchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        if self.affine:
            self.weight = nn.Parameter(torch.Tensor(num_features))
            self.bias = nn.Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)

        self.mean_weight = nn.Parameter(torch.ones(3))
        self.var_weight = nn.Parameter(torch.ones(3))

        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features)) 
Developer: PistonY, Project: torch-toolbox, Lines: 20, Source: norm.py
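Note that torch.Tensor(num_features) allocates uninitialized memory, so the module presumably fills weight and bias in a reset step elsewhere. A plausible sketch of such a reset, assuming the usual identity-affine convention (this method is not shown in the snippet above, so treat it as a hypothetical):

def reset_parameters(self):
    self.running_mean.zero_()
    self.running_var.fill_(1)
    if self.affine:
        nn.init.ones_(self.weight)   # hypothetical choice: start as an identity transform
        nn.init.zeros_(self.bias)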

Example 6: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def __init__(self, hidden_size, h_state_embed_size=None, in_memory_embed_size=None, atten_type='simple'):
        super(Attention, self).__init__()
        self.atten_type = atten_type
        if not h_state_embed_size:
            h_state_embed_size = hidden_size
        if not in_memory_embed_size:
            in_memory_embed_size = hidden_size
        if atten_type in ('mul', 'add'):
            self.W = torch.Tensor(h_state_embed_size, hidden_size)
            self.W = nn.Parameter(nn.init.xavier_uniform_(self.W))
            if atten_type == 'add':
                self.W2 = torch.Tensor(in_memory_embed_size, hidden_size)
                self.W2 = nn.Parameter(nn.init.xavier_uniform_(self.W2))
                self.W3 = torch.Tensor(hidden_size, 1)
                self.W3 = nn.Parameter(nn.init.xavier_uniform_(self.W3))
        elif atten_type == 'simple':
            pass
        else:
            raise RuntimeError('Unknown atten_type: {}'.format(self.atten_type)) 
Developer: hugochan, Project: BAMnet, Lines: 21, Source: modules.py

Example 7: drop_inputs

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def drop_inputs(self, input_shape, index, dim=0):
        """
        The previous layer is expected to be a convnet that has just been pruned.
        Drop the cells connected to the pruned channel of the convnet.
        :param input_shape: shape of inputs before flattening, should exclude batch_size
        :param index: index to drop
        :param dim: dimension where index is dropped, w.r.t input_shape
        :return:
        """
        is_cuda = self.weight.is_cuda

        reshaped = self.weight.view(-1, *input_shape)
        dim_length = input_shape[dim]
        indices = Variable(torch.LongTensor([i for i in range(dim_length) if i != index]))
        indices = indices.cuda() if is_cuda else indices

        self.weight = nn.Parameter(
            reshaped.index_select(dim+1, indices)
                .data
                .view(self.out_features, -1)
        )
        self.in_features = self.weight.size()[1] 
Developer: alexfjw, Project: prunnable-layers-pytorch, Lines: 24, Source: prunable_nn.py
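A modernization note: torch.autograd.Variable has been a thin no-op wrapper since PyTorch 0.4, so the index construction above can be written as a plain tensor, which also removes the manual is_cuda bookkeeping (equivalent sketch):

indices = torch.tensor([i for i in range(dim_length) if i != index],
                       dtype=torch.long, device=self.weight.device)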

Example 8: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def __init__(self, hidden_size, method="concat"):
        """

        Args:
            hidden_size: <int>, hidden size
                         previous hidden state size of decoder
            method: <str>, {"concat"}
                        Attention method

        Notes:
            we use the GRU outputs instead of the encoder's per-step
            hidden states for attention, because PyTorch's GRU h_n only
            contains the last time step's information.
        """
        super(Attn, self).__init__()
        self.method = method
        self.hidden_size = hidden_size
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        self.v = nn.Parameter(torch.rand(hidden_size))
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.normal_(mean=0, std=stdv) 
Developer: EvilPsyCHo, Project: TaskBot, Lines: 23, Source: attention.py
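The in-place self.v.data.normal_ call works, but mutating .data is discouraged in current PyTorch; an equivalent initialization through the init module (a general idiom, not a change to the project's logic):

nn.init.normal_(self.v, mean=0.0, std=1.0 / math.sqrt(hidden_size))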

Example 9: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def __init__(self, n_classes):
        super(SSD300, self).__init__()

        self.n_classes = n_classes

        self.base = VGGBase()
        self.aux_convs = AuxiliaryConvolutions()
        self.pred_convs = PredictionConvolutions(n_classes)

        # Since lower level features (conv4_3_feats) have considerably larger scales, we take the L2 norm and rescale
        # Rescale factor is initially set at 20, but is learned for each channel during back-prop
        self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1))  # there are 512 channels in conv4_3_feats
        nn.init.constant_(self.rescale_factors, 20)

        # Prior boxes
        self.priors_cxcy = self.create_prior_boxes() 
Developer: zzzDavid, Project: ICDAR-2019-SROIE, Lines: 18, Source: model.py
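The forward pass is not shown in this snippet, so as an assumption based on the standard SSD conv4_3 L2-normalization trick, the rescale factor is typically applied like this:

norm = conv4_3_feats.pow(2).sum(dim=1, keepdim=True).sqrt()  # per-position L2 norm, (N, 1, H, W)
conv4_3_feats = conv4_3_feats / norm * self.rescale_factors  # broadcasts over the 512 channels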

Example 10: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def __init__(self, element_dim, num_step_encoder):
    """ Implementation of Set2Set """
    super(Set2Set, self).__init__()
    self.element_dim = element_dim
    self.num_step_encoder = num_step_encoder
    self.LSTM_encoder = Set2SetLSTM(element_dim)
    self.LSTM_decoder = Set2SetLSTM(element_dim)
    self.W_1 = nn.Parameter(torch.ones(self.element_dim, self.element_dim))
    self.W_2 = nn.Parameter(torch.ones(self.element_dim, 1))
    self.W_3 = nn.Parameter(torch.ones(self.element_dim, self.element_dim))
    self.W_4 = nn.Parameter(torch.ones(self.element_dim, 1))
    self.W_5 = nn.Parameter(torch.ones(self.element_dim, self.element_dim))
    self.W_6 = nn.Parameter(torch.ones(self.element_dim, self.element_dim))
    self.W_7 = nn.Parameter(torch.ones(self.element_dim, 1))
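    # Note (general PyTorch behavior): assigning the nn.Parameter attributes
    # above already registers them, so the register_parameter calls below are
    # redundant; they re-register the same tensors under the same names.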
    self.register_parameter('W_1', self.W_1)
    self.register_parameter('W_2', self.W_2)
    self.register_parameter('W_3', self.W_3)
    self.register_parameter('W_4', self.W_4)
    self.register_parameter('W_5', self.W_5)
    self.register_parameter('W_6', self.W_6)
    self.register_parameter('W_7', self.W_7)

    self._init_param() 
Developer: lrjconan, Project: LanczosNetwork, Lines: 25, Source: set2set.py

Example 11: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def __init__(self, event_type, field_types, hidden_size):
        super(EventTable, self).__init__()

        self.event_type = event_type
        self.field_types = field_types
        self.num_fields = len(field_types)
        self.hidden_size = hidden_size

        self.event_cls = nn.Linear(hidden_size, 2)  # 0: NA, 1: trigger this event
        self.field_cls_list = nn.ModuleList(
            # 0: NA, 1: trigger this field
            [nn.Linear(hidden_size, 2) for _ in range(self.num_fields)]
        )

        # used to aggregate sentence and span embedding
        self.event_query = nn.Parameter(torch.Tensor(1, self.hidden_size))
        # used for fields that do not contain any valid span
        # self.none_span_emb = nn.Parameter(torch.Tensor(1, self.hidden_size))
        # used for aggregating history filled span info
        self.field_queries = nn.ParameterList(
            [nn.Parameter(torch.Tensor(1, self.hidden_size)) for _ in range(self.num_fields)]
        )

        self.reset_parameters() 
Developer: dolphin-zs, Project: Doc2EDAG, Lines: 26, Source: dee_model.py

Example 12: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def __init__(self, hidden_size, output_size, n_layers=1, dropout=0.1):
        super(PtrDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size ### Vocab size
        self.n_layers = n_layers
        self.dropout = dropout
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.embedding_dropout = nn.Dropout(dropout)
        self.lstm = nn.LSTM(2*hidden_size, hidden_size, n_layers, dropout=dropout)
        self.W1 = nn.Linear(2*hidden_size, hidden_size)
        self.v = nn.Parameter(torch.rand(hidden_size))
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.normal_(mean=0, std=stdv)
        self.concat = nn.Linear(hidden_size * 2, hidden_size)  
        self.U = nn.Linear(hidden_size, output_size)
        self.W = nn.Linear(hidden_size, 1)

        if USE_CUDA:
            self.embedding = self.embedding.cuda()
            self.embedding_dropout = self.embedding_dropout.cuda()
            self.lstm = self.lstm.cuda()
            self.W1 = self.W1.cuda() 
            self.v = self.v.cuda() 
            self.U = self.U.cuda() 
            self.W = self.W.cuda() 
Developer: ConvLab, Project: ConvLab, Lines: 27, Source: enc_PTRUNK.py
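A modernization note: every attribute assigned above is a registered submodule or parameter, so the hand-written .cuda() list (which also misses self.concat) is unnecessary; a single move covers everything, including self.v:

if USE_CUDA:
    self.cuda()  # moves all registered submodules and parameters at once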

Example 13: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def __init__(self, vocab_size, embed_size, hidden_size, input_dropout_p=0, dropout_p=0, n_layers=1, 
                 rnn_cell='GRU', variable_lengths=False, embedding=None, update_embedding=True):
        super(Encoder, self).__init__()
        
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.input_dropout = nn.Dropout(p=input_dropout_p)
        if rnn_cell == 'LSTM':
            self.rnn_cell = nn.LSTM
        elif rnn_cell == 'GRU':
            self.rnn_cell = nn.GRU
        else:
            raise ValueError("Unsupported RNN Cell: {0}".format(rnn_cell))
        
        self.variable_lengths = variable_lengths
        self.embedding = nn.Embedding(vocab_size, embed_size)
        if embedding is not None:
            self.embedding.weight = nn.Parameter(embedding)
        self.embedding.weight.requires_grad = update_embedding
        self.rnn = self.rnn_cell(embed_size, hidden_size, n_layers, batch_first=True, dropout=dropout_p) 
Developer: ConvLab, Project: ConvLab, Lines: 23, Source: usermodule.py
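A hypothetical instantiation with frozen pretrained vectors (the tensor and sizes here are illustrative stand-ins, not from ConvLab):

pretrained = torch.randn(10000, 300)  # stand-in for e.g. GloVe vectors
encoder = Encoder(vocab_size=10000, embed_size=300, hidden_size=256,
                  embedding=pretrained, update_embedding=False)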

Example 14: copy_state_dict

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def copy_state_dict(state_dict, model, strip=None):
    tgt_state = model.state_dict()
    copied_names = set()
    for name, param in state_dict.items():
        if strip is not None and name.startswith(strip):
            name = name[len(strip):]
        if name not in tgt_state:
            continue
        if isinstance(param, Parameter):
            param = param.data
        if param.size() != tgt_state[name].size():
            print('mismatch:', name, param.size(), tgt_state[name].size())
            continue
        tgt_state[name].copy_(param)
        copied_names.add(name)

    missing = set(tgt_state.keys()) - copied_names
    if len(missing) > 0:
        print("missing keys in state_dict:", missing)

    return model 
Developer: gddingcs, Project: Dispersion-based-Clustering, Lines: 23, Source: serialization.py
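A typical call, assuming a checkpoint saved from an nn.DataParallel model (the file name and 'state_dict' key are assumptions): DataParallel prefixes every key with 'module.', which strip removes so the keys match a single-GPU model.

checkpoint = torch.load('checkpoint.pth.tar', map_location='cpu')
model = copy_state_dict(checkpoint['state_dict'], model, strip='module.')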

Example 15: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Parameter [as alias]
def __init__(self, hidden_size, layer_norm=False, input_gate=True, forget_gate=True):
            nn.Module.__init__(self)
            self.hidden_size = hidden_size
            # gradient(2), param(2), loss
            self.lstm = nn.LSTMCell(input_size=5, hidden_size=hidden_size)
            if layer_norm:
                self.layer_norm = nn.LayerNorm(hidden_size)
            else:
                self.layer_norm = None
            self.input_gate = input_gate
            self.forget_gate = forget_gate
            if self.input_gate:
                self.lr_layer = nn.Linear(hidden_size, 1)
                self.lrs = []
            else:
                self.output_layer = nn.Linear(hidden_size, 1)
                self.dets = []
            if forget_gate:
                self.fg_layer = nn.Linear(hidden_size, 1)
                self.fgs = []
            self.h_0 = nn.Parameter(torch.randn((hidden_size,), requires_grad=True))
            self.c_0 = nn.Parameter(torch.randn((hidden_size,), requires_grad=True)) 
Developer: THUDM, Project: ScenarioMeta, Lines: 24, Source: meta.py
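One small redundancy worth flagging: nn.Parameter sets requires_grad=True by default, so the requires_grad=True passed to torch.randn above is unnecessary; the equivalent is simply:

self.h_0 = nn.Parameter(torch.randn(hidden_size))
self.c_0 = nn.Parameter(torch.randn(hidden_size))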


Note: the torch.nn.Parameter examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before distributing or reusing the code. Do not reproduce without permission.