

Python parameter.Parameter Code Examples

This article collects typical usage examples of Python's torch.nn.parameter.Parameter. If you have been wondering how exactly parameter.Parameter is used, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the torch.nn.parameter module it belongs to.


Below are 15 code examples of parameter.Parameter, sorted by popularity by default. You can upvote the ones you like or find useful; your votes help the system recommend better Python code examples.
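
Before diving into the collected examples, here is a minimal standalone sketch (written for this overview rather than taken from any project below) of what Parameter does: wrapping a tensor in Parameter and assigning it as a module attribute registers it with the module, so it appears in module.parameters() and is updated by optimizers.

import torch
import torch.nn as nn
from torch.nn.parameter import Parameter

class Scale(nn.Module):
    """A module with one learnable per-feature scaling factor."""
    def __init__(self, num_features):
        super(Scale, self).__init__()
        # Assigning a Parameter as an attribute registers it with the module,
        # so it appears in .parameters() and is updated by the optimizer.
        self.scale = Parameter(torch.ones(num_features))

    def forward(self, x):
        return x * self.scale

m = Scale(4)
print([name for name, _ in m.named_parameters()])  # ['scale']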

Example 1: __init__

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def __init__(self, n_head, f_in, f_out, attn_dropout, bias=True):
        super(MultiHeadGraphAttention, self).__init__()
        self.n_head = n_head
        self.w = Parameter(torch.Tensor(n_head, f_in, f_out))
        self.a_src = Parameter(torch.Tensor(n_head, f_out, 1))
        self.a_dst = Parameter(torch.Tensor(n_head, f_out, 1))

        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(attn_dropout)

        if bias:
            self.bias = Parameter(torch.Tensor(f_out))
            init.constant_(self.bias, 0)
        else:
            self.register_parameter('bias', None)

        init.xavier_uniform_(self.w)
        init.xavier_uniform_(self.a_src)
        init.xavier_uniform_(self.a_dst) 
Author: xptree, Project: DeepInf, Source: gat_layers.py

Example 2: __init__

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def __init__(self, in_features, out_features, bias=True, init='xavier'):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        if init == 'uniform':
            print("| Uniform Initialization")
            self.reset_parameters_uniform()
        elif init == 'xavier':
            print("| Xavier Initialization")
            self.reset_parameters_xavier()
        elif init == 'kaiming':
            print("| Kaiming Initialization")
            self.reset_parameters_kaiming()
        else:
            raise NotImplementedError 
Author: meliketoy, Project: graph-cnn.pytorch, Source: layers.py

Example 3: load_my_state_dict

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def load_my_state_dict(self, state_dict, seq_len):
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name in own_state.keys():
                if isinstance(param, Parameter):
                    # backwards compatibility for serialized parameters
                    param = param.data
                if name == 'conv1.weight':
                    print(name, 'is being filled with {:d} seq_len\n'.format(seq_len))
                    param = param.repeat(1, seq_len, 1, 1)
                    param = param / float(seq_len)
                try:
                    own_state[name].copy_(param)
                except Exception:
                    raise RuntimeError('While copying the parameter named {}, '
                                       'whose dimensions in the model are {} and '
                                       'whose dimensions in the checkpoint are {}.'
                                       .format(name, own_state[name].size(), param.size()))
            else:
                print('NAME IS NOT IN OWN STATE::>' + name) 
Author: gurkirt, Project: 2D-kinectics, Source: resnet.py

Example 4: load_my_state_dict

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def load_my_state_dict(self, state_dict, seq_len):
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name in own_state.keys():
                if isinstance(param, Parameter):
                    # backwards compatibility for serialized parameters
                    param = param.data

                if name.find('Conv2d_1a_3x3') > -1 and not name.find('bn') > -1:
                    param = param.repeat(1, seq_len, 1, 1)
                    param = param / float(seq_len)

                try:
                    own_state[name].copy_(param)
                except Exception:
                    raise RuntimeError('While copying the parameter named {}, '
                                       'whose dimensions in the model are {} and '
                                       'whose dimensions in the checkpoint are {}.'
                                       .format(name, own_state[name].size(), param.size())) 
Author: gurkirt, Project: 2D-kinectics, Source: inceptionv3.py

Example 5: __init__

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def __init__(self, num_nodes, embedding_size, embedding_u_size, edge_types, edge_type_count, dim_a):
        super(DGLGATNE, self).__init__()
        self.num_nodes = num_nodes
        self.embedding_size = embedding_size
        self.embedding_u_size = embedding_u_size
        self.edge_types = edge_types
        self.edge_type_count = edge_type_count
        self.dim_a = dim_a

        self.node_embeddings = Parameter(torch.FloatTensor(num_nodes, embedding_size))
        self.node_type_embeddings = Parameter(torch.FloatTensor(num_nodes, edge_type_count, embedding_u_size))
        self.trans_weights = Parameter(torch.FloatTensor(edge_type_count, embedding_u_size, embedding_size))
        self.trans_weights_s1 = Parameter(torch.FloatTensor(edge_type_count, embedding_u_size, dim_a))
        self.trans_weights_s2 = Parameter(torch.FloatTensor(edge_type_count, dim_a, 1))

        self.reset_parameters() 
Author: dmlc, Project: dgl, Source: main.py

Example 6: __init__

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def __init__(self, X, Y, hidden_layer_sizes):
        super(Net, self).__init__()

        # Initialize linear layer with least squares solution
        X_ = np.hstack([X, np.ones((X.shape[0],1))])
        Theta = np.linalg.solve(X_.T.dot(X_), X_.T.dot(Y))
        
        self.lin = nn.Linear(X.shape[1], Y.shape[1])
        W,b = self.lin.parameters()
        W.data = torch.Tensor(Theta[:-1,:].T)
        b.data = torch.Tensor(Theta[-1,:])
        
        # Set up non-linear network of 
        # Linear -> BatchNorm -> ReLU -> Dropout layers
        layer_sizes = [X.shape[1]] + hidden_layer_sizes
        layers = reduce(operator.add, 
            [[nn.Linear(a,b), nn.BatchNorm1d(b), nn.ReLU(), nn.Dropout(p=0.2)] 
                for a,b in zip(layer_sizes[0:-1], layer_sizes[1:])])
        layers += [nn.Linear(layer_sizes[-1], Y.shape[1])]
        self.net = nn.Sequential(*layers)
        self.sig = Parameter(torch.ones(1, Y.shape[1]).cuda()) 
Author: locuslab, Project: e2e-model-learning, Source: model_classes.py

Example 7: __init__

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def __init__(self, L=3, lamb=5):
        super(RTML, self).__init__()
        self.L = L
        self.N = len(att_names)
        self.lamb = lamb
        self.theta = Parameter(torch.Tensor(self.L, 300, 300))
        self.alpha = Parameter(torch.Tensor(self.N, self.L+1))  # the extra (L+1)-th slot allows
                                                                 # parameterizing a norm below lamb
        self.reset_parameters()

        self.att_emb = nn.Embedding(self.N, 300)
        if PREINIT:
            self.att_emb.weight.data = _load_vectors(att_names).cuda()
        else:
            _np_emb = np.random.randn(self.N, 300)
            _np_emb = _np_emb / np.square(_np_emb).sum(1)[:, None]
            self.att_emb.weight.data = torch.FloatTensor(_np_emb).cuda() 
Author: uwnlp, Project: verb-attributes, Source: train_eval_rtml.py

Example 8: __init__

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True):
        super(_BatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
            self.register_parameter('num_batches_tracked', None)
        self.reset_parameters() 
Author: sxhxliang, Project: BigGAN-pytorch, Source: CrossReplicaBN.py

Example 9: __init__

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def __init__(self, model=None, nnodes=None, feature_shape=None, lambda_=0.5, attack_structure=True, attack_features=False, device='cpu'):

        super(BaseMeta, self).__init__(model, nnodes, attack_structure, attack_features, device)
        self.lambda_ = lambda_

        assert attack_features or attack_structure, 'attack_features or attack_structure cannot be both False'

        self.modified_adj = None
        self.modified_features = None

        if attack_structure:
            assert nnodes is not None, 'Please give nnodes='
            self.adj_changes = Parameter(torch.FloatTensor(nnodes, nnodes))
            self.adj_changes.data.fill_(0)

        if attack_features:
            assert feature_shape is not None, 'Please give feature_shape='
            self.feature_changes = Parameter(torch.FloatTensor(feature_shape))
            self.feature_changes.data.fill_(0)

        self.with_relu = model.with_relu 
Author: DSE-MSU, Project: DeepRobust, Source: mettack.py

Example 10: __init__

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def __init__(self, model=None, nnodes=None, loss_type='CE', feature_shape=None, attack_structure=True, attack_features=False, device='cpu'):

        super(PGDAttack, self).__init__(model, nnodes, attack_structure, attack_features, device)

        assert attack_features or attack_structure, 'attack_features or attack_structure cannot be both False'

        self.loss_type = loss_type
        self.modified_adj = None
        self.modified_features = None

        if attack_structure:
            assert nnodes is not None, 'Please give nnodes='
            self.adj_changes = Parameter(torch.FloatTensor(int(nnodes*(nnodes-1)/2)))
            self.adj_changes.data.fill_(0)

        if attack_features:
            # feature attacks are not supported by this attack
            raise NotImplementedError('Topology Attack does not support feature attacks')

        self.complementary = None 
Author: DSE-MSU, Project: DeepRobust, Source: topology_attack.py

Example 11: __init__

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True):
        super(_BatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        self.freezed = False
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
        self.reset_parameters() 
Author: tamakoji, Project: pytorch-syncbn, Source: syncbn.py

Example 12: get_params

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def get_params(pretrained_model):
    pretrained_checkpoint = load_checkpoint(pretrained_model)
    # for name, param in pretrained_checkpoint['state_dict'].items():
    for name, param in pretrained_checkpoint.items():
        print('pretrained_model params name and size: ', name, param.size())
        if isinstance(param, Parameter):
            # backwards compatibility for serialized parameters
            param = param.data
        try:
            # dump each parameter tensor to its own .npy file
            np.save(name + '.npy', param.cpu().numpy())
            print('############# saved params name: ', name)
        except Exception:
            raise RuntimeError('While saving the parameter named {} '
                               'with dimensions {}.'.format(name, param.size()))
Author: aliyun, Project: alibabacloud-quantization-networks, Source: main.py

Example 13: __init__

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def __init__(self, quant_values=[-1, 0, 1], quan_bias=[0], init_beta=0.0):
        """register_parameter: params w/ grad that need to be learned;
        register_buffer: params w/o grad that do not need to be learned
        (see the short sketch after this example); an example is shown in
        https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
        """
        super(Quantization, self).__init__()
        self.values = quant_values
        # number of sigmoids
        self.n = len(self.values) - 1
        self.alpha = Parameter(torch.Tensor([1]))
        self.beta = Parameter(torch.Tensor([1]))
        self.register_buffer('biases', torch.zeros(self.n))
        self.register_buffer('scales', torch.zeros(self.n))

        boundary = np.array(quan_bias)
        self.init_scale_and_offset()
        self.bias_inited = False
        self.alpha_beta_inited = False
        self.init_biases(boundary)
        self.init_alpha_and_beta(init_beta)
Author: aliyun, Project: alibabacloud-quantization-networks, Source: quantization.py
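
The docstring in Example 13 contrasts register_parameter with register_buffer. As a minimal illustration of that difference (a sketch written for this article, not code from the project above): parameters appear in .parameters() and receive gradients, while buffers are saved and restored with state_dict() but are never updated by the optimizer.

import torch
import torch.nn as nn
from torch.nn.parameter import Parameter

class Demo(nn.Module):
    def __init__(self):
        super(Demo, self).__init__()
        # Learnable: included in .parameters(), receives gradients.
        self.weight = Parameter(torch.ones(3))
        # Not learnable: saved in state_dict(), but gets no gradients.
        self.register_buffer('running_sum', torch.zeros(3))

m = Demo()
print([name for name, _ in m.named_parameters()])  # ['weight']
print(list(m.state_dict().keys()))                 # ['weight', 'running_sum']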

Example 14: load_params

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def load_params(new_model, pretrained_model):
    # new_model_dict = new_model.module.state_dict()
    new_model_dict = new_model.state_dict()
    pretrained_checkpoint = load_checkpoint(pretrained_model)
    # for name, param in pretrained_checkpoint.items():
    for name, param in pretrained_checkpoint['state_dict'].items():
        print('pretrained_model params name and size: ', name, param.size())
        if name in new_model_dict:
            if isinstance(param, Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            try:
                new_model_dict[name].copy_(param)
                print('############# new_model load params name: ', name)
            except Exception:
                raise RuntimeError('While copying the parameter named {}, '
                                   'whose dimensions in the model are {} and '
                                   'whose dimensions in the checkpoint are {}.'
                                   .format(name, new_model_dict[name].size(), param.size()))
Author: aliyun, Project: alibabacloud-quantization-networks, Source: quan_all_main.py

Example 15: __init__

# Required import: from torch.nn import parameter [as alias]
# Or: from torch.nn.parameter import Parameter [as alias]
def __init__(self, in_features, out_features, initial_sparsity, bias=True, sparse=True):
        super(DynamicLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.initial_sparsity = initial_sparsity
        self.sparse = sparse

        if sparse:
            self.d_tensor = SparseTensor([out_features, in_features], initial_sparsity=initial_sparsity)
        else:
            self.d_tensor = TiedTensor([out_features, in_features], initial_sparsity=initial_sparsity)

        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.bias = None

        self.init_parameters()
        # self.weight = self.d_tensor.s_tensor
Author: IntelAI, Project: dynamic-reparameterization, Source: reparameterized_layers.py


Note: The torch.nn.parameter.Parameter examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. Refer to each project's License before distributing or using the code. Do not reproduce without permission.