

Python init.calculate_gain Method Code Examples

This article collects typical usage examples of the init.calculate_gain method from the Python module torch.nn.init. If you are wondering what init.calculate_gain does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples for the torch.nn.init module.


The following presents 15 code examples of the init.calculate_gain method, ordered by popularity.
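Before diving into the examples, here is a minimal usage sketch (not taken from any of the projects below) showing what calculate_gain returns and how the gain is typically passed to an initializer:

import torch
from torch.nn import init

linear = torch.nn.Linear(128, 64)
gain = init.calculate_gain('relu')               # sqrt(2) ≈ 1.414
init.xavier_uniform_(linear.weight, gain=gain)   # Xavier init scaled for ReLU

print(init.calculate_gain('tanh'))               # 5/3
print(init.calculate_gain('leaky_relu', 0.2))    # sqrt(2 / (1 + 0.2**2))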

Example 1: __init__

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def __init__(self, in_shape, normalize):
        super().__init__()
        bias = not normalize
        self._nb_output_channel = 3136
        self.conv1 = Conv2d(in_shape[0], 32, 8, stride=4, padding=0, bias=bias)
        self.conv2 = Conv2d(32, 64, 4, stride=2, padding=0, bias=bias)
        self.conv3 = Conv2d(64, 64, 3, stride=1, padding=0, bias=bias)

        if normalize:
            self.bn1 = BatchNorm2d(32)
            self.bn2 = BatchNorm2d(64)
            self.bn3 = BatchNorm2d(64)
        else:
            self.bn1 = Identity()
            self.bn2 = Identity()
            self.bn3 = Identity()

        relu_gain = init.calculate_gain("relu")
        self.conv1.weight.data.mul_(relu_gain)
        self.conv2.weight.data.mul_(relu_gain)
        self.conv3.weight.data.mul_(relu_gain) 
Developer: heronsystems, Project: adeptRL, Lines: 23, Source: networks.py

Example 2: reset_parameters

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def reset_parameters(self):
        # init real weight
        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.A)

        # init A
        gain = calculate_gain('leaky_relu', 0)
        std = gain / np.sqrt(fan_in)
        bound = np.sqrt(3.0) * std

        with torch.no_grad():
            # TODO: find more stable initial values
            self.A.uniform_(-bound * (1 / (np.pi ** 2)), bound * (1 / (np.pi ** 2)))
            #
            # B is initialized based on pi;
            # the bounds -pi and pi are too large, so the reciprocal 1/pi is used instead
            self.B.uniform_(-1 / np.pi, 1 / np.pi) 
Developer: AppleHolic, Project: source_separation, Lines: 18, Source: modules.py
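For reference, the bound computed above, sqrt(3) * gain / sqrt(fan_in), is the same bound that init.kaiming_uniform_ uses internally; the snippet below is a standalone sketch (the tensor shape is illustrative) of that equivalence before the example's extra 1/pi**2 scaling is applied:

import numpy as np
import torch
from torch import nn
from torch.nn.init import calculate_gain

A = torch.empty(64, 128)                           # illustrative weight tensor
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(A)

gain = calculate_gain('leaky_relu', 0)             # equals sqrt(2), same as the 'relu' gain
std = gain / np.sqrt(fan_in)
bound = np.sqrt(3.0) * std                         # uniform bound used by Kaiming init

with torch.no_grad():
    A.uniform_(-bound, bound)                      # equivalent to init.kaiming_uniform_(A, a=0)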

Example 3: init_param

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def init_param(self):
        if self.upsample:
            init.xavier_uniform_(self.W_branch.data, gain=init.calculate_gain('relu'))

        stdv = 1. / math.sqrt(self.out_feature)
        self.bias.data.uniform_(-stdv, stdv) 
Developer: seowok, Project: TreeGAN, Lines: 8, Source: gcn.py

Example 4: reset_parameters

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def reset_parameters(self):
        """Reinitialize learnable parameters."""
        gain = init.calculate_gain('relu')
        init.xavier_normal_(self.fc.weight, gain=gain)
        if isinstance(self.res_fc, nn.Linear):
            init.xavier_normal_(self.res_fc.weight, gain=gain)
        init.normal_(self.mu.data, 0, 0.1)
        init.constant_(self.inv_sigma.data, 1)
        if self.bias is not None:
            init.zeros_(self.bias.data) 
Developer: dmlc, Project: dgl, Lines: 12, Source: gmmconv.py

Example 5: reset_parameters

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def reset_parameters(self):
        """Reinitialize learnable parameters."""
        gain = init.calculate_gain('relu')
        if self.bias is not None:
            nn.init.zeros_(self.bias)
        if isinstance(self.res_fc, nn.Linear):
            nn.init.xavier_normal_(self.res_fc.weight, gain=gain) 
Developer: dmlc, Project: dgl, Lines: 9, Source: nnconv.py

Example 6: reset_parameters

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def reset_parameters(self):
        """Reinitialize learnable parameters."""
        gain = init.calculate_gain('relu')
        self.gru.reset_parameters()
        for linear in self.linears:
            init.xavier_normal_(linear.weight, gain=gain)
            init.zeros_(linear.bias) 
Developer: dmlc, Project: dgl, Lines: 9, Source: gatedgraphconv.py

Example 7: reset_parameters

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def reset_parameters(self):
        """Reinitialize learnable parameters."""
        if self.bias is not None:
            init.zeros_(self.bias)
        for i in range(self._k):
            init.xavier_normal_(self.W[i], init.calculate_gain('relu')) 
Developer: dmlc, Project: dgl, Lines: 8, Source: densechebconv.py

Example 8: _initialize_weights

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def _initialize_weights(self):
        init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))
        init.orthogonal_(self.conv2.weight, init.calculate_gain('relu'))
        init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))
        init.orthogonal_(self.conv4.weight) 
Developer: pfnet-research, Project: chainer-compiler, Lines: 7, Source: super_resolution.py

Example 9: xavier_uniform_relu

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def xavier_uniform_relu(modules):
    for m in modules:
        if isinstance(m, nn.Conv2d):
            init.xavier_uniform(m.weight.data, gain=init.calculate_gain('relu'))
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_() 
Developer: xdspacelab, Project: sscdnet, Lines: 11, Source: init.py

Example 10: xavier_uniform_sigmoid

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def xavier_uniform_sigmoid(modules):
    for m in modules:
        if isinstance(m, nn.Conv2d):
            init.xavier_uniform(m.weight.data, gain=init.calculate_gain('sigmoid'))
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_() 
Developer: xdspacelab, Project: sscdnet, Lines: 11, Source: init.py
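Both helpers above take an iterable of modules. A hedged usage sketch follows; the model below is illustrative and not part of the original project:

import torch.nn as nn

net = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.BatchNorm2d(16),
    nn.ReLU(inplace=True),
)
xavier_uniform_relu(net.modules())    # ReLU-scaled Xavier init for the conv layers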

Example 11: _weight_init

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def _weight_init(m):
        if isinstance(m, nn.Conv2d):
            init.orthogonal_(m.weight, init.calculate_gain('relu')) 
Developer: martkartasev, Project: sepconv, Lines: 5, Source: model.py

Example 12: __init__

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def __init__(self, in_shape, id, normalize):
        super().__init__(in_shape, id)
        bias = not normalize
        self._in_shape = in_shape
        self._out_shape = None
        self.conv1 = Conv2d(in_shape[0], 32, 7, stride=2, padding=1, bias=bias)
        self.conv2 = Conv2d(32, 32, 3, stride=2, padding=1, bias=bias)
        self.conv3 = Conv2d(32, 32, 3, stride=2, padding=1, bias=bias)
        self.conv4 = Conv2d(32, 32, 3, stride=2, padding=1, bias=bias)

        if normalize == "bn":
            self.bn1 = BatchNorm2d(32)
            self.bn2 = BatchNorm2d(32)
            self.bn3 = BatchNorm2d(32)
            self.bn4 = BatchNorm2d(32)
        elif normalize == "gn":
            self.bn1 = GroupNorm(8, 32)
            self.bn2 = GroupNorm(8, 32)
            self.bn3 = GroupNorm(8, 32)
            self.bn4 = GroupNorm(8, 32)
        else:
            self.bn1 = Identity()
            self.bn2 = Identity()
            self.bn3 = Identity()
            self.bn4 = Identity()

        relu_gain = init.calculate_gain("relu")
        self.conv1.weight.data.mul_(relu_gain)
        self.conv2.weight.data.mul_(relu_gain)
        self.conv3.weight.data.mul_(relu_gain)
        self.conv4.weight.data.mul_(relu_gain) 
Developer: heronsystems, Project: adeptRL, Lines: 33, Source: four_conv.py

Example 13: __init__

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def __init__(self, c_in, c_out, k_size, stride, pad, initializer='kaiming', bias=False):
        super(equalized_conv2d, self).__init__()
        self.conv = nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=False)
        if initializer == 'kaiming':    kaiming_normal(self.conv.weight, a=calculate_gain('conv2d'))
        elif initializer == 'xavier':   xavier_normal(self.conv.weight)
        
        conv_w = self.conv.weight.data.clone()
        self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0))
        self.scale = (torch.mean(self.conv.weight.data ** 2)) ** 0.5
        self.conv.weight.data.copy_(self.conv.weight.data/self.scale) 
Developer: nashory, Project: pggan-pytorch, Lines: 12, Source: custom_layers.py
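The constructor above only precomputes self.scale and a separate per-channel bias; it does not show how they are used at runtime. A hedged sketch of the kind of forward pass such an equalized-learning-rate layer typically pairs with (not part of the quoted snippet):

    def forward(self, x):
        # re-apply the stored scale at runtime, then add the explicit per-channel bias
        x = self.conv(x.mul(self.scale))
        return x + self.bias.view(1, -1, 1, 1).expand_as(x)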

Example 14: reset_parameters

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def reset_parameters(self) -> None:
        # Embeddings
        for name in 'word pos nt action'.split():
            embedding = getattr(self, f'{name}_embedding')
            embedding.reset_parameters()

        # Encoders
        for name in 'stack buffer history'.split():
            encoder = getattr(self, f'{name}_encoder')
            encoder.reset_parameters()

        # Compositions
        for name in 'fwd bwd'.split():
            lstm = getattr(self, f'{name}_composer')
            for pname, pval in lstm.named_parameters():
                if pname.startswith('weight'):
                    init.orthogonal(pval)
                else:
                    assert pname.startswith('bias')
                    init.constant(pval, 0.)

        # Transformations
        gain = init.calculate_gain('relu')
        for name in 'word nt action'.split():
            layer = getattr(self, f'{name}2encoder')
            init.xavier_uniform(layer[0].weight, gain=gain)
            init.constant(layer[0].bias, 1.)
        init.xavier_uniform(self.fwdbwd2composed[0].weight, gain=gain)
        init.constant(self.fwdbwd2composed[0].bias, 1.)
        init.xavier_uniform(self.encoders2summary[1].weight, gain=gain)
        init.constant(self.encoders2summary[1].bias, 1.)
        init.xavier_uniform(self.summary2actionlogprobs.weight)
        init.constant(self.summary2actionlogprobs.bias, 0.)

        # Guards
        for name in 'stack buffer history'.split():
            guard = getattr(self, f'{name}_guard')
            init.constant(guard, 0.) 
Developer: kmkurn, Project: pytorch-rnng, Lines: 40, Source: models.py

Example 15: __init__

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import calculate_gain [as alias]
def __init__(self, c_in, c_out, k_size, stride, pad, bias=False):
        super(EqualizedConv2d, self).__init__()
        self.conv = nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=False)
        kaiming_normal(self.conv.weight, a=calculate_gain('conv2d'))
        # Scaling the weights for equalized learning
        conv_w = self.conv.weight.data.clone()
        self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0))
        self.scale = (torch.mean(self.conv.weight.data ** 2)) ** 0.5
        self.conv.weight.data.copy_(self.conv.weight.data / self.scale)     # for equalized learning rate 
Developer: rahulbhalley, Project: progressive-growing-of-gans.pytorch, Lines: 11, Source: custom_layers.py


Note: The torch.nn.init.calculate_gain examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are selected from projects contributed by open-source developers, and copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.