

Python nn.InstanceNorm1d Method Code Examples

This article collects and summarizes typical usage examples of the torch.nn.InstanceNorm1d method in Python. If you are wondering what nn.InstanceNorm1d does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse other usage examples from the torch.nn module.


The following presents 15 code examples of the nn.InstanceNorm1d method, sorted by popularity by default.
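
Before diving into the examples, here is a minimal sketch of the basic call (the shapes and flags below are illustrative, not taken from any of the projects):

import torch
from torch import nn

# InstanceNorm1d normalizes each channel of each sample independently
# over the length dimension; input is expected as (N, C, L).
norm = nn.InstanceNorm1d(16, affine=True)  # 16 channels, learnable scale/shift
x = torch.randn(8, 16, 100)                # batch of 8, 16 channels, length 100
y = norm(x)                                # same shape; per-(sample, channel) zero mean, unit variance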

Example 1: build_norm_layer

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def build_norm_layer(norm_type, param=None, num_feats=None):
    if norm_type == 'bnorm':
        return nn.BatchNorm1d(num_feats)
    elif norm_type == 'snorm':
        spectral_norm(param)
        return None
    elif norm_type == 'bsnorm':
        spectral_norm(param)
        return nn.BatchNorm1d(num_feats)
    elif norm_type == 'lnorm':
        return nn.LayerNorm(num_feats)
    elif norm_type == 'wnorm':
        weight_norm(param)
        return None
    elif norm_type == 'inorm':
        return nn.InstanceNorm1d(num_feats, affine=False)
    elif norm_type == 'affinorm':
        return nn.InstanceNorm1d(num_feats, affine=True)
    elif norm_type is None:
        return None
    else:
        raise TypeError('Unrecognized norm type: {}'.format(norm_type))
Author: santi-pdp | Project: pase | Lines: 24 | Source: modules.py
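
A hypothetical usage sketch for build_norm_layer above (assuming spectral_norm and weight_norm are imported from torch.nn.utils, which the 'snorm'/'wnorm' branches require): the wrapping norms modify the given module in place and return None, while the other types return a layer to be applied in forward().

import torch
from torch import nn
from torch.nn.utils import spectral_norm, weight_norm

conv = nn.Conv1d(64, 64, kernel_size=3, padding=1)
norm = build_norm_layer('inorm', param=conv, num_feats=64)  # returns nn.InstanceNorm1d(64)
h = conv(torch.randn(4, 64, 128))
if norm is not None:  # 'snorm'/'wnorm' wrap `param` in place and return None
    h = norm(h)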

Example 2: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(self, input_size, output_size, bias=True, activation='relu', norm='batch'):
        super(DenseBlock, self).__init__()
        self.fc = nn.Linear(input_size, output_size, bias=bias)

        self.norm = norm
        if self.norm == 'batch':
            self.bn = nn.BatchNorm1d(output_size)
        elif self.norm == 'instance':
            self.bn = nn.InstanceNorm1d(output_size)

        self.activation = activation
        if self.activation == 'relu':
            self.act = nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = nn.LeakyReLU(0.1, True)
        elif self.activation == 'tanh':
            self.act = nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = nn.Sigmoid() 
Author: alterzero | Project: STARnet | Lines: 23 | Source: base_networks.py
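
One caveat worth noting (an observation about PyTorch, not about this project): nn.InstanceNorm1d needs a length dimension, so the 'instance' branch only makes sense if DenseBlock receives sequence-shaped input. A sketch of the transpose pattern such a block presumably relies on:

import torch
from torch import nn

fc = nn.Linear(64, 128)
inorm = nn.InstanceNorm1d(128)

x = torch.randn(32, 50, 64)                   # (batch, length, features)
h = fc(x)                                     # (32, 50, 128); Linear acts on the last dim
h = inorm(h.transpose(1, 2)).transpose(1, 2)  # InstanceNorm1d expects (N, C, L)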

Example 3: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(self, in_features, out_features, activation=None, normalization=None, momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(MyLinear, self).__init__()
        self.activation = activation
        self.normalization = normalization

        self.linear = nn.Linear(in_features, out_features, bias=True)
        if self.normalization == 'batch':
            self.norm = MyBatchNorm1d(out_features, momentum=momentum, affine=True, momentum_decay_step=bn_momentum_decay_step, momentum_decay=bn_momentum_decay)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm1d(out_features, momentum=momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif 'elu' == activation:
            self.act = nn.ELU(alpha=1.0)
        elif 'swish' == self.activation:
            self.act = Swish()
        elif 'leakyrelu' == self.activation:
            self.act = nn.LeakyReLU(0.01)
        elif 'selu' == self.activation:
            self.act = nn.SELU()

        self.weight_init() 
Author: lijx10 | Project: USIP | Lines: 24 | Source: layers.py

Example 4: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(self, input_size, inplace=True, has_bias=True, learn_affine=True):
        """Init.

        Args:
            input_size (float): size of input
            inplace (bool, optional): Defaults to True. LeakyReLU inplace?
            has_bias (bool, optional): Defaults to True. Conv1d bias?
            learn_affine (bool, optional): Defaults to True. InstanceNorm1d affine?
        """

        super(LDFWeightEstimatorNet, self).__init__()

        track = False
        self.conv_in = nn.Conv1d(input_size, 128, kernel_size=1, bias=has_bias)

        blocks = []
        for i in range(12):
            blocks.append(ResNetBlock())

        self.backbone = nn.Sequential(*blocks)

        self.conv_out = nn.Conv1d(128, 1, kernel_size=1, bias=has_bias) 
Author: intel-isl | Project: DFE | Lines: 24 | Source: normalized_eight_point_net.py

Example 5: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(self, c_in=512, c_out=513, c_h=512, c_a=8, ns=0.2, seg_len=64):
		super(Spectrogram_Patcher, self).__init__()
		self.ns = ns
		self.seg_len = seg_len
		self.input_layer = nn.Linear(c_in, c_h)
		self.dense1 = nn.Linear(c_h, c_h)
		self.dense2 = nn.Linear(c_h, c_h)
		self.dense3 = nn.Linear(c_h, c_h)
		self.dense4 = nn.Linear(c_h, c_h)
		self.RNN = nn.GRU(input_size=c_h, hidden_size=c_h//2, num_layers=1, bidirectional=True)
		self.dense5 = nn.Linear(2*c_h + c_h, c_h)
		self.linear = nn.Linear(c_h, c_out)
		# normalization layer
		self.ins_norm1 = nn.InstanceNorm1d(c_h)
		self.ins_norm2 = nn.InstanceNorm1d(c_h)
		# embedding layer
		self.emb1 = nn.Embedding(c_a, c_h)
		self.emb2 = nn.Embedding(c_a, c_h) 
Author: andi611 | Project: ZeroSpeech-TTS-without-T | Lines: 20 | Source: model.py

Example 6: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(self, in_features, out_features, activation=None, normalization=None, momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(MyLinear, self).__init__()
        self.activation = activation
        self.normalization = normalization

        self.linear = nn.Linear(in_features, out_features, bias=True)
        if self.normalization == 'batch':
            self.norm = MyBatchNorm1d(out_features, momentum=momentum, affine=True, momentum_decay_step=bn_momentum_decay_step, momentum_decay=bn_momentum_decay)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm1d(out_features, momentum=momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif 'elu' == activation:
            self.act = nn.ELU(alpha=1.0)
        elif 'swish' == self.activation:
            self.act = Swish()
        elif 'leakyrelu' == self.activation:
            self.act = nn.LeakyReLU(0.1)

        self.weight_init() 
Author: lijx10 | Project: SO-Net | Lines: 22 | Source: layers.py

Example 7: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # note: nn.GroupNorm1d does not exist in PyTorch; this branch would need
            # nn.GroupNorm(num_groups, f_dim, affine=False) to actually run
            self.bn = nn.GroupNorm1d(f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters() 
Author: autonomousvision | Project: occupancy_flow | Lines: 19 | Source: layers.py
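
This module follows the FiLM-style conditional normalization pattern: the norm itself is affine-free, and a per-channel scale (gamma) and shift (beta) are predicted from a conditioning code c by 1x1 convolutions. A plausible forward pass (a sketch under that assumption, not necessarily the project's exact code):

def forward(self, x, c):
    # x: (N, f_dim, T) features; c: (N, c_dim) or (N, c_dim, T) conditioning code
    if c.dim() == 2:
        c = c.unsqueeze(2)         # -> (N, c_dim, 1), broadcast over T
    gamma = self.conv_gamma(c)     # (N, f_dim, 1) or (N, f_dim, T)
    beta = self.conv_beta(c)
    net = self.bn(x)               # normalize without learned affine parameters
    return gamma * net + beta      # conditional scale and shift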

Example 8: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(self, in_features, out_features, activation=None, normalization=None, momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(MyLinear, self).__init__()
        self.activation = activation
        self.normalization = normalization

        self.linear = nn.Linear(in_features, out_features, bias=True)
        if self.normalization == 'batch':
            self.norm = MyBatchNorm1d(out_features, momentum=momentum, affine=True, momentum_decay_step=bn_momentum_decay_step, momentum_decay=bn_momentum_decay)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm1d(out_features, momentum=momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif 'sigmoid' == activation:
            self.act = nn.Sigmoid()
        elif 'swish' == self.activation:
            self.act = Swish()
        elif 'leakyrelu' == self.activation:
            self.act = nn.LeakyReLU(0.1)

        self.weight_init() 
Author: iSarmad | Project: RL-GAN-Net | Lines: 22 | Source: layers.py

Example 9: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, 8, 3)
        self.gnorm1 = nn.GroupNorm(4, 16)
        self.conv2 = nn.Conv1d(16, 32, 3, 1)
        self.lnorm1 = nn.LayerNorm((32, 23))
        self.conv3 = nn.Conv1d(32, 32, 3, 1)
        self.instnorm1 = nn.InstanceNorm1d(32, affine=True)
        self.convf = nn.Conv1d(32, 32, 1, 1)
        for p in self.convf.parameters():
            p.requires_grad = False
        self.fc1 = nn.Linear(21, 17)
        self.lnorm2 = nn.LayerNorm(17)
        self.fc2 = nn.Linear(32 * 17, 10)

        for layer in (self.gnorm1, self.lnorm1, self.lnorm2, self.instnorm1):
            nn.init.uniform_(layer.weight)
            nn.init.uniform_(layer.bias) 
Author: facebookresearch | Project: pytorch-dp | Lines: 20 | Source: privacy_engine_test.py

Example 10: fcReLU

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def fcReLU(in_ch, out_ch, norm, dropout=None, relu=True):
    fc = nn.Linear(in_ch, out_ch)
    if 'weight' in norm:
        layers = [weight_norm(fc)]
    else:
        layers = [fc]
    if 'batch' in norm:
        layers.append(nn.BatchNorm1d(out_ch))
    elif 'instance' in norm:
        layers.append(nn.InstanceNorm1d(out_ch))
    elif 'group' in norm:
        layers.append(nn.GroupNorm(getGroupSize(out_ch), out_ch))
    if dropout is not None and dropout != False:
        # note: p is fixed at 0.1; the dropout argument only gates whether dropout is added
        layers.append(nn.Dropout(p=0.1, inplace=True))
    if relu:
        layers += [nn.ReLU(inplace=True)]
    return layers
Author: herobd | Project: Visual-Template-Free-Form-Parsing | Lines: 20 | Source: net_builder.py
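
Since fcReLU returns a plain list of modules rather than a container, the assumed usage is to splice several calls into an nn.Sequential (weight_norm from torch.nn.utils and the project's getGroupSize must be in scope for the corresponding branches):

import torch
from torch import nn
from torch.nn.utils import weight_norm

layers = fcReLU(256, 128, norm='weight batch', dropout=True)  # weight_norm + BatchNorm1d + Dropout + ReLU
layers += fcReLU(128, 10, norm='', relu=False)                # bare output projection
mlp = nn.Sequential(*layers)
out = mlp(torch.randn(16, 256))                               # (16, 10)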

Example 11: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(self, n_units, dropout, pretrained_emb, vertex_feature,
            use_vertex_feature, fine_tune=False, instance_normalization=False):
        super(BatchGCN, self).__init__()
        self.num_layer = len(n_units) - 1
        self.dropout = dropout
        self.inst_norm = instance_normalization
        if self.inst_norm:
            self.norm = nn.InstanceNorm1d(pretrained_emb.size(1), momentum=0.0, affine=True)

        # https://discuss.pytorch.org/t/can-we-use-pre-trained-word-embeddings-for-weight-initialization-in-nn-embedding/1222/2
        self.embedding = nn.Embedding(pretrained_emb.size(0), pretrained_emb.size(1))
        self.embedding.weight = nn.Parameter(pretrained_emb)
        self.embedding.weight.requires_grad = fine_tune
        n_units[0] += pretrained_emb.size(1)

        self.use_vertex_feature = use_vertex_feature
        if self.use_vertex_feature:
            self.vertex_feature = nn.Embedding(vertex_feature.size(0), vertex_feature.size(1))
            self.vertex_feature.weight = nn.Parameter(vertex_feature)
            self.vertex_feature.weight.requires_grad = False
            n_units[0] += vertex_feature.size(1)

        self.layer_stack = nn.ModuleList()

        for i in range(self.num_layer):
            self.layer_stack.append(
                    BatchGraphConvolution(n_units[i], n_units[i + 1])
                    ) 
Author: xptree | Project: DeepInf | Lines: 30 | Source: gcn.py

Example 12: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(self, n_units, dropout, pretrained_emb, vertex_feature,
            use_vertex_feature, instance_normalization,
            neighbor_size, sequence_size, fine_tune=False):
        super(BatchPSCN, self).__init__()
        assert len(n_units) == 4
        self.dropout = dropout
        self.inst_norm = instance_normalization
        if self.inst_norm:
            self.norm = nn.InstanceNorm1d(pretrained_emb.size(1), momentum=0.0, affine=True)

        # https://discuss.pytorch.org/t/can-we-use-pre-trained-word-embeddings-for-weight-initialization-in-nn-embedding/1222/2
        self.embedding = nn.Embedding(pretrained_emb.size(0), pretrained_emb.size(1))
        self.embedding.weight = nn.Parameter(pretrained_emb)
        self.embedding.weight.requires_grad = fine_tune
        n_units[0] += pretrained_emb.size(1)

        self.use_vertex_feature = use_vertex_feature
        if self.use_vertex_feature:
            self.vertex_feature = nn.Embedding(vertex_feature.size(0), vertex_feature.size(1))
            self.vertex_feature.weight = nn.Parameter(vertex_feature)
            self.vertex_feature.weight.requires_grad = False
            n_units[0] += vertex_feature.size(1)

        # input is of shape bs x num_feature x l where l = w*k
        # after conv1, shape=(bs x ? x w)
        # after conv2 shape=(bs x ? x w/2)
        self.conv1 = nn.Conv1d(in_channels=n_units[0],
                    out_channels=n_units[1], kernel_size=neighbor_size,
                    stride=neighbor_size)
        k = 1
        self.conv2 = nn.Conv1d(in_channels=n_units[1],
                    out_channels=n_units[2], kernel_size=k, stride=1)
        self.fc = nn.Linear(in_features=n_units[2] * (sequence_size - k + 1),
                    out_features=n_units[3]) 
Author: xptree | Project: DeepInf | Lines: 36 | Source: pscn.py
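
A sketch of how a batch presumably flows through the two convolutions, following the shape comments in the code (the names below are illustrative; note that the in-code 'w/2' comment appears to predate the fixed k = 1, with which the length stays w after conv2):

import torch

bs, w, k = 32, sequence_size, neighbor_size
x = torch.randn(bs, n_units[0], w * k)  # k neighbors for each of w receptive fields
h = model.conv1(x)                      # kernel=k, stride=k -> (bs, n_units[1], w)
h = model.conv2(h)                      # kernel=1           -> (bs, n_units[2], w)
logits = model.fc(h.flatten(1))         # (bs, n_units[3])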

Example 13: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(self, pretrained_emb, vertex_feature, use_vertex_feature,
            n_units=[1433, 8, 7], n_heads=[8, 1],
            dropout=0.1, attn_dropout=0.0, fine_tune=False,
            instance_normalization=False):
        super(BatchGAT, self).__init__()
        self.n_layer = len(n_units) - 1
        self.dropout = dropout
        self.inst_norm = instance_normalization
        if self.inst_norm:
            self.norm = nn.InstanceNorm1d(pretrained_emb.size(1), momentum=0.0, affine=True)

        # https://discuss.pytorch.org/t/can-we-use-pre-trained-word-embeddings-for-weight-initialization-in-nn-embedding/1222/2
        self.embedding = nn.Embedding(pretrained_emb.size(0), pretrained_emb.size(1))
        self.embedding.weight = nn.Parameter(pretrained_emb)
        self.embedding.weight.requires_grad = fine_tune
        n_units[0] += pretrained_emb.size(1)

        self.use_vertex_feature = use_vertex_feature
        if self.use_vertex_feature:
            self.vertex_feature = nn.Embedding(vertex_feature.size(0), vertex_feature.size(1))
            self.vertex_feature.weight = nn.Parameter(vertex_feature)
            self.vertex_feature.weight.requires_grad = False
            n_units[0] += vertex_feature.size(1)

        self.layer_stack = nn.ModuleList()
        for i in range(self.n_layer):
            # consider multi head from last layer
            f_in = n_units[i] * n_heads[i - 1] if i else n_units[i]
            self.layer_stack.append(
                    BatchMultiHeadGraphAttention(n_heads[i], f_in=f_in,
                        f_out=n_units[i + 1], attn_dropout=attn_dropout)
                    ) 
Author: xptree | Project: DeepInf | Lines: 34 | Source: gat.py

Example 14: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(self, pools, convs, fcs=None, blocks=0, global_pool=None):
        super(MeshEncoder, self).__init__()
        self.fcs = None
        self.convs = []
        for i in range(len(convs) - 1):
            if i + 1 < len(pools):
                pool = pools[i + 1]
            else:
                pool = 0
            self.convs.append(DownConv(convs[i], convs[i + 1], blocks=blocks, pool=pool))
        self.global_pool = None
        if fcs is not None:
            self.fcs = []
            self.fcs_bn = []
            last_length = convs[-1]
            if global_pool is not None:
                if global_pool == 'max':
                    self.global_pool = nn.MaxPool1d(pools[-1])
                elif global_pool == 'avg':
                    self.global_pool = nn.AvgPool1d(pools[-1])
                else:
                    assert False, 'global_pool %s is not defined' % global_pool
            else:
                last_length *= pools[-1]
            if fcs[0] == last_length:
                fcs = fcs[1:]
            for length in fcs:
                self.fcs.append(nn.Linear(last_length, length))
                self.fcs_bn.append(nn.InstanceNorm1d(length))
                last_length = length
            self.fcs = nn.ModuleList(self.fcs)
            self.fcs_bn = nn.ModuleList(self.fcs_bn)
        self.convs = nn.ModuleList(self.convs)
        reset_params(self) 
Author: ranahanocka | Project: MeshCNN | Lines: 36 | Source: networks.py

Example 15: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import InstanceNorm1d [as alias]
def __init__(
            self,
            in_size: int,
            out_size: int,
            *,
            kernel_size: int = 1,
            stride: int = 1,
            padding: int = 0,
            activation=nn.ReLU(inplace=True),
            bn: bool = False,
            init=nn.init.kaiming_normal_,
            bias: bool = True,
            preact: bool = False,
            name: str = "",
            instance_norm=False
    ):
        super().__init__(
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            activation,
            bn,
            init,
            conv=nn.Conv1d,
            batch_norm=BatchNorm1d,
            bias=bias,
            preact=preact,
            name=name,
            instance_norm=instance_norm,
            instance_norm_func=nn.InstanceNorm1d
        ) 
Author: daveredrum | Project: Pointnet2.ScanNet | Lines: 35 | Source: pytorch_utils.py


Note: The torch.nn.InstanceNorm1d examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from community open-source projects; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.