

Python weight_norm.weight_norm Method Code Examples

This article collects typical usage examples of the Python method torch.nn.utils.weight_norm.weight_norm. If you are wondering what weight_norm.weight_norm does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore other usage examples from the containing module, torch.nn.utils.weight_norm.


Below are 15 code examples of the weight_norm.weight_norm method, sorted by popularity by default.
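
Before the individual examples, here is a minimal sketch (with hypothetical layer sizes) of what torch.nn.utils.weight_norm does: it reparameterizes a module's weight as a magnitude times a direction, stored in the parameters weight_g and weight_v.

import torch
import torch.nn as nn
from torch.nn.utils import weight_norm

# weight_norm re-expresses `weight` as g * v / ||v||, registering `weight_g`
# (magnitude) and `weight_v` (direction) on the wrapped module.
fc = weight_norm(nn.Linear(20, 40), name='weight', dim=0)
print(fc.weight_g.shape)   # torch.Size([40, 1]) -- one magnitude per output unit
print(fc.weight_v.shape)   # torch.Size([40, 20])

# dim=None, used throughout the examples below, normalizes over the whole
# tensor, so a single scalar magnitude is learned.
fc_scalar = weight_norm(nn.Linear(20, 40), dim=None)
print(fc_scalar.weight_g.numel())  # 1

x = torch.randn(8, 20)
y = fc(x)  # forward works as usual; weight is recomputed from weight_g and weight_v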

Example 1: __init__

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, dims, act='ReLU', dropout=0, bias=True):
        super(FCNet, self).__init__()

        layers = []
        for i in range(len(dims)-2):
            in_dim = dims[i]
            out_dim = dims[i+1]
            if 0 < dropout:
                layers.append(nn.Dropout(dropout))
            layers.append(weight_norm(nn.Linear(in_dim, out_dim, bias=bias),
                                      dim=None))
            if '' != act and act is not None:
                layers.append(getattr(nn, act)())
        if 0 < dropout:
            layers.append(nn.Dropout(dropout))
        layers.append(weight_norm(nn.Linear(dims[-2], dims[-1], bias=bias),
                                  dim=None))
        if '' != act and act is not None:
            layers.append(getattr(nn, act)())

        self.main = nn.Sequential(*layers) 
Developer: linjieli222, Project: VQA_ReGAT, Lines of code: 23, Source file: fc.py
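
A minimal usage sketch with hypothetical dimensions follows. Only __init__ is quoted above, so the sketch calls the built nn.Sequential directly (the full class in the repository presumably also defines a forward method).

import torch

# Hypothetical dims: 2048-d input -> 1024-d hidden -> 512-d output; each Linear
# is wrapped in weight_norm(dim=None) with dropout applied before it.
net = FCNet([2048, 1024, 512], act='ReLU', dropout=0.2)
x = torch.randn(32, 2048)            # batch of 32 feature vectors
out = net.main(x)
print(out.shape)                     # torch.Size([32, 512])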

Example 2: make_layers

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def make_layers(cfg, batch_norm=False, weight_norm=False):
    layers = []
    in_channels = 3
    for i,v in enumerate(cfg):
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if i == len(cfg)-1:
                layers += [conv2d]
                break
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            elif weight_norm:
                layers += [weight_norm(conv2d), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers) 
Developer: herobd, Project: Visual-Template-Free-Form-Parsing, Lines of code: 21, Source file: vgg.py
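
A minimal usage sketch with a hypothetical VGG-style cfg: integers are conv output channels, 'M' inserts a 2x2 max pool, and the last entry is appended without an activation. Note that the weight_norm keyword argument shadows the imported function inside the body, so the sketch exercises the batch_norm path.

import torch
import torch.nn as nn

# Hypothetical configuration: two conv blocks, two max-pools, and a final
# 256-channel conv with no activation (the i == len(cfg)-1 branch).
cfg = [64, 64, 'M', 128, 128, 'M', 256]
features = make_layers(cfg, batch_norm=True)

x = torch.randn(2, 3, 64, 64)
print(features(x).shape)   # torch.Size([2, 256, 16, 16]) after two 2x downsamplings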

Example 3: fcReLU

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def fcReLU(in_ch,out_ch,norm,dropout=None,relu=True):
    fc = nn.Linear(in_ch,out_ch)
    if 'weight' in norm:
        layers = [weight_norm(fc)]
    else:
        layers = [fc]
    if 'batch' in norm:
        layers.append(nn.BatchNorm1d(out_ch))
    elif 'instance' in norm:
        layers.append(nn.InstanceNorm1d(out_ch))
    elif 'group' in norm:
        layers.append(nn.GroupNorm(getGroupSize(out_ch),out_ch))
    if dropout is not None:
        if dropout != False:
            layers.append(nn.Dropout(p=0.1,inplace=True))
    if relu:
        layers += [nn.ReLU(inplace=True)]
    return layers 
Developer: herobd, Project: Visual-Template-Free-Form-Parsing, Lines of code: 20, Source file: net_builder.py
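
A minimal usage sketch with hypothetical channel sizes. fcReLU returns a plain list of layers, so the caller wraps it in nn.Sequential; getGroupSize is a helper from the same repository and is only reached on the 'group' norm path.

import torch
import torch.nn as nn
from torch.nn.utils import weight_norm

# 'weight' in norm -> the Linear is wrapped in weight_norm; dropout adds an
# nn.Dropout (note the quoted code hardcodes p=0.1); relu appends nn.ReLU.
head = nn.Sequential(*fcReLU(512, 256, norm='weight', dropout=0.1))

x = torch.randn(16, 512)
print(head(x).shape)   # torch.Size([16, 256])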

Example 4: __init__

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, dims, act='ReLU', dropout=0):
        super(FCNet, self).__init__()

        layers = []
        for i in range(len(dims)-2):
            in_dim = dims[i]
            out_dim = dims[i+1]
            if 0 < dropout:
                layers.append(nn.Dropout(dropout))
            layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
            if ''!=act:
                layers.append(getattr(nn, act)())
        if 0 < dropout:
            layers.append(nn.Dropout(dropout))
        layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
        if ''!=act:
            layers.append(getattr(nn, act)())

        self.main = nn.Sequential(*layers) 
Developer: jnhwkim, Project: ban-vqa, Lines of code: 21, Source file: fc.py

Example 5: __init__

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, v_dim, q_dim, h_dim, h_out, act='ReLU', dropout=[.2,.5], k=3):
        super(BCNet, self).__init__()
        
        self.c = 32
        self.k = k
        self.v_dim = v_dim; self.q_dim = q_dim
        self.h_dim = h_dim; self.h_out = h_out

        self.v_net = FCNet([v_dim, h_dim * self.k], act=act, dropout=dropout[0])
        self.q_net = FCNet([q_dim, h_dim * self.k], act=act, dropout=dropout[0])
        self.dropout = nn.Dropout(dropout[1]) # attention
        if 1 < k:
            self.p_net = nn.AvgPool1d(self.k, stride=self.k)
        
        if None == h_out:
            pass
        elif h_out <= self.c:
            self.h_mat = nn.Parameter(torch.Tensor(1, h_out, 1, h_dim * self.k).normal_())
            self.h_bias = nn.Parameter(torch.Tensor(1, h_out, 1, 1).normal_())
        else:
            self.h_net = weight_norm(nn.Linear(h_dim * self.k, h_out), dim=None) 
Developer: jnhwkim, Project: ban-vqa, Lines of code: 23, Source file: bc.py
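
A minimal construction sketch with hypothetical dimensions, assuming the FCNet from Example 1/4 is in scope. With h_out=1 (which is <= self.c) the bilinear weights live in h_mat/h_bias rather than in a weight-normalized h_net.

import torch

# 2048-d visual features, 1024-d question features, 512-d joint space, 1 glimpse.
net = BCNet(v_dim=2048, q_dim=1024, h_dim=512, h_out=1, k=3)
print(net.h_mat.shape)    # torch.Size([1, 1, 1, 1536]) since h_dim * k = 1536
print(net.h_bias.shape)   # torch.Size([1, 1, 1, 1])
print(net.p_net)          # average pool that later collapses the k factor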

Example 6: EncoderImage

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def EncoderImage(data_name, img_dim, embed_size, precomp_enc_type='basic', no_imgnorm=False):
    """A wrapper to image encoders. Chooses between an different encoders
    that uses precomputed image features."""
    if precomp_enc_type == 'basic':
        img_enc = EncoderImagePrecomp(
            img_dim, embed_size, no_imgnorm)
    elif precomp_enc_type == 'weight_norm':
        img_enc = EncoderImageWeightNormPrecomp(
            img_dim, embed_size, no_imgnorm)
    else:
        raise ValueError("Unknown precomp_enc_type: {}".format(precomp_enc_type))

    return img_enc 
Developer: aimagelab, Project: speaksee, Lines of code: 15, Source file: scan.py

Example 7: __init__

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, img_dim, embed_size, no_imgnorm=False):
        super(EncoderImageWeightNormPrecomp, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.fc = weight_norm(nn.Linear(img_dim, embed_size), dim=None) 
Developer: aimagelab, Project: speaksee, Lines of code: 7, Source file: scan.py

Example 8: __init__

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, dims, act='ReLU', dropout_r=0.0):
        super(MLP, self).__init__()

        layers = []
        for i in range(len(dims) - 1):
            in_dim = dims[i]
            out_dim = dims[i + 1]
            if dropout_r > 0:
                layers.append(nn.Dropout(dropout_r))
            layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
            if act != '':
                layers.append(getattr(nn, act)())

        self.mlp = nn.Sequential(*layers) 
Developer: MILVLG, Project: openvqa, Lines of code: 16, Source file: ban.py

Example 9: __init__

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, dims, act='ELU', dropout_r=0.0):
        super(MLP, self).__init__()

        layers = []
        for i in range(len(dims) - 1):
            in_dim = dims[i]
            out_dim = dims[i + 1]
            if dropout_r > 0:
                layers.append(nn.Dropout(dropout_r))
            layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
            if act != '':
                layers.append(getattr(nn, act)())

        self.mlp = nn.Sequential(*layers) 
Developer: MILVLG, Project: openvqa, Lines of code: 16, Source file: tda.py

Example 10: EncoderImage

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def EncoderImage(data_name, img_dim, embed_size, precomp_enc_type='basic', 
                 no_imgnorm=False):
    """A wrapper to image encoders. Chooses between an different encoders
    that uses precomputed image features.
    """
    if precomp_enc_type == 'basic':
        img_enc = EncoderImagePrecomp(
            img_dim, embed_size, no_imgnorm)
    elif precomp_enc_type == 'weight_norm':
        img_enc = EncoderImageWeightNormPrecomp(
            img_dim, embed_size, no_imgnorm)
    else:
        raise ValueError("Unknown precomp_enc_type: {}".format(precomp_enc_type))

    return img_enc 
Developer: kuanghuei, Project: SCAN, Lines of code: 17, Source file: model.py

Example 11: __init__

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, x_dim, y_dim, z_dim, glimpse, dropout=[.2, .5]):
        super(BiAttention, self).__init__()

        self.glimpse = glimpse
        self.logits = weight_norm(BCNet(x_dim, y_dim, z_dim, glimpse,
                                        dropout=dropout, k=3),
                                  name='h_mat', dim=None) 
Developer: linjieli222, Project: VQA_ReGAT, Lines of code: 9, Source file: bilinear_attention.py
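
This example is worth noting because weight_norm is applied to a custom module rather than a built-in layer: the name argument points it at BCNet's h_mat parameter. A minimal sketch with hypothetical dimensions, assuming the BCNet and FCNet from the earlier examples are in scope.

import torch
from torch.nn.utils.weight_norm import weight_norm

# dim=None normalizes the whole h_mat tensor; weight_norm registers
# h_mat_g and h_mat_v on the wrapped BCNet.
logits = weight_norm(BCNet(2048, 1024, 512, h_out=2, k=3),
                     name='h_mat', dim=None)
print(hasattr(logits, 'h_mat_g'), hasattr(logits, 'h_mat_v'))  # True True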

Example 12: __init__

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, feat_dim, nongt_dim=20, pos_emb_dim=-1,
                 num_heads=16, dropout=[0.2, 0.5]):
        """ Attetion module with vectorized version

        Args:
            position_embedding: [num_rois, nongt_dim, pos_emb_dim]
                                used in implicit relation
            pos_emb_dim: set as -1 if explicit relation
            nongt_dim: number of objects consider relations per image
            fc_dim: should be same as num_heads
            feat_dim: dimension of roi_feat
            num_heads: number of attention heads
        Returns:
            output: [num_rois, ovr_feat_dim, output_dim]
        """
        super(GraphSelfAttentionLayer, self).__init__()
        # multi head
        self.fc_dim = num_heads
        self.feat_dim = feat_dim
        self.dim = (feat_dim, feat_dim, feat_dim)
        self.dim_group = (int(self.dim[0] / num_heads),
                          int(self.dim[1] / num_heads),
                          int(self.dim[2] / num_heads))
        self.num_heads = num_heads
        self.pos_emb_dim = pos_emb_dim
        if self.pos_emb_dim > 0:
            self.pair_pos_fc1 = FCNet([pos_emb_dim, self.fc_dim], None, dropout[0])
        self.query = FCNet([feat_dim, self.dim[0]], None, dropout[0])
        self.nongt_dim = nongt_dim

        self.key = FCNet([feat_dim, self.dim[1]], None, dropout[0])

        self.linear_out_ = weight_norm(
                            nn.Conv2d(in_channels=self.fc_dim * feat_dim,
                                      out_channels=self.dim[2],
                                      kernel_size=(1, 1),
                                      groups=self.fc_dim), dim=None) 
Developer: linjieli222, Project: VQA_ReGAT, Lines of code: 39, Source file: graph_att_layer.py

Example 13: __init__

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, v_dim, q_dim, h_dim, h_out, act='ReLU',
                 dropout=[.2, .5], k=3):
        super(BCNet, self).__init__()

        self.c = 32
        self.k = k
        self.v_dim = v_dim
        self.q_dim = q_dim
        self.h_dim = h_dim
        self.h_out = h_out

        self.v_net = FCNet([v_dim, h_dim * self.k], act=act,
                           dropout=dropout[0])
        self.q_net = FCNet([q_dim, h_dim * self.k], act=act,
                           dropout=dropout[0])
        self.dropout = nn.Dropout(dropout[1])  # attention
        if 1 < k:
            self.p_net = nn.AvgPool1d(self.k, stride=self.k)

        if h_out is None:
            pass
        elif h_out <= self.c:
            self.h_mat = nn.Parameter(
                        torch.Tensor(1, h_out, 1, h_dim * self.k).normal_())
            self.h_bias = nn.Parameter(
                        torch.Tensor(1, h_out, 1, 1).normal_())
        else:
            self.h_net = weight_norm(
                            nn.Linear(h_dim * self.k, h_out), dim=None) 
Developer: linjieli222, Project: VQA_ReGAT, Lines of code: 31, Source file: bc.py

Example 14: __init__

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, dims):
        super(FCNet, self).__init__()

        layers = []
        for i in range(len(dims)-2):
            in_dim = dims[i]
            out_dim = dims[i+1]
            layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
            layers.append(nn.ReLU())
        layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
        layers.append(nn.ReLU())

        self.main = nn.Sequential(*layers) 
Developer: gicheonkang, Project: DAN-VisDial, Lines of code: 15, Source file: fc.py

Example 15: __init__

# Module to import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, v_dim, q_dim, num_hid, dropout=0.2):
        super(FIND, self).__init__()

        self.v_proj = FCNet([v_dim, num_hid])
        self.q_proj = FCNet([q_dim, num_hid])
        self.dropout = nn.Dropout(dropout)
        self.linear = weight_norm(nn.Linear(num_hid, 1), dim=None) 
Developer: gicheonkang, Project: DAN-VisDial, Lines of code: 9, Source file: modules.py


Note: The torch.nn.utils.weight_norm.weight_norm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and the source code copyright remains with those authors; please consult each project's license before distributing or using the code. Do not reproduce this article without permission.