This article collects typical code examples of the Python function torch.nn.utils.weight_norm.weight_norm. If you have been wondering exactly what weight_norm.weight_norm does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from its containing module, torch.nn.utils.weight_norm.
The 15 code examples of weight_norm.weight_norm shown below are sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
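Before the examples, here is a minimal self-contained sketch of what weight_norm itself does (shapes shown in comments):

import torch
import torch.nn as nn
from torch.nn.utils import weight_norm

# weight_norm re-parameterizes a module's weight as
# w = g * v / ||v||, registering the magnitude (weight_g)
# and direction (weight_v) as separate parameters.
fc = weight_norm(nn.Linear(20, 40), name='weight', dim=0)
print(fc.weight_g.shape)  # torch.Size([40, 1])
print(fc.weight_v.shape)  # torch.Size([40, 20])
out = fc(torch.randn(8, 20))  # the forward pass is unchanged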
Example 1: __init__
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, dims, act='ReLU', dropout=0, bias=True):
    super(FCNet, self).__init__()
    layers = []
    # Hidden layers: each is a weight-normed Linear, optionally
    # preceded by Dropout and followed by the chosen activation.
    for i in range(len(dims) - 2):
        in_dim = dims[i]
        out_dim = dims[i + 1]
        if 0 < dropout:
            layers.append(nn.Dropout(dropout))
        layers.append(weight_norm(nn.Linear(in_dim, out_dim, bias=bias),
                                  dim=None))
        if '' != act and act is not None:
            layers.append(getattr(nn, act)())
    # Output layer, built the same way.
    if 0 < dropout:
        layers.append(nn.Dropout(dropout))
    layers.append(weight_norm(nn.Linear(dims[-2], dims[-1], bias=bias),
                              dim=None))
    if '' != act and act is not None:
        layers.append(getattr(nn, act)())
    self.main = nn.Sequential(*layers)
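A minimal instantiation sketch (the dims values are hypothetical, and FCNet's forward is not part of this excerpt, so self.main is applied directly):

net = FCNet([1024, 512, 256], act='ReLU', dropout=0.2)
y = net.main(torch.randn(32, 1024))  # -> (32, 256)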
Example 2: make_layers
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def make_layers(cfg, batch_norm=False, weight_norm=False):
    # Note: the weight_norm parameter shadows the imported function, so
    # callers must pass the weight_norm function itself (not True) for
    # the elif branch below to be callable.
    layers = []
    in_channels = 3
    for i, v in enumerate(cfg):
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if i == len(cfg) - 1:
                # The final conv gets no normalization or activation.
                layers += [conv2d]
                break
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            elif weight_norm:
                layers += [weight_norm(conv2d), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
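A usage sketch with a hypothetical VGG-style config: integers are conv widths and 'M' inserts a 2x2 max-pool. The weight_norm function itself is passed so the truthy parameter is also callable:

from torch.nn.utils import weight_norm as wn
features = make_layers([64, 64, 'M', 128], weight_norm=wn)
out = features(torch.randn(1, 3, 32, 32))  # -> (1, 128, 16, 16)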
Example 3: fcReLU
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def fcReLU(in_ch, out_ch, norm, dropout=None, relu=True):
    fc = nn.Linear(in_ch, out_ch)
    if 'weight' in norm:
        layers = [weight_norm(fc)]
    else:
        layers = [fc]
    if 'batch' in norm:
        layers.append(nn.BatchNorm1d(out_ch))
    elif 'instance' in norm:
        layers.append(nn.InstanceNorm1d(out_ch))
    elif 'group' in norm:
        # getGroupSize is a helper from the source repo (not shown here).
        layers.append(nn.GroupNorm(getGroupSize(out_ch), out_ch))
    if dropout is not None and dropout != False:
        # The dropout argument only gates the layer; p is fixed at 0.1.
        layers.append(nn.Dropout(p=0.1, inplace=True))
    if relu:
        layers += [nn.ReLU(inplace=True)]
    return layers
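Since fcReLU returns a plain list of layers, a typical call (hypothetical sizes) unpacks it into nn.Sequential:

block = nn.Sequential(*fcReLU(512, 256, norm='weight', dropout=True))
y = block(torch.randn(8, 512))  # -> (8, 256)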
Example 4: __init__
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, dims, act='ReLU', dropout=0):
    super(FCNet, self).__init__()
    layers = []
    for i in range(len(dims) - 2):
        in_dim = dims[i]
        out_dim = dims[i + 1]
        if 0 < dropout:
            layers.append(nn.Dropout(dropout))
        layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
        if '' != act:
            layers.append(getattr(nn, act)())
    if 0 < dropout:
        layers.append(nn.Dropout(dropout))
    layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
    if '' != act:
        layers.append(getattr(nn, act)())
    self.main = nn.Sequential(*layers)
Example 5: __init__
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, v_dim, q_dim, h_dim, h_out, act='ReLU', dropout=[.2,.5], k=3):
    super(BCNet, self).__init__()
    self.c = 32
    self.k = k
    self.v_dim = v_dim; self.q_dim = q_dim
    self.h_dim = h_dim; self.h_out = h_out
    self.v_net = FCNet([v_dim, h_dim * self.k], act=act, dropout=dropout[0])
    self.q_net = FCNet([q_dim, h_dim * self.k], act=act, dropout=dropout[0])
    self.dropout = nn.Dropout(dropout[1])  # attention
    if 1 < k:
        self.p_net = nn.AvgPool1d(self.k, stride=self.k)
    if h_out is None:
        pass
    elif h_out <= self.c:
        # For few output glimpses (<= 32), use explicit bilinear
        # parameters h_mat/h_bias instead of a weight-normed Linear.
        self.h_mat = nn.Parameter(torch.Tensor(1, h_out, 1, h_dim * self.k).normal_())
        self.h_bias = nn.Parameter(torch.Tensor(1, h_out, 1, 1).normal_())
    else:
        self.h_net = weight_norm(nn.Linear(h_dim * self.k, h_out), dim=None)
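An instantiation sketch with hypothetical VQA-style dimensions; h_out=8 (<= 32) selects the h_mat branch above:

net = BCNet(v_dim=2048, q_dim=1024, h_dim=512, h_out=8)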
Example 6: EncoderImage
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def EncoderImage(data_name, img_dim, embed_size, precomp_enc_type='basic', no_imgnorm=False):
    """A wrapper for image encoders. Chooses between different encoders
    that use precomputed image features."""
    if precomp_enc_type == 'basic':
        img_enc = EncoderImagePrecomp(
            img_dim, embed_size, no_imgnorm)
    elif precomp_enc_type == 'weight_norm':
        img_enc = EncoderImageWeightNormPrecomp(
            img_dim, embed_size, no_imgnorm)
    else:
        raise ValueError("Unknown precomp_enc_type: {}".format(precomp_enc_type))
    return img_enc
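A call sketch; the data_name argument is accepted but unused by this wrapper, and the value here is hypothetical:

img_enc = EncoderImage('coco_precomp', img_dim=2048, embed_size=1024,
                       precomp_enc_type='weight_norm')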
Example 7: __init__
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, img_dim, embed_size, no_imgnorm=False):
    super(EncoderImageWeightNormPrecomp, self).__init__()
    self.embed_size = embed_size
    self.no_imgnorm = no_imgnorm
    self.fc = weight_norm(nn.Linear(img_dim, embed_size), dim=None)
Example 8: __init__
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, dims, act='ReLU', dropout_r=0.0):
    super(MLP, self).__init__()
    layers = []
    for i in range(len(dims) - 1):
        in_dim = dims[i]
        out_dim = dims[i + 1]
        if dropout_r > 0:
            layers.append(nn.Dropout(dropout_r))
        layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
        if act != '':
            layers.append(getattr(nn, act)())
    self.mlp = nn.Sequential(*layers)
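A quick instantiation sketch with hypothetical widths (the class's forward is not part of this excerpt, so self.mlp is applied directly):

mlp = MLP([512, 256, 128], act='ReLU', dropout_r=0.1)
y = mlp.mlp(torch.randn(4, 512))  # -> (4, 128)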
Example 9: __init__
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, dims, act='ELU', dropout_r=0.0):
    super(MLP, self).__init__()
    layers = []
    for i in range(len(dims) - 1):
        in_dim = dims[i]
        out_dim = dims[i + 1]
        if dropout_r > 0:
            layers.append(nn.Dropout(dropout_r))
        layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
        if act != '':
            layers.append(getattr(nn, act)())
    self.mlp = nn.Sequential(*layers)
Example 10: EncoderImage
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def EncoderImage(data_name, img_dim, embed_size, precomp_enc_type='basic',
                 no_imgnorm=False):
    """A wrapper for image encoders. Chooses between different encoders
    that use precomputed image features.
    """
    if precomp_enc_type == 'basic':
        img_enc = EncoderImagePrecomp(
            img_dim, embed_size, no_imgnorm)
    elif precomp_enc_type == 'weight_norm':
        img_enc = EncoderImageWeightNormPrecomp(
            img_dim, embed_size, no_imgnorm)
    else:
        raise ValueError("Unknown precomp_enc_type: {}".format(precomp_enc_type))
    return img_enc
Example 11: __init__
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, x_dim, y_dim, z_dim, glimpse, dropout=[.2, .5]):
    super(BiAttention, self).__init__()
    self.glimpse = glimpse
    self.logits = weight_norm(BCNet(x_dim, y_dim, z_dim, glimpse,
                                    dropout=dropout, k=3),
                              name='h_mat', dim=None)
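Note that weight_norm is not restricted to built-in layers: passing name='h_mat' re-parameterizes the h_mat parameter of the custom BCNet module (see Example 5), with dim=None normalizing over all of that tensor's dimensions.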
Example 12: __init__
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, feat_dim, nongt_dim=20, pos_emb_dim=-1,
             num_heads=16, dropout=[0.2, 0.5]):
    """Attention module, vectorized version.
    Args:
        position_embedding: [num_rois, nongt_dim, pos_emb_dim]
            used in implicit relation
        pos_emb_dim: set to -1 for explicit relation
        nongt_dim: number of objects to consider for relations per image
        fc_dim: should be the same as num_heads
        feat_dim: dimension of roi_feat
        num_heads: number of attention heads
    Returns:
        output: [num_rois, ovr_feat_dim, output_dim]
    """
    super(GraphSelfAttentionLayer, self).__init__()
    # multi-head
    self.fc_dim = num_heads
    self.feat_dim = feat_dim
    self.dim = (feat_dim, feat_dim, feat_dim)
    self.dim_group = (int(self.dim[0] / num_heads),
                      int(self.dim[1] / num_heads),
                      int(self.dim[2] / num_heads))
    self.num_heads = num_heads
    self.pos_emb_dim = pos_emb_dim
    if self.pos_emb_dim > 0:
        self.pair_pos_fc1 = FCNet([pos_emb_dim, self.fc_dim], None, dropout[0])
    self.query = FCNet([feat_dim, self.dim[0]], None, dropout[0])
    self.nongt_dim = nongt_dim
    self.key = FCNet([feat_dim, self.dim[1]], None, dropout[0])
    # Per-head output projection as a grouped 1x1 convolution,
    # wrapped in weight norm over all dimensions (dim=None).
    self.linear_out_ = weight_norm(
        nn.Conv2d(in_channels=self.fc_dim * feat_dim,
                  out_channels=self.dim[2],
                  kernel_size=(1, 1),
                  groups=self.fc_dim), dim=None)
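An instantiation sketch with hypothetical dimensions; with feat_dim=1024 and the default 16 heads, each dim_group entry is 64:

layer = GraphSelfAttentionLayer(feat_dim=1024, nongt_dim=20, pos_emb_dim=64)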
Example 13: __init__
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, v_dim, q_dim, h_dim, h_out, act='ReLU',
             dropout=[.2, .5], k=3):
    super(BCNet, self).__init__()
    self.c = 32
    self.k = k
    self.v_dim = v_dim
    self.q_dim = q_dim
    self.h_dim = h_dim
    self.h_out = h_out
    self.v_net = FCNet([v_dim, h_dim * self.k], act=act,
                       dropout=dropout[0])
    self.q_net = FCNet([q_dim, h_dim * self.k], act=act,
                       dropout=dropout[0])
    self.dropout = nn.Dropout(dropout[1])  # attention
    if 1 < k:
        self.p_net = nn.AvgPool1d(self.k, stride=self.k)
    if h_out is None:
        pass
    elif h_out <= self.c:
        self.h_mat = nn.Parameter(
            torch.Tensor(1, h_out, 1, h_dim * self.k).normal_())
        self.h_bias = nn.Parameter(
            torch.Tensor(1, h_out, 1, 1).normal_())
    else:
        self.h_net = weight_norm(
            nn.Linear(h_dim * self.k, h_out), dim=None)
Example 14: __init__
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, dims):
    super(FCNet, self).__init__()
    layers = []
    for i in range(len(dims) - 2):
        in_dim = dims[i]
        out_dim = dims[i + 1]
        layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
        layers.append(nn.ReLU())
    layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
    layers.append(nn.ReLU())
    self.main = nn.Sequential(*layers)
Example 15: __init__
# Required import: from torch.nn.utils import weight_norm [as alias]
# Or: from torch.nn.utils.weight_norm import weight_norm [as alias]
def __init__(self, v_dim, q_dim, num_hid, dropout=0.2):
    super(FIND, self).__init__()
    self.v_proj = FCNet([v_dim, num_hid])
    self.q_proj = FCNet([q_dim, num_hid])
    self.dropout = nn.Dropout(dropout)
    self.linear = weight_norm(nn.Linear(num_hid, 1), dim=None)
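An instantiation sketch with hypothetical feature sizes; the weight-normed final Linear maps each hidden vector to a single logit:

find = FIND(v_dim=2048, q_dim=1024, num_hid=512)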