This article collects typical usage examples of the torch.nn.utils.weight_norm method in Python. If you are wondering what utils.weight_norm does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the containing module, torch.nn.utils.
The following shows 12 code examples of utils.weight_norm, sorted by popularity by default.
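Before the examples, here is a minimal, self-contained sketch of what weight_norm does: it reparameterizes a module's weight parameter into a magnitude weight_g and a direction weight_v, and recomputes weight from them before each forward pass (the layer sizes below are arbitrary):

import torch
import torch.nn as nn
from torch.nn.utils import weight_norm, remove_weight_norm

lin = weight_norm(nn.Linear(20, 10))   # reparameterize 'weight' into 'weight_g' and 'weight_v'
print([name for name, _ in lin.named_parameters()])   # ['bias', 'weight_g', 'weight_v']

x = torch.randn(4, 20)
y = lin(x)                # 'weight' is recomputed from weight_g and weight_v before the forward

remove_weight_norm(lin)   # fold the reparameterization back into a plain 'weight' parameter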
Example 1: __init__

# Required module import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import weight_norm [as alias]
def __init__(self, num_filters_in, num_filters_out, filter_size=(2, 3), stride=(1, 1),
             shift_output_down=False, norm='weight_norm'):
    super(down_shifted_conv2d, self).__init__()
    assert norm in [None, 'batch_norm', 'weight_norm']
    self.conv = nn.Conv2d(num_filters_in, num_filters_out, filter_size, stride)
    self.shift_output_down = shift_output_down
    self.norm = norm
    self.pad = nn.ZeroPad2d((int((filter_size[1] - 1) / 2),  # pad left
                             int((filter_size[1] - 1) / 2),  # pad right
                             filter_size[0] - 1,             # pad top
                             0))                             # pad bottom
    if norm == 'weight_norm':
        self.conv = wn(self.conv)
    elif norm == 'batch_norm':
        self.bn = nn.BatchNorm2d(num_filters_out)

    if shift_output_down:
        self.down_shift = lambda x: down_shift(x, pad=nn.ZeroPad2d((0, 0, 1, 0)))
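The class name down_shifted_conv2d suggests a PixelCNN++-style model, and the snippet relies on two names defined elsewhere in its repository: wn, which is simply torch.nn.utils.weight_norm imported under an alias, and a down_shift helper. A hedged sketch of what those definitions look like; the down_shift body below illustrates the usual down-shift padding trick and is not necessarily the repository's exact code:

import torch.nn as nn
from torch.nn.utils import weight_norm as wn   # the alias used in the snippet above

def down_shift(x, pad=None):
    # Illustrative assumption: drop the last row and pad one zero row at the top,
    # so every output row only depends on rows above it.
    xs = [int(s) for s in x.size()]
    x = x[:, :, :xs[2] - 1, :]
    pad = nn.ZeroPad2d((0, 0, 1, 0)) if pad is None else pad
    return pad(x)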
Example 2: __init__

# Required module import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import weight_norm [as alias]
def __init__(self, x_size, y_size, opt, prefix='decoder', dropout=None):
    super(Classifier, self).__init__()
    self.opt = opt
    if dropout is None:
        self.dropout = DropoutWrapper(opt.get('{}_dropout_p'.format(prefix), 0))
    else:
        self.dropout = dropout
    self.merge_opt = opt.get('{}_merge_opt'.format(prefix), 0)
    self.weight_norm_on = opt.get('{}_weight_norm_on'.format(prefix), False)
    if self.merge_opt == 1:
        self.proj = nn.Linear(x_size * 4, y_size)
    else:
        self.proj = nn.Linear(x_size * 2, y_size)
    if self.weight_norm_on:
        self.proj = weight_norm(self.proj)
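The constructor is configured entirely through the opt dictionary, with key names derived from prefix. A hedged usage sketch; Classifier and its DropoutWrapper dependency come from the original repository, and the sizes and key values below are illustrative:

opt = {
    'decoder_dropout_p': 0.1,        # read as '{}_dropout_p'.format(prefix)
    'decoder_merge_opt': 1,          # 1 -> nn.Linear(x_size * 4, y_size), else x_size * 2
    'decoder_weight_norm_on': True,  # wrap the projection in weight_norm
}
clf = Classifier(x_size=256, y_size=3, opt=opt)   # proj becomes weight_norm(nn.Linear(1024, 3))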
Example 3: __init__

# Required module import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import weight_norm [as alias]
def __init__(self, x1_dim, x2_dim, prefix='sim', opt={}, dropout=None):
    super(Trilinear, self).__init__()
    self.prefix = prefix
    self.x_linear = nn.Linear(x1_dim, 1, bias=False)
    self.x_dot_linear = nn.Linear(x1_dim, 1, bias=False)
    self.y_linear = nn.Linear(x2_dim, 1, bias=False)
    self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
    self.init = init_wrapper(opt.get('{}_init'.format(self.prefix), 'xavier_uniform'))
    if self.layer_norm_on:
        self.x_linear = weight_norm(self.x_linear)
        self.x_dot_linear = weight_norm(self.x_dot_linear)
        self.y_linear = weight_norm(self.y_linear)
    self.init(self.x_linear.weight)
    self.init(self.x_dot_linear.weight)
    self.init(self.y_linear.weight)
    self.dropout = dropout
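Note that the '{}_norm_on' option gates weight_norm here even though it is stored as layer_norm_on, and init_wrapper is a repository helper that presumably maps an initializer name to the matching torch.nn.init function. A hedged sketch of such a helper:

import torch.nn as nn

def init_wrapper(name='xavier_uniform'):
    # Illustrative assumption: map a config string to the corresponding torch.nn.init function.
    return {
        'xavier_uniform': nn.init.xavier_uniform_,
        'xavier_normal': nn.init.xavier_normal_,
        'kaiming_uniform': nn.init.kaiming_uniform_,
        'kaiming_normal': nn.init.kaiming_normal_,
    }[name]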
Example 4: __init__

# Required module import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import weight_norm [as alias]
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
    super(TemporalBlock, self).__init__()
    self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                       stride=stride, padding=padding, dilation=dilation))
    self.chomp1 = Chomp1d(padding)
    self.relu1 = nn.PReLU()
    self.dropout1 = nn.Dropout(dropout)

    self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                       stride=stride, padding=padding, dilation=dilation))
    self.chomp2 = Chomp1d(padding)
    self.relu2 = nn.PReLU()
    self.dropout2 = nn.Dropout(dropout)

    self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
                             self.conv2, self.chomp2, self.relu2, self.dropout2)
    self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
    self.relu = nn.PReLU()
    self.init_weights()
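The block depends on a Chomp1d module and an init_weights method that are not shown on this page. In the widely used TCN reference implementation, Chomp1d looks roughly like this (a sketch, not necessarily this repository's exact code):

import torch.nn as nn

class Chomp1d(nn.Module):
    """Trim the trailing `chomp_size` time steps added by the causal padding,
    so the output at time t only depends on inputs at time t and earlier."""
    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        return x[:, :, :-self.chomp_size].contiguous()

In the same reference code, forward applies self.net, adds the residual (x itself or self.downsample(x)), and passes the sum through the final activation, while init_weights draws the convolution weights from a small-variance normal distribution.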
Example 5: __init__

# Required module import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import weight_norm [as alias]
def __init__(self, num_inputs, depth, num_outputs):
    super(ResNet_wobn, self).__init__()
    self.in_planes = 64
    block, num_blocks = cfg(depth)
    self.conv0 = conv3x3(num_inputs, 32, 2)  # 64
    self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=2)  # 32
    self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)  # 16
    self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
    self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=1)
    self.conv4 = weightNorm(nn.Conv2d(512, 1, 1, 1, 0))
    self.relu_1 = TReLU()
    self.conv1 = weightNorm(nn.Conv2d(65 + 2, 64, 1, 1, 0))
    self.conv2 = weightNorm(nn.Conv2d(64, 64, 1, 1, 0))
    self.conv3 = weightNorm(nn.Conv2d(64, 32, 1, 1, 0))
    self.relu_2 = TReLU()
    self.relu_3 = TReLU()
    self.relu_4 = TReLU()
Example 6: __init__

# Required module import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import weight_norm [as alias]
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
    super(TemporalBlock, self).__init__()
    self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                       stride=stride, padding=padding, dilation=dilation))
    self.chomp1 = Chomp1d(padding)
    self.relu1 = nn.ReLU()
    self.dropout1 = nn.Dropout(dropout)

    self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                       stride=stride, padding=padding, dilation=dilation))
    self.chomp2 = Chomp1d(padding)
    self.relu2 = nn.ReLU()
    self.dropout2 = nn.Dropout(dropout)

    self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
                             self.conv2, self.chomp2, self.relu2, self.dropout2)
    self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
    self.relu = nn.ReLU()
    self.init_weights()
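Examples 4 and 6 are the same block, using PReLU and ReLU respectively. Such blocks are typically stacked with exponentially growing dilation so the receptive field covers the whole input sequence. A hedged sketch of that wiring, assuming the TemporalBlock defined above; the TemporalConvNet name follows the common TCN convention and is not taken from this page:

import torch.nn as nn

class TemporalConvNet(nn.Module):
    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        layers = []
        for i, out_channels in enumerate(num_channels):
            dilation = 2 ** i                                   # 1, 2, 4, 8, ...
            in_channels = num_inputs if i == 0 else num_channels[i - 1]
            layers.append(TemporalBlock(in_channels, out_channels, kernel_size,
                                        stride=1, dilation=dilation,
                                        padding=(kernel_size - 1) * dilation,
                                        dropout=dropout))
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        return self.network(x)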
Example 7: __init__

# Required module import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import weight_norm [as alias]
def __init__(self, block, layers, num_classes=1000, isL2=False):
    self.inplanes = 64
    self.isL2 = isL2
    super(ResNet_wn, self).__init__()
    self.conv1 = weight_norm(nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                                       bias=False))
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.fc = weight_norm(nn.Linear(512 * block.expansion, num_classes))

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
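In this network only the stem convolution and the final fully connected layer are wrapped in weight_norm. A quick way to see which submodules carry the reparameterization is to look for the weight_g parameter that weight_norm registers; a small sketch, assuming model is an instance of ResNet_wn built as above:

wn_modules = [name for name, m in model.named_modules()
              if any(p.endswith('weight_g') for p, _ in m.named_parameters(recurse=False))]
print(wn_modules)   # expected to contain the stem 'conv1' and the final 'fc'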
Example 8: __init__

# Required module import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import weight_norm [as alias]
def __init__(self, block, depth, widen_factor, dropout_rate, num_classes):
    super(Wide_ResNet_WN, self).__init__()
    self.in_planes = 16

    assert (depth - 4) % 6 == 0, 'Wide-resnet_v2 depth should be 6n+4'
    n = int((depth - 4) / 6)
    k = widen_factor

    print('| Wide-Resnet %dx%d' % (depth, k))
    nStages = [16, 16 * k, 32 * k, 64 * k]

    self.conv1 = weight_norm(conv3x3(3, nStages[0]))
    self.layer1 = self._wide_layer(block, nStages[1], n, dropout_rate, stride=1)
    self.layer2 = self._wide_layer(block, nStages[2], n, dropout_rate, stride=2)
    self.layer3 = self._wide_layer(block, nStages[3], n, dropout_rate, stride=2)
    self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
    self.linear = weight_norm(nn.Linear(nStages[3], num_classes))
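weight_norm only requires that the wrapped module expose a parameter with the given name (by default weight), so it can wrap whatever a factory helper such as conv3x3 returns. That helper is not shown on this page; a hedged sketch of the usual definition:

import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1):
    # Illustrative assumption: the usual 3x3 convolution helper;
    # whether bias is enabled is repository-specific.
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)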
Example 9: __init__

# Required module import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import weight_norm [as alias]
def __init__(self, x_size, y_size, opt, prefix="decoder", dropout=None):
    super(Classifier, self).__init__()
    self.opt = opt
    if dropout is None:
        self.dropout = DropoutWrapper(opt.get("{}_dropout_p".format(prefix), 0))
    else:
        self.dropout = dropout
    self.merge_opt = opt.get("{}_merge_opt".format(prefix), 0)
    self.weight_norm_on = opt.get("{}_weight_norm_on".format(prefix), False)
    if self.merge_opt == 1:
        self.proj = nn.Linear(x_size * 4, y_size)
    else:
        self.proj = nn.Linear(x_size * 2, y_size)
    if self.weight_norm_on:
        self.proj = weight_norm(self.proj)
Example 10: __init__

# Required module import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import weight_norm [as alias]
def __init__(self, x1_dim, x2_dim, prefix="sim", opt={}, dropout=None):
    super(Trilinear, self).__init__()
    self.prefix = prefix
    self.x_linear = nn.Linear(x1_dim, 1, bias=False)
    self.x_dot_linear = nn.Linear(x1_dim, 1, bias=False)
    self.y_linear = nn.Linear(x2_dim, 1, bias=False)
    self.layer_norm_on = opt.get("{}_norm_on".format(self.prefix), False)
    self.init = init_wrapper(
        opt.get("{}_init".format(self.prefix), "xavier_uniform")
    )
    if self.layer_norm_on:
        self.x_linear = weight_norm(self.x_linear)
        self.x_dot_linear = weight_norm(self.x_dot_linear)
        self.y_linear = weight_norm(self.y_linear)
    self.init(self.x_linear.weight)
    self.init(self.x_dot_linear.weight)
    self.init(self.y_linear.weight)
    self.dropout = dropout
Example 11: __init__

# Required module import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import weight_norm [as alias]
def __init__(self, words_list):
    super(Net, self).__init__()
    num_hid = 1280
    question_features = num_hid
    vision_features = config.output_features
    glimpses = 12
    objects = 10

    self.text = word_embedding.TextProcessor(
        classes=words_list,
        embedding_features=300,
        lstm_features=question_features,
        use_hidden=False,
        drop=0.0,
    )
    self.count = Counter(objects)

    self.attention = weight_norm(BiAttention(
        v_features=vision_features,
        q_features=question_features,
        mid_features=num_hid,
        glimpses=glimpses,
        drop=0.5,
    ), name='h_weight', dim=None)

    self.apply_attention = ApplyAttention(
        v_features=vision_features,
        q_features=question_features,
        mid_features=num_hid,
        glimpses=glimpses,
        num_obj=objects,
        drop=0.2,
    )

    self.classifier = Classifier(
        in_features=num_hid,
        mid_features=num_hid * 2,
        out_features=config.max_answers,
        drop=0.5,
    )
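This example exercises the two less common arguments of weight_norm: name picks which parameter of the wrapped module to reparameterize (here a parameter called h_weight inside BiAttention), and dim=None normalizes by the norm of the entire tensor rather than per output channel. A minimal sketch of both arguments on a toy module; the parameter shape is purely illustrative:

import torch
import torch.nn as nn
from torch.nn.utils import weight_norm

class Toy(nn.Module):
    def __init__(self):
        super(Toy, self).__init__()
        # Hypothetical parameter name, chosen to mirror the example above.
        self.h_weight = nn.Parameter(torch.randn(12, 1, 16, 16))

toy = weight_norm(Toy(), name='h_weight', dim=None)
print(toy.h_weight_g.shape)   # torch.Size([]) -- dim=None: one scalar norm for the whole tensor
print(toy.h_weight_v.shape)   # torch.Size([12, 1, 16, 16]) -- same shape as the original parameter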
Example 12: __init__

# Required module import: from torch.nn import utils [as alias]
# Or: from torch.nn.utils import weight_norm [as alias]
def __init__(self, in_size, out_size, activate=None, drop=0.0):
    super(FCNet, self).__init__()
    self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None)

    self.drop_value = drop
    self.drop = nn.Dropout(drop)

    # lower-case the name in case an upper-case string was passed by mistake
    self.activate = activate.lower() if (activate is not None) else None
    if self.activate == 'relu':
        self.ac_fn = nn.ReLU()
    elif self.activate == 'sigmoid':
        self.ac_fn = nn.Sigmoid()
    elif self.activate == 'tanh':
        self.ac_fn = nn.Tanh()
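Only the constructor is shown above; a hedged sketch of the matching forward pass, using nothing but the attributes defined there (dropout, the weight-normalized linear layer, then the optional activation):

def forward(self, x):
    if self.drop_value > 0:
        x = self.drop(x)
    x = self.lin(x)
    if self.activate is not None:
        x = self.ac_fn(x)
    return x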