本文整理匯總了Python中torch.nn.Unfold方法的典型用法代碼示例。如果您正苦於以下問題:Python nn.Unfold方法的具體用法?Python nn.Unfold怎麽用?Python nn.Unfold使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類torch.nn
的用法示例。
在下文中一共展示了nn.Unfold方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: forward
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import Unfold [as 別名]
def forward(self, x, noise_map):
    """Denoise frame ``x`` guided by ``noise_map``; returns ``x`` minus the predicted noise."""
    batch, _, height, width = x.size()
    # Space-to-depth via nn.Unfold: (N, C, H, W) -> (N, 4*C, H/2, W/2)
    features = self.downscale(x).reshape(
        batch, self.down_input_channels, height // 2, width // 2)
    # Prepend the 2x-subsampled noise map to the downscaled frame
    features = torch.cat((noise_map[:, :, ::2, ::2], features), 1)
    # Conv + ReLU + BN trunk
    features = self.conv_relu_bn(features)
    # PixelShuffle restores the original spatial resolution
    features = self.pixelshuffle(features)
    # Residual learning: the network predicts noise, so subtract it
    return x - features
示例2: __init__
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import Unfold [as 別名]
def __init__(self, ch_in):
    """Build the flow-refinement net that predicts per-pixel 3x3 blending kernels."""
    super(RefineFlow, self).__init__()
    self.kernel_size = 3
    self.pad_size = 1
    self.pad_ftn = nn.ReplicationPad2d(self.pad_size)
    # Channel schedule of the kernel-prediction trunk; last stage emits
    # one logit per tap of the k x k kernel.
    widths = [
        (ch_in, 128), (128, 128),
        (128, 64), (64, 64),
        (64, 32), (32, 32),
        (32, self.kernel_size * self.kernel_size),
    ]
    self.convs = nn.Sequential(
        *[conv(c_in, c_out, 3, 1, 1) for c_in, c_out in widths])
    # Normalize kernel logits across the channel (tap) dimension
    self.softmax_feat = nn.Softmax(dim=1)
    self.unfold_flow = nn.Unfold(kernel_size=(self.kernel_size, self.kernel_size))
    self.unfold_kernel = nn.Unfold(kernel_size=(1, 1))
示例3: unfold_func
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import Unfold [as 別名]
def unfold_func(module):
    """Return an ``Unfold`` op mirroring *module*'s convolution geometry."""
    geometry = dict(
        kernel_size=module.kernel_size,
        dilation=module.dilation,
        padding=module.padding,
        stride=module.stride,
    )
    return Unfold(**geometry)
示例4: __init__
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import Unfold [as 別名]
def __init__(self):
    """Assemble the spatial denoiser: Unfold downscale -> CNN trunk -> PixelShuffle."""
    super(DVDnet_spatial, self).__init__()
    self.down_kernel_size = (2, 2)
    self.down_stride = 2
    self.kernel_size = 3
    self.padding = 1
    # RGB image (plus a companion frame): 6 input channels
    self.num_input_channels = 6
    self.middle_features = 96
    self.num_conv_layers = 12
    self.down_input_channels = 12
    self.downsampled_channels = 15
    self.output_features = 12
    # Space-to-depth downscaling done with nn.Unfold
    self.downscale = nn.Unfold(kernel_size=self.down_kernel_size, stride=self.down_stride)
    # First stage: plain Conv + ReLU (no batch norm)
    layers = [
        nn.Conv2d(self.downsampled_channels, self.middle_features,
                  kernel_size=self.kernel_size, padding=self.padding, bias=False),
        nn.ReLU(inplace=True),
    ]
    # Middle stages: Conv + BN + ReLU
    for _ in range(self.num_conv_layers - 2):
        layers += [
            nn.Conv2d(self.middle_features, self.middle_features,
                      kernel_size=self.kernel_size, padding=self.padding, bias=False),
            nn.BatchNorm2d(self.middle_features),
            nn.ReLU(inplace=True),
        ]
    # Last stage: linear Conv producing the shuffle-ready feature map
    layers.append(
        nn.Conv2d(self.middle_features, self.output_features,
                  kernel_size=self.kernel_size, padding=self.padding, bias=False))
    self.conv_relu_bn = nn.Sequential(*layers)
    self.pixelshuffle = nn.PixelShuffle(2)
    # Init weights
    self.reset_params()
示例5: transform
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import Unfold [as 別名]
def transform(self, data, **kwargs):
    """Slice *data* into windowed, hop-spaced frames and run ``apply_filter``.

    *data* is (batch, channels, samples), optionally with a trailing source
    axis. ``self.original_length`` / ``self.output_length`` are recorded as a
    side effect (presumably consumed by the inverse transform — verify caller).
    """
    ndim = data.ndim
    if ndim > 3:
        # Fold the trailing source axis into the batch axis; restored below.
        num_sources = data.shape[-1]
        data = data.permute(0, -1, 1, 2).reshape(-1, *data.shape[1:3])
    self.original_length = data.shape[-1]
    # Extra right-side padding so the hop grid tiles the signal exactly.
    pad_extra = (
        (-(data.shape[-1] - self.filter_length) % self.hop_length)
        % self.filter_length
    )
    half = self.filter_length // 2
    data = nn.functional.pad(data, (half, half + pad_extra))
    self.output_length = data.shape[-1]
    num_batch, num_audio_channels, num_samples = data.shape
    # Treat the waveform as a 1 x T image and slide a 1 x filter_length window.
    framer = nn.Unfold(
        kernel_size=(1, self.filter_length),
        stride=(1, self.hop_length),
        dilation=self.dilation,
        padding=(0, 0),
    )
    frames = framer(
        data.reshape(num_batch * num_audio_channels, 1, 1, num_samples))
    frames = frames.view(num_batch, num_audio_channels, self.filter_length, -1)
    # -> (batch, num_frames, filter_length, channels), then window each frame
    data = frames.permute(0, 3, 2, 1) * self.window
    data = self.apply_filter(data, **kwargs)
    if ndim > 3:
        # Move the sources back out of the batch axis.
        data = data.reshape(-1, num_sources, *data.shape[1:])
        data = data.permute(0, 2, 3, 4, 1)
    return data
示例6: forward
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import Unfold [as 別名]
def forward(self, data):
    """Chunk the input, run it through the layer stack, and overlap-add back.

    *data* is (batch, sequence_length, n_features, n_channels); the output
    has the same shape.
    """
    batch_size, sequence_length, n_features, n_channels = data.shape
    # Overlap/add pair operating on 1 x time "images".
    fold = nn.Fold(
        (1, sequence_length),
        (1, self.chunk_size),
        stride=(1, self.hop_size),
    )
    unfold = nn.Unfold(
        (1, self.chunk_size), stride=(1, self.hop_size)
    )
    # --- chunk extraction ---
    x = data.transpose(1, 2)
    x = self.bottleneck_norm(x)
    x = x.transpose(1, -1)
    x = self.bottleneck(x)
    x = x.permute(0, 2, 3, 1).reshape(batch_size, sequence_length, 1, -1)
    x = x.transpose(3, 1)
    x = unfold(x)
    # unfold yields (batch_size, bottleneck_size * chunk_size, n_chunks)
    n_chunks = x.shape[-1]
    x = x.reshape(batch_size, -1, self.chunk_size, n_chunks).transpose(3, 1)
    # x is now (batch_size, n_chunks, chunk_size, bottleneck_size)
    # --- layer stack with optional skip connection ---
    output = x
    for layer in self.layers:
        x = layer(output)
        if self.skip_connection:
            output += x  # accumulate in place, as in the original
        else:
            output = x
    x = self.prelu(output)
    # still (batch_size, n_chunks, chunk_size, bottleneck_size)
    x = self.inv_bottleneck(x)
    # now (batch_size, n_chunks, chunk_size, in_features)
    x = x.transpose(1, -1)
    x = self.output_norm(x)
    x = x.transpose(1, -1)
    # --- resynthesize with overlap/add ---
    x = x.transpose(1, 3).reshape(-1, n_features * self.chunk_size, n_chunks)
    x = fold(x)
    x = x.transpose(3, 1).reshape(
        batch_size, sequence_length, n_features, n_channels)
    # Renormalize for the overlap introduced by fold
    return x / (self.chunk_size / self.hop_size)