This article collects typical usage examples of the torch.nn.functional.conv3d method in Python. If you are unsure what functional.conv3d does, how to call it, or where to find working examples, the curated code samples below may help. You can also browse further usage examples from the torch.nn.functional module.
Below are 14 code examples of functional.conv3d, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: padding3d
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def padding3d(tensor, filter, mode='constant'):
    """
    'Same'-style padding followed by conv3d.
    Input shape (BN, C, T, H, W)
    """
    it, ih, iw = tensor.shape[2:]
    ft, fh, fw = filter.shape[-3:]  # spatial extent (kT, kH, kW) of the conv3d kernel
    # total padding required per spatial dim so a stride-1 conv keeps the input size
    pt = max(0, (it - 1) + (ft - 1) + 1 - it)
    ph = max(0, (ih - 1) + (fh - 1) + 1 - ih)
    pw = max(0, (iw - 1) + (fw - 1) + 1 - iw)
    oddt = (pt % 2 != 0)
    oddh = (ph % 2 != 0)
    oddw = (pw % 2 != 0)
    if any([oddt, oddh, oddw]):
        # F.pad pads the last dimension first: (W_left, W_right, H_left, H_right, T_left, T_right)
        pad = [0, int(oddw), 0, int(oddh), 0, int(oddt)]
        tensor = F.pad(tensor, pad, mode=mode)
    padding = (pt // 2, ph // 2, pw // 2)
    tensor = F.conv3d(tensor, filter, padding=padding)
    return tensor
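A minimal usage sketch of padding3d as fixed above, assuming `import torch` and `import torch.nn.functional as F`; the averaging kernel and the tensor shapes are made up for illustration:
import torch
import torch.nn.functional as F

x = torch.randn(2, 1, 8, 32, 32)                  # (BN, C, T, H, W)
box = torch.ones(1, 1, 3, 5, 5) / (3 * 5 * 5)     # single-channel averaging conv3d kernel
y = padding3d(x, box)                             # spatial size (8, 32, 32) is preserved
print(y.shape)                                    # torch.Size([2, 1, 8, 32, 32])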
Example 2: __init__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def __init__(self, spatial_dims: int, sigma, truncated: float = 4.0):
"""
Args:
spatial_dims: number of spatial dimensions of the input image.
must have shape (Batch, channels, H[, W, ...]).
        sigma (float or sequence of floats): standard deviation(s) of the Gaussian kernel.
        truncated: how many standard deviations the kernel extends before being truncated.
"""
super().__init__()
self.spatial_dims = int(spatial_dims)
_sigma = ensure_tuple_rep(sigma, self.spatial_dims)
self.kernel = [
torch.nn.Parameter(torch.as_tensor(gaussian_1d(s, truncated), dtype=torch.float), False) for s in _sigma
]
self.padding = [same_padding(k.size()[0]) for k in self.kernel]
self.conv_n = [F.conv1d, F.conv2d, F.conv3d][spatial_dims - 1]
for idx, param in enumerate(self.kernel):
self.register_parameter(f"kernel_{idx}", param)
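What this module sets up is a separable Gaussian blur: a 1-D kernel is applied along each spatial axis in turn with conv1d/conv2d/conv3d. Below is a hedged, standalone sketch of the 3-D case; gaussian_1d, same_padding and ensure_tuple_rep are MONAI helpers, so a hand-rolled 1-D kernel is used here instead:
import torch
import torch.nn.functional as F

def _gaussian_1d(sigma, truncated=4.0):
    # hand-rolled, normalised 1-D Gaussian kernel (stand-in for MONAI's gaussian_1d)
    radius = int(truncated * sigma + 0.5)
    x = torch.arange(-radius, radius + 1, dtype=torch.float32)
    k = torch.exp(-0.5 * (x / sigma) ** 2)
    return k / k.sum()

img = torch.randn(1, 1, 16, 64, 64)          # (B, C, D, H, W)
k = _gaussian_1d(2.0)                         # 1-D kernel of length 2*radius+1
pad = len(k) // 2
out = img
# convolve one spatial axis at a time by reshaping the 1-D kernel to 5-D
for shape, padding in [((1, 1, -1, 1, 1), (pad, 0, 0)),
                       ((1, 1, 1, -1, 1), (0, pad, 0)),
                       ((1, 1, 1, 1, -1), (0, 0, pad))]:
    out = F.conv3d(out, k.view(*shape), padding=padding)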
Example 3: compute_local_sums
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def compute_local_sums(I, J, filt, stride, padding, win):
I2 = I * I
J2 = J * J
IJ = I * J
I_sum = F.conv3d(I, filt, stride=stride, padding=padding)
J_sum = F.conv3d(J, filt, stride=stride, padding=padding)
I2_sum = F.conv3d(I2, filt, stride=stride, padding=padding)
J2_sum = F.conv3d(J2, filt, stride=stride, padding=padding)
IJ_sum = F.conv3d(IJ, filt, stride=stride, padding=padding)
win_size = np.prod(win)
u_I = I_sum / win_size
u_J = J_sum / win_size
cross = IJ_sum - u_J * I_sum - u_I * J_sum + u_I * u_J * win_size
I_var = I2_sum - 2 * u_I * I_sum + u_I * u_I * win_size
J_var = J2_sum - 2 * u_J * J_sum + u_J * u_J * win_size
return I_var, J_var, cross
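compute_local_sums expects filt to be a ones-filled box kernel, so each conv3d call returns a windowed sum; cross is then the local covariance (times window size) and I_var/J_var the local variances. These are typically combined into a local normalized cross-correlation, sketched below with hypothetical window and image sizes (numpy is imported because compute_local_sums itself calls np.prod):
import numpy as np   # used inside compute_local_sums (np.prod)
import torch
import torch.nn.functional as F

win = (9, 9, 9)
filt = torch.ones(1, 1, *win)                    # box kernel spanning the local window
I = torch.rand(1, 1, 32, 32, 32)
J = torch.rand(1, 1, 32, 32, 32)

I_var, J_var, cross = compute_local_sums(I, J, filt, stride=1, padding=4, win=win)
cc = cross * cross / (I_var * J_var + 1e-5)      # windowed squared correlation coefficient
loss = -cc.mean()                                # negate so higher similarity gives lower loss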
Example 4: quaternion_conv
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def quaternion_conv(input, r_weight, i_weight, j_weight, k_weight, bias, stride,
padding, groups, dilatation):
"""
Applies a quaternion convolution to the incoming data:
"""
cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)
cat_kernels_4_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=1)
cat_kernels_4_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=1)
cat_kernels_4_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=1)
cat_kernels_4_quaternion = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)
if input.dim() == 3:
convfunc = F.conv1d
elif input.dim() == 4:
convfunc = F.conv2d
elif input.dim() == 5:
convfunc = F.conv3d
else:
raise Exception("The convolutional input is either 3, 4 or 5 dimensions."
" input.dim = " + str(input.dim()))
return convfunc(input, cat_kernels_4_quaternion, bias, stride, padding, dilatation, groups)
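A usage sketch with hypothetical shapes: each of the four weight components is expected to have shape (out_channels // 4, in_channels // 4, kT, kH, kW), so the concatenated kernel becomes an ordinary (out_channels, in_channels, kT, kH, kW) conv3d weight:
import torch

in_channels, out_channels, k = 8, 16, 3
x = torch.randn(2, in_channels, 10, 10, 10)             # 5-D input selects the F.conv3d branch
w_shape = (out_channels // 4, in_channels // 4, k, k, k)
r_w, i_w, j_w, k_w = (torch.randn(w_shape) for _ in range(4))
bias = torch.zeros(out_channels)

y = quaternion_conv(x, r_w, i_w, j_w, k_w, bias, stride=1,
                    padding=1, groups=1, dilatation=1)
print(y.shape)                                          # torch.Size([2, 16, 10, 10, 10])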
Example 5: update_params
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def update_params(self, VdivWZH, update_W, update_H, update_Z, W_alpha, H_alpha, Z_alpha):
# type: (Tensor, bool, bool, bool, float, float, float) -> None
VdivWZH = VdivWZH.view(self.channel, 1, self.N, self.K, self.M)
if update_W or update_Z:
new_W = F.conv3d(VdivWZH, self.H.mul(self.Z[:, None, None, None])[:, None], padding=self.pad_size) * self.W
if update_H:
new_H = F.conv3d(VdivWZH.transpose(0, 1), torch.transpose(self.W * self.Z[:, None, None, None], 0, 1))[
0] * self.H
new_H = normalize(self.fix_neg(new_H + H_alpha - 1), (1, 2, 3))
self.H[:] = new_H
if update_W:
self.W[:] = normalize(self.fix_neg(new_W + W_alpha - 1), (0, 2, 3, 4))
if update_Z:
Z = normalize(self.fix_neg(new_W.sum((0, 2, 3, 4)) + Z_alpha - 1))
self.Z[:] = Z
Example 6: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def forward(self, x):
if self.deterministic:
assert self.training == False, "Flag deterministic is True. This should not be used in training."
return F.conv3d(x, self.post_weight_mu, self.bias_mu, self.stride, self.padding, self.dilation, self.groups)
batch_size = x.size()[0]
# apply local reparametrisation trick see [1] Eq. (6)
# to the parametrisation given in [3] Eq. (6)
mu_activations = F.conv3d(x, self.weight_mu, self.bias_mu, self.stride,
self.padding, self.dilation, self.groups)
var_weights = self.weight_logvar.exp()
var_activations = F.conv3d(x.pow(2), var_weights, self.bias_logvar.exp(), self.stride,
self.padding, self.dilation, self.groups)
# compute z
# note that we reparametrise according to [2] Eq. (11) (not [1])
z = reparametrize(self.z_mu.repeat(batch_size, 1, 1, 1, 1), self.z_logvar.repeat(batch_size, 1, 1, 1, 1),
sampling=self.training, cuda=self.cuda)
z = z[:, :, None, None, None]
return reparametrize(mu_activations * z, (var_activations * z.pow(2)).log(), sampling=self.training,
cuda=self.cuda)
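The essential step above is the local reparameterisation trick: the mean and variance of each pre-activation are obtained with two conv3d calls, and noise is then sampled per activation rather than per weight. A hedged sketch of just that step, with made-up posterior parameters and without the multiplicative z term:
import torch
import torch.nn.functional as F

x = torch.randn(4, 3, 8, 16, 16)                     # (B, C, T, H, W)
weight_mu = torch.randn(6, 3, 3, 3, 3)               # hypothetical posterior means
weight_logvar = torch.full((6, 3, 3, 3, 3), -6.0)    # hypothetical posterior log-variances

mu_act = F.conv3d(x, weight_mu, padding=1)                    # mean of each pre-activation
var_act = F.conv3d(x.pow(2), weight_logvar.exp(), padding=1)  # variance of each pre-activation
eps = torch.randn_like(mu_act)
out = mu_act + var_act.clamp_min(1e-8).sqrt() * eps           # one Gaussian sample per activation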
Example 7: test_conv3d
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def test_conv3d(self):
# Data and weight tensors
tensor3d_in_conv = torch.randn(32, 3, 16, 224, 224, device='cuda', dtype=self.dtype)
tensor3d_in_conv_grouped = torch.randn(32, 6, 16, 224, 224, device='cuda', dtype=self.dtype)
conv3d_filter = torch.randn(16, 3, 3, 3, 3, device='cuda', dtype=self.dtype)
conv3d_bias = torch.ones(16, device='cuda', dtype=self.dtype)
# Vanilla conv3d
conv3d_out_vanilla = F.conv3d(tensor3d_in_conv, conv3d_filter)
# conv3d - stride > 1
conv3d_out_strided = F.conv3d(tensor3d_in_conv, conv3d_filter, stride=2)
# conv3d - dilation > 1
conv3d_out_dilated = F.conv3d(tensor3d_in_conv, conv3d_filter, dilation=2)
# conv3d - groups > 1
conv3d_out_grouped = F.conv3d(tensor3d_in_conv_grouped, conv3d_filter, groups=2)
# conv3d - padding with zeros
conv3d_out_padding_zeros = F.conv3d(tensor3d_in_conv, conv3d_filter, padding=6)
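A useful companion to this test is the convolution output-size formula, out = floor((in + 2*padding - dilation*(kernel - 1) - 1) / stride) + 1, which each of the calls above obeys; a small CPU sketch with deliberately smaller tensors:
import torch
import torch.nn.functional as F

def conv_out(i, k, stride=1, padding=0, dilation=1):
    # standard convolution arithmetic for one spatial dimension
    return (i + 2 * padding - dilation * (k - 1) - 1) // stride + 1

x = torch.randn(2, 3, 8, 32, 32)
w = torch.randn(16, 3, 3, 3, 3)

assert F.conv3d(x, w).shape[2:] == (conv_out(8, 3), conv_out(32, 3), conv_out(32, 3))
assert F.conv3d(x, w, stride=2).shape[2:] == tuple(conv_out(i, 3, stride=2) for i in (8, 32, 32))
assert F.conv3d(x, w, dilation=2).shape[2:] == tuple(conv_out(i, 3, dilation=2) for i in (8, 32, 32))
assert F.conv3d(x, w, padding=6).shape[2:] == tuple(conv_out(i, 3, padding=6) for i in (8, 32, 32))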
Example 8: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def forward(self, input):
x = torch.stack([
F.conv3d(input,
self.weight.view(*self.channel_shape, *kernel_size),
self.bias, self.stride, padding, self.dilation, self.groups)
for kernel_size, padding in zip(self.kernel_sizes, self.paddings)], -1)
x = self.linear(x)[..., 0]
return x
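This forward pass runs the same input through one conv3d per (kernel_size, padding) pair, stacks the results along a trailing axis, and lets a linear layer mix them before squeezing that axis. A self-contained sketch of the same idea with hypothetical per-size weights (the real module reshapes a single shared weight, which is omitted here):
import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 8, 16, 16)
kernel_sizes = [(1, 1, 1), (3, 3, 3), (5, 5, 5)]
paddings = [(0, 0, 0), (1, 1, 1), (2, 2, 2)]         # keeps the spatial size for stride 1
weights = [torch.randn(8, 4, *ks) for ks in kernel_sizes]
mix = torch.nn.Linear(len(kernel_sizes), 1)          # learns how to blend the kernel sizes

stacked = torch.stack([F.conv3d(x, w, padding=p)
                       for w, p in zip(weights, paddings)], dim=-1)   # (..., n_kernels)
out = mix(stacked)[..., 0]                                            # back to (B, 8, T, H, W)
print(out.shape)                                                      # torch.Size([2, 8, 8, 16, 16])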
Example 9: __call__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def __call__(self, img):
"""
Args:
img: torch tensor data to extract the contour, with shape: [batch_size, channels, height, width[, depth]]
Returns:
A torch tensor with the same shape as img, note:
1. it's the binary classification result of whether a pixel is edge or not.
2. in order to keep the original shape of mask image, we use padding as default.
3. the edge detection is just approximate because it defects inherent to Laplace kernel,
ideally the edge should be thin enough, but now it has a thickness.
"""
channels = img.shape[1]
if img.ndim == 4:
kernel = torch.tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=torch.float32, device=img.device)
kernel = kernel.repeat(channels, 1, 1, 1)
contour_img = F.conv2d(img, kernel, bias=None, stride=1, padding=1, dilation=1, groups=channels)
elif img.ndim == 5:
kernel = -1 * torch.ones(3, 3, 3, dtype=torch.float32, device=img.device)
kernel[1, 1, 1] = 26
kernel = kernel.repeat(channels, 1, 1, 1, 1)
contour_img = F.conv3d(img, kernel, bias=None, stride=1, padding=1, dilation=1, groups=channels)
else:
raise RuntimeError("the dimensions of img should be 4 or 5.")
torch.clamp_(contour_img, min=0.0, max=1.0)
return contour_img
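A usage sketch on a synthetic single-channel 3-D mask; the 3x3x3 kernel with centre 26 and -1 elsewhere sums to zero, so homogeneous regions map to 0 and only label boundaries survive the clamp:
import torch
import torch.nn.functional as F

mask = torch.zeros(1, 1, 16, 16, 16)
mask[:, :, 4:12, 4:12, 4:12] = 1.0                 # a solid cube as the foreground label

kernel = -1 * torch.ones(3, 3, 3)
kernel[1, 1, 1] = 26                               # centre 26, the 26 neighbours -1
kernel = kernel.view(1, 1, 3, 3, 3)

edges = F.conv3d(mask, kernel, padding=1).clamp_(0.0, 1.0)
print(int(edges.sum()))                            # non-zero only on the cube's surface shell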
Example 10: reconstruct
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def reconstruct(self, W, Z, H):
out = F.conv3d(H[None, ...], W.mul(Z[:, None, None, None]).flip((2, 3, 4)), padding=self.pad_size)[0]
if self.channel == 1:
return out[0]
return out
Example 11: reconstruct
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def reconstruct(self, H, W):
out = F.conv3d(H[None, ...], W.flip((2, 3, 4)), padding=self.pad_size)[0]
if self.channel == 1:
return out[0]
return out
Example 12: get_W_positive
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def get_W_positive(self, WH, beta, H_sum):
H = self.H
if beta == 1:
if H_sum is None:
H_sum = H.sum((1, 2, 3))
denominator = H_sum[None, :, None, None, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WH = WH.view(self.channel, 1, self.N, self.K, self.M)
WHHt = F.conv3d(WH, H[:, None])
denominator = WHHt
return denominator, H_sum
Example 13: get_H_positive
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def get_H_positive(self, WH, beta, W_sum):
W = self.W
if beta == 1:
if W_sum is None:
W_sum = W.sum((0, 2, 3, 4))
denominator = W_sum[:, None, None, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WH = WH.view(1, self.channel, self.N, self.K, self.M)
WtWH = F.conv3d(WH, W.transpose(0, 1))[0]
denominator = WtWH
return denominator, W_sum
Example 14: __init__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv3d [as alias]
def __init__(self, fixed_image, moving_image, fixed_mask=None, moving_mask=None, sigma=[3],
             kernel_type="box", size_average=True, reduce=True):
super(LCC, self).__init__(fixed_image, moving_image, fixed_mask, moving_mask, size_average, reduce)
self._name = "lcc"
self.warped_moving_image = th.empty_like(self._moving_image.image, dtype=self._dtype, device=self._device)
self._kernel = None
dim = len(self._moving_image.size)
sigma = np.array(sigma)
if sigma.size != dim:
sigma_app = sigma[-1]
while sigma.size != dim:
sigma = np.append(sigma, sigma_app)
if kernel_type == "box":
kernel_size = sigma*2 + 1
self._kernel = th.ones(*kernel_size.tolist(), dtype=self._dtype, device=self._device) \
/ float(np.product(kernel_size)**2)
elif kernel_type == "gaussian":
self._kernel = utils.gaussian_kernel(sigma, dim, asTensor=True, dtype=self._dtype, device=self._device)
self._kernel.unsqueeze_(0).unsqueeze_(0)
if dim == 2:
self._lcc_loss = self._lcc_loss_2d # 2d lcc
self._mean_fixed_image = F.conv2d(self._fixed_image.image, self._kernel)
self._variance_fixed_image = F.conv2d(self._fixed_image.image.pow(2), self._kernel) \
- (self._mean_fixed_image.pow(2))
elif dim == 3:
self._lcc_loss = self._lcc_loss_3d # 3d lcc
self._mean_fixed_image = F.conv3d(self._fixed_image.image, self._kernel)
self._variance_fixed_image = F.conv3d(self._fixed_image.image.pow(2), self._kernel) \
- (self._mean_fixed_image.pow(2))
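The fixed-image terms above are just local means and variances obtained with a smoothing (box or Gaussian) kernel. A hedged, standalone sketch of the 3-D case with a hypothetical 7x7x7 box window and none of the class bookkeeping or normalisation quirks:
import torch
import torch.nn.functional as F

image = torch.rand(1, 1, 32, 32, 32)
win = (7, 7, 7)
kernel = torch.ones(1, 1, *win) / float(torch.tensor(win).prod())   # averaging box kernel

mean = F.conv3d(image, kernel)                        # local mean (valid convolution, no padding)
var = F.conv3d(image.pow(2), kernel) - mean.pow(2)    # E[x^2] - E[x]^2 within each window
print(mean.shape, var.shape)                          # torch.Size([1, 1, 26, 26, 26]) each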