本文整理汇总了Python中torch.autograd.Variable.numel方法的典型用法代码示例。如果您正苦于以下问题：Python Variable.numel方法的具体用法？Python Variable.numel怎么用？Python Variable.numel使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.autograd.Variable的用法示例。
在下文中一共展示了Variable.numel方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: forward
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import numel [as 别名]
def forward(self, input, pad_frame_size, patch_shape):
    """Compress a 5-D video tensor with the layer weight and cut it into patches.

    Args:
        input: 5-D tensor; indexed as (N, C, T, H, W) — assumed from the
            size(0..4) calls, TODO confirm against caller.
        pad_frame_size: nested sequence; pad_frame_size[0] holds the padded
            (height, width) of each frame.
        patch_shape: nested sequence; patch_shape[0] holds the patch-grid
            counts (n_patch_h, n_patch_w).

    Returns:
        Tuple of (normalized flattened patches, compressed frame cropped back
        to the original spatial extent).

    Side effects: stores input_padded_size, input_padded_numel and
    patches_size on self for later use (e.g. a backward/reconstruction pass).
    NOTE(review): hard-codes .cuda(), so a CUDA device is required.
    """
    rows = patch_shape[0][0]
    cols = patch_shape[0][1]
    pad_h = pad_frame_size[0][0]
    pad_w = pad_frame_size[0][1]

    # Zero-pad the input up to the requested frame size, copying the real
    # content into the top-left corner.
    padded = Variable(torch.zeros(
        (input.size(0), input.size(1), input.size(2), pad_h, pad_w))).cuda()
    padded[:, :, :, :input.size(3), :input.size(4)] = input

    # Remember padded geometry for consumers of this module.
    self.input_padded_size = padded.size()
    self.input_padded_numel = padded.numel()

    # Tile the sensing weight over batch/channel and the patch grid, then
    # reduce over the temporal axis — this yields the compressed frame.
    tiled_weight = self.weight.repeat(
        input.size(0), input.size(1), 1, rows + 1, cols + 1)
    compressed = (padded * tiled_weight).sum(2)

    if self.noise is not None:
        compressed = self.add_noise(compressed, input.size(), self.noise)

    # Slice the compressed frame into (spatial_size x spatial_size) patches.
    patches = compressed.unfold(2, self.spatial_size, self.step)
    patches = patches.unfold(3, self.spatial_size, self.step)
    self.patches_size = (patches.size(1), patches.size(2), patches.size(3))

    # Flatten each patch (transposing its two spatial axes first) into a
    # vector of length spatial_size**2.
    patches = patches.permute(0, 1, 2, 3, 5, 4).contiguous()
    patches = patches.view((patches.size(0), -1, self.spatial_size ** 2))

    if self.mean is not None:
        # Standardize every patch with the stored per-element statistics.
        mean_t = Variable(torch.from_numpy(self.mean)).float().cuda()
        std_t = Variable(torch.from_numpy(self.std)).float().cuda()
        patches = patches - mean_t.repeat(patches.size(0), patches.size(1), 1)
        patches = patches / std_t.repeat(patches.size(0), patches.size(1), 1)

    # Crop the compressed frame back to the un-padded spatial extent.
    return patches, compressed[:, :, 0:input.size(3), 0:input.size(4)]
示例2: test_flatten
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import numel [as 别名]
def test_flatten(self):
    """Check ONNX export of flattening: Reshape with a 2-D output."""
    x = Variable(torch.randn(1, 2, 3, 4), requires_grad=True)

    def flatten(t):
        # Collapse all trailing dimensions into one: (N, numel/N).
        return t.view(t.size()[0], t.numel() // t.size()[0])

    self.assertONNX(flatten, x)