This article collects typical usage examples of Python's torch.autograd.gradcheck method. If you have been wondering what autograd.gradcheck does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples from its parent module, torch.autograd.
The following 15 code examples of autograd.gradcheck are shown, sorted by popularity by default.
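Before diving in, here is a minimal, self-contained sketch of the pattern all of these examples follow (the function and tensors below are illustrative stand-ins, not taken from any example): build double-precision inputs with requires_grad=True, then let gradcheck compare the analytical backward against finite differences.

import torch
from torch.autograd import gradcheck

# Any differentiable callable can be checked; a builtin op serves as a stand-in.
def f(x, w):
    return torch.nn.functional.linear(x, w).sigmoid()

# float64 inputs are strongly recommended: the finite-difference estimate
# that gradcheck computes is too noisy in float32.
x = torch.randn(3, 4, dtype=torch.float64, requires_grad=True)
w = torch.randn(2, 4, dtype=torch.float64, requires_grad=True)
print(gradcheck(f, (x, w), eps=1e-6, atol=1e-4))  # prints True on success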
Example 1: test_roi_align_rotated_autograd
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def test_roi_align_rotated_autograd(self):
    # x1 = np.random.rand(4, 1, 12, 12).astype('float64')
    # x2_t = np.array([[0, 6.2, 6.3, 4.2, 4.4, np.pi / 4.], [2, 4.1, 4.2, 6.2, 6.0, -np.pi],
    #                  [1, 6.0, 6.3, 4.0, 4.1, 3 * np.pi / 4.]], dtype='float64')
    # polys2_t = RotBox2Polys(x2_t[:, 1:])
    # rois are presumably (batch index, x_center, y_center, width, height, angle)
    x2 = np.array([[0, 6.2, 6.0, 4.0, 4.0, np.pi / 2.],
                   [0, 6.3, 6.0, 4.0, 4.0, -np.pi / 2.],
                   [0, 6.0, 6.0, 4.0, 4.0, -np.pi],
                   [0, 6.0, 6.0, 4.3, 4.0, np.pi],
                   [1, 6.0, 6.0, 4.0, 4.0, np.pi / 3.],
                   [2, 4.1, 4.2, 6.2, 6.0, -np.pi],
                   [1, 6.0, 6.3, 4.0, 4.1, 3 * np.pi / 4.],
                   [0, 6.2, 6.3, 4.2, 4.4, np.pi / 4.]], dtype='float64')
    x1 = torch.rand(4, 1, 12, 12, requires_grad=True, device='cuda:0')
    x2 = torch.from_numpy(x2).float().cuda()
    inputs = (x1, x2)
    print('Gradcheck for roi align...')
    spatial_scale = 1
    test = gradcheck(RoIAlignRotated(4, spatial_scale), inputs, atol=1e-3, eps=1e-3)
    print(test)
    # same check with the third argument (presumably the sampling-point count) set to 2
    test = gradcheck(RoIAlignRotated(4, spatial_scale, 2), inputs, atol=1e-3, eps=1e-3)
    print(test)
Example 2: test_conv2d_depthwise
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def test_conv2d_depthwise(self):
    n = 6
    x = Variable(torch.randn(1, n, 5, 5).double().cuda(), requires_grad=True)
    w = Variable(torch.randn(n, 1, 3, 3).double().cuda(), requires_grad=True)
    y_fast = P.conv2d_depthwise(x, w, padding=1)
    y_ref = F.conv2d(x, w, padding=1, groups=n)
    go = torch.randn(y_fast.size()).double().cuda()
    self.assertLess((y_fast - y_ref).data.abs().max(), 1e-9)
    y_fast.backward(go)
    gx_fast = x.grad.data.clone()
    gw_fast = w.grad.data.clone()
    x.grad.data.zero_()
    w.grad.data.zero_()
    y_ref.backward(go)
    gx_ref = x.grad.data.clone()
    gw_ref = w.grad.data.clone()
    # the gradients of the fast and reference implementations should match
    self.assertLess((gx_fast - gx_ref).abs().max(), 1e-9)
    self.assertLess((gw_fast - gw_ref).abs().max(), 1e-9)
    self.assertTrue(gradcheck(partial(P.conv2d_depthwise, padding=1), (x, w)))
Example 3: _test_grad_softmax
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def _test_grad_softmax(self, dtype=torch.float):
    if not torch.cuda.is_available():
        return
    from mmcv.ops import SoftmaxFocalLoss
    alpha = 0.25
    gamma = 2.0
    for case in inputs:
        np_x = np.array(case[0])
        np_y = np.array(case[1])
        x = torch.from_numpy(np_x).cuda().type(dtype)
        x.requires_grad_()
        y = torch.from_numpy(np_y).cuda().long()
        floss = SoftmaxFocalLoss(gamma, alpha)
        if _USING_PARROTS:
            # gradcheck(floss, (x, y),
            #           no_grads=[y])
            pass
        else:
            gradcheck(floss, (x, y), eps=1e-2, atol=1e-2)
Example 4: _test_grad_sigmoid
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def _test_grad_sigmoid(self, dtype=torch.float):
    if not torch.cuda.is_available():
        return
    from mmcv.ops import SigmoidFocalLoss
    alpha = 0.25
    gamma = 2.0
    for case in inputs:
        np_x = np.array(case[0])
        np_y = np.array(case[1])
        x = torch.from_numpy(np_x).cuda().type(dtype)
        x.requires_grad_()
        y = torch.from_numpy(np_y).cuda().long()
        floss = SigmoidFocalLoss(gamma, alpha)
        if _USING_PARROTS:
            # gradcheck(floss, (x, y),
            #           no_grads=[y])
            pass
        else:
            gradcheck(floss, (x, y), eps=1e-2, atol=1e-2)
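Note that the no_grads=[y] keyword in the commented-out calls of Examples 3 and 4 appears to be specific to the Parrots flavor of gradcheck. Under stock PyTorch, the integer-typed target y cannot require gradients, so gradcheck automatically treats it as a constant and only differentiates with respect to x.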
Example 5: test_roipool_gradcheck
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def test_roipool_gradcheck(self):
    if not torch.cuda.is_available():
        return
    from mmcv.ops import RoIPool
    pool_h = 2
    pool_w = 2
    spatial_scale = 1.0
    for case in inputs:
        np_input = np.array(case[0])
        np_rois = np.array(case[1])
        x = torch.tensor(np_input, device='cuda', requires_grad=True)
        rois = torch.tensor(np_rois, device='cuda')
        froipool = RoIPool((pool_h, pool_w), spatial_scale)
        if _USING_PARROTS:
            # gradcheck(froipool, (x, rois), no_grads=[rois])
            pass
        else:
            gradcheck(froipool, (x, rois), eps=1e-2, atol=1e-2)
Example 6: test_roi_align_rotated_gradcheck_cpu
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def test_roi_align_rotated_gradcheck_cpu(self):
    dtype = torch.float64
    device = torch.device("cpu")
    roi_align_rotated_op = ROIAlignRotated(
        output_size=(5, 5), spatial_scale=0.5, sampling_ratio=1
    ).to(dtype=dtype, device=device)
    x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True)
    # roi format is (batch index, x_center, y_center, width, height, angle)
    rois = torch.tensor(
        [[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]],
        dtype=dtype,
        device=device,
    )

    def func(input):
        return roi_align_rotated_op(input, rois)

    assert gradcheck(func, (x,)), "gradcheck failed for RoIAlignRotated CPU"
    # the transposed view additionally exercises a non-contiguous input layout
    assert gradcheck(func, (x.transpose(2, 3),)), \
        "gradcheck failed for RoIAlignRotated CPU (transposed input)"
Example 7: check_gradient_dconv_double
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def check_gradient_dconv_double():
    input = torch.randn(N, inC, inH, inW, dtype=torch.float64).cuda()
    input.requires_grad = True
    offset = torch.randn(N, deformable_groups * 2 * kW * kH, inH, inW,
                         dtype=torch.float64).cuda()
    # offset.data.zero_()
    # offset.data -= 0.00001
    offset.requires_grad = True
    mask = torch.rand(N, deformable_groups * 1 * kW * kH, inH, inW,
                      dtype=torch.float64).cuda()
    # mask.data.zero_()
    mask.requires_grad = True
    mask = torch.sigmoid(mask)
    weight = torch.randn(outC, inC, kH, kW, dtype=torch.float64).cuda()
    weight.requires_grad = True
    bias = torch.rand(outC, dtype=torch.float64).cuda()
    bias.requires_grad = True
    func = DCNv2Function(stride=1, padding=1, dilation=1,
                         deformable_groups=deformable_groups)
    print(gradcheck(func, (input, offset, mask, weight, bias),
                    eps=1e-6, atol=1e-5, rtol=1e-3))
Example 8: check_gradient_dconv
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def check_gradient_dconv():
    input = torch.randn(N, inC, inH, inW).cuda()
    input.requires_grad = True
    offset = torch.randn(N, deformable_groups * 2 * kW * kH, inH, inW).cuda()
    # offset.data.zero_()
    # offset.data -= 0.5
    offset.requires_grad = True
    mask = torch.rand(N, deformable_groups * 1 * kW * kH, inH, inW).cuda()
    # mask.data.zero_()
    mask.requires_grad = True
    mask = torch.sigmoid(mask)
    weight = torch.randn(outC, inC, kH, kW).cuda()
    weight.requires_grad = True
    bias = torch.rand(outC).cuda()
    bias.requires_grad = True
    func = DCNv2Function(stride=1, padding=1, dilation=1,
                         deformable_groups=deformable_groups)
    print(gradcheck(func, (input, offset, mask, weight, bias),
                    eps=1e-3, atol=1e-3, rtol=1e-2))
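Examples 7 and 8 check the same DCNv2 operator at two precisions: the float64 version can afford a small finite-difference step and tight tolerances (eps=1e-6, atol=1e-5), while the float32 version needs a larger step and looser tolerances (eps=1e-3, rtol=1e-2) because the numerical estimate is dominated by single-precision rounding error. When a gradcheck fails in float32, rerunning it in float64 is usually the first diagnostic step.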
Example 9: check_gradient_dpooling
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def check_gradient_dpooling():
    input = torch.randn(2, 3, 5, 5).cuda() * 0.01
    N = 4
    batch_inds = torch.randint(2, (N, 1)).cuda().float()
    x = torch.rand((N, 1)).cuda().float() * 15
    y = torch.rand((N, 1)).cuda().float() * 15
    w = torch.rand((N, 1)).cuda().float() * 10
    h = torch.rand((N, 1)).cuda().float() * 10
    # rois are (batch index, x1, y1, x2, y2)
    rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
    offset = torch.randn(N, 2, 3, 3).cuda()
    dpooling = DCNv2Pooling(spatial_scale=1.0 / 4,
                            pooled_size=3,
                            output_dim=3,
                            no_trans=False,
                            group_size=1,
                            trans_std=0.0).cuda()
    input.requires_grad = True
    offset.requires_grad = True
    print('check_gradient_dpooling',
          gradcheck(dpooling, (input, rois, offset), eps=1e-4))
Example 10: test_swish
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def test_swish():
    swish = SwishOP.apply
    td = torch.rand(size=(2, 2), dtype=torch.double, requires_grad=True)
    test = gradcheck(swish, (td,), eps=1e-6, atol=1e-4)
    assert test
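SwishOP itself is not defined in this snippet. For orientation only, an assumed implementation of the kind of custom torch.autograd.Function this test validates (swish(x) = x * sigmoid(x) with a hand-written backward) might look like:

class SwishOP(torch.autograd.Function):
    # Hypothetical sketch; the real SwishOP may differ.
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x * torch.sigmoid(x)

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        s = torch.sigmoid(x)
        # d/dx [x * sigmoid(x)] = sigmoid(x) + x * sigmoid(x) * (1 - sigmoid(x))
        return grad_output * (s + x * s * (1 - s))

A hand-written backward like this is exactly where gradcheck is designed to catch mistakes.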
Example 11: test_sparsemax_grad
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def test_sparsemax_grad():
    x = torch.randn(4, 6, dtype=torch.float64, requires_grad=True)
    gradcheck(sparsemax_bisect, (x,), eps=1e-5)
Example 12: test_entmax_grad
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def test_entmax_grad(alpha):
    alpha = torch.tensor(alpha, dtype=torch.float64, requires_grad=True)
    x = torch.randn(4, 6, dtype=torch.float64, requires_grad=True)
    gradcheck(entmax_bisect, (x, alpha), eps=1e-5)
Example 13: test_entmax_grad_multiple_alphas
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def test_entmax_grad_multiple_alphas():
    n = 4
    x = torch.randn(n, 6, dtype=torch.float64, requires_grad=True)
    alpha = 1.05 + torch.rand((n, 1), dtype=torch.float64, requires_grad=True)
    gradcheck(entmax_bisect, (x, alpha), eps=1e-5)
Example 14: test_arbitrary_dimension_grad
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def test_arbitrary_dimension_grad(dim):
    shape = [3, 4, 2, 5]
    alpha_shape = list(shape)  # copy, so mutating it does not also change shape
    alpha_shape[dim] = 1
    f = partial(entmax_bisect, dim=dim)
    X = torch.randn(*shape, dtype=torch.float64, requires_grad=True)
    alphas = 1.05 + torch.rand(
        alpha_shape, dtype=torch.float64, requires_grad=True
    )
    gradcheck(f, (X, alphas), eps=1e-5)
Example 15: test_aggregate
# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import gradcheck [as alias]
def test_aggregate():
    B, N, K, D = 2, 3, 4, 5
    A = Variable(torch.cuda.DoubleTensor(B, N, K).uniform_(-0.5, 0.5),
                 requires_grad=True)
    X = Variable(torch.cuda.DoubleTensor(B, N, D).uniform_(-0.5, 0.5),
                 requires_grad=True)
    C = Variable(torch.cuda.DoubleTensor(K, D).uniform_(-0.5, 0.5),
                 requires_grad=True)
    input = (A, X, C)
    # EPS and ATOL are module-level tolerance constants
    test = gradcheck(encoding.functions.aggregate, input, eps=EPS, atol=ATOL)
    print('Testing aggregate(): {}'.format(test))