

Python Variable.requires_grad Method Code Examples

This article compiles typical usage examples of the Python method torch.autograd.Variable.requires_grad, drawn from open-source code. If you are unsure what Variable.requires_grad does, how to call it, or what real-world usage looks like, the curated examples below should help. For broader context, you can also browse the other usage examples for torch.autograd.Variable.


Three code examples of Variable.requires_grad are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Python code samples.
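
Before the examples, here is a minimal sketch (not taken from any of the projects below) of what the flag itself does: a leaf Variable defaults to requires_grad=False, and flipping the attribute opts it in or out of gradient tracking. Note that Variable is the legacy pre-0.4 API used throughout these examples; in current PyTorch it is merged into Tensor, and torch.randn(..., requires_grad=True) or tensor.requires_grad_() is preferred.

import torch
from torch.autograd import Variable  # legacy API, as used in the examples below

x = Variable(torch.randn(3, 3))  # leaf Variables default to requires_grad=False
x.requires_grad = True           # opt this leaf into gradient tracking

loss = (x ** 2).sum()
loss.backward()
print(x.grad is not None)        # True: gradients were accumulated into x.grad

x.requires_grad = False          # freeze x: autograd now treats it as a constant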

Example 1: reconstruct_cells_red_first

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import requires_grad [as alias]
# This snippet additionally assumes: import torch; from torch import optim; from tqdm import tqdm
def reconstruct_cells_red_first(imgs, netG, opt, n_bfgs_iter=100, lbfgs_lr=0.1):
    assert imgs.size(1) == 2  # expect two channels: red and green
    n_red_noise = int(opt.nz * opt.red_portion)
    n_green_noise = opt.nz - n_red_noise
    red_noise = torch.FloatTensor(int(opt.batch_size), n_red_noise, 1, 1).normal_(0, 1)
    green_noise = torch.FloatTensor(int(opt.batch_size), n_green_noise, 1, 1).normal_(0, 1)

    if opt.cuda:
        red_noise = red_noise.cuda()
        green_noise = green_noise.cuda()

    red_noise = Variable(red_noise)
    red_noise.requires_grad = True
    green_noise = Variable(green_noise)
    green_noise.requires_grad = False

    noise = torch.cat([red_noise, green_noise], 1)
    noise_init = noise.clone()

    optim_input = optim.LBFGS([red_noise], lr=lbfgs_lr)

    def red_closure():
        optim_input.zero_grad()
        noise = torch.cat([red_noise, green_noise], 1)
        gen_img = netG(noise)

        l2_loss = torch.mean((imgs[:, 0, :, :] - gen_img[:, 0, :, :]) ** 2)
        l2_loss.backward()
        # print(l2_loss.data[0])
        # sys.stdout.flush()
        return l2_loss

    # Run the L-BFGS optimization over the batch (red channel only)
    for i in tqdm(range(n_bfgs_iter)):
        optim_input.step(red_closure)

    # Now optimize the green channel, with the red noise frozen
    red_noise.requires_grad = False
    green_noise.requires_grad = True
    optim_input = optim.LBFGS([green_noise], lr=lbfgs_lr)

    def green_closure():
        optim_input.zero_grad()
        noise = torch.cat([red_noise, green_noise], 1)
        gen_img = netG(noise)

        l2_loss = torch.mean((imgs[:, 1, :, :] - gen_img[:, 1, :, :]) ** 2)
        l2_loss.backward()
        # print(l2_loss.data[0])
        # sys.stdout.flush()
        return l2_loss

    for i in tqdm(range(n_bfgs_iter)):
        optim_input.step(green_closure)

    # Re-concatenate before returning: the optimized leaf variables are
    # red_noise and green_noise, not the `noise` built earlier
    noise = torch.cat([red_noise, green_noise], 1)

    return noise_init, noise
Developer: TaihuLight | Project: biogans | Lines: 61 | Source: reconstruction.py
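
The pattern above generalizes beyond this project: freeze one block of the latent code with requires_grad = False, optimize the other with L-BFGS, then swap the roles. Below is a minimal sketch of that idea in the modern Tensor API; the quadratic objective and the names a, b, target are stand-ins for illustration, not the project's netG or noise layout.

import torch
from torch import optim

target = torch.randn(4, 8)

a = torch.randn(4, 4, requires_grad=True)   # block currently being optimized
b = torch.randn(4, 4, requires_grad=False)  # frozen block

optimizer = optim.LBFGS([a], lr=0.1)

def closure():
    optimizer.zero_grad()
    z = torch.cat([a, b], 1)           # rebuilt inside the closure so each
    loss = ((z - target) ** 2).mean()  # L-BFGS evaluation gets a fresh graph
    loss.backward()
    return loss

for _ in range(20):
    optimizer.step(closure)

# Swap roles, exactly as reconstruct_cells_red_first does for the green channel.
a.requires_grad_(False)
b.requires_grad_(True)
optimizer = optim.LBFGS([b], lr=0.1)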

Example 2: reconstruct_cells

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import requires_grad [as alias]
# This snippet additionally assumes: import torch; from torch import optim; from tqdm import tqdm
def reconstruct_cells(imgs, netG, opt, n_bfgs_iter=100, lbfgs_lr=0.1):
    noise = torch.FloatTensor(int(opt.batch_size), opt.nz, 1, 1)
    noise.normal_(0, 1)

    if opt.cuda:
        noise = noise.cuda()

    noise = Variable(noise)
    noise.requires_grad = True
    noise_init = noise.clone()

    optim_input = optim.LBFGS([noise], lr=lbfgs_lr)

    def closure():
        optim_input.zero_grad()
        gen_img = netG(noise)

        l2_loss = torch.mean((imgs - gen_img) ** 2)
        l2_loss.backward()
        # print(l2_loss.data[0])
        # sys.stdout.flush()
        return l2_loss

    # Run the L-BFGS optimization over the batch
    for i in tqdm(range(n_bfgs_iter)):
        optim_input.step(closure)
    return noise_init, noise
Developer: TaihuLight | Project: biogans | Lines: 29 | Source: reconstruction.py

Example 3: test_flags

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import requires_grad [as alias]
# This snippet is a method of a test class and additionally assumes: import torch; from itertools import product
    def test_flags(self):
        x = Variable(torch.randn(2, 2))
        y = Variable(torch.randn(2, 2))

        @torch.jit.compile
        def fn(x, y):
            return (x * x + y * y + x * y).sum()

        grads = {}
        for rx, ry in product((True, False), repeat=2):
            x.requires_grad = rx
            y.requires_grad = ry

            self.assertFalse(fn.has_trace_for(x, y))
            out = fn(x, y)

            self.assertFalse(fn.has_trace_for(x, y))
            for v, name, compute in [(x, 'x', rx), (y, 'y', ry)]:
                if not compute:
                    continue
                grad_v, = torch.autograd.grad(out, v, retain_graph=True)
                expected_grad = grads.setdefault(name, grad_v)
                self.assertEqual(grad_v, expected_grad)
            self.assertEqual(fn.has_trace_for(x, y), rx or ry)
Developer: Northrend | Project: pytorch | Lines: 26 | Source: test_jit.py
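
Example 3 leans on torch.autograd.grad to read off the gradient for a single input without mutating .grad, using retain_graph=True so the same output can be differentiated repeatedly. A minimal sketch of just that call, detached from the JIT machinery, mirroring the test's objective:

import torch

x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
out = (x * x + y * y + x * y).sum()

# Gradient w.r.t. x only; retain_graph=True keeps the graph alive
# so the same `out` can be differentiated again for y.
grad_x, = torch.autograd.grad(out, x, retain_graph=True)
grad_y, = torch.autograd.grad(out, y)

print(torch.allclose(grad_x, 2 * x + y))  # True: d(out)/dx = 2x + y
print(torch.allclose(grad_y, 2 * y + x))  # True: d(out)/dy = 2y + x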


Note: The torch.autograd.Variable.requires_grad examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use should follow each project's license. Please do not reproduce without permission.