

Python Variable.clone Method Code Examples

This article collects and summarizes typical usage examples of the Python method torch.autograd.Variable.clone. If you are wondering what Variable.clone does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of torch.autograd.Variable, the class this method belongs to.


The sections below present 15 code examples of the Variable.clone method, sorted by popularity by default.
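Before diving into the collected examples, here is a minimal, self-contained sketch (written against the legacy Variable API, which has since been merged into torch.Tensor) of the two properties the examples below rely on: clone() allocates new storage, so in-place edits on the clone never touch the original, yet the clone stays connected to the autograd graph, so gradients still flow back to the source.

import torch
from torch.autograd import Variable  # merged into torch.Tensor since PyTorch 0.4

x = Variable(torch.ones(3), requires_grad=True)

# Property 1: clone() stays in the autograd graph, so gradients
# computed through the clone flow back to x.
y = x.clone()
(y * 2).sum().backward()
print(x.grad)        # tensor([2., 2., 2.])

# Property 2: the clone owns its own storage, so modifying it
# leaves the original untouched.
z = x.clone()
z.data[0] = 99.0
print(x.data[0])     # still 1.0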

Example 1: compare_grid_sample

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2 # random.randint(1, 8)
    H = 5 # random.randint(1, 8)
    W = 4 # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()
   
    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_official = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_official.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_official, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]


    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Author: XiongweiWu | Project: faster-rcnn.pytorch | Lines: 29 | Source: net_utils.py

Example 2: test_sparse_variable_methods

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
    def test_sparse_variable_methods(self):
        # TODO: delete when tensor/variable are merged
        from torch.autograd import Variable
        i = self.IndexTensor([[0, 1, 1], [2, 0, 2]])
        v = self.ValueTensor([3, 4, 5])
        sparse_mat = self.SparseTensor(i, v, torch.Size([2, 3]))
        sparse_var = Variable(sparse_mat)

        to_test_one_arg = {
            'zeros_like': lambda x: torch.zeros_like(x),
            'transpose': lambda x: x.transpose(0, 1),
            'transpose_': lambda x: x.transpose_(0, 1),
            't': lambda x: x.t(),
            't_': lambda x: x.t_(),
            'div': lambda x: x.div(2),
            'div_': lambda x: x.div_(2),
            'pow': lambda x: x.pow(2),
            '_nnz': lambda x: x._nnz(),
            'is_coalesced': lambda x: x.is_coalesced(),
            'coalesce': lambda x: x.coalesce(),
            'to_dense': lambda x: x.to_dense(),
            '_dimI': lambda x: x._dimI(),
            '_dimV': lambda x: x._dimV(),
        }

        for test_name, test_fn in to_test_one_arg.items():
            var1 = sparse_var.clone()
            tensor1 = sparse_mat.clone()

            out_var = test_fn(var1)
            out_tensor = test_fn(tensor1)

            if isinstance(out_tensor, int) or isinstance(out_tensor, bool):
                self.assertEqual(out_var, out_tensor)
                continue

            # Assume output is variable / tensor
            self.assertEqual(test_fn(var1).data, test_fn(tensor1),
                             test_name)

        i = self.IndexTensor([[0, 0, 1], [1, 2, 1]])
        v = self.ValueTensor([3, 3, 4])
        sparse_mat2 = self.SparseTensor(i, v, torch.Size([2, 3]))
        sparse_var2 = Variable(sparse_mat2)

        to_test_two_arg = {
            'sub': lambda x, y: x.sub(y),
            'sub_': lambda x, y: x.sub_(y),
            'mul': lambda x, y: x.mul(y),
            'mul_': lambda x, y: x.mul_(y),
        }

        for test_name, test_fn in to_test_two_arg.items():
            var1 = sparse_var.clone()
            var2 = sparse_var2.clone()
            tensor1 = sparse_mat.clone()
            tensor2 = sparse_mat2.clone()
            self.assertEqual(test_fn(var1, var2).data,
                             test_fn(tensor1, tensor2), test_name)
Author: bhuWenDongchao | Project: pytorch | Lines: 61 | Source: test_sparse.py

Example 3: test_basic_advanced_combined

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
    def test_basic_advanced_combined(self):
        # From the NumPy indexing example
        x = Variable(torch.arange(0, 12).view(4, 3))
        self.assertEqual(x[1:2, 1:3], x[1:2, [1, 2]])
        self.assertEqual(x[1:2, 1:3].data.tolist(), [[4, 5]])

        # Check that it is a copy
        unmodified = x.clone()
        x[1:2, [1, 2]].zero_()
        self.assertEqual(x, unmodified)

        # But assignment should modify the original
        unmodified = x.clone()
        x[1:2, [1, 2]] = 0
        self.assertNotEqual(x, unmodified)
Author: bhuWenDongchao | Project: pytorch | Lines: 17 | Source: test_indexing.py

Example 4: reconstruct_cells

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
def reconstruct_cells(imgs, netG, opt, n_bfgs_iter=100, lbfgs_lr=0.1):
    noise = torch.FloatTensor(int(opt.batch_size), opt.nz, 1, 1)
    noise.normal_(0, 1)

    if opt.cuda:
        noise = noise.cuda()

    noise = Variable(noise)
    noise.requires_grad = True
    noise_init = noise.clone()

    optim_input = optim.LBFGS([noise], lr=lbfgs_lr)

    def closure():
        optim_input.zero_grad()
        gen_img = netG(noise)

        l2_loss = torch.mean((imgs - gen_img) ** 2)
        l2_loss.backward()
        # print(l2_loss.data[0])
        # sys.stdout.flush()
        return l2_loss

    # Do the optimization across batch
    for i in tqdm(range(n_bfgs_iter)):
        optim_input.step(closure)
    return noise_init, noise
Author: TaihuLight | Project: biogans | Lines: 29 | Source: reconstruction.py

Example 5: test_inplace_transplant

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
 def test_inplace_transplant(self):
     x = Variable(torch.Tensor([0]), requires_grad=True)
     trace = torch._C._tracer_enter((x,), 0)
     y = x.clone()
     y.add_(2)
     y.add_(3)
     torch._C._tracer_exit((y,))
     self.assertExpected(str(trace))
Author: Northrend | Project: pytorch | Lines: 10 | Source: test_jit.py

Example 6: test_empty_index

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
    def test_empty_index(self):
        x = Variable(torch.arange(0, 12).view(4, 3))
        idx = Variable(torch.LongTensor())
        self.assertEqual(x[idx].numel(), 0)

        # empty assignment should have no effect but not throw an exception
        y = x.clone()
        y[idx] = -1
        self.assertEqual(x, y)

        mask = torch.zeros(4, 3).byte()
        y[mask] = -1
        self.assertEqual(x, y)
Author: bhuWenDongchao | Project: pytorch | Lines: 15 | Source: test_indexing.py

Example 7: test_setitem_scalars

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
    def test_setitem_scalars(self):
        zero = variable(0).long()

        # non-scalar indexed with scalars
        a = Variable(torch.randn(2, 3))
        a_set_with_number = a.clone()
        a_set_with_scalar = a.clone()
        b = Variable(torch.randn(3))

        a_set_with_number[0] = b
        a_set_with_scalar[zero] = b
        self.assertEqual(a_set_with_number, a_set_with_scalar)
        a[1, zero] = 7.7
        self.assertEqual(7.7, a[1, 0])

        # scalar indexed with scalars
        r = variable(0).normal_()
        with self.assertRaises(RuntimeError):
            r[:] = 8.8
        with self.assertRaises(RuntimeError):
            r[zero] = 8.8
        r[...] = 9.9
        self.assertEqual(9.9, r)
Author: bhuWenDongchao | Project: pytorch | Lines: 25 | Source: test_indexing.py

Example 8: test_sparse_variable_methods

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
    def test_sparse_variable_methods(self):
        # TODO: delete when tensor/variable are merged
        from torch.autograd import Variable
        i = self.IndexTensor([[0, 1, 1], [2, 0, 2]])
        v = self.ValueTensor([3, 4, 5])
        sparse_mat = self.SparseTensor(i, v, torch.Size([2, 3]))
        sparse_var = Variable(sparse_mat)

        to_test_one_arg = {
            'zeros_like': lambda x: torch.zeros_like(x),
            'transpose': lambda x: x.transpose(0, 1),
            'transpose_': lambda x: x.transpose_(0, 1),
            't': lambda x: x.t(),
            't_': lambda x: x.t_(),
            'div': lambda x: x.div(2),
            'div_': lambda x: x.div_(2),
            'pow': lambda x: x.pow(2),
            '_nnz': lambda x: x._nnz(),
            'is_coalesced': lambda x: x.is_coalesced(),
            'coalesce': lambda x: x.coalesce(),
            'to_dense': lambda x: x.to_dense(),
            '_sparseDims': lambda x: x._sparseDims(),
            '_denseDims': lambda x: x._denseDims(),
            'norm': lambda x: x.norm(),
        }

        for test_name, test_fn in to_test_one_arg.items():
            var1 = sparse_var.clone()
            tensor1 = sparse_mat.clone()

            out_var = test_fn(var1)
            out_tensor = test_fn(tensor1)

            if isinstance(out_tensor, int) or isinstance(out_tensor, bool):
                if not isinstance(out_var, int) and not isinstance(out_var, bool):
                    check_var = out_var.data[0]
                else:
                    check_var = out_var
                self.assertEqual(check_var, out_tensor)
                continue

            # Assume output is variable / tensor
            self.assertEqual(test_fn(var1).data, test_fn(tensor1),
                             test_name)

        i = self.IndexTensor([[0, 0, 1], [1, 2, 1]])
        v = self.ValueTensor([3, 3, 4])
        sparse_mat2 = self.SparseTensor(i, v, torch.Size([2, 3]))
        sparse_var2 = Variable(sparse_mat2)

        to_test_two_arg = {
            'sub': lambda x, y: x.sub(y),
            'sub_': lambda x, y: x.sub_(y),
            'mul': lambda x, y: x.mul(y),
            'mul_': lambda x, y: x.mul_(y),
        }

        for test_name, test_fn in to_test_two_arg.items():
            var1 = sparse_var.clone()
            var2 = sparse_var2.clone()
            tensor1 = sparse_mat.clone()
            tensor2 = sparse_mat2.clone()
            self.assertEqual(test_fn(var1, var2).data,
                             test_fn(tensor1, tensor2), test_name)

        to_test_mixed = [
            # test name, lambda expression, should_run_when_cuda
            ('sspaddmm', lambda sp, de: sp.sspaddmm(sp, de), False),
            ('sspaddmm_b', lambda sp, de: sp.sspaddmm(2, sp, de), False),
            ('sspaddmm_b_a', lambda sp, de: sp.sspaddmm(3, 2, sp, de), False),
            ('addmm', lambda sp, de: de.addmm(sp, de), True),
            # TODO: This looks like a typo
            ('addmm_', lambda sp, de: de.addmm(sp, de), True),
            ('mm', lambda sp, de: torch.mm(sp, de), True),
            ('mm_out', lambda sp, de: torch.mm(sp, de, out=de), True),
        ]

        i = self.IndexTensor([[0, 0, 1, 2, 2], [1, 2, 1, 0, 1]])
        v = self.ValueTensor([3, 3, 4, 1, 2])
        sparse_mat = self.SparseTensor(i, v, torch.Size([3, 3]))
        sparse_var = Variable(sparse_mat)
        dense_mat = sparse_mat.to_dense().random_(0, 5)
        dense_var = Variable(dense_mat)

        for test_name, test_fn, test_cuda in to_test_mixed:
            if sparse_var.is_cuda and not test_cuda:
                continue
            sp_var = sparse_var.clone()
            de_var = dense_var.clone()
            sp_mat = sparse_mat.clone()
            de_mat = dense_mat.clone()
            self.assertEqual(test_fn(sp_var, de_var).data,
                             test_fn(sp_mat, de_mat), test_name)
Author: gtgalone | Project: pytorch | Lines: 95 | Source: test_sparse.py

Example 9: UIModel

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
class UIModel(BaseModel):
    def name(self):
        return 'UIModel'

    def initialize(self, opt):
        assert(not opt.isTrain)
        BaseModel.initialize(self, opt)
        self.use_features = opt.instance_feat or opt.label_feat

        netG_input_nc = opt.label_nc
        if not opt.no_instance:
            netG_input_nc += 1            
        if self.use_features:   
            netG_input_nc += opt.feat_num           

        self.netG = networks.define_G(netG_input_nc, opt.output_nc, opt.ngf, opt.netG, 
                                      opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers, 
                                      opt.n_blocks_local, opt.norm, gpu_ids=self.gpu_ids)            
        self.load_network(self.netG, 'G', opt.which_epoch)

        print('---------- Networks initialized -------------')

    def toTensor(self, img, normalize=False):
        tensor = torch.from_numpy(np.array(img, np.int32, copy=False))
        tensor = tensor.view(1, img.size[1], img.size[0], len(img.mode))    
        tensor = tensor.transpose(1, 2).transpose(1, 3).contiguous()
        if normalize:
            return (tensor.float()/255.0 - 0.5) / 0.5        
        return tensor.float()

    def load_image(self, label_path, inst_path, feat_path):
        opt = self.opt
        # read label map
        label_img = Image.open(label_path)    
        if label_path.find('face') != -1:
            label_img = label_img.convert('L')
        ow, oh = label_img.size    
        w = opt.loadSize
        h = int(w * oh / ow)    
        label_img = label_img.resize((w, h), Image.NEAREST)
        label_map = self.toTensor(label_img)           
        
        # onehot vector input for label map
        self.label_map = label_map.cuda()
        oneHot_size = (1, opt.label_nc, h, w)
        input_label = self.Tensor(torch.Size(oneHot_size)).zero_()
        self.input_label = input_label.scatter_(1, label_map.long().cuda(), 1.0)

        # read instance map
        if not opt.no_instance:
            inst_img = Image.open(inst_path)        
            inst_img = inst_img.resize((w, h), Image.NEAREST)            
            self.inst_map = self.toTensor(inst_img).cuda()
            self.edge_map = self.get_edges(self.inst_map)          
            self.net_input = Variable(torch.cat((self.input_label, self.edge_map), dim=1), volatile=True)
        else:
            self.net_input = Variable(self.input_label, volatile=True)  
        
        self.features_clustered = np.load(feat_path).item()
        self.object_map = self.inst_map if opt.instance_feat else self.label_map 
                       
        object_np = self.object_map.cpu().numpy().astype(int) 
        self.feat_map = self.Tensor(1, opt.feat_num, h, w).zero_()                 
        self.cluster_indices = np.zeros(self.opt.label_nc, np.uint8)
        for i in np.unique(object_np):    
            label = i if i < 1000 else i//1000
            if label in self.features_clustered:
                feat = self.features_clustered[label]
                np.random.seed(i+1)
                cluster_idx = np.random.randint(0, feat.shape[0])
                self.cluster_indices[label] = cluster_idx
                idx = (self.object_map == i).nonzero()                    
                self.set_features(idx, feat, cluster_idx)

        self.net_input_original = self.net_input.clone()        
        self.label_map_original = self.label_map.clone()
        self.feat_map_original = self.feat_map.clone()
        if not opt.no_instance:
            self.inst_map_original = self.inst_map.clone()        

    def reset(self):
        self.net_input = self.net_input_prev = self.net_input_original.clone()        
        self.label_map = self.label_map_prev = self.label_map_original.clone()
        self.feat_map = self.feat_map_prev = self.feat_map_original.clone()
        if not self.opt.no_instance:
            self.inst_map = self.inst_map_prev = self.inst_map_original.clone()
        self.object_map = self.inst_map if self.opt.instance_feat else self.label_map 

    def undo(self):        
        self.net_input = self.net_input_prev
        self.label_map = self.label_map_prev
        self.feat_map = self.feat_map_prev
        if not self.opt.no_instance:
            self.inst_map = self.inst_map_prev
        self.object_map = self.inst_map if self.opt.instance_feat else self.label_map 
            
    # get boundary map from instance map
    def get_edges(self, t):
        edge = torch.cuda.ByteTensor(t.size()).zero_()
        edge[:,:,:,1:] = edge[:,:,:,1:] | (t[:,:,:,1:] != t[:,:,:,:-1])
#......... part of the code is omitted here .........
Author: pchan-pipeline | Project: pix2pixHD | Lines: 103 | Source: ui_model.py

Example 10: range

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
D_solver = optim.Adam(D_.parameters(), lr=lr)


for it in range(1000000):
    # Sample data
    X, _ = mnist.train.next_batch(mb_size)
    X = Variable(torch.from_numpy(X))

    # Discriminator
    z_hat = Q(X)

    # Do N step Gibbs sampling
    z = Variable(torch.randn(mb_size, z_dim))

    for _ in range(N):
        z_n = z.clone()
        X_hat = P(z_n)
        z = Q(X_hat)

    p_data = D(X, z_hat)
    p_model = D(X_hat, z_n)

    D_loss = -torch.mean(log(p_data) + log(1 - p_model))

    D_loss.backward(retain_graph=True)
    D_solver.step()
    G_solver.step()
    reset_grad()

    G_loss = -torch.mean(log(p_model) + log(1 - p_data))
Author: lisafra | Project: generative-models | Lines: 32 | Source: gibbsnet_pytorch.py

Example 11: print

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
        #torch.nn.utils.clip_grad_norm([skel_r], 0.001)
        optimizer.step()
        
        if ii % 250 == 0:
            print(ii,
                  loss.data[0], 
                  c_loss.data[0],
                  seg_loss.data[0], 
                  cont_loss.data[0],
                  curv_loss.data[0]
                  )
    return skel_r, skel_map
         
#%%
results = []
skel_prev = skel_c.clone()
for tt in [10]:#range(1, 8, 2):#range(3, 50, 3):
    print('W {}'.format(tt))
    row, worm_roi, roi_corner = getROIfromInd(mask_file, 
                                              trajectories_data, 
                                              frame_number = ini_f + tt, 
                                              worm_index = w_ind, 
                                              roi_size=-1
                                              )
#%%
    w_roi = worm_roi.astype(np.float32)
    valid_pix = w_roi[w_roi!=0]
    bot = valid_pix.min()
    top = valid_pix.max()
    w_roi[w_roi==0] = top
    w_roi = (w_roi-bot)/(top-bot+1) + 1e-3
Author: ver228 | Project: Work_In_Progress | Lines: 33 | Source: draw_maps.py

Example 12: main

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
def main(config):
    
    # Image preprocessing
    # For normalization, see https://github.com/pytorch/vision#models
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), 
                             (0.229, 0.224, 0.225))])
    
    # Load content and style images
    # make content.size() == style.size() 
    content = load_image(config.content, transform, max_size=config.max_size)
    style = load_image(config.style, transform, shape=[content.size(2), content.size(3)])
    
    # Initialization and optimizer
    target = Variable(content.clone(), requires_grad=True)
    optimizer = torch.optim.Adam([target], lr=config.lr, betas=[0.5, 0.999])
    
    vgg = VGGNet()
    if use_cuda:
        vgg.cuda()
    
    for step in range(config.total_step):
        
        # Extract multiple(5) conv feature vectors
        target_features = vgg(target)
        content_features = vgg(Variable(content))
        style_features = vgg(Variable(style))

        style_loss = 0
        content_loss = 0
        for f1, f2, f3 in zip(target_features, content_features, style_features):
            # Compute content loss (target and content image)
            content_loss += torch.mean((f1 - f2)**2)

            # Reshape conv features
            _, c, h, w = f1.size()
            f1 = f1.view(c, h * w)
            f3 = f3.view(c, h * w)

            # Compute gram matrix  
            f1 = torch.mm(f1, f1.t())
            f3 = torch.mm(f3, f3.t())

            # Compute style loss (target and style image)
            style_loss += torch.mean((f1 - f3)**2) / (c * h * w) 

        # Compute total loss, backprop and optimize
        loss = content_loss + config.style_weight * style_loss 
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (step+1) % config.log_step == 0:
            print ('Step [%d/%d], Content Loss: %.4f, Style Loss: %.4f' 
                   %(step+1, config.total_step, content_loss.data[0], style_loss.data[0]))
    
        if (step+1) % config.sample_step == 0:
            # Save the generated image
            denorm = transforms.Normalize((-2.12, -2.04, -1.80), (4.37, 4.46, 4.44))
            img = target.clone().cpu().squeeze()
            img = denorm(img.data).clamp_(0, 1)
            torchvision.utils.save_image(img, 'output-%d.png' %(step+1))
Author: AbhinavJain13 | Project: pytorch-tutorial | Lines: 65 | Source: main.py

Example 13: log

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
        D_G_z1 = output.data.mean()
        errD = errD_real + errD_fake
        optimizerD.step()


        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        netG.zero_grad()
        label.data.fill_(real_label)  # fake labels are real for generator cost
        output = netD(fake)
        errG_D = criterion(output, label)
        # errG_D.backward(retain_variables=True)

        # errG_l2 = criterionMSE(fake,real_center)
        wtl2Matrix = real_center.clone()
        wtl2Matrix.data.fill_(wtl2*overlapL2Weight)
        wtl2Matrix.data[:,:,int(opt.overlapPred):int(opt.imageSize/2 - opt.overlapPred),int(opt.overlapPred):int(opt.imageSize/2 - opt.overlapPred)] = wtl2
        
        errG_l2 = (fake-real_center).pow(2)
        errG_l2 = errG_l2 * wtl2Matrix
        errG_l2 = errG_l2.mean()

        errG = (1-wtl2) * errG_D + wtl2 * errG_l2

        errG.backward()

        D_G_z2 = output.data.mean()
        optimizerG.step()

        print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f / %.4f l_D(x): %.4f l_D(G(z)): %.4f'
Author: shubhampachori12110095 | Project: context_encoder_pytorch | Lines: 33 | Source: train.py

Example 14: Variable

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
#     input_real, input_cropped = input_real.cuda(),input_cropped.cuda()
#     criterionMSE.cuda()
#     real_center = real_center.cuda()

input_real = Variable(input_real)
input_cropped = Variable(input_cropped)
real_center = Variable(real_center)


input_real.data.resize_(image.size()).copy_(image)
input_cropped.data.resize_(image.size()).copy_(image)
real_center_cpu = image[:,:,opt.imageSize/4:opt.imageSize/4+opt.imageSize/2,opt.imageSize/4:opt.imageSize/4+opt.imageSize/2]
real_center.data.resize_(real_center_cpu.size()).copy_(real_center_cpu)

input_cropped.data[:,0,opt.imageSize/4+opt.overlapPred:opt.imageSize/4+opt.imageSize/2-opt.overlapPred,opt.imageSize/4+opt.overlapPred:opt.imageSize/4+opt.imageSize/2-opt.overlapPred] = 2*117.0/255.0 - 1.0
input_cropped.data[:,1,opt.imageSize/4+opt.overlapPred:opt.imageSize/4+opt.imageSize/2-opt.overlapPred,opt.imageSize/4+opt.overlapPred:opt.imageSize/4+opt.imageSize/2-opt.overlapPred] = 2*104.0/255.0 - 1.0
input_cropped.data[:,2,opt.imageSize/4+opt.overlapPred:opt.imageSize/4+opt.imageSize/2-opt.overlapPred,opt.imageSize/4+opt.overlapPred:opt.imageSize/4+opt.imageSize/2-opt.overlapPred] = 2*123.0/255.0 - 1.0

fake = netG(input_cropped)
errG = criterionMSE(fake,real_center)

recon_image = input_cropped.clone()
recon_image.data[:,:,opt.imageSize/4:opt.imageSize/4+opt.imageSize/2,opt.imageSize/4:opt.imageSize/4+opt.imageSize/2] = fake.data

utils.save_image('val_real_samples.png',image[0])
utils.save_image('val_cropped_samples.png',input_cropped.data[0])
utils.save_image('val_recon_samples.png',recon_image.data[0])

print('%.4f' % errG.data[0])

Author: shubhampachori12110095 | Project: context_encoder_pytorch | Lines: 31 | Source: test_one.py

Example 15: SAE

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import clone [as alias]
        x = self.activation(self.fc2(x))
        x = self.activation(self.fc3(x))
        x = self.fc4(x)
        return x
sae = SAE()
criterion = nn.MSELoss()
optimizer = optim.RMSprop(sae.parameters(), lr = 0.01, weight_decay = 0.5)

# Training the SAE
nb_epoch = 200
for epoch in range(1, nb_epoch + 1):
    train_loss = 0
    s = 0.
    for id_user in range(nb_users):
        input = Variable(training_set[id_user]).unsqueeze(0)
        target = input.clone()
        if torch.sum(target.data > 0) > 0:
            output = sae(input)
            target.requires_grad = False
            output[target == 0] = 0
            loss = criterion(output, target)
            mean_corrector = nb_movies/float(torch.sum(target.data > 0) + 1e-10)
            loss.backward()
            train_loss += np.sqrt(loss.data[0]*mean_corrector)
            s += 1.
            optimizer.step()
    print('epoch: '+str(epoch)+' loss: '+str(train_loss/s))

# Testing the SAE
test_loss = 0
s = 0.
Author: Pr1yanshu | Project: RateIt | Lines: 33 | Source: ae.py


Note: The torch.autograd.Variable.clone examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.