

Python util.tensor2im Method Code Examples

This article collects typical usage examples of the Python method util.util.tensor2im. If you have been wondering what exactly util.tensor2im does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples from the util.util module it belongs to.


The 15 code examples of util.tensor2im shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
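Before working through the examples, it helps to know what tensor2im typically does in these projects: it converts a PyTorch image tensor, usually normalized to [-1, 1] and laid out as (batch, channels, height, width), into an H x W x C uint8 NumPy array ready for display or saving. Implementations vary between repositories; the following is a minimal sketch modeled on the util/util.py shipped with the pix2pix/CycleGAN family of codebases, not the authoritative definition for every project below.

import numpy as np

def tensor2im(image_tensor, imtype=np.uint8):
    # Take the first image in the batch and move it to host memory.
    image_numpy = image_tensor[0].cpu().float().numpy()
    # Replicate single-channel (grayscale) images to 3 channels.
    if image_numpy.shape[0] == 1:
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    # CHW -> HWC, then rescale [-1, 1] to [0, 255].
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    return image_numpy.astype(imtype)

This also explains the recurring .data accesses below: the snippets unwrap old-style PyTorch Variables into raw tensors before handing them to tensor2im.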

Example 1: get_current_visuals

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def get_current_visuals(self):
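        # Reshape the flat generator outputs back into (batch, sequence_length, C, H, W) video tensors.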
        fake_B_audio = self.audio_gen_fakes.view(-1, self.opt.sequence_length, self.opt.image_channel_size, self.opt.image_size, self.opt.image_size)
        fake_B_image = self.image_gen_fakes.view(-1, self.opt.sequence_length, self.opt.image_channel_size, self.opt.image_size, self.opt.image_size)
        real_A = util.tensor2im(self.real_A.data)
        oderdict = OrderedDict([('real_A', real_A)])
        fake_audio_B = {}
        fake_image_B = {}
        real_B = {}
        for i in range(self.opt.sequence_length):
            fake_audio_B[i] = util.tensor2im(fake_B_audio[:, i, :, :, :].data)
            fake_image_B[i] = util.tensor2im(fake_B_image[:, i, :, :, :].data)
            real_B[i] = util.tensor2im(self.real_videos[:, i, :, :, :].data)
            oderdict['real_B_' + str(i)] = real_B[i]
            oderdict['fake_audio_B_' + str(i)] = fake_audio_B[i]
            oderdict['fake_image_B_' + str(i)] = fake_image_B[i]

        return oderdict 
Developer ID: Hangz-nju-cuhk | Project: Talking-Face-Generation-DAVS | Lines: 19 | Source: Gen_final_v1.py

Example 2: get_latent_space_visualization

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def get_latent_space_visualization(self,num_interpolate=20,label_1=-1,label_2=-1):
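        # Interpolate linearly between the gate activations of two labels and decode each blend.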
        rand_perm = np.random.permutation( self.opt.n_classes  )
        if label_1 == -1:
            label_1 = self.label[0] #rand_perm[0]
        if label_2 == -1:
            label_2 = self.opt.target_label #rand_perm[1]
        alpha_blends = np.linspace(0,1,num_interpolate)
        self.label[0] = label_1
        output_gate_1 = self.netG.forward_gate(self.label)
        self.label[0] = label_2
        output_gate_2 = self.netG.forward_gate(self.label)
        results={}
        results['latent_real_A']=util.tensor2im(self.real_A.data)
        results['latent_real_B']=util.tensor2im(self.real_B.data)

        for i in range(num_interpolate):
            alpha_blend = alpha_blends[i]
            output_gate = output_gate_1*alpha_blend + output_gate_2*(1-alpha_blend)
            self.fake_B = self.netG.forward_main( self.real_A,output_gate)

            results['%d_L_fake_B_inter'%(i)]=util.tensor2im(self.fake_B.data)

        return OrderedDict(results) 
Developer ID: arnabgho | Project: iSketchNFill | Lines: 25 | Source: label_channel_gated_pix2pix_model.py

Example 3: add_objects

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def add_objects(self, click_src, label_tgt, mask, style_id=0):
        y, x = click_src[0], click_src[1]
        mask = np.transpose(mask, (2, 0, 1))[np.newaxis,...]        
        idx_src = torch.from_numpy(mask).cuda().nonzero()        
        idx_src[:,2] += y
        idx_src[:,3] += x

        # backup current maps
        self.backup_current_state()

        # update label map
        self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt        
        for k in range(self.opt.label_nc):
            self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
        self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1            

        # update instance map
        self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
        self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
                
        # update feature map
        self.set_features(idx_src, self.feat, style_id)                
        
        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map)) 
Developer ID: Lotayou | Project: everybody_dance_now_pytorch | Lines: 26 | Source: ui_model.py

Example 4: translation

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def translation(self, data):
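        # Swap the two inputs' encoded styles (summer <-> winter), then also decode with random style samples.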
        with torch.no_grad():
            self.prepare_data(data)
            img, attr_source, index_target, _ = self.current_data
            batch_size = img.size(0)
            assert batch_size == 2
            style_enc, _, _ = self.enc_style(img)
            style_target_enc = style_enc[index_target]
            attr_target = attr_source[index_target]
            content = self.enc_content(img)
            results_s2w, results_w2s = [('input_summer',tensor2im(img[0].data))], [('input_winter',tensor2im(img[1].data))]
            fakes = self.dec(content,torch.cat([attr_target,style_target_enc],dim=1))
            results_s2w.append(('s2w_enc',tensor2im(fakes[0].data)))
            results_w2s.append(('w2s_enc',tensor2im(fakes[1].data)))
            for i in range(self.opt.n_samples):
                style_rand = self.sample_latent_code(style_enc.size())
                fakes = self.dec(content,torch.cat([attr_target,style_rand],dim=1))
                results_s2w.append(('s2w_rand_{}'.format(i+1),tensor2im(fakes[0].data)))
                results_w2s.append(('w2s_rand_{}'.format(i+1),tensor2im(fakes[1].data)))
            return  results_s2w+results_w2s 
Developer ID: Xiaoming-Yu | Project: DMIT | Lines: 22 | Source: season_transfer_model.py

Example 5: translation

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def translation(self, data):
        with torch.no_grad():
            img, cap_ori, cap_len_ori = data
            assert img.size(0) == 1
            img = img.repeat(len(TEST_SEQ)+1,1,1,1)
            cap_tar, cap_len_tar = [cap_ori], [cap_len_ori]
            for seq in TEST_SEQ:
                cap, cap_len = self.opt.txt_dataset.cap2ix(seq)
                cap = torch.LongTensor(cap).unsqueeze(0)
                cap_len = torch.LongTensor([cap_len])
                cap_tar.append(cap)
                cap_len_tar.append(cap_len)
            cap_tar = torch.cat(cap_tar,dim=0)
            cap_len_tar = torch.cat(cap_len_tar,dim=0)
            img, sent_emb, _, _ = self.prepare_data([img,cap_tar,cap_len_tar])
            style_enc, _, _ = self.enc_style(img)
            content = self.enc_content(img)
            fakes = self.dec(content,torch.cat([sent_emb,style_enc],dim=1))
            results = [('input',tensor2im(img[0].data)),
                       ('rec',tensor2im(fakes[0].data))]
            for i in range(len(TEST_SEQ)):
                results.append(('seq_{}'.format(i+1),tensor2im(fakes[i+1].data)))
            return results 
Developer ID: Xiaoming-Yu | Project: DMIT | Lines: 25 | Source: semantic_image_synthesis_model.py

Example 6: recurrent_test

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def recurrent_test(self, step=5):
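        # Repeatedly feed the generator's output back as its input, cropping the lower-right quadrant each round.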
        input_size = self.input_A.cpu().shape
        width,height = input_size[3], input_size[2]
        results = []
        self.real_A = Variable(self.input_A, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        results.append(('real_{}_A'.format(0), real_A))
        results.append(('fake_{}_B'.format(0), fake_B))
        for i in range(1, step):
            # rw = random.randint(0, width)
            # rh = random.randint(0, height)
            rw = int(width/2)
            rh = int(height/2)
            self.real_A = Variable(self.fake_B.data[:, :, rh:rh + height, rw:rw + width], volatile=True)
            self.fake_B = self.netG.forward(self.real_A)
            real_A = util.tensor2im(self.real_A.data)
            fake_B = util.tensor2im(self.fake_B.data)
            results.append(('real_{}_A'.format(i), real_A))
            results.append(('fake_{}_B'.format(i), fake_B))
        return OrderedDict(results) 
Developer ID: jessemelpolio | Project: non-stationary_texture_syn | Lines: 24 | Source: test_model.py

Example 7: recurrent_test_l2_searching

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def recurrent_test_l2_searching(self, step=5):
        input_size = self.input_A.cpu().shape
        width,height = input_size[3], input_size[2]
        results = []
        self.real_A = Variable(self.input_A, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        results.append(('l2_search_real_{}_A'.format(0), real_A))
        results.append(('l2_search_fake_{}_B'.format(0), fake_B))
        for i in range(1, step):
            # rw = random.randint(0, width)
            # rh = random.randint(0, height)
            rw, rh = self.l2_searching(self.real_A.clone(), self.fake_B.clone())
            print("end selection: ", rw, rh)
            self.real_A = Variable(self.fake_B.data[:, :, rh:rh + height, rw:rw + width], volatile=True)
            self.fake_B = self.netG.forward(self.real_A)
            real_A = util.tensor2im(self.real_A.data)
            fake_B = util.tensor2im(self.fake_B.data)
            results.append(('l2_search_real_{}_{}_{}_A'.format(i, rw, rh), real_A))
            results.append(('l2_search_fake_{}_B'.format(i), fake_B))
        return OrderedDict(results) 
Developer ID: jessemelpolio | Project: non-stationary_texture_syn | Lines: 24 | Source: test_model.py

Example 8: random_crop

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def random_crop(self, crop_patch=6):
        input_size = self.input_A.cpu().shape
        width, height = input_size[3], input_size[2]
        results = []
        self.real_A = Variable(self.input_A, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        src_fake_B = self.fake_B.clone()
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        results.append(('real_A', real_A))
        results.append(('fake_{}_B'.format('src'), fake_B))
        for i in range(0, crop_patch):
            rw = random.randint(0, width)
            rh = random.randint(0, height)
            self.real_A = Variable(src_fake_B.data[:, :, rh:rh + height, rw:rw + width], volatile=True)
            self.fake_B = self.netG.forward(self.real_A)
            real_A = util.tensor2im(self.real_A.data)
            fake_B = util.tensor2im(self.fake_B.data)
            results.append(('real_{}_{}_{}_A'.format(i, rw, rh), real_A))
            results.append(('fake_{}_B'.format(i), fake_B))
        return OrderedDict(results) 
Developer ID: jessemelpolio | Project: non-stationary_texture_syn | Lines: 23 | Source: test_model.py

Example 9: random_crop_256x256

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def random_crop_256x256(self, crop_patch=6):
        input_size = self.input_A.cpu().shape
        width, height = input_size[3], input_size[2]
        results = []
        self.real_A = Variable(self.input_A, volatile=True)
        real_A_src = self.real_A.clone()
        # self.fake_B = self.netG.forward(self.real_A)
        # src_fake_B = self.fake_B.clone()
        real_A = util.tensor2im(self.real_A.data)
        # fake_B = util.tensor2im(self.fake_B.data)
        results.append(('real_A', real_A))
        # results.append(('fake_{}_B'.format('src'), fake_B))
        for i in range(0, crop_patch):
            rw = random.randint(0, width - 256)
            rh = random.randint(0, height - 256)
            self.real_A = Variable(real_A_src.data[:, :, rh:rh + 256, rw:rw + 256], volatile=True)
            self.fake_B = self.netG.forward(self.real_A)
            real_A = util.tensor2im(self.real_A.data)
            fake_B = util.tensor2im(self.fake_B.data)
            results.append(('256_real_{}_{}_{}_A'.format(i, rw, rh), real_A))
            results.append(('512_fake_{}_B'.format(i), fake_B))
        return OrderedDict(results) 
Developer ID: jessemelpolio | Project: non-stationary_texture_syn | Lines: 24 | Source: test_model.py

Example 10: translation

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def translation(self, data):
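        # Translate each input to its target domain; in multimodal mode, sample several latent codes per image.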
        input, sourceD, targetD = self.prepare_image(data)
        sourceDC, sourceIndex = self.get_domain_code(sourceD)
        targetDC, targetIndex = self.get_domain_code(targetD)
        
        images, names =[], []
        for i in range(self.opt.d_num):
            images.append([tensor2im(input.index_select(0,sourceIndex[i])[0].data)])
            names.append(['D{}'.format(i)])
            
        if self.opt.mode == 'multimodal':
            for i in range(self.opt.n_samples):
                c_rand = self.sample_latent_code(torch.Size([input.size(0),self.opt.c_num]))
                targetC = torch.cat([targetDC, c_rand],1)
                output = self.G(input,targetC)
                for j in range(output.size(0)):
                    images[sourceD[j]].append(tensor2im(output[j].data))
                    names[sourceD[j]].append('{}to{}_{}'.format(sourceD[j],targetD[j],i)) 
        else:
            output = self.G(input,targetDC)
            for i in range(output.size(0)):
                images[sourceD[i]].append(tensor2im(output[i].data))
                names[sourceD[i]].append('{}to{}'.format(sourceD[i],targetD[i]))
            
        return  images, names 
Developer ID: Xiaoming-Yu | Project: SingleGAN | Lines: 27 | Source: single_gan.py

Example 11: add_objects

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def add_objects(self, click_src, label_tgt, mask, style_id=0):
        y, x = click_src[0], click_src[1]
        mask = np.transpose(mask, (2, 0, 1))[np.newaxis,...]
        idx_src = torch.from_numpy(mask).cuda().nonzero()
        idx_src[:,2] += y
        idx_src[:,3] += x

        # backup current maps
        self.backup_current_state()

        # update label map
        self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
        for k in range(self.opt.label_nc):
            self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
        self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1

        # update instance map
        self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
        self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)

        # update feature map
        self.set_features(idx_src, self.feat, style_id)

        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map)) 
Developer ID: thomasjhuang | Project: deep-learning-for-document-dewarping | Lines: 26 | Source: ui_model.py

Example 12: get_current_visuals

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def get_current_visuals(self):
        real_A = util.tensor2im(self.input_A)
        fake_B = util.tensor2im(self.fake_B)
        rec_A = util.tensor2im(self.rec_A)
        real_B = util.tensor2im(self.input_B)
        fake_A = util.tensor2im(self.fake_A)
        rec_B = util.tensor2im(self.rec_B)
        visuals = OrderedDict([
            ('real_A', real_A),
            ('fake_B', fake_B),
            ('rec_A', rec_A),
            ('real_B', real_B),
            ('fake_A', fake_A),
            ('rec_B', rec_B)
        ])
        return visuals 
Developer ID: ranery | Project: Bayesian-CycleGAN | Lines: 18 | Source: CycleGAN.py

Example 13: get_current_visuals

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def get_current_visuals(self):
        fake_B_image = self.image_gen_fakes.view(-1, self.opt.sequence_length, self.opt.image_channel_size, self.opt.image_size, self.opt.image_size)
        real_A = util.tensor2im(self.real_A.data)
        oderdict = OrderedDict([('real_A', real_A)])
        fake_image_B = {}
        real_B = {}
        for i in range(self.opt.sequence_length):
            fake_image_B[i] = util.tensor2im(fake_B_image[:, i, :, :, :].data)
            real_B[i] = util.tensor2im(self.real_videos[:, i, :, :, :].data)
            oderdict['real_B_' + str(i)] = real_B[i]
            oderdict['fake_image_B_' + str(i)] = fake_image_B[i]

        return oderdict 
Developer ID: Hangz-nju-cuhk | Project: Talking-Face-Generation-DAVS | Lines: 15 | Source: Test_Video_Model.py

Example 14: get_current_visuals

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def get_current_visuals(self):
        fake_B_audio = self.audio_gen_fakes.view(-1, self.opt.sequence_length, self.opt.image_channel_size, self.opt.image_size, self.opt.image_size)
        real_A = util.tensor2im(self.real_A.data)
        oderdict = OrderedDict([('real_A', real_A)])
        fake_audio_B = {}
        fake_image_B = {}
        for i in range(self.opt.sequence_length):
            fake_audio_B[i] = util.tensor2im(fake_B_audio[:, i, :, :, :].data)
            oderdict['fake_audio_B_' + str(i)] = fake_audio_B[i]

        return oderdict 
Developer ID: Hangz-nju-cuhk | Project: Talking-Face-Generation-DAVS | Lines: 13 | Source: Test_Audio_Model.py

Example 15: get_current_visuals

# Required import: from util import util [as alias]
# Or: from util.util import tensor2im [as alias]
def get_current_visuals(self):
        real_A = util.tensor2im(self.input_A)
        fake_B = util.tensor2im(self.fake_B)
        rec_A = util.tensor2im(self.rec_A)
        real_B = util.tensor2im(self.input_B)
        fake_A = util.tensor2im(self.fake_A)
        rec_B = util.tensor2im(self.rec_B)
        ret_visuals = OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('rec_A', rec_A),
                                   ('real_B', real_B), ('fake_A', fake_A), ('rec_B', rec_B)])
        if self.opt.isTrain and self.opt.identity > 0.0:
            ret_visuals['idt_A'] = util.tensor2im(self.idt_A)
            ret_visuals['idt_B'] = util.tensor2im(self.idt_B)
        return ret_visuals 
Developer ID: aayushbansal | Project: Recycle-GAN | Lines: 15 | Source: cycle_gan_model.py
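
A common way to consume the OrderedDict returned by these get_current_visuals methods is to write each entry to disk. The snippet below is a hypothetical usage sketch: model stands in for an instance of one of the model classes above, the results directory is invented for illustration, and it assumes util.util also provides save_image(image_numpy, image_path), as the pix2pix/CycleGAN-style repositories quoted here do.

import os
from util import util

visuals = model.get_current_visuals()  # OrderedDict mapping label -> HWC uint8 array
os.makedirs('results', exist_ok=True)
for label, image_numpy in visuals.items():
    # save_image is the helper that usually sits next to tensor2im in util/util.py
    util.save_image(image_numpy, os.path.join('results', label + '.png'))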


Note: The util.util.tensor2im examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code; do not reproduce this article without permission.