當前位置: 首頁>>代碼示例>>Python>>正文


Python util.util方法代碼示例

本文整理匯總了Python中util.util.util方法的典型用法代碼示例。如果您正苦於以下問題:Python util.util方法的具體用法?Python util.util怎麽用?Python util.util使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在util.util的用法示例。


在下文中一共展示了util.util方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: get_current_visuals

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_current_visuals(self):
        """Collect the current input frame and the per-step generated frames.

        Returns:
            OrderedDict mapping 'real_A', then per sequence step
            'real_B_<i>', 'fake_audio_B_<i>' and 'fake_image_B_<i>',
            to numpy images produced by util.tensor2im.
        """
        # Reshape the flat generator outputs back to (batch, seq, C, H, W).
        seq_shape = (-1, self.opt.sequence_length, self.opt.image_channel_size,
                     self.opt.image_size, self.opt.image_size)
        audio_fakes = self.audio_gen_fakes.view(*seq_shape)
        image_fakes = self.image_gen_fakes.view(*seq_shape)

        visuals = OrderedDict()
        visuals['real_A'] = util.tensor2im(self.real_A.data)
        for step in range(self.opt.sequence_length):
            visuals['real_B_' + str(step)] = util.tensor2im(self.real_videos[:, step, :, :, :].data)
            visuals['fake_audio_B_' + str(step)] = util.tensor2im(audio_fakes[:, step, :, :, :].data)
            visuals['fake_image_B_' + str(step)] = util.tensor2im(image_fakes[:, step, :, :, :].data)

        return visuals
開發者ID:Hangz-nju-cuhk,項目名稱:Talking-Face-Generation-DAVS,代碼行數:19,代碼來源:Gen_final_v1.py

示例2: get_latent_space_visualization

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_latent_space_visualization(self,num_interpolate=20,label_1=-1,label_2=-1):
        """Interpolate between the gate activations of two labels and render results.

        Args:
            num_interpolate: number of interpolation steps between the labels.
            label_1: first endpoint label; -1 means "use the current label".
            label_2: second endpoint label; -1 means "use opt.target_label".

        Returns:
            OrderedDict with the real input pair ('latent_real_A'/'latent_real_B')
            plus one generated image per step ('%d_L_fake_B_inter').
        """
        # Fix: dropped the dead `np.random.permutation(self.opt.n_classes)` call;
        # its result was only referenced from commented-out code, and it
        # needlessly advanced NumPy's global RNG state.
        if label_1 == -1:
            label_1 = self.label[0]
        if label_2 == -1:
            label_2 = self.opt.target_label
        alpha_blends = np.linspace(0, 1, num_interpolate)
        # Query the generator's gate output for each endpoint label in turn.
        self.label[0] = label_1
        output_gate_1 = self.netG.forward_gate(self.label)
        self.label[0] = label_2
        output_gate_2 = self.netG.forward_gate(self.label)
        results = {}
        results['latent_real_A'] = util.tensor2im(self.real_A.data)
        results['latent_real_B'] = util.tensor2im(self.real_B.data)

        for i in range(num_interpolate):
            alpha_blend = alpha_blends[i]
            # alpha runs 0 -> 1, i.e. from pure label_2 gates to pure label_1 gates.
            output_gate = output_gate_1 * alpha_blend + output_gate_2 * (1 - alpha_blend)
            self.fake_B = self.netG.forward_main(self.real_A, output_gate)

            results['%d_L_fake_B_inter' % (i)] = util.tensor2im(self.fake_B.data)

        return OrderedDict(results)
開發者ID:arnabgho,項目名稱:iSketchNFill,代碼行數:25,代碼來源:label_channel_gated_pix2pix_model.py

示例3: add_objects

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def add_objects(self, click_src, label_tgt, mask, style_id=0):
        """Paste an object described by `mask` into the scene at `click_src`.

        Args:
            click_src: (y, x) click position; the mask's nonzero pixels are
                shifted by this offset before being written into the maps.
            label_tgt: label id assigned to the pasted region.
            mask: HxWxC numpy array; nonzero entries mark the object's pixels.
            style_id: index of the style feature applied to the new region.
        """
        y, x = click_src[0], click_src[1]
        # To NCHW; nonzero() yields one (n, c, h, w) index row per set pixel.
        mask = np.transpose(mask, (2, 0, 1))[np.newaxis,...]        
        idx_src = torch.from_numpy(mask).cuda().nonzero()        
        # Shift the mask indices to the clicked position.
        idx_src[:,2] += y
        idx_src[:,3] += x

        # backup current maps
        self.backup_current_state()

        # update label map
        self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt        
        # Zero every one-hot label channel at the target pixels, then set the
        # channel for label_tgt (channel index offset by idx_src[:,1] + k).
        for k in range(self.opt.label_nc):
            self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
        self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1            

        # update instance map, then recompute the edge channel (last channel)
        self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
        self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
                
        # update feature map for the edited region
        self.set_features(idx_src, self.feat, style_id)                
        
        # Re-run the generator on the edited inputs and cache the result image.
        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map)) 
開發者ID:Lotayou,項目名稱:everybody_dance_now_pytorch,代碼行數:26,代碼來源:ui_model.py

示例4: recurrent_test

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def recurrent_test(self, step=5):
        """Recursively re-synthesize from the network's own output.

        Round 0 runs netG on the real input; each later round crops a window
        of the previous fake output (offset by half the input size in each
        dimension) and feeds it back through netG.

        Returns:
            OrderedDict of ('real_<i>_A', image) / ('fake_<i>_B', image)
            pairs in generation order.
        """
        shape = self.input_A.cpu().shape
        height, width = shape[2], shape[3]
        outputs = []
        self.real_A = Variable(self.input_A, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        outputs.append(('real_{}_A'.format(0), util.tensor2im(self.real_A.data)))
        outputs.append(('fake_{}_B'.format(0), util.tensor2im(self.fake_B.data)))
        # Fixed half-size offsets (a random offset variant was commented out
        # in the original).
        off_w, off_h = int(width / 2), int(height / 2)
        for idx in range(1, step):
            window = self.fake_B.data[:, :, off_h:off_h + height, off_w:off_w + width]
            self.real_A = Variable(window, volatile=True)
            self.fake_B = self.netG.forward(self.real_A)
            outputs.append(('real_{}_A'.format(idx), util.tensor2im(self.real_A.data)))
            outputs.append(('fake_{}_B'.format(idx), util.tensor2im(self.fake_B.data)))
        return OrderedDict(outputs)
開發者ID:jessemelpolio,項目名稱:non-stationary_texture_syn,代碼行數:24,代碼來源:test_model.py

示例5: recurrent_test_l2_searching

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def recurrent_test_l2_searching(self, step=5):
        """Like recurrent_test, but each crop offset is chosen by L2 search.

        At every round l2_searching() picks the crop position inside the
        previous fake output, which is then re-synthesized by netG.

        Returns:
            OrderedDict of the real/fake image pairs, keys prefixed with
            'l2_search_' and encoding the chosen offsets.
        """
        shape = self.input_A.cpu().shape
        height, width = shape[2], shape[3]
        collected = []
        self.real_A = Variable(self.input_A, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        collected.append(('l2_search_real_{}_A'.format(0), util.tensor2im(self.real_A.data)))
        collected.append(('l2_search_fake_{}_B'.format(0), util.tensor2im(self.fake_B.data)))
        for idx in range(1, step):
            rw, rh = self.l2_searching(self.real_A.clone(), self.fake_B.clone())
            print("end selection: ", rw, rh)
            window = self.fake_B.data[:, :, rh:rh + height, rw:rw + width]
            self.real_A = Variable(window, volatile=True)
            self.fake_B = self.netG.forward(self.real_A)
            collected.append(('l2_search_real_{}_{}_{}_A'.format(idx, rw, rh), util.tensor2im(self.real_A.data)))
            collected.append(('l2_search_fake_{}_B'.format(idx), util.tensor2im(self.fake_B.data)))
        return OrderedDict(collected)
開發者ID:jessemelpolio,項目名稱:non-stationary_texture_syn,代碼行數:24,代碼來源:test_model.py

示例6: random_crop

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def random_crop(self, crop_patch=6):
        """Synthesize once, then re-synthesize `crop_patch` random crops of it.

        Crops are taken from the first fake output at uniformly random
        offsets (rw drawn before rh, preserving the RNG call order) and each
        crop is pushed through netG again.

        Returns:
            OrderedDict with the source pair followed by each crop's
            real/fake image pair.
        """
        shape = self.input_A.cpu().shape
        height, width = shape[2], shape[3]
        gathered = []
        self.real_A = Variable(self.input_A, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        src_fake_B = self.fake_B.clone()
        gathered.append(('real_A', util.tensor2im(self.real_A.data)))
        gathered.append(('fake_{}_B'.format('src'), util.tensor2im(self.fake_B.data)))
        for idx in range(0, crop_patch):
            rw = random.randint(0, width)
            rh = random.randint(0, height)
            patch = src_fake_B.data[:, :, rh:rh + height, rw:rw + width]
            self.real_A = Variable(patch, volatile=True)
            self.fake_B = self.netG.forward(self.real_A)
            gathered.append(('real_{}_{}_{}_A'.format(idx, rw, rh), util.tensor2im(self.real_A.data)))
            gathered.append(('fake_{}_B'.format(idx), util.tensor2im(self.fake_B.data)))
        return OrderedDict(gathered)
開發者ID:jessemelpolio,項目名稱:non-stationary_texture_syn,代碼行數:23,代碼來源:test_model.py

示例7: random_crop_256x256

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def random_crop_256x256(self, crop_patch=6):
        """Run netG on `crop_patch` random 256x256 crops of the real input.

        Unlike random_crop, the crops are cut from the real input itself and
        are fixed at 256x256; assumes the input is at least 256 pixels in
        each spatial dimension.

        Returns:
            OrderedDict with the full real image followed by each crop and
            its synthesized output ('256_real_...' / '512_fake_...').
        """
        shape = self.input_A.cpu().shape
        height, width = shape[2], shape[3]
        entries = []
        self.real_A = Variable(self.input_A, volatile=True)
        real_A_src = self.real_A.clone()
        entries.append(('real_A', util.tensor2im(self.real_A.data)))
        for idx in range(0, crop_patch):
            rw = random.randint(0, width - 256)
            rh = random.randint(0, height - 256)
            patch = real_A_src.data[:, :, rh:rh + 256, rw:rw + 256]
            self.real_A = Variable(patch, volatile=True)
            self.fake_B = self.netG.forward(self.real_A)
            entries.append(('256_real_{}_{}_{}_A'.format(idx, rw, rh), util.tensor2im(self.real_A.data)))
            entries.append(('512_fake_{}_B'.format(idx), util.tensor2im(self.fake_B.data)))
        return OrderedDict(entries)
開發者ID:jessemelpolio,項目名稱:non-stationary_texture_syn,代碼行數:24,代碼來源:test_model.py

示例8: add_objects

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def add_objects(self, click_src, label_tgt, mask, style_id=0):
        """Paste an object described by `mask` into the scene at `click_src`.

        Args:
            click_src: (y, x) click position; the mask's nonzero pixels are
                shifted by this offset before being written into the maps.
            label_tgt: label id assigned to the pasted region.
            mask: HxWxC numpy array; nonzero entries mark the object's pixels.
            style_id: index of the style feature applied to the new region.
        """
        y, x = click_src[0], click_src[1]
        # To NCHW; nonzero() yields one (n, c, h, w) index row per set pixel.
        mask = np.transpose(mask, (2, 0, 1))[np.newaxis,...]
        idx_src = torch.from_numpy(mask).cuda().nonzero()
        # Shift the mask indices to the clicked position.
        idx_src[:,2] += y
        idx_src[:,3] += x

        # backup current maps
        self.backup_current_state()

        # update label map
        self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
        # Zero every one-hot label channel at the target pixels, then set the
        # channel for label_tgt (channel index offset by idx_src[:,1] + k).
        for k in range(self.opt.label_nc):
            self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
        self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1

        # update instance map, then recompute the edge channel (last channel)
        self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
        self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)

        # update feature map for the edited region
        self.set_features(idx_src, self.feat, style_id)

        # Re-run the generator on the edited inputs and cache the result image.
        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map)) 
開發者ID:thomasjhuang,項目名稱:deep-learning-for-document-dewarping,代碼行數:26,代碼來源:ui_model.py

示例9: forward

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def forward(self, input):
        """Shift-triple forward pass with a residual refinement of F_c.

        The shift output is split into three equal channel groups (F_c, F_s,
        F_shift); F_c is fused with F_shift, refined through res_net, and
        returned concatenated with F_s.
        """
        _, self.c, self.h, self.w = input.size()
        self.flag = util.cal_flag_given_mask_thred(self.mask, self.shift_sz, self.stride, self.mask_thred)
        shift_out = InnerShiftTripleFunction.apply(input, self.shift_sz, self.stride, self.triple_weight, self.flag, self.show_flow)

        c_out = shift_out.size(1)
        # Three equal channel slices of the shift output.
        F_c = shift_out.narrow(1, 0, c_out // 3)
        F_s = shift_out.narrow(1, c_out // 3, c_out // 3)
        F_shift = shift_out.narrow(1, c_out * 2 // 3, c_out // 3)

        # Fuse F_c with the shifted features, then refine F_c residually.
        fused_input = torch.cat([F_c, F_c * F_shift], dim=1)
        F_c = F_c + self.res_net(fused_input)

        if self.show_flow:
            self.flow_srcs = InnerShiftTripleFunction.get_flow_src()
        return torch.cat([F_c, F_s], dim=1)
開發者ID:Zhaoyi-Yan,項目名稱:Shift-Net_pytorch,代碼行數:24,代碼來源:innerResShiftTriple.py

示例10: forward

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def forward(self, input, flip_feat=None):
        """Face shift-triple forward over both the mask and its horizontal flip.

        Computes shift flags for the current mask and its left-right flip,
        applies InnerFaceShiftTripleFunction, and also returns the second
        channel half of a clone of the input as `innerFeat`.

        Args:
            input: 4-D feature tensor (NCHW, inferred from the size() unpack).
            flip_feat: optional flipped features forwarded to the shift op.

        Returns:
            (final_out, innerFeat) tuple.
        """
        self.bz, self.c, self.h, self.w = input.size()
        # NOTE(review): _split_mask presumably selects the per-batch mask
        # slice on GPU; on CPU the full mask is used — confirm in _split_mask.
        if self.device != 'cpu':
            self._split_mask(self.bz)
        else:
            self.cur_mask = self.mask_all
        self.mask = self.cur_mask
        # Mirror the mask along the width dimension (dim 3).
        self.mask_flip = torch.flip(self.mask, [3])

        # Shift flags for the mask and for its flipped counterpart.
        self.flag = util.cal_flag_given_mask_thred(self.mask, self.shift_sz, self.stride, self.mask_thred)
        self.flag_flip = util.cal_flag_given_mask_thred(self.mask_flip, self.shift_sz, self.stride, self.mask_thred)

        final_out = InnerFaceShiftTripleFunction.apply(input, self.shift_sz, self.stride, self.triple_weight, self.flag, self.flag_flip, self.show_flow, flip_feat)
        if self.show_flow:
            self.flow_srcs = InnerFaceShiftTripleFunction.get_flow_src()

        # Second half of the channels of a defensive copy of the input.
        innerFeat = input.clone().narrow(1, self.c//2, self.c//2)
        return final_out, innerFeat 
開發者ID:Zhaoyi-Yan,項目名稱:Shift-Net_pytorch,代碼行數:20,代碼來源:InnerFaceShiftTriple.py

示例11: get_current_visuals

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_current_visuals(self):
        """Return the six CycleGAN visuals as an ordered name->image mapping."""
        pairs = [
            ('real_A', self.input_A),
            ('fake_B', self.fake_B),
            ('rec_A', self.rec_A),
            ('real_B', self.input_B),
            ('fake_A', self.fake_A),
            ('rec_B', self.rec_B),
        ]
        return OrderedDict((name, util.tensor2im(tensor)) for name, tensor in pairs)
開發者ID:ranery,項目名稱:Bayesian-CycleGAN,代碼行數:18,代碼來源:CycleGAN.py

示例12: get_current_visuals

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_current_visuals(self):
        """Collect the input frame and the generated video frames as images.

        Returns:
            OrderedDict mapping 'real_A', then per step 'real_B_<i>' and
            'fake_image_B_<i>', to numpy images via util.tensor2im.
        """
        # Reshape the flat generator output back to (batch, seq, C, H, W).
        image_fakes = self.image_gen_fakes.view(
            -1, self.opt.sequence_length, self.opt.image_channel_size,
            self.opt.image_size, self.opt.image_size)
        visuals = OrderedDict()
        visuals['real_A'] = util.tensor2im(self.real_A.data)
        for step in range(self.opt.sequence_length):
            visuals['real_B_' + str(step)] = util.tensor2im(self.real_videos[:, step, :, :, :].data)
            visuals['fake_image_B_' + str(step)] = util.tensor2im(image_fakes[:, step, :, :, :].data)

        return visuals
開發者ID:Hangz-nju-cuhk,項目名稱:Talking-Face-Generation-DAVS,代碼行數:15,代碼來源:Test_Video_Model.py

示例13: get_current_visuals

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_current_visuals(self):
        """Return the real input frame plus each audio-generated frame.

        Returns:
            OrderedDict with 'real_A' followed by 'fake_audio_B_<i>' for
            each step in the sequence, each converted via util.tensor2im.
        """
        # Reshape the flat generator output back to (batch, seq, C, H, W).
        fake_B_audio = self.audio_gen_fakes.view(-1, self.opt.sequence_length, self.opt.image_channel_size, self.opt.image_size, self.opt.image_size)
        real_A = util.tensor2im(self.real_A.data)
        oderdict = OrderedDict([('real_A', real_A)])
        fake_audio_B = {}
        # Fix: removed the unused `fake_image_B = {}` local (dead code).
        for i in range(self.opt.sequence_length):
            fake_audio_B[i] = util.tensor2im(fake_B_audio[:, i, :, :, :].data)
            oderdict['fake_audio_B_' + str(i)] = fake_audio_B[i]

        return oderdict
開發者ID:Hangz-nju-cuhk,項目名稱:Talking-Face-Generation-DAVS,代碼行數:13,代碼來源:Test_Audio_Model.py

示例14: get_current_visuals

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_current_visuals(self):
        """Return the CycleGAN visuals, plus identity images when enabled."""
        pairs = [
            ('real_A', self.input_A),
            ('fake_B', self.fake_B),
            ('rec_A', self.rec_A),
            ('real_B', self.input_B),
            ('fake_A', self.fake_A),
            ('rec_B', self.rec_B),
        ]
        ret_visuals = OrderedDict((name, util.tensor2im(tensor)) for name, tensor in pairs)
        # Identity-mapping images only exist when training with an identity loss.
        if self.opt.isTrain and self.opt.identity > 0.0:
            ret_visuals['idt_A'] = util.tensor2im(self.idt_A)
            ret_visuals['idt_B'] = util.tensor2im(self.idt_B)
        return ret_visuals
開發者ID:aayushbansal,項目名稱:Recycle-GAN,代碼行數:15,代碼來源:cycle_gan_model.py

示例15: get_current_visuals

# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_current_visuals(self):
        """Return all Recycle-GAN visuals as an ordered name->image mapping.

        Covers the three input frames per domain, their translations, the
        reconstructions, the temporal predictions, and (when training with an
        identity loss) the identity images.
        """
        real_A0 = util.tensor2im(self.input_A0)
        real_A1 = util.tensor2im(self.input_A1)
        real_A2 = util.tensor2im(self.input_A2)

        fake_B0 = util.tensor2im(self.fake_B0)
        fake_B1 = util.tensor2im(self.fake_B1)
        fake_B2 = util.tensor2im(self.fake_B2)

        rec_A = util.tensor2im(self.rec_A)

        real_B0 = util.tensor2im(self.input_B0)
        real_B1 = util.tensor2im(self.input_B1)
        real_B2 = util.tensor2im(self.input_B2)

        fake_A0 = util.tensor2im(self.fake_A0)
        fake_A1 = util.tensor2im(self.fake_A1)
        fake_A2 = util.tensor2im(self.fake_A2)

        rec_B = util.tensor2im(self.rec_B)

        pred_A2 = util.tensor2im(self.pred_A2)
        pred_B2 = util.tensor2im(self.pred_B2)

        # Fix: the original dict literal listed ('real_A2', real_A2) and
        # ('real_B2', real_B2) twice; the duplicates are dropped here (same
        # key and value, so the resulting mapping is unchanged).
        ret_visuals = OrderedDict([('real_A0', real_A0), ('fake_B0', fake_B0),
                                   ('real_A1', real_A1), ('fake_B1', fake_B1),
                                   ('fake_B2', fake_B2), ('rec_A', rec_A), ('real_A2', real_A2),
                                   ('real_B0', real_B0), ('fake_A0', fake_A0),
                                   ('real_B1', real_B1), ('fake_A1', fake_A1),
                                   ('fake_A2', fake_A2), ('rec_B', rec_B), ('real_B2', real_B2),
                                   ('pred_A2', pred_A2), ('pred_B2', pred_B2)])
        # Identity-mapping images only exist when training with an identity loss.
        if self.opt.isTrain and self.opt.identity > 0.0:
            ret_visuals['idt_A'] = util.tensor2im(self.idt_A)
            ret_visuals['idt_B'] = util.tensor2im(self.idt_B)
        return ret_visuals
開發者ID:aayushbansal,項目名稱:Recycle-GAN,代碼行數:38,代碼來源:reCycle_gan_model.py


注:本文中的util.util.util方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。