本文整理匯總了Python中util.util.util方法的典型用法代碼示例。如果您正苦於以下問題:Python util.util方法的具體用法?Python util.util怎麽用?Python util.util使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類util.util
的用法示例。
在下文中一共展示了util.util方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: get_current_visuals
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_current_visuals(self):
    """Assemble the current input frame and generated video frames as
    numpy images for visualization.

    Returns an OrderedDict with 'real_A' followed, for each time step i,
    by 'real_B_i', 'fake_audio_B_i' and 'fake_image_B_i'.
    """
    seq_len = self.opt.sequence_length
    # Reshape the flat generator outputs to (batch, time, C, H, W).
    audio_fakes = self.audio_gen_fakes.view(
        -1, seq_len, self.opt.image_channel_size,
        self.opt.image_size, self.opt.image_size)
    image_fakes = self.image_gen_fakes.view(
        -1, seq_len, self.opt.image_channel_size,
        self.opt.image_size, self.opt.image_size)
    visuals = OrderedDict()
    visuals['real_A'] = util.tensor2im(self.real_A.data)
    for idx in range(seq_len):
        visuals['real_B_' + str(idx)] = util.tensor2im(self.real_videos[:, idx, :, :, :].data)
        visuals['fake_audio_B_' + str(idx)] = util.tensor2im(audio_fakes[:, idx, :, :, :].data)
        visuals['fake_image_B_' + str(idx)] = util.tensor2im(image_fakes[:, idx, :, :, :].data)
    return visuals
示例2: get_latent_space_visualization
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_latent_space_visualization(self, num_interpolate=20, label_1=-1, label_2=-1):
    """Interpolate the gate outputs of two class labels and render the
    generated image at each blend ratio.

    label_1 / label_2 default (-1) to the current batch label and
    opt.target_label respectively. Returns an OrderedDict with the real
    A/B images plus num_interpolate interpolated fakes.
    """
    # NOTE(review): rand_perm is unused but kept — the call advances the
    # global numpy RNG state, which callers may implicitly depend on.
    rand_perm = np.random.permutation(self.opt.n_classes)
    if label_1 == -1:
        label_1 = self.label[0]
    if label_2 == -1:
        label_2 = self.opt.target_label
    # Compute the gate activations for each endpoint label.
    self.label[0] = label_1
    gate_a = self.netG.forward_gate(self.label)
    self.label[0] = label_2
    gate_b = self.netG.forward_gate(self.label)
    visuals = OrderedDict()
    visuals['latent_real_A'] = util.tensor2im(self.real_A.data)
    visuals['latent_real_B'] = util.tensor2im(self.real_B.data)
    for step, alpha in enumerate(np.linspace(0, 1, num_interpolate)):
        blended_gate = gate_a * alpha + gate_b * (1 - alpha)
        self.fake_B = self.netG.forward_main(self.real_A, blended_gate)
        visuals['%d_L_fake_B_inter' % (step)] = util.tensor2im(self.fake_B.data)
    return visuals
示例3: add_objects
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def add_objects(self, click_src, label_tgt, mask, style_id=0):
    """Paste an object of class `label_tgt` into the scene at `click_src`.

    Updates the label map, the one-hot network input channels, the
    instance map (and its edge channel) and the feature map in place,
    then re-runs the generator to refresh `self.fake_image`.

    Args:
        click_src: (y, x) pixel position where the object is placed.
        label_tgt: integer class label to write into the maps.
        mask: object mask — assumed HxWxC numpy array whose nonzero
            entries select the object pixels (TODO confirm shape).
        style_id: style index forwarded to set_features.
    """
    y, x = click_src[0], click_src[1]
    # HWC -> 1xCxHxW so nonzero() yields (batch, channel, row, col) index rows.
    mask = np.transpose(mask, (2, 0, 1))[np.newaxis,...]
    idx_src = torch.from_numpy(mask).cuda().nonzero()
    # Shift mask-local coordinates to the clicked position.
    idx_src[:,2] += y
    idx_src[:,3] += x
    # backup current maps
    self.backup_current_state()
    # update label map
    self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
    # Zero all one-hot label channels at the object pixels, then set the
    # channel for label_tgt (channel offset = idx_src[:,1] + k, with
    # idx_src[:,1] expected to be 0 for a single-channel mask).
    for k in range(self.opt.label_nc):
        self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
    self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
    # update instance map
    self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
    # The last input channel carries instance boundary edges.
    self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
    # update feature map
    self.set_features(idx_src, self.feat, style_id)
    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
示例4: recurrent_test
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def recurrent_test(self, step=5):
    """Recurrently feed the generator its own output.

    After the first pass, each iteration crops the previous fake output
    starting at the image center (the slice is clamped at the border,
    yielding a smaller patch) and runs the generator again. Returns an
    OrderedDict of ('real_i_A', 'fake_i_B') image pairs.
    """
    _, _, height, width = self.input_A.cpu().shape
    self.real_A = Variable(self.input_A, volatile=True)
    self.fake_B = self.netG.forward(self.real_A)
    visuals = [
        ('real_{}_A'.format(0), util.tensor2im(self.real_A.data)),
        ('fake_{}_B'.format(0), util.tensor2im(self.fake_B.data)),
    ]
    for n in range(1, step):
        # Fixed center offset (random offsets were disabled upstream).
        rw = int(width / 2)
        rh = int(height / 2)
        patch = self.fake_B.data[:, :, rh:rh + height, rw:rw + width]
        self.real_A = Variable(patch, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        visuals.append(('real_{}_A'.format(n), util.tensor2im(self.real_A.data)))
        visuals.append(('fake_{}_B'.format(n), util.tensor2im(self.fake_B.data)))
    return OrderedDict(visuals)
示例5: recurrent_test_l2_searching
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def recurrent_test_l2_searching(self, step=5):
    """Recurrently re-feed the generator its own output, choosing the
    crop offset at each step with self.l2_searching.

    Returns an OrderedDict of 'l2_search_*' real/fake image pairs; the
    real keys after step 0 also encode the chosen (rw, rh) offset.
    """
    _, _, height, width = self.input_A.cpu().shape
    self.real_A = Variable(self.input_A, volatile=True)
    self.fake_B = self.netG.forward(self.real_A)
    visuals = [
        ('l2_search_real_{}_A'.format(0), util.tensor2im(self.real_A.data)),
        ('l2_search_fake_{}_B'.format(0), util.tensor2im(self.fake_B.data)),
    ]
    for n in range(1, step):
        # Pick the crop origin by L2 search instead of at random.
        rw, rh = self.l2_searching(self.real_A.clone(), self.fake_B.clone())
        print("end selection: ", rw, rh)
        patch = self.fake_B.data[:, :, rh:rh + height, rw:rw + width]
        self.real_A = Variable(patch, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        visuals.append(('l2_search_real_{}_{}_{}_A'.format(n, rw, rh), util.tensor2im(self.real_A.data)))
        visuals.append(('l2_search_fake_{}_B'.format(n), util.tensor2im(self.fake_B.data)))
    return OrderedDict(visuals)
示例6: random_crop
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def random_crop(self, crop_patch=6):
    """Re-feed random crops of the first fake output through the generator.

    Produces crop_patch real/fake pairs. Random offsets may run past the
    image border, in which case the slice is truncated by indexing
    semantics (this mirrors the long-standing behavior).
    """
    _, _, height, width = self.input_A.cpu().shape
    self.real_A = Variable(self.input_A, volatile=True)
    self.fake_B = self.netG.forward(self.real_A)
    src_fake_B = self.fake_B.clone()
    visuals = [
        ('real_A', util.tensor2im(self.real_A.data)),
        ('fake_{}_B'.format('src'), util.tensor2im(self.fake_B.data)),
    ]
    for patch_idx in range(0, crop_patch):
        # Draw rw then rh to keep the RNG consumption order stable.
        rw = random.randint(0, width)
        rh = random.randint(0, height)
        patch = src_fake_B.data[:, :, rh:rh + height, rw:rw + width]
        self.real_A = Variable(patch, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        visuals.append(('real_{}_{}_{}_A'.format(patch_idx, rw, rh), util.tensor2im(self.real_A.data)))
        visuals.append(('fake_{}_B'.format(patch_idx), util.tensor2im(self.fake_B.data)))
    return OrderedDict(visuals)
示例7: random_crop_256x256
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def random_crop_256x256(self, crop_patch=6):
    """Run the generator on crop_patch random 256x256 crops of the input.

    Assumes the input is at least 256x256 in both dimensions (TODO
    confirm — randint would raise otherwise). Returns an OrderedDict
    with the full 'real_A' plus '256_real_*'/'512_fake_*' pairs.
    """
    _, _, height, width = self.input_A.cpu().shape
    self.real_A = Variable(self.input_A, volatile=True)
    real_A_src = self.real_A.clone()
    visuals = [('real_A', util.tensor2im(self.real_A.data))]
    for patch_idx in range(0, crop_patch):
        # Draw rw then rh to keep the RNG consumption order stable.
        rw = random.randint(0, width - 256)
        rh = random.randint(0, height - 256)
        crop = real_A_src.data[:, :, rh:rh + 256, rw:rw + 256]
        self.real_A = Variable(crop, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        visuals.append(('256_real_{}_{}_{}_A'.format(patch_idx, rw, rh), util.tensor2im(self.real_A.data)))
        visuals.append(('512_fake_{}_B'.format(patch_idx), util.tensor2im(self.fake_B.data)))
    return OrderedDict(visuals)
示例8: add_objects
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def add_objects(self, click_src, label_tgt, mask, style_id=0):
    """Insert an object of class `label_tgt` at pixel `click_src`.

    Mutates the label map, the one-hot label channels of the network
    input, the instance map (plus its edge channel) and the feature map,
    then regenerates `self.fake_image`.

    Args:
        click_src: (y, x) target position.
        label_tgt: integer class label for the pasted object.
        mask: numpy mask (assumed HxWxC; nonzero pixels define the
            object — TODO confirm against callers).
        style_id: style index passed through to set_features.
    """
    y, x = click_src[0], click_src[1]
    # HWC -> 1xCxHxW; nonzero() then gives (batch, channel, row, col) rows.
    mask = np.transpose(mask, (2, 0, 1))[np.newaxis,...]
    idx_src = torch.from_numpy(mask).cuda().nonzero()
    # Translate mask coordinates to the click location.
    idx_src[:,2] += y
    idx_src[:,3] += x
    # backup current maps
    self.backup_current_state()
    # update label map
    self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
    # Clear every one-hot label channel at the object pixels, then set
    # the channel corresponding to label_tgt.
    for k in range(self.opt.label_nc):
        self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
    self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
    # update instance map
    self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
    # Last input channel holds instance boundary edges.
    self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
    # update feature map
    self.set_features(idx_src, self.feat, style_id)
    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
示例9: forward
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def forward(self, input):
    """Shift-triple forward pass.

    Runs InnerShiftTripleFunction on `input`, splits its output into
    three equal channel groups (F_c, F_s, F_shift), fuses F_c with the
    shifted features, refines via the residual net, and returns the
    concatenation of the refined F_c and F_s.
    """
    #print(input.shape)
    _, self.c, self.h, self.w = input.size()
    # Flag marking which patches exceed the mask threshold.
    self.flag = util.cal_flag_given_mask_thred(self.mask, self.shift_sz, self.stride, self.mask_thred)
    shift_out = InnerShiftTripleFunction.apply(input, self.shift_sz, self.stride, self.triple_weight, self.flag, self.show_flow)
    c_out = shift_out.size(1)
    # get F_c, F_s, F_shift
    # Split the shifted output into thirds along the channel dimension.
    F_c = shift_out.narrow(1, 0, c_out//3)
    F_s = shift_out.narrow(1, c_out//3, c_out//3)
    F_shift = shift_out.narrow(1, c_out*2//3, c_out//3)
    # Elementwise fusion of content and shifted features.
    F_fuse = F_c * F_shift
    F_com = torch.cat([F_c, F_fuse], dim=1)
    res_out = self.res_net(F_com)
    # Residual refinement of the content features.
    F_c = F_c + res_out
    final_out = torch.cat([F_c, F_s], dim=1)
    if self.show_flow:
        # Capture flow sources for visualization when requested.
        self.flow_srcs = InnerShiftTripleFunction.get_flow_src()
    return final_out
示例10: forward
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def forward(self, input, flip_feat=None):
    """Face-shift forward pass using both the mask and its horizontal flip.

    Computes shift flags for the mask and its width-flipped mirror, then
    applies InnerFaceShiftTripleFunction. Returns the shifted output and
    the second half of the input's channels (cloned).

    Args:
        input: feature tensor of shape (bz, c, h, w).
        flip_feat: optional flipped features forwarded to the shift op.
    """
    self.bz, self.c, self.h, self.w = input.size()
    # On GPU the mask is split per batch element; on CPU the full mask is used.
    if self.device != 'cpu':
        self._split_mask(self.bz)
    else:
        self.cur_mask = self.mask_all
    self.mask = self.cur_mask
    # Mirror the mask along the width axis for the flipped branch.
    self.mask_flip = torch.flip(self.mask, [3])
    self.flag = util.cal_flag_given_mask_thred(self.mask, self.shift_sz, self.stride, self.mask_thred)
    self.flag_flip = util.cal_flag_given_mask_thred(self.mask_flip, self.shift_sz, self.stride, self.mask_thred)
    final_out = InnerFaceShiftTripleFunction.apply(input, self.shift_sz, self.stride, self.triple_weight, self.flag, self.flag_flip, self.show_flow, flip_feat)
    if self.show_flow:
        self.flow_srcs = InnerFaceShiftTripleFunction.get_flow_src()
    # Second half of the input channels, detached from `input` via clone.
    innerFeat = input.clone().narrow(1, self.c//2, self.c//2)
    return final_out, innerFeat
示例11: get_current_visuals
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_current_visuals(self):
    """Return CycleGAN visuals (real, fake and reconstructed images for
    both domains) as an OrderedDict of numpy images."""
    tensor_by_name = (
        ('real_A', self.input_A),
        ('fake_B', self.fake_B),
        ('rec_A', self.rec_A),
        ('real_B', self.input_B),
        ('fake_A', self.fake_A),
        ('rec_B', self.rec_B),
    )
    return OrderedDict((name, util.tensor2im(t)) for name, t in tensor_by_name)
示例12: get_current_visuals
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_current_visuals(self):
    """Collect the input frame and generated image frames for display.

    Returns an OrderedDict with 'real_A' followed, per time step i, by
    'real_B_i' and 'fake_image_B_i'.
    """
    seq_len = self.opt.sequence_length
    # (batch*time, C, H, W) -> (batch, time, C, H, W)
    image_fakes = self.image_gen_fakes.view(
        -1, seq_len, self.opt.image_channel_size,
        self.opt.image_size, self.opt.image_size)
    visuals = OrderedDict()
    visuals['real_A'] = util.tensor2im(self.real_A.data)
    for idx in range(seq_len):
        visuals['real_B_' + str(idx)] = util.tensor2im(self.real_videos[:, idx, :, :, :].data)
        visuals['fake_image_B_' + str(idx)] = util.tensor2im(image_fakes[:, idx, :, :, :].data)
    return visuals
示例13: get_current_visuals
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_current_visuals(self):
    """Collect the input frame and the audio-generated fake frames.

    Returns an OrderedDict with 'real_A' followed by 'fake_audio_B_i'
    for each time step i.
    """
    # (batch*time, C, H, W) -> (batch, time, C, H, W)
    fake_B_audio = self.audio_gen_fakes.view(
        -1, self.opt.sequence_length, self.opt.image_channel_size,
        self.opt.image_size, self.opt.image_size)
    visuals = OrderedDict([('real_A', util.tensor2im(self.real_A.data))])
    # Note: the original also initialized an unused `fake_image_B = {}`
    # local (dead code, removed).
    for i in range(self.opt.sequence_length):
        visuals['fake_audio_B_' + str(i)] = util.tensor2im(fake_B_audio[:, i, :, :, :].data)
    return visuals
示例14: get_current_visuals
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_current_visuals(self):
    """Return CycleGAN visuals, optionally including identity-mapping
    images when training with an identity loss weight > 0."""
    pairs = (
        ('real_A', self.input_A),
        ('fake_B', self.fake_B),
        ('rec_A', self.rec_A),
        ('real_B', self.input_B),
        ('fake_A', self.fake_A),
        ('rec_B', self.rec_B),
    )
    ret_visuals = OrderedDict((name, util.tensor2im(t)) for name, t in pairs)
    if self.opt.isTrain and self.opt.identity > 0.0:
        # Identity images exist only when the identity loss is active.
        ret_visuals['idt_A'] = util.tensor2im(self.idt_A)
        ret_visuals['idt_B'] = util.tensor2im(self.idt_B)
    return ret_visuals
示例15: get_current_visuals
# 需要導入模塊: from util import util [as 別名]
# 或者: from util.util import util [as 別名]
def get_current_visuals(self):
    """Return the three-frame visuals for both domains plus predictions.

    Includes real frames A0-A2/B0-B2, fake frames, reconstructions and
    predicted third frames; identity images are appended when training
    with an identity loss weight > 0.

    Fix: the original OrderedDict literal repeated the ('real_A2', ...)
    and ('real_B2', ...) entries; duplicates are removed — the resulting
    dict is identical (first position, same value).
    """
    real_A0 = util.tensor2im(self.input_A0)
    real_A1 = util.tensor2im(self.input_A1)
    real_A2 = util.tensor2im(self.input_A2)
    fake_B0 = util.tensor2im(self.fake_B0)
    fake_B1 = util.tensor2im(self.fake_B1)
    fake_B2 = util.tensor2im(self.fake_B2)
    rec_A = util.tensor2im(self.rec_A)
    real_B0 = util.tensor2im(self.input_B0)
    real_B1 = util.tensor2im(self.input_B1)
    real_B2 = util.tensor2im(self.input_B2)
    fake_A0 = util.tensor2im(self.fake_A0)
    fake_A1 = util.tensor2im(self.fake_A1)
    fake_A2 = util.tensor2im(self.fake_A2)
    rec_B = util.tensor2im(self.rec_B)
    pred_A2 = util.tensor2im(self.pred_A2)
    pred_B2 = util.tensor2im(self.pred_B2)
    ret_visuals = OrderedDict([('real_A0', real_A0), ('fake_B0', fake_B0),
                               ('real_A1', real_A1), ('fake_B1', fake_B1),
                               ('fake_B2', fake_B2), ('rec_A', rec_A), ('real_A2', real_A2),
                               ('real_B0', real_B0), ('fake_A0', fake_A0),
                               ('real_B1', real_B1), ('fake_A1', fake_A1),
                               ('fake_A2', fake_A2), ('rec_B', rec_B), ('real_B2', real_B2),
                               ('pred_A2', pred_A2), ('pred_B2', pred_B2)])
    if self.opt.isTrain and self.opt.identity > 0.0:
        # Identity images exist only when the identity loss is active.
        ret_visuals['idt_A'] = util.tensor2im(self.idt_A)
        ret_visuals['idt_B'] = util.tensor2im(self.idt_B)
    return ret_visuals