本文整理汇总了Python中torch.autograd.Variable.resize方法的典型用法代码示例。如果您正苦于以下问题:Python Variable.resize方法的具体用法?Python Variable.resize怎么用?Python Variable.resize使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.autograd.Variable
的用法示例。
在下文中一共展示了Variable.resize方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: range
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import resize [as 别名]
# NOTE(review): scraped fragment — indentation was lost in extraction, and the
# header of the enclosing helper (presumably `def prune_by_label(inp, target)`)
# is not visible in this excerpt. The first four lines are that helper's tail:
# it keeps only samples whose label is < 10 and concatenates the survivors.
if target[i] < 10:
new_inp.append(inp[i])
new_target.append(target[i:i+1])
return torch.cat(new_inp,0), torch.cat(new_target,0)
# Training loop: 30 epochs over train_loader; inputs are flattened to 784
# features (presumably 28x28 images, e.g. MNIST — TODO confirm from caller).
for epoch in range(0,30):
for i, (inp, target) in enumerate(train_loader):
# Drop samples with label >= 10 before wrapping in Variables.
inp, target = prune_by_label(inp,target)
inp = Variable(inp.cuda())
bs = inp.size(0)
# Variable.resize — the method this example showcases — reshapes the
# batch to (bs, 784); deprecated in modern PyTorch in favour of .view().
inp = inp.resize(bs, 784)
target = Variable(target.cuda())
h1 = net.compute_h1(inp)
# Mixup is applied to the labels only, not to the visible (input) layer.
y, target_soft = net.compute_y(inp, target, mixup=True, visible_mixup=False)
loss = bce_loss(y, target_soft)
opt.zero_grad()
loss.backward()
opt.step()
if i == 0:
clear = True
else:
# NOTE(review): the `else:` body is truncated in this excerpt.
示例2: feature_extractor
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import resize [as 别名]
def feature_extractor():
    """Extract C3D features for every video directory under VIDEO_DIR.

    For each video, frames ``image_0001.jpg .. image_NNNN.jpg`` are read,
    grouped into non-overlapping clips of ``nb_frames``, cropped at one
    random position (sampled once per video), passed through a pretrained
    C3D network, and the resulting features are written to one HDF5 group
    per video in ``OUTPUT_DIR/OUTPUT_NAME``.

    Relies on module-level configuration not shown in this excerpt:
    VIDEO_DIR, OUTPUT_DIR, OUTPUT_NAME, EXTRACTED_LAYER, RUN_GPU,
    nb_frames, resize_w/resize_h, crop_w/crop_h, the C3D model class,
    h5py, and skimage's ``io``/``resize``.
    """
    net = C3D(487)
    print('net', net)
    # Load pretrained weights (trained on a sports dataset; the last layer
    # is fine-tuned elsewhere).
    net.load_state_dict(torch.load('/data1/miayuan/pretrained_models/c3d.pickle'))
    if RUN_GPU:
        net.cuda(0)
    # NOTE(review): original indentation was lost; eval() is assumed to be
    # unconditional (inference mode regardless of device) — confirm.
    net.eval()
    print('net', net)
    # Layer 5 features are twice as wide as the other extraction points.
    feature_dim = 4096 if EXTRACTED_LAYER != 5 else 8192
    video_list = os.listdir(VIDEO_DIR)
    print('video_list', video_list)
    if not os.path.isdir(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    f = h5py.File(os.path.join(OUTPUT_DIR, OUTPUT_NAME), 'w')

    def count_files(directory, prefix_list):
        # For each prefix, count files in `directory` matching '<prefix>*'.
        lst = os.listdir(directory)
        return [len(fnmatch.filter(lst, x + '*')) for x in prefix_list]

    for video_name in video_list:
        video_path = os.path.join(VIDEO_DIR, video_name)
        print('video_path', video_path)
        # BUG FIX: the original passed ('image_') — parenthesized string,
        # NOT a tuple — so count_files iterated its characters and
        # all_cnt[0] counted files matching 'i*' instead of 'image_*'.
        # A one-element tuple ('image_',) counts the frame images.
        all_cnt = count_files(video_path, ('image_',))
        total_frames = all_cnt[0]
        print('Total frames: %d' % total_frames)
        # Keep only whole clips of nb_frames; '//' preserves the Python-2
        # integer-division semantics the original relied on.
        valid_frames = total_frames // nb_frames * nb_frames
        print('Total validated frames: %d' % valid_frames)
        # One random crop offset per video, reused for all of its clips.
        index_w = np.random.randint(resize_w - crop_w)
        index_h = np.random.randint(resize_h - crop_h)
        features = []
        print('NB features: %d' % (valid_frames // nb_frames))
        for i in range(valid_frames // nb_frames):
            # Frame files are 1-indexed: image_0001.jpg, image_0002.jpg, ...
            clip = np.array([
                resize(io.imread(os.path.join(video_path,
                                              'image_{:04d}.jpg'.format(j))),
                       output_shape=(resize_w, resize_h), preserve_range=True)
                for j in range(i * nb_frames + 1, (i + 1) * nb_frames + 1)
            ])
            clip = clip[:, index_w: index_w + crop_w, index_h: index_h + crop_h, :]
            # (T, H, W, C) -> (C, T, H, W), then add the batch dim via
            # Variable.resize (the method this example showcases; modern
            # PyTorch would use .unsqueeze(0)/.view()).
            clip = torch.from_numpy(np.float32(clip.transpose(3, 0, 1, 2)))
            clip = Variable(clip).cuda() if RUN_GPU else Variable(clip)
            clip = clip.resize(1, 3, nb_frames, crop_w, crop_h)
            _, clip_output = net(clip, EXTRACTED_LAYER)
            features.append(clip_output.data.cpu())
        features = torch.cat(features, 0).numpy()
        print('features', features)
        # One HDF5 group per video: features plus frame bookkeeping.
        fgroup = f.create_group(video_name)
        fgroup.create_dataset('c3d_features', data=features)
        fgroup.create_dataset('total_frames', data=np.array(total_frames))
        fgroup.create_dataset('valid_frames', data=np.array(valid_frames))
        print('%s has been processed...' % video_name)