This article collects typical usage examples of the scipy.misc.imsave method in Python. If you are wondering how misc.imsave works in practice, or looking for concrete examples of calling it, the curated code samples below may help. You can also explore further usage examples for the containing module, scipy.misc.
Fifteen code examples of the misc.imsave method are shown below, sorted by popularity by default.
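Note: scipy.misc.imsave (together with imread, imresize, imrotate and toimage) was deprecated in SciPy 1.0 and removed in later releases, so the snippets on this page only run against older SciPy versions with Pillow installed. Below is a minimal, self-contained sketch of the call pattern, with imageio.imwrite shown as the commonly recommended replacement; the dummy array and file names are illustrative only.
import numpy as np
from scipy import misc   # available only on older SciPy releases
import imageio           # typical modern replacement

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)  # dummy RGB image
misc.imsave('example_legacy.png', img)       # the legacy call used throughout this page
imageio.imwrite('example_modern.png', img)   # equivalent call in current code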
Example 1: process_frame
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def process_frame(frame_idx, img, model, write_to_dir, conf_threshold, input_size=224):
"""Finds bounding boxes in a video frame, draws these bounding boxes
and saves the result to HDD.
"""
# find BBs in frame
bbs, time_model = find_bbs(img, model, conf_threshold, input_size=input_size)
# draw BBs
img_out = np.copy(img)
for (bb, score) in bbs:
if score > conf_threshold and bb.width > 2 and bb.height > 2:
img_out = bb.draw_on_image(img_out, color=[0, 255, 0], thickness=3)
# save to output directory
save_to_fp = os.path.join(write_to_dir, "%05d.jpg" % (frame_idx,))
misc.imsave(save_to_fp, img_out)
return time_model
Example 2: crop_det
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def crop_det(det_M, img):
    global track_struct
    crop_det_folder = track_struct['file_path']['crop_det_folder']
    crop_size = track_struct['track_params']['crop_size']
    if not os.path.isdir(crop_det_folder):
        os.makedirs(crop_det_folder)
    save_patch_list = []
    for n in range(len(det_M)):
        xmin = int(max(0, det_M[n,1]))
        xmax = int(min(img.shape[1]-1, det_M[n,1]+det_M[n,3]))
        ymin = int(max(0, det_M[n,2]))
        ymax = int(min(img.shape[0]-1, det_M[n,2]+det_M[n,4]))
        img_patch = img[ymin:ymax, xmin:xmax, :]
        img_patch = misc.imresize(img_patch, size=[crop_size, crop_size])
        patch_name = track_lib.file_name(n, 4) + '.png'
        save_path = crop_det_folder + '/' + patch_name
        misc.imsave(save_path, img_patch)
        save_patch_list.append(save_path)
    return save_patch_list
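misc.imresize, used above, was removed from SciPy together with imsave. The following is a minimal sketch of the same crop-resize-save step using Pillow and imageio instead; the helper name and inputs are hypothetical and not part of the original repository.
import numpy as np
import imageio
from PIL import Image

def save_resized_patch(img, ymin, ymax, xmin, xmax, crop_size, save_path):
    # crop the detection patch and resize it to crop_size x crop_size
    patch = img[ymin:ymax, xmin:xmax, :]
    patch = np.array(Image.fromarray(patch).resize((crop_size, crop_size)))
    imageio.imwrite(save_path, patch)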
Example 3: main
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def main():
    for file in os.listdir(data_image_dir):
        if file.endswith(".png"):
            print("Trying to copy %s" % file)
            im = misc.imread(os.path.join(data_image_dir, file), mode='RGB')
            height, width, ch = im.shape
            assert ch == IMAGE_DEPTH
            if height == IMAGE_HEIGHT and width == IMAGE_WIDTH and ch == IMAGE_DEPTH:
                misc.imsave(os.path.join(image_dir, file), im)
            else:
                print("Size: (%d, %d, %d) cannot be used." % (height, width, ch))
    for file in os.listdir(data_label_dir):
        if file.endswith(".png"):
            print("Trying to convert %s" % file)
            gt_label = convert_to_label_data(os.path.join(data_label_dir, file))
            if gt_label is not None:
                misc.imsave(os.path.join(label_output_dir, file), gt_label)
Example 4: setup
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def setup(self, pre_encode=False):
    target_path = self.root + '/combined_annotations/'
    if not os.path.exists(target_path):
        os.makedirs(target_path)
    if pre_encode:
        print("Pre-encoding segmentation masks...")
        for i in tqdm(self.sbd_train_list):
            lbl_path = self.sbd_path + 'dataset/cls/' + i + '.mat'
            lbl = io.loadmat(lbl_path)['GTcls'][0]['Segmentation'][0].astype(np.int32)
            lbl = m.toimage(lbl, high=self.ignore_index, low=0)
            m.imsave(target_path + i + '.png', lbl)
        for i in tqdm(self.sbd_val_list):
            lbl_path = self.sbd_path + 'dataset/cls/' + i + '.mat'
            lbl = io.loadmat(lbl_path)['GTcls'][0]['Segmentation'][0].astype(np.int32)
            lbl = m.toimage(lbl, high=self.ignore_index, low=0)
            m.imsave(target_path + i + '.png', lbl)
        for i in tqdm(self.files['trainval']):
            lbl_path = self.voc_path + 'SegmentationClass/' + i + '.png'
            lbl = self.encode_segmap(m.imread(lbl_path))
            lbl = m.toimage(lbl, high=self.ignore_index, low=0)
            m.imsave(target_path + i + '.png', lbl)
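Here m.toimage(lbl, high=..., low=0) rescales the label map linearly to the [low, high] range before saving; toimage was removed along with the other scipy.misc image helpers. A minimal sketch of an equivalent rescale-and-save step using NumPy and imageio follows; the function name is hypothetical.
import numpy as np
import imageio

def save_scaled_label(path, lbl, high, low=0):
    # linearly map lbl.min()..lbl.max() to low..high, as scipy.misc.toimage did
    lbl = lbl.astype(np.float64)
    span = lbl.max() - lbl.min()
    if span == 0:
        scaled = np.full(lbl.shape, low, dtype=np.uint8)
    else:
        scaled = ((lbl - lbl.min()) * (high - low) / span + low).astype(np.uint8)
    imageio.imwrite(path, scaled)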
Example 5: replace_eyes
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def replace_eyes(image, out_eyes, out_shape, out_path, n):
    x_cen, y_cen, half_w, half_h = out_shape
    copy_image = np.copy(image)
    print(image.shape)
    print(out_eyes.shape)  # 41,51
    out_eyes = np.squeeze(out_eyes, axis=0)
    # resize and save as eyes only
    # replace = cv2.resize(out_eyes, (51,41))  # (400, 250)
    save_path_and_name = os.path.join(out_path, '{}.jpg'.format(n))
    misc.imsave(save_path_and_name, out_eyes)
    resize_replace = cv2.resize(out_eyes, (2*half_w, 2*half_h)) * 255  # resize to original
    # resize_replace = np.transpose(resize_replace, axes=(1, 0, 2))
    copy_image[(y_cen - half_h):(y_cen + half_h), (x_cen - half_w):(x_cen + half_w), :] = resize_replace.astype(np.uint8)
    image_save_path_and_name = os.path.join(out_path, 'face_{}.jpg'.format(n))
    # print(image_save_path_and_name)
    misc.imsave(image_save_path_and_name, copy_image)
    return None
Example 6: data_store
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def data_store(path,action,reward,state):
    if not os.path.exists(path):
        os.makedirs(path)
    else:
        shutil.rmtree(path)
        os.makedirs(path)
    df = pd.DataFrame(action, columns=["Steering", "Throttle", "Brake"])
    df["Reward"] = reward
    df.to_csv(path + 'car_racing_actions_rewards.csv', index=False)
    for i in range(len(state)):
        if rgb_mode == False:
            image = rgb2gray(state[i])
        else:
            image = state[i]
        misc.imsave(path + "img" + str(i) + ".png", image)
Example 7: save_HR_LR
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def save_HR_LR(img, size, path, idx):
    HR_img = misc.imresize(img, size, interp='bicubic')
    HR_img = modcrop(HR_img, 4)
    rot180_img = misc.imrotate(HR_img, 180)
    x4_img = misc.imresize(HR_img, 1 / 4, interp='bicubic')
    x4_rot180_img = misc.imresize(rot180_img, 1 / 4, interp='bicubic')
    img_path = path.split('/')[-1].split('.')[0] + '_rot0_' + 'ds' + str(idx) + '.png'
    rot180img_path = path.split('/')[-1].split('.')[0] + '_rot180_' + 'ds' + str(idx) + '.png'
    x4_img_path = path.split('/')[-1].split('.')[0] + '_rot0_' + 'ds' + str(idx) + '.png'
    x4_rot180img_path = path.split('/')[-1].split('.')[0] + '_rot180_' + 'ds' + str(idx) + '.png'
    misc.imsave(save_HR_path + '/' + img_path, HR_img)
    misc.imsave(save_HR_path + '/' + rot180img_path, rot180_img)
    misc.imsave(save_LR_path + '/' + x4_img_path, x4_img)
    misc.imsave(save_LR_path + '/' + x4_rot180img_path, x4_rot180_img)
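misc.imrotate and the bicubic misc.imresize calls above are likewise gone from SciPy. A minimal sketch of the 180-degree rotation and 1/4 bicubic downscale using NumPy and Pillow follows; the helper name is hypothetical.
import numpy as np
from PIL import Image

def rotate_and_downscale(hr_img):
    # 180-degree rotation, a stand-in for misc.imrotate(hr_img, 180)
    rot180 = np.rot90(hr_img, 2)
    # 1/4 bicubic downscale, a stand-in for misc.imresize(hr_img, 1 / 4, interp='bicubic')
    h, w = hr_img.shape[:2]
    lr = np.array(Image.fromarray(hr_img).resize((w // 4, h // 4), resample=Image.BICUBIC))
    return rot180, lr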
Example 8: kernel_summary
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def kernel_summary(sess):
    with sess.as_default():
        for layer in ["conv1", "conv2", "conv3"]:
            with tf.variable_scope(layer, reuse=True):
                weights = tf.get_variable('weights')
                kernels = tf.unpack(tf.transpose(weights, perm=[3, 2, 0, 1]))
                for i, kernel in enumerate(kernels):
                    # [12, 6, 6] -> 12 x [8, 8]
                    padding = [[1, 1], [1, 1]]
                    padded_kernels = [tf.pad(single_kernel, padding) for single_kernel in tf.unpack(kernel)]
                    # 12 x [8, 8] -> [6, 12 * 8]
                    horizontally_concatenated = tf.concat(1, padded_kernels)
                    image = horizontally_concatenated.eval()
                    misc.imsave(layer + "_" + str(i) + ".png", image)
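The snippet above also relies on pre-1.0 TensorFlow ops (tf.unpack, and tf.concat with the axis passed first). Since the goal is only to tile convolution kernels into an image for misc.imsave, a sketch that does the padding and concatenation in NumPy after fetching the weights avoids the version-specific graph ops; the variable names below are hypothetical.
import numpy as np

def kernels_to_grid(weights):
    # weights: [kh, kw, in_ch, out_ch] array fetched with sess.run(...)
    # returns one row image per output channel: kernels padded by 1 px and
    # concatenated horizontally across input channels
    kernels = np.transpose(weights, (3, 2, 0, 1))      # [out_ch, in_ch, kh, kw]
    rows = []
    for out_kernel in kernels:
        padded = [np.pad(k, 1, mode='constant') for k in out_kernel]
        rows.append(np.concatenate(padded, axis=1))    # [kh+2, in_ch*(kw+2)]
    return rows
Each returned row then plays the role of image in the misc.imsave call above.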
Example 9: get_saliency_for_shallownet
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def get_saliency_for_shallownet(image_url,sal_url):
    arr_files = glob.glob(image_url + "*.jpg")
    for i in range(len(arr_files)):
        url_image = arr_files[i]
        image = io.imread(url_image)
        img = misc.imresize(image, (96, 96))
        img = np.asarray(img, dtype='float32') / 255.
        img = img.transpose(2, 0, 1).reshape(3, 96, 96)
        xt = np.zeros((1, 3, 96, 96), dtype='float32')
        xt[0] = img
        y = juntingnet.predict(xt)
        tmp = y.reshape(48, 48)
        blured = ndimage.gaussian_filter(tmp, sigma=3)
        sal_map = cv2.resize(tmp, (image.shape[1], image.shape[0]))
        sal_map -= np.min(sal_map)
        sal_map /= np.max(sal_map)
        # saliency = misc.imresize(y, (img.shape[0], img.shape[1]))
        aux = url_image.split("/")[-1].split(".")[0]
        misc.imsave(sal_url + '/' + aux + '.png', sal_map)
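One porting caveat illustrated by this example: misc.imsave rescaled float inputs to the 0-255 range before writing, whereas modern writers such as imageio.imwrite do not apply that rescaling. Below is a minimal sketch of an explicit conversion for a normalized float saliency map; the function name is hypothetical.
import numpy as np
import imageio

def save_float_map(path, sal_map):
    # normalize to [0, 1], then convert to uint8 before writing
    sal_map = sal_map - sal_map.min()
    if sal_map.max() > 0:
        sal_map = sal_map / sal_map.max()
    imageio.imwrite(path, (sal_map * 255).astype(np.uint8))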
Example 10: dotplot2
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def dotplot2(s1, s2, wordsize=5, overlap=5, verbose=1):
""" verbose = 0 (no progress), 1 (progress if s1 and s2 are long) or
2 (progress in any case) """
doProgress = False
if verbose > 1 or len(s1)*len(s2) > 1e6:
doProgress = True
mat = numpy.ones(((len(s1)-wordsize)/overlap+2, (len(s2)-wordsize)/overlap+2))
for i in range(0, len(s1)-wordsize, overlap):
if i % 1000 == 0 and doProgress:
logging.info(" dotplot progress: {} of {} rows done".format(i, len(s1)-wordsize))
word1 = s1[i:i+wordsize]
for j in range(0, len(s2)-wordsize, overlap):
word2 = s2[j:j+wordsize]
if word1 == word2 or word1 == word2[::-1]:
mat[i/overlap, j/overlap] = 0
imgData = None
tempDir = tempfile.mkdtemp()
try:
path = os.path.join(tempDir, "dotplot.png")
misc.imsave(path, mat)
imgData = open(path).read()
except Exception as e:
logging.error("Error generating dotplots:'{}'".format(e))
finally:
shutil.rmtree(tempDir)
return imgData
Example 11: save_images
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def save_images(images, size, image_path):
    return imsave(inverse_transform(images), size, image_path)
Example 12: imsave
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def imsave(images, size, path):
    return misc.imsave(path, merge(images, size))
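Examples 11 and 12 follow the common DCGAN-style pattern in which save_images wraps imsave, and a merge helper tiles a batch of images into a single grid before writing. That merge helper is not shown on this page; the following is a minimal sketch of what such a function typically looks like, offered as an assumption about the missing helper rather than the original code.
import numpy as np

def merge(images, size):
    # images: [batch, h, w, c] array; size: (rows, cols) of the output grid
    h, w, c = images.shape[1], images.shape[2], images.shape[3]
    grid = np.zeros((h * size[0], w * size[1], c), dtype=images.dtype)
    for idx, image in enumerate(images):
        row = idx // size[1]
        col = idx % size[1]
        grid[row * h:(row + 1) * h, col * w:(col + 1) * w, :] = image
    return grid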
Example 13: get_representation
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def get_representation(img, model, model_dir, out_path):
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(model_dir)
        # print(ckpt)
        # print(ckpt.model_checkpoint_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        Ax = tf.placeholder(tf.float32, [model.batch_size, model.height, model.width, model.channel], name='Ax')
        enc_Ax = model.splitter('encoder', Ax)
        grad_att_1 = [tf.gradients(enc_Ax[0][:, :, :, i], Ax)[0] for i in range(128)]
        grad_att_2 = [tf.gradients(enc_Ax[1][:, :, :, i], Ax)[0] for i in range(128)]
        grad_att_3 = [tf.gradients(enc_Ax[2][:, :, :, i], Ax)[0] for i in range(256)]
        # from IPython import embed; embed(); exit()
        grad_1 = sess.run(grad_att_1, feed_dict={Ax: img})
        grad_2 = sess.run(grad_att_2, feed_dict={Ax: img})
        grad_3 = sess.run(grad_att_3, feed_dict={Ax: img})
        for i in range(128):
            misc.imsave(os.path.join(out_path, '0_{:03d}.jpg'.format(i)), grad_1[i][0])
            misc.imsave(os.path.join(out_path, '1_{:03d}.jpg'.format(i)), grad_2[i][0])
            np.save(os.path.join(out_path, '0_{:03d}.npy'.format(i)), grad_1[i][0])
            np.save(os.path.join(out_path, '1_{:03d}.npy'.format(i)), grad_2[i][0])
        for i in range(256):
            misc.imsave(os.path.join(out_path, '2_{:03d}.jpg'.format(i)), grad_3[i][0])
            np.save(os.path.join(out_path, '2_{:03d}.npy'.format(i)), grad_3[i][0])
Example 14: swap_attribute
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def swap_attribute(src_img, att_img, swap_list, model_dir, model, gpu):
    '''
    Input
        src_img: the source image whose attribute you want to change
        att_img: the attribute image that carries the desired attribute
        swap_list: the swap id list
        model_dir: the directory that contains the checkpoint (ckpt.*) files
        model: the DNA_GAN network defined in train.py
        gpu: for example, '0,1'. Use '' for CPU mode
    Output
        out1: src_img with the attributes
        out2: att_img without the attributes
    '''
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(model_dir)
        # print(ckpt)
        # print(ckpt.model_checkpoint_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        Ax = tf.placeholder(tf.float32, [model.batch_size, model.height, model.width, model.channel], name='Ax')
        Be = tf.placeholder(tf.float32, [model.batch_size, model.height, model.width, model.channel], name='Be')
        enc_Ax = model.splitter('encoder', Ax)
        enc_Be = model.splitter('encoder', Be)
        enc_Ae, enc_Bx = model.swap_attribute(enc_Ax, enc_Be, swap_list)
        Ae = model.joiner('decoder', enc_Ae)
        Bx = model.joiner('decoder', enc_Bx)
        out2, out1 = sess.run([Ae, Bx], feed_dict={Ax: att_img, Be: src_img})
        swap = np.concatenate((src_img[0], att_img[0], out1[0], out2[0]), 1)
        misc.imsave('swap.jpg', swap)
        # misc.imsave('out1.jpg', out1[0])
        # misc.imsave('out2.jpg', out2[0])
Example 15: interpolation1_
# Required import: from scipy import misc [as alias]
# Or: from scipy.misc import imsave [as alias]
def interpolation1_(src_img, att_img, inter_num, model_dir, model, gpu):
    '''
    Input
        src_img: the source image whose attribute you want to change
        att_img: the attribute image that carries the desired attribute
        inter_num: the number of interpolation points
        model_dir: the directory that contains the checkpoint (ckpt.*) files
        model: the DNA_GAN network defined in train.py
        gpu: for example, '0,1'. Use '' for CPU mode
    Output
        out: [src_img, inter1, inter2, ..., inter_{inter_num}]
    '''
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(model_dir)
        # print(ckpt)
        # print(ckpt.model_checkpoint_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        B, src_feat = sess.run([model.B, model.e], feed_dict={model.Be: src_img})
        att_feat = sess.run(model.x, feed_dict={model.Ax: att_img})
        out = src_img[0]
        for i in range(1, inter_num + 1):
            lambda_i = i / float(inter_num)
            out_i = sess.run(model.joiner('G_joiner', B, src_feat + (att_feat - src_feat) * lambda_i))
            out = np.concatenate((out, out_i[0]), axis=1)
            # print(out.shape)
        misc.imsave('interpolation2.jpg', out)