This article collects typical usage examples of the Python function skimage.io.imsave. If you are unsure exactly how imsave is used, or what it looks like in real code, the curated examples below should help.
The following shows 15 code examples of the imsave function, sorted by popularity by default.
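Before the project-level examples, a minimal sketch of the basic call may help (the file names and array contents below are invented for illustration): imsave(fname, arr) chooses the output format from the file extension, and typically expects uint8 data or floats in [0, 1].

# Minimal usage sketch; file names and arrays are illustrative only.
import numpy as np
from skimage import io, img_as_ubyte

gradient = np.tile(np.linspace(0.0, 1.0, 256), (64, 1))       # float image in [0, 1]
io.imsave('gradient_uint8.png', img_as_ubyte(gradient))       # convert explicitly to uint8 for PNG
io.imsave('gradient_float.tif', gradient.astype(np.float32))  # TIFF can store float data directly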
Example 1: cut_char
def cut_char(pk):
    page = get_object_or_404(Page, pk=pk)
    page_img_path = page.get_image_path()
    char_lst = Character.objects.filter(page_id=pk)
    image = io.imread(page_img_path, 0)
    binary = binarisation(image)
    binary_image = (binary * 255).astype('ubyte')
    char_dir = settings.CHARACTER_IMAGE_ROOT + pk + '/'
    if not os.path.exists(char_dir):
        os.makedirs(char_dir)
    for char in char_lst:
        char_image = binary_image[char.top:char.bottom, char.left:char.right]
        char_filename = char.id + '.png'
        char_path = char_dir + char_filename
        try:
            io.imsave(char_path, char_image)
            status = 0
            if is_low_contrast(char_image):
                status = -5
        except:
            char_filename = ''
            status = -6
        char.is_correct = status
        char.image = char_filename
        char.save()
    append_char_stastics.delay(pk)
    return 'cutchar:' + pk
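Example 1 depends on Django models and project helpers (binarisation, append_char_stastics) that are not shown. The part relevant to imsave is simply crop-then-save with a low-contrast check; a stripped-down sketch of that pattern, with hypothetical boxes and paths, could look like this:

# Standalone sketch of the crop-and-save pattern above (not the original project code).
import os
from skimage import io
from skimage.exposure import is_low_contrast

def save_crop(binary_image, box, out_dir, name):
    """Save one (top, bottom, left, right) crop; return -5 if it is low contrast, else 0."""
    top, bottom, left, right = box
    crop = binary_image[top:bottom, left:right]
    os.makedirs(out_dir, exist_ok=True)
    io.imsave(os.path.join(out_dir, name + '.png'), crop)
    return -5 if is_low_contrast(crop) else 0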
Example 2: save_segmented_image
def save_segmented_image(self, filepath_image, modality='t1c', show=False):
    '''
    Creates an image of the original brain with segmentation overlay and saves it in ./predictions
    INPUT   (1) str 'filepath_image': filepath to test image for segmentation, including file extension
            (2) str 'modality': imaging modality to use as background. defaults to t1c. options: (flair, t1, t1c, t2)
            (3) bool 'show': if True, shows output image. defaults to False.
    OUTPUT  (1) if show is True, shows image of segmentation results
            (2) if show is False, returns segmented image.
    '''
    modes = {'flair': 0, 't1': 1, 't1c': 2, 't2': 3}
    segmentation = self.predict_image(filepath_image, show=False)
    print 'segmentation = ' + str(segmentation)
    img_mask = np.pad(segmentation, (16, 16), mode='edge')
    ones = np.argwhere(img_mask == 1)
    twos = np.argwhere(img_mask == 2)
    threes = np.argwhere(img_mask == 3)
    fours = np.argwhere(img_mask == 4)
    test_im = io.imread(filepath_image)
    test_back = test_im.reshape(5, 216, 160)[modes[modality]]
    # overlay = mark_boundaries(test_back, img_mask)
    gray_img = img_as_float(test_back)
    # adjust gamma of image
    image = adjust_gamma(color.gray2rgb(gray_img), 0.65)
    sliced_image = image.copy()
    red_multiplier = [1, 0.2, 0.2]
    yellow_multiplier = [1, 1, 0.25]
    green_multiplier = [0.35, 0.75, 0.25]
    blue_multiplier = [0, 0.25, 0.9]
    print str(len(ones))
    print str(len(twos))
    print str(len(threes))
    print str(len(fours))
    # change colors of segmented classes
    for i in xrange(len(ones)):
        sliced_image[ones[i][0]][ones[i][1]] = red_multiplier
    for i in xrange(len(twos)):
        sliced_image[twos[i][0]][twos[i][1]] = green_multiplier
    for i in xrange(len(threes)):
        sliced_image[threes[i][0]][threes[i][1]] = blue_multiplier
    for i in xrange(len(fours)):
        sliced_image[fours[i][0]][fours[i][1]] = yellow_multiplier
    # if show=True show the prediction
    if show:
        print 'Showing...'
        io.imshow(sliced_image)
        plt.show()
    # save the prediction
    print 'Saving...'
    try:
        mkdir_p('./predictions/')
        io.imsave('./predictions/' + os.path.basename(filepath_image) + '.png', sliced_image)
        print 'prediction saved.'
    except:
        io.imsave('./predictions/' + os.path.basename(filepath_image) + '.png', sliced_image)
        print 'prediction saved.'
Example 3: produce_smoothed_images
def produce_smoothed_images(get_component, replace_component, bins, output_path, paths):
    start_img = io.imread(paths[0])
    start_cdf = get_cdf(get_component(start_img), bins)
    end_img = io.imread(paths[-1])
    end_cdf = get_cdf(get_component(end_img), bins)
    delta_cdf = end_cdf - start_cdf
    for i, path in enumerate(paths[1:-1]):
        percentage = i / len(paths[1:-1])
        target_cdf = start_cdf + (delta_cdf * percentage)
        img = io.imread(path)
        values = get_component(img)
        cdf = get_cdf(values, bins)
        # In order to match the length of "bins" for the interpolation below
        # we prepend a 0
        target_cdf = numpy.insert(target_cdf, 0, 0)
        cdf = numpy.insert(cdf, 0, 0)
        matched = match(values, cdf, target_cdf, bins)
        matched = matched.reshape(values.shape)
        img = replace_component(img, matched)
        result_path = os.path.join(output_path, os.path.basename(path))
        io.imsave(result_path, img)
        print('Done with', result_path)
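The get_cdf and match helpers used above are project-specific and not shown. Assuming they implement standard CDF-based histogram matching over an integer number of bins on [0, 1], the core mapping can be sketched with numpy alone:

# Sketch of CDF-based histogram matching under the assumptions stated above.
import numpy as np

def match_to_cdf(values, bins, target_cdf):
    """Remap float pixel values in [0, 1] so their empirical CDF approximates target_cdf."""
    hist, bin_edges = np.histogram(values, bins=bins, range=(0.0, 1.0))
    cdf = np.cumsum(hist).astype(float) / values.size
    # For each pixel: look up its quantile in the source CDF, then invert the target CDF.
    quantiles = np.interp(values.ravel(), bin_edges[1:], cdf)
    matched = np.interp(quantiles, np.concatenate(([0.0], target_cdf)), bin_edges)
    return matched.reshape(values.shape)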
Example 4: rawFrameToImageFile
def rawFrameToImageFile(image, filename):
    """Writes a single raw image frame to an image file.

    The file type must be given, e.g. png or jpg.
    The image need not be scaled beforehand; scaling is done prior
    to writing out the image. The format could be one of
    (BMP, JPG, JPEG, PNG, PPM, TIFF, XBM, XPM),
    but the file types available depend
    on the QT imsave plugin in use.

    Args:
        | image (np.ndarray): two-dimensional array representing an image
        | filename (string): name of file to be written to, with extension

    Returns:
        | Nothing

    Raises:
        | No exception is raised.
    """
    # normalise input image (img) data to between 0 and 1
    from scipy import ndimage
    image = (image - ndimage.minimum(image)) / (ndimage.maximum(image) - ndimage.minimum(image))
    # http://scikit-image.org/docs/dev/api/skimage.io.html#imsave
    import skimage.io as io
    io.imsave(filename, image)
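A quick usage sketch for the function above, with a synthetic frame (the array is made up; depending on the scikit-image version, imsave may warn about, or refuse, the implicit float-to-uint8 conversion):

# Hypothetical call: the min-max scaling inside rawFrameToImageFile brings the
# values into [0, 1] before imsave writes the file.
import numpy as np

frame = np.random.randint(0, 4096, size=(480, 640)).astype(np.float64)  # fake 12-bit sensor frame
rawFrameToImageFile(frame, 'frame.png')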
Example 5: detect
def detect(path):
    input_image = io.imread(path)
    gnb = joblib.load('/home/qburst/Desktop/Emotion_detection/ED_dist/Classifier/gnb.pkl')
    face_count, features, faces = p.feature_extraction(input_image)
    if face_count:
        emotions = gnb.predict(features)
        print emotions
        for d, emotion in zip(faces, emotions):
            print d, emotion
            if emotion == 0:
                cv2.rectangle(input_image, (d.left(), d.top()), (d.right(), d.bottom()), (0, 0, 0), 2)
            elif emotion == 1:
                cv2.rectangle(input_image, (d.left(), d.top()), (d.right(), d.bottom()), (0, 0, 255), 3)
            elif emotion == 2:
                cv2.rectangle(input_image, (d.left(), d.top()), (d.right(), d.bottom()), (255, 255, 0), 3)
            elif emotion == 3:
                cv2.rectangle(input_image, (d.left(), d.top()), (d.right(), d.bottom()), (255, 0, 0), 3)
        io.imsave('Detected/emotion.jpg', input_image)
        return face_count, 'Detected/emotion.jpg'
    else:
        return face_count, 'err'
Example 6: run_quadrant_stitch
def run_quadrant_stitch(fns, re_string='(.*)_(s[1-4])_(w[1-3]).*',
                        re_quadrant_group=1):
    """Read images, stitch them, and write them out to the same directory.

    Parameters
    ----------
    fns : list of string
        The filenames to be processed.
    re_string : string, optional
        The regular expression to match the filename.
    re_quadrant_group : int, optional
        The group from the re.match object that will contain quadrant info.

    Returns
    -------
    fns_out : list of string
        The output filenames.
    """
    qd = group_by_quadrant(fns, re_string, re_quadrant_group)
    fns_out = []
    for fn_pattern, fns in qd.items():
        new_filename = '_'.join(fn_pattern) + '_stitched.tif'
        ims = map(io.imread, sorted(fns))
        im = quadrant_stitch(*ims)
        io.imsave(new_filename, im)
        fns_out.append(new_filename)
    return fns_out
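The quadrant_stitch helper is not shown here. Assuming it simply arranges four equally sized tiles into a 2x2 mosaic (the actual quadrant order in the project may differ), a minimal stand-in could be:

# Hypothetical 2x2 stitch; the real quadrant_stitch may order tiles differently.
import numpy as np

def quadrant_stitch_sketch(nw, ne, sw, se):
    return np.block([[nw, ne], [sw, se]])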
Example 7: roundtrip
def roundtrip(self, dtype, x):
    f = NamedTemporaryFile(suffix='.tif')
    fname = f.name
    f.close()
    imsave(fname, x)
    y = imread(fname)
    assert_array_equal(x, y)
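Outside the test class it belongs to, the same save-and-reload check can be written as a plain function; the uint8 TIFF case below is just one dtype worth covering:

# Standalone version of the roundtrip check (not part of the original test suite).
import os
import numpy as np
from tempfile import NamedTemporaryFile
from numpy.testing import assert_array_equal
from skimage.io import imsave, imread

def roundtrip_check(x):
    f = NamedTemporaryFile(suffix='.tif', delete=False)
    fname = f.name
    f.close()
    imsave(fname, x)
    assert_array_equal(imread(fname), x)
    os.remove(fname)

roundtrip_check(np.random.randint(0, 256, size=(32, 32), dtype=np.uint8))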
Example 8: produce
def produce(solution_number, gsd, name=None):
    print("{}/{}".format(solution_number + 1, number_of_solutions))
    cam_left = model.fexternal(solution_number)[0]
    cam_right = model.fexternal(solution_number)[1]
    corners_left = project_corners(model.finternal(solution_number), cam_left, pixel_size(model.finternal(solution_number)), image_shape, elevation)
    corners_right = project_corners(model.finternal(solution_number), cam_right, pixel_size(model.finternal(solution_number)), image_shape, elevation)
    world_rect = WorldRect.from_points(np.vstack([corners_left, corners_right]))
    tile = FlatTile(world_rect, gsd)
    tile.draw_cam_trace(corners_left)
    tile.draw_cam_trace(corners_right)
    tile.project_camera(model.finternal(solution_number), cam_left, elevation, left)
    tile.project_camera(model.finternal(solution_number), cam_right, elevation, right)
    tile.draw_observations(model.finternal(solution_number), cam_left, elevation, data_set.rows, data_set.cols, model.features.edges[0].obs_a)
    tile.draw_observations(model.finternal(solution_number), cam_right, elevation, data_set.rows, data_set.cols, model.features.edges[0].obs_b)
    tile.draw_obs_pair(model.finternal(solution_number),
                       cam_left,
                       cam_right,
                       elevation,
                       data_set.rows,
                       data_set.cols,
                       model.features.edges[0].obs_a,
                       model.features.edges[0].obs_b)
    if name is None:
        name = "iteration{}.jpg".format(solution_number)
    io.imsave(os.path.join(tile_dir, name), tile.image)
Example 9: main
def main():
    # get a list of image filenames in a directory
    base_directory = "/path/to/image/directory"
    # create an output_directory
    output_directory = os.path.join(base_directory, "output")
    if not os.path.exists(output_directory):
        os.mkdir(output_directory)
    # string formatting with leading zeros - we'll use this later
    output_filename_template = os.path.join(output_directory, "output {:03d}.png")
    # this will work if base_directory does not end with '/'
    filenames = glob(base_directory + "/*.png")
    # but it's better to safely join paths
    filenames = glob(os.path.join(base_directory, "*.png"))
    # will often want to process these in order
    filenames = sorted(glob(os.path.join(base_directory, "*.png")))
    # process each image
    for i, filename in enumerate(filenames):
        image = img_as_float(imread(filename))
        result = vignette(image)
        imsave(output_filename_template.format(i), result)
Example 10: gen_ablation
def gen_ablation(imgIds=[], mode='blackout', ct=None, out_path="tmp", **args):
    """Perform the specified ablation on every image in the imgIds list.
    If no imgId is specified, will randomly sample an image with text.
    return (imgId, old_img, new_img) list"""
    imgs = ct.loadImgs(imgIds)
    results = []
    for idx, img in enumerate(imgs):
        print("Ablating image {}/{}".format(idx + 1, len(imgIds)))
        ori_file_name = '%s/%s/%s' % (DATA_PATH, DATA_TYPE, img['file_name'])
        orig = io.imread(ori_file_name)
        annIds = ct.getAnnIds(imgIds=img['id'])
        anns = ct.loadAnns(annIds)
        if len(anns) == 0:
            print("[WARNING] Weirdly sampled an image without text contents:{}".format(img['file_name']))
        running = orig
        for ann in anns:
            bbox = ann['bbox']  # format: [x, y, width, height]
            if mode == 'blackout':
                running = blackout(running, bbox)
            elif mode == 'gaussian':
                running = gaussian(running, bbox, ksize=args['ksize'], sigma=args['sigma'])
            elif mode == 'median':
                running = median(running, bbox, width=args['width'])
        out_file_name = os.path.join(CD, "..", out_path, "%s_%s" % (mode, img['file_name']))
        io.imsave(out_file_name, running)
        results.append((img['id'], ori_file_name, out_file_name))
    return results
Example 11: ablate
def ablate(imgIds=[], mode='destroy', out_path="tmp", coco=coco, ct=None, **args):
    """[ablation entry point 2.0]
    Created to accommodate background-destroying ablation. Will dispatch all
    old ablations (gaussian, blackout, & median) to gen_ablation."""
    if ct is None:
        ct = coco_text.COCO_Text(os.path.join(CD, 'COCO_Text.json'))
    if imgIds == []:
        imgIds = ct.getImgIds(imgIds=ct.train, catIds=[('legibility', 'legible')])
        imgIds = [imgIds[np.random.randint(0, len(imgIds))]]
    # dispatch to old ablation entry point
    if mode in ['gaussian', 'blackout', 'median']:
        return gen_ablation(imgIds, mode, ct, out_path=out_path, **args)
    # else do destroy_bg
    if coco is None:
        coco = COCO('%s/annotations/instances_%s.json' % (DATA_PATH, DATA_TYPE))
    imgs = coco.loadImgs(imgIds)
    results = []
    for idx, img in enumerate(imgs):
        print("Ablating image {}/{} with id {} ".format(idx + 1, len(imgIds), img['id']))
        ori_file_name = os.path.join(CD, DATA_PATH, DATA_TYPE, img['file_name'])
        orig = io.imread(ori_file_name)
        if mode == 'destroy':
            ablt = destroy_bg(orig, img['id'], coco, **args)
        elif mode == 'median_bg':
            ablt = median_bg(orig, img['id'], coco, **args)
        out_file_name = os.path.join(CD, "..", out_path, "%s_%s" % (mode, img['file_name']))
        io.imsave(out_file_name, ablt)
        results.append((img['id'], ori_file_name, out_file_name))
    return results
Example 12: run_illum
def run_illum(args):
    """Run illumination correction.

    Parameters
    ----------
    args : argparse.Namespace
        The arguments parsed by the argparse library.
    """
    if args.file_list is not None:
        args.images.extend([fn.rstrip() for fn in args.file_list])
    il = pre.find_background_illumination(args.images, args.radius,
                                          args.quantile, args.stretchlim,
                                          args.use_mask, args.mask_offset,
                                          args.mask_close, args.mask_erode)
    if args.verbose:
        print 'illumination field:', type(il), il.dtype, il.min(), il.max()
    if args.save_illumination is not None:
        io.imsave(args.save_illumination, il / il.max())
    base_fns = [pre.basefn(fn) for fn in args.images]
    ims_out = [fn + args.output_suffix for fn in base_fns]
    mask_fns = [fn + '.mask.tif' for fn in base_fns]
    ims = (io.imread(fn) for fn in args.images)
    for im, fout, mask_fn in it.izip(ims, ims_out, mask_fns):
        if os.path.isfile(mask_fn):
            mask = io.imread(mask_fn).astype(bool)
        else:
            mask = np.ones(im.shape, bool)
        im = pre.correct_image_illumination(im, il,
                                            args.stretchlim_output, mask)
        io.imsave(fout, im)
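The pre.* helpers above come from a project-specific preprocessing module and are not shown. The generic flat-field correction and save step they wrap can be sketched independently (the function and parameter names here are invented):

# Generic flat-field correction sketch, not the project's pre.* implementation.
import numpy as np
from skimage import io, img_as_float, img_as_ubyte
from skimage.exposure import rescale_intensity

def correct_and_save(image_path, illum_field, out_path):
    im = img_as_float(io.imread(image_path))
    # divide by the normalised illumination field, then rescale to [0, 1] for saving
    corrected = im / np.maximum(illum_field / illum_field.max(), 1e-6)
    corrected = rescale_intensity(corrected, out_range=(0.0, 1.0))
    io.imsave(out_path, img_as_ubyte(corrected))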
Example 13: save_to_file
def save_to_file(self):
    """Save current image to file.

    The current behavior is not ideal: It saves the image displayed on
    screen, so all images will be converted to RGB, and the image size is
    not preserved (resizing the viewer window will alter the size of the
    saved image).
    """
    filename = dialogs.save_file_dialog()
    if filename is None:
        return
    if len(self.ax.images) == 1:
        io.imsave(filename, self.image)
    else:
        underlay = mpl_image_to_rgba(self.ax.images[0])
        overlay = mpl_image_to_rgba(self.ax.images[1])
        alpha = overlay[:, :, 3]
        # alpha can be set by channel of array or by a scalar value.
        # Prefer the alpha channel, but fall back to scalar value.
        if np.all(alpha == 1):
            alpha = np.ones_like(alpha) * self.ax.images[1].get_alpha()
        alpha = alpha[:, :, np.newaxis]
        composite = (overlay[:, :, :3] * alpha +
                     underlay[:, :, :3] * (1 - alpha))
        io.imsave(filename, composite)
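The compositing branch above is standard "over" blending: overlay RGB weighted by its alpha channel plus underlay RGB weighted by 1 - alpha. The same step on synthetic RGBA arrays, outside the viewer machinery (shapes and colors are invented):

# Alpha ("over") compositing sketch on made-up RGBA arrays.
import numpy as np
from skimage import io, img_as_ubyte

underlay = np.zeros((64, 64, 4), dtype=float)
underlay[..., 1] = 1.0           # opaque green background
underlay[..., 3] = 1.0
overlay = np.zeros((64, 64, 4), dtype=float)
overlay[16:48, 16:48, 0] = 1.0   # red square
overlay[16:48, 16:48, 3] = 0.5   # 50% opaque

alpha = overlay[..., 3:4]
composite = overlay[..., :3] * alpha + underlay[..., :3] * (1 - alpha)
io.imsave('composite.png', img_as_ubyte(composite))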
Example 14: run
def run(imfile, N, sigma, mu):
    N = 2 if N is None else int(N)
    sigma = 1.0 if sigma is None else float(sigma)
    mu = 10.0 if mu is None else float(mu)
    # read image
    im0 = imread(imfile, as_grey=True)
    # rescale to a common size
    scale = 1e6 / float(im0.size)
    im = rescale(im0, (scale, scale))
    # estimate illumination profile
    proc0 = NonUniformIllumination(N=N, sigma=sigma, mu=mu)
    comp = proc0(im)
    illum = proc0.profile
    # # resize to original size
    # illum = rescale(illum, (1.0/scale, 1.0/scale))
    # illum = np.resize(illum, im0.shape)
    fname = os.path.splitext(imfile)
    illum = (illum - illum.min()) / (illum.max() - illum.min())
    imsave(fname[0] + '-illum' + fname[1], illum)
    comp = (comp - comp.min()) / (comp.max() - comp.min())
    imsave(fname[0] + '-comp' + fname[1], comp)
    return
Example 15: deflicker
def deflicker():
    # bins = numpy.arange(0, 1, 0.01)
    bins = 256
    for path, img in zip(paths, use_first(paths, bins)):
        output_path = os.path.join(output_directory, os.path.basename(path))
        io.imsave(output_path, img)