This article collects typical usage examples of the Python function skimage.util.img_as_ubyte. If you are wondering what img_as_ubyte does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
The following 15 code examples of img_as_ubyte are listed, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
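As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below) of what img_as_ubyte does: it converts an image of any supported dtype to uint8, rescaling float input in [0, 1] onto the 0-255 range.

import numpy as np
from skimage.util import img_as_ubyte

# A float image with values in [0, 1]
img_float = np.array([[0.0, 0.25], [0.5, 1.0]])

img_u8 = img_as_ubyte(img_float)
print(img_u8.dtype)  # uint8
print(img_u8)        # approximately [[0, 64], [128, 255]]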
Example 1: get_tag_detections
def get_tag_detections(im):
    #
    # Because of a bug in the tag detector, it doesn't seem
    # to detect tags larger than a certain size. To work around
    # this limitation, we detect tags on two different image
    # scales and use the one with more detections
    #
    assert len(im.shape) == 2

    im4 = imrescale(im, 1./4)
    im = img_as_ubyte(im)
    im4 = img_as_ubyte(im4)

    detections1 = AprilTagDetector().detect(im)
    detections4 = AprilTagDetector().detect(im4)
    for d in detections4:
        d.c[0] *= 4.
        d.c[1] *= 4.
        # note that everything other than the tag center is wrong
        # in detections4

    if len(detections4) > len(detections1):
        return detections4
    else:
        return detections1
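The helpers imrescale and AprilTagDetector above are specific to that project. As a rough, self-contained sketch of the same two-scale conversion step, skimage.transform.rescale returns a float image in [0, 1] that img_as_ubyte then maps back to uint8 before detection:

from skimage import data, transform
from skimage.util import img_as_ubyte

im = data.camera()                   # uint8 grayscale sample image
im4 = transform.rescale(im, 1. / 4)  # rescale returns a float image in [0, 1]
im4 = img_as_ubyte(im4)              # back to uint8 for the detector
print(im.shape, im4.shape, im4.dtype)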
Example 2: insert_db
def insert_db(self, mode, image, label, features, channel_no, inverse):
    if inverse:
        image_ubyte = 255 - img_as_ubyte(image)
    else:
        image_ubyte = img_as_ubyte(image)
    image_ubyte = numpy.transpose(image_ubyte, (2, 0, 1))
    image_string = image_ubyte.tostring()

    if features is not None:
        delimiter = '[email protected]#$'
        self.datum.data = image_string + delimiter + features
    elif channel_no > 3:
        selem = disk(6)
        w_tophat = white_tophat(image_ubyte, selem)
        b_tophat = black_tophat(image_ubyte, selem)
        self.datum.data = image_string + w_tophat.tostring() + b_tophat.tostring()
    else:
        self.datum.data = image_string

    if label is not None:
        self.datum.label = int(label)

    serialized = self.datum.SerializeToString()

    if mode == 'train':
        self.train_batch.Put("%08d" % self.train_no, serialized)
        self.train_no += 1
    elif mode == 'valid':
        self.valid_batch.Put("%08d" % self.valid_no, serialized)
        self.valid_no += 1
    elif mode == 'test':
        self.test_batch.Put("%08d" % self.test_no, serialized)
        self.test_no += 1
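The channel_no > 3 branch adds white and black top-hat responses as extra channels. A minimal standalone sketch of those two filters on a plain grayscale image (independent of the LevelDB/Datum plumbing above) might look like this:

from skimage import data, util
from skimage.morphology import disk, white_tophat, black_tophat

image_ubyte = util.img_as_ubyte(data.camera())
selem = disk(6)
w_tophat = white_tophat(image_ubyte, selem)  # bright features smaller than the disk
b_tophat = black_tophat(image_ubyte, selem)  # dark features smaller than the disk
print(w_tophat.dtype, b_tophat.shape)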
Example 3: mse
def mse(image_a, image_b):
    # the 'Mean Squared Error' between the two images is the
    # sum of the squared difference between the two images;
    # NOTE: the two images must have the same dimension
    image_a = util.img_as_ubyte(image_a)
    image_b = util.img_as_ubyte(image_b)
    err = np.sum((image_a.astype("float") - image_b.astype("float")) ** 2)
    err /= float(image_a.shape[0] * image_a.shape[1])

    # return the MSE, the lower the error, the more "similar"
    # the two images are
    return err
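A quick usage sketch for the mse() helper, assuming the definition above is in scope and using only NumPy and skimage's bundled sample data:

import numpy as np
from skimage import data, util

original = util.img_as_float(data.camera())
noisy = np.clip(original + np.random.normal(scale=0.05, size=original.shape), 0, 1)

print(mse(original, original))  # 0.0 for identical images
print(mse(original, noisy))     # grows as the images diverge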
Example 4: absolute_error
def absolute_error(image_a, image_b):
    """
    Sum of absolute pixel differences.

    Images - 2d numpy arrays
    """
    image_a = util.img_as_ubyte(image_a)
    image_b = util.img_as_ubyte(image_b)
    return np.sum(
        np.absolute(
            image_a.view(np.ndarray).astype(np.int16) -
            image_b.view(np.ndarray).astype(np.int16)
        )
    )
Example 5: saver
def saver(stepName, img, dbg=None, mode=mode):
    path = (processedDir / str(imgName)).with_suffix(".{}.png".format(stepName) if stepName else ".png")

    if mode == 'cache' and processedDir and imgName:
        mode = 'save'
        if path.exists():
            print("Loading cached image:", path)
            img = ski.img_as_ubyte(io.imread(str(path)))
            mode = 'done'
        elif isinstance(img, type(None)):
            print("Caching image:", path)
            img = ski.img_as_ubyte(io.imread(str(imgName)))
        assert not isinstance(img, type(None))

    if mode == 'save' and processedDir and imgName:
        try:
            print("Saving:", img.shape, img.dtype, path.name, flush=True)
            pil_img = PIL.Image.fromarray(img_as_ubyte(img))
            pil_img.save(str(path))
            if dbg:
                dbg.saved_path = path
        except Exception as err:
            print("Error Saving:", path, err, flush=True)
    elif mode == 'plot':
        plt.imshow(img)
        plt.suptitle(stepName + " " + imgName.name)
        plt.show(block=True)
        plt.close()

    return img
Example 6: compute
def compute(self, src):
    image = img_as_ubyte(src)

    # denoise image
    denoised = denoise_tv_chambolle(image, weight=0.05)
    denoised_equalize = exposure.equalize_hist(denoised)

    # find continuous region (low gradient) --> markers
    markers = rank.gradient(denoised_equalize, disk(5)) < 10
    markers = ndi.label(markers)[0]

    # local gradient
    gradient = rank.gradient(denoised, disk(2))

    # labels
    labels = watershed(gradient, markers)

    # display results
    fig, axes = plt.subplots(2, 3)
    axes[0, 0].imshow(image)  # , cmap=plt.cm.spectral, interpolation='nearest')
    axes[0, 1].imshow(denoised, cmap=plt.cm.spectral, interpolation='nearest')
    axes[0, 2].imshow(markers, cmap=plt.cm.spectral, interpolation='nearest')
    axes[1, 0].imshow(gradient, cmap=plt.cm.spectral, interpolation='nearest')
    axes[1, 1].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest', alpha=.7)
    plt.show()
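The compute() method above lives inside a larger class. A condensed, standalone sketch of the same marker-based watershed pipeline on skimage's bundled coins image (skipping the denoising and plotting) could look like this:

from scipy import ndimage as ndi
from skimage import data, util
from skimage.filters import rank
from skimage.morphology import disk
from skimage.segmentation import watershed

image = util.img_as_ubyte(data.coins())

# flat (low-gradient) regions become markers
markers = ndi.label(rank.gradient(image, disk(5)) < 10)[0]

# the watershed floods the local gradient image from those markers
gradient = rank.gradient(image, disk(2))
labels = watershed(gradient, markers)
print(labels.max(), "segments")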
Example 7: fit
def fit(self, X, y=None):
    num = self.patch_num // X.size
    data = []
    for item in X:
        img = imread(str(item[0]))
        img = img_as_ubyte(rgb2gray(img))
        # img = self.binary(img)  # binarize
        tmp = extract_patches_2d(img, self.patch_size, max_patches=num,
                                 random_state=np.random.RandomState())
        data.append(tmp)

    data = np.vstack(data)
    data = data.reshape(data.shape[0], -1)
    data = np.asarray(data, 'float32')

    # after binarization the 0-1 scaling below is unnecessary
    data = data - np.min(data, 0)
    data = data / (np.max(data, 0) + 0.0001)  # 0-1 scaling

    self.rbm = BernoulliRBM(n_components=self.n_components,
                            learning_rate=self.learning_rate,
                            n_iter=self.n_iter,
                            batch_size=self.batch_size,
                            verbose=True)
    self.rbm.fit(data)
    return self
Example 8: save_windows
def save_windows(boxes, imagePath):
    image_color = io.imread(imagePath, as_grey=False)
    image_color = util.img_as_ubyte(image_color)
    imageFilename = os.path.basename(imagePath)  # Get the filename
    imageBasename = os.path.splitext(imageFilename)[0]  # Take out the extension
    annotationsFilePath = cfg.annotationsFolderPath + 'gt.' + imageBasename + '.txt'
    annotatedBoxes = utils.readINRIAAnnotations(annotationsFilePath)
    signalTypes = utils.readINRIAAnnotationsDetection(annotationsFilePath)
    signalTypes = list(reversed(signalTypes))
    count = 0
    for box in boxes:
        if box[0] < 0 or box[1] < 0:
            continue
        if box[2] >= image_color.shape[1] or \
           box[3] >= image_color.shape[0]:
            continue
        annotated = 'NONSIGNAL'
        for idx in range(0, len(annotatedBoxes)):
            aBox = annotatedBoxes[idx]
            currentRatio = computeOverlap(box, aBox)
            currentRatio = math.ceil(currentRatio*10)/10
            if currentRatio > 0.5:
                annotated = signalTypes[idx]
                break
        crop = image_color[box[1]:box[3], box[0]:box[2]]
        imageName = imagePath.split('/')  # Working on the crop name...
        fileName = imageName[len(imageName)-1]
        fileName = fileName[:len(fileName)-4]
        fileName = (fileName + '.' + str(count))
        filename = (fileName + '.' + annotated + '.jpg')
        crop = resize(crop, (32, 32))
        io.imsave('Crops/' + filename, crop)  # Save the crop
        print('Crop saved')
        count += 1
Example 9: extractAndStoreFeatures
def extractAndStoreFeatures(inputFolder, outputFolder):
    # List all files
    fileList = os.listdir(inputFolder)
    # Select only files that end with .png
    imagesList = filter(lambda element: ".png" in element, fileList)

    for filename in imagesList:
        imagepath = inputFolder + "/" + filename
        outputpath = outputFolder + "/" + filename + ".feat"
        if os.path.exists(outputpath):
            print("Features already extracted for " + imagepath + ". Delete the file if you want to replace them.")
            continue
        print("Extracting features for " + imagepath)
        image = io.imread(imagepath, as_grey=True)
        # Read the image as bytes (pixels with values 0-255)
        image = util.img_as_ubyte(image)
        # Extract the features
        feats = feature_extractor.extractFeatures(image)
        # Save the features to a file
        outputFile = open(outputpath, "wb")
        pickle.dump(feats, outputFile)
        outputFile.close()
Example 10: HairRemover
def HairRemover(image, debug=None):
    # =================================================================
    # extract hair as morphologically thin structures
    # -----------------------------------------------------------------
    # convert to Lab color space
    Lab_image = rgb2labnorm(image)
    L = img_as_ubyte(Lab_image[..., 0])

    # a hard threshold is then applied to the difference between
    # the luminance before and after morphological closing
    # the dark pigmented elements have a large intensity in the
    # difference image
    LClose = morph_close(L)
    LDiff = LClose - L

    # Threshold to create mask for inpainting
    # set all pixels > 11.9 -> 255 and < 12 -> 0
    # dilate by 1 to remove boundaries
    threshold = 10.0  # original comment and code did not match... -JH

    # threshold operation is directly performed on LDiff
    mask = (morph_dilate(LDiff) >= threshold) * 1.

    result = Inpainter(image, mask, 5)

    if debug is not None:
        debug["inpaintingMask"] = mask
        debug["hairRemoved"] = result

    return result
Example 11: test_compare_8bit_vs_16bit
def test_compare_8bit_vs_16bit():
    # filters applied on an 8-bit image or a 16-bit image (with only 8 bits
    # of real dynamic range) should give identical results
    image8 = util.img_as_ubyte(data.camera())
    image16 = image8.astype(np.uint16)
    assert_equal(image8, image16)

    methods = [
        "autolevel",
        "bottomhat",
        "equalize",
        "gradient",
        "maximum",
        "mean",
        "subtract_mean",
        "median",
        "minimum",
        "modal",
        "enhance_contrast",
        "pop",
        "threshold",
        "tophat",
    ]

    for method in methods:
        func = getattr(rank, method)
        f8 = func(image8, disk(3))
        f16 = func(image16, disk(3))
        assert_equal(f8, f16)
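As a side note to the test above (not part of it), img_as_ubyte also rescales integer dtypes, so a full-range uint16 image maps back onto 0-255 rather than being truncated:

import numpy as np
from skimage import util

img16 = np.array([[0, 32768, 65535]], dtype=np.uint16)
print(util.img_as_ubyte(img16))  # roughly [[0, 128, 255]]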
Example 12: _write_to_file
def _write_to_file(self, new_bands, pan, **kwargs):
    # Read coverage from QBA
    coverage = self._calculate_cloud_ice_perc()

    self.output("Final Steps", normal=True, arrow=True)

    suffix = 'bands_%s_pan' % "".join(map(str, self.bands))
    output_file = join(self.dst_path, self._filename(suffix=suffix))
    output = rasterio.open(output_file, 'w', **kwargs)

    for i, band in enumerate(new_bands):
        # Color Correction
        band = numpy.multiply(band, pan)
        band = self._color_correction(band, self.bands[i], 0, coverage)

        output.write_band(i + 1, img_as_ubyte(band))
        new_bands[i] = None

    self.output("Writing to file", normal=True, color='green', indent=1)
    return output_file
Example 13: extractAndStoreFeatures
def extractAndStoreFeatures(inputFolder, items, outputFolder):
    extension = '.jpg'
    X = np.zeros(shape=(cfg.num_train_images, cfg.num_features))
    y = np.zeros(shape=(cfg.num_train_images, 1))
    number_of_images = 0
    for index_label, name_label in enumerate(items):  # For each item...
        imagesPath = inputFolder + '/' + name_label  # Each label corresponds to a folder
        fileList = os.listdir(imagesPath)  # List all files
        imagesList = filter(lambda element: extension in element, fileList)  # Select only the ones that end with the desired extension
        for filename in imagesList:
            current_imagePath = imagesPath + '/' + filename
            print('Extracting features for ' + current_imagePath)
            image = io.imread(current_imagePath, as_grey=True)
            image = util.img_as_ubyte(image)  # Read the image as bytes (pixels with values 0-255)
            X[number_of_images] = feature_extractor.extractFeatures(image)  # Extract the features
            y[number_of_images] = index_label  # Assign the label at the end of X when saving the data set
            number_of_images = number_of_images + 1
            print(number_of_images)

    # Save the data set to a .data file in the Data folder.
    np.savetxt(
        outputFolder,     # file name
        np.c_[X, y],      # array to save
        fmt='%.2f',       # formatting, 2 digits in this case
        delimiter=',',    # column delimiter
        newline='\n',     # new line character
        comments='# ')    # character to use for comments
Example 14: convert_to_saturation
def convert_to_saturation(fn, out_fn, rescale=True):
    """
    Generate saturation channel as a grayscale image.
    """
    # ImageMagick takes ~18s:
    # execute_command('convert %(fn)s -colorspace HSL -channel G %(out_fn)s' % {'fn': fn, 'out_fn': out_fn})

    # t = time.time()
    img = imread(fn)
    # sys.stderr.write('Read image: %.2f seconds\n' % (time.time() - t))  # ~4s

    # t1 = time.time()
    ma = img.max(axis=-1)
    mi = img.min(axis=-1)
    # sys.stderr.write('compute min and max color components: %.2f seconds\n' % (time.time() - t1))  # ~5s

    # t1 = time.time()
    s = np.nan_to_num(mi / ma.astype(float))
    # sys.stderr.write('min over max: %.2f seconds\n' % (time.time() - t1))  # ~2s

    # t1 = time.time()
    if rescale:
        pmax = s.max()
        pmin = s.min()
        s = (s - pmin) / (pmax - pmin)
    # sys.stderr.write('rescale: %.2f seconds\n' % (time.time() - t1))  # ~3s

    cv2.imwrite(out_fn, img_as_ubyte(s))
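A tiny, self-contained sketch (independent of the timing scaffolding above) of the same min-over-max saturation formula ending in the img_as_ubyte conversion; the optional rescale step in the function simply stretches s to the full [0, 1] range first:

import numpy as np
from skimage.util import img_as_ubyte

rgb = np.array([[[200, 100, 50], [10, 10, 10]]], dtype=np.uint8)
ma = rgb.max(axis=-1)
mi = rgb.min(axis=-1)
s = np.nan_to_num(mi / ma.astype(float))  # min-over-max ratio, already in [0, 1]
print(img_as_ubyte(s))                    # roughly [[64, 255]]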
Example 15: _write_to_file
def _write_to_file(self, new_bands, suffix=None, **kwargs):
    # Read cloud coverage from mtl file
    cloud_cover = self._read_cloud_cover()

    self.output("Final Steps", normal=True, arrow=True)

    output_file = '%s_bands_%s' % (self.scene, "".join(map(str, self.bands)))
    if suffix:
        output_file += suffix
    output_file += '.TIF'

    output_file = join(self.dst_path, output_file)
    output = rasterio.open(output_file, 'w', **kwargs)

    for i, band in enumerate(new_bands):
        # Color Correction
        band = self._color_correction(band, self.bands[i], 0, cloud_cover)

        output.write_band(i + 1, img_as_ubyte(band))
        new_bands[i] = None

    self.output("Writing to file", normal=True, color='green', indent=1)
    return output_file