This article collects typical usage examples of the Python method skimage.util.img_as_ubyte. If you are wondering what util.img_as_ubyte does or how to use it in practice, the hand-picked code samples below may help. You can also read further about the module it belongs to, skimage.util.
Fifteen code examples of util.img_as_ubyte are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
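Before the examples, here is a minimal sketch of what img_as_ubyte does: it converts an image of any supported dtype to uint8, rescaling values to the 0-255 range (float input is expected to lie in [-1, 1], with negative values clipped).

# Minimal sketch: img_as_ubyte rescales an image of any dtype to uint8 in [0, 255].
import numpy as np
from skimage import util

float_img = np.linspace(0, 1, 6).reshape(2, 3)    # float image in [0, 1]
ubyte_img = util.img_as_ubyte(float_img)          # dtype uint8; 0.0 maps to 0, 1.0 maps to 255
print(ubyte_img.dtype, ubyte_img.min(), ubyte_img.max())  # uint8 0 255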
Example 1: chooseFrame
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def chooseFrame(self):
    ret, frame = self.vid.read()
    fname = Path(self.filename)
    output_path = self.config_path.parents[0] / "labeled-data" / fname.stem
    if output_path.exists():
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = img_as_ubyte(frame)
        img_name = (
            str(output_path)
            + "/img"
            + str(self.currFrame).zfill(int(np.ceil(np.log10(self.numberFrames))))
            + ".png"
        )
        if self.cropping:
            crop_img = frame[self.y1 : self.y2, self.x1 : self.x2]
            cv2.imwrite(img_name, cv2.cvtColor(crop_img, cv2.COLOR_RGB2BGR))
        else:
            cv2.imwrite(img_name, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    else:
        print(
            "%s path not found. Please make sure that the video was added to the config file using the function 'deeplabcut.add_new_videos'."
            % output_path
        )
Example 2: execute
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img = util.img_as_ubyte(img)
    res_leaf = []
    cor_size = (0, 0, img.shape[1], img.shape[0])
    quadtree(img, res_leaf, cor_size, 0)
    # fig, ax = plt.subplots(1)
    # for rect in res_leaf:
    #     rect = patches.Rectangle((rect[0], rect[1]), rect[2], rect[3], linewidth=0.1, edgecolor='b', facecolor='none')
    #     ax.add_patch(rect)
    # ax.imshow(img)
    # plt.show()
    b = balance(res_leaf, img.shape[1], img.shape[0])
    s = symmetry(res_leaf, img.shape[1], img.shape[0])
    e = equilibrium(res_leaf, img.shape[1], img.shape[0])
    n = len(res_leaf)
    return [b, s, e, n]
Example 3: execute
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img_la = color.rgb2gray(img)
    img_la = util.img_as_ubyte(img_la)
    # 0.11 and 0.27, sigma = 1, from Measuring visual clutter
    # See sigma here: https://dsp.stackexchange.com/questions/4716/differences-between-opencv-canny-and-matlab-canny
    img_la = cv2.GaussianBlur(img_la, (7, 7), 1)
    cd = cv2.Canny(img_la, 0.11, 0.27)
    total = cd.shape[0] * cd.shape[1]  # Total number of pixels
    number_edges = np.count_nonzero(cd)  # Number of edge pixels
    contour_density = float(number_edges) / float(total)  # Ratio
    result = [contour_density]
    return result
Example 4: execute
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img = util.img_as_ubyte(img)
    img = img.reshape(-1, 3)
    img = [tuple(l) for l in img]
    lum = []
    for pixel in img:
        # Based on: https://en.wikipedia.org/wiki/Luma_(video)
        y = 0.2126 * pixel[0] + 0.7152 * pixel[1] + 0.0722 * pixel[2]
        lum.append(y)
    result = np.std(lum)
    return [result]
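The per-pixel Python loop above is easy to read but slow on large images. A vectorized NumPy equivalent (a sketch, not part of the original project) computes the same Rec. 709 luma standard deviation:

import numpy as np

def luma_std(img_ubyte):
    """Standard deviation of Rec. 709 luma for an (H, W, 3) uint8 RGB image."""
    rgb = img_ubyte.reshape(-1, 3).astype(np.float64)
    luma = rgb @ np.array([0.2126, 0.7152, 0.0722])  # same weights as the loop above
    return float(np.std(luma))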
Example 5: execute
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img = util.img_as_ubyte(img)
    # Convert to the LAB color space
    lab = color.rgb2lab(img)
    L = lab[:, :, 0]
    A = lab[:, :, 1]
    B = lab[:, :, 2]
    # Get the average and standard deviation of each channel separately
    meanL = np.mean(L)
    stdL = np.std(L)
    meanA = np.mean(A)
    stdA = np.std(A)
    meanB = np.mean(B)
    stdB = np.std(B)
    result = [meanL, stdL, meanA, stdA, meanB, stdB]
    return result
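The four execute() helpers above (Examples 2 through 5) share the same input convention: a base64-encoded image. A hypothetical driver might look like the sketch below; the file name is only a placeholder.

import base64

with open("screenshot.png", "rb") as f:   # placeholder file name
    encoded = base64.b64encode(f.read())

metrics = execute(encoded)                # e.g. [meanL, stdL, meanA, stdA, meanB, stdB] for Example 5
print(metrics)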
Example 6: img_data_to_tifs
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def img_data_to_tifs(data, times, crappify, max_scale=1.05):
    np.warnings.filterwarnings('ignore')
    lr_imgs = {}
    lr_up_imgs = {}
    hr_imgs = {}
    for time_col in range(times):
        try:
            img = data[time_col].astype(np.float).copy()
            img_max = img.max() * max_scale
            if img_max == 0:
                continue  # do not save images with no contents.
            img /= img_max
            down_img, down_up_img = crappify(img)
        except:
            continue
        tag = (0, 0, time_col)
        img = img_as_ubyte(img)
        pimg = PIL.Image.fromarray(img, mode='L')
        small_img = PIL.Image.fromarray(img_as_ubyte(down_img))
        big_img = PIL.Image.fromarray(img_as_ubyte(down_up_img))
        hr_imgs[tag] = pimg
        lr_imgs[tag] = small_img
        lr_up_imgs[tag] = big_img
    np.warnings.filterwarnings('default')
    return hr_imgs, lr_imgs, lr_up_imgs
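As a rough usage sketch (everything below is an assumption, not the original project's code): `data` is a (time, height, width) float stack and `crappify` is any callable that returns a degraded image plus its re-upscaled version.

import numpy as np

def toy_crappify(img):
    small = img[::4, ::4]                                      # naive 4x downsample (placeholder)
    big = np.repeat(np.repeat(small, 4, axis=0), 4, axis=1)    # nearest-neighbour upsample back
    return small, big

data = np.random.rand(5, 64, 64)                               # synthetic stack for illustration
hr, lr, lr_up = img_data_to_tifs(data, times=data.shape[0], crappify=toy_crappify)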
Example 7: save_img
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def save_img(fn, img):
    if len(img.shape) == 2:
        np.warnings.filterwarnings('ignore')
        PIL.Image.fromarray(img_as_ubyte(img), mode='L').save(f'{fn}.tif')
        np.warnings.filterwarnings('default')
    else:
        img8 = (img * 255.).astype(np.uint8)
        np.save(fn.with_suffix('.npy'), img8, allow_pickle=False)
Example 8: test_uint
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def test_uint(dtype):
    data = np.arange(50, dtype=dtype)
    data_scaled = data * 256 ** (data.dtype.itemsize - 1)
    assert convert_to_uint8(data_scaled).dtype == np.uint8
    assert np.all(data == convert_to_uint8(data_scaled))
    assert np.all(img_as_ubyte(data) == convert_to_uint8(data))
    assert np.all(img_as_ubyte(data_scaled) == convert_to_uint8(data_scaled))
Example 9: test_int
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def test_int(dtype):
    data = np.arange(50, dtype=dtype)
    data_scaled = data * 256 ** (data.dtype.itemsize - 1)
    assert convert_to_uint8(data).dtype == np.uint8
    assert convert_to_uint8(data_scaled).dtype == np.uint8
    assert np.all(img_as_ubyte(data) == convert_to_uint8(data))
    assert np.all(2 * data == convert_to_uint8(data_scaled))
    assert np.all(img_as_ubyte(data_scaled) == convert_to_uint8(data_scaled))
    assert np.all(img_as_ubyte(data - 10) == convert_to_uint8(data - 10))
    assert np.all(
        img_as_ubyte(data_scaled - 10) == convert_to_uint8(data_scaled - 10)
    )
Example 10: test_float
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def test_float(dtype):
    data = np.linspace(0, 0.5, 128, dtype=dtype, endpoint=False)
    res = np.arange(128, dtype=np.uint8)
    assert convert_to_uint8(data).dtype == np.uint8
    assert np.all(convert_to_uint8(data) == res)
    data = np.linspace(0, 1, 256, dtype=dtype)
    res = np.arange(256, dtype=np.uint8)
    assert np.all(convert_to_uint8(data) == res)
    assert np.all(img_as_ubyte(data) == convert_to_uint8(data))
    assert np.all(img_as_ubyte(data - 0.5) == convert_to_uint8(data - 0.5))
Example 11: test_bool
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def test_bool():
    data = np.zeros((10, 10), dtype=np.bool)
    data[2:-2, 2:-2] = 1
    converted = convert_to_uint8(data)
    assert converted.dtype == np.uint8
    assert np.all(img_as_ubyte(data) == converted)
Example 12: face_rectangle
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def face_rectangle(img, n_upsamples=1):
    """Find a face rectangle.

    Parameters
    ----------
    img : np.ndarray
        Image of any dtype and number of channels.

    n_upsamples : int
        Upsample factor to apply to the image before detection. Allows
        recognizing more faces.

    Returns
    -------
    corners : list
        List of tuples where each tuple represents the top left and bottom right coordinates of
        the face rectangle. Note that these coordinates use the `(row, column)` convention. The
        length of the list is equal to the number of detected faces.

    faces : list
        Instance of ``dlib.rectangles`` that can be used in other algorithms.
    """
    if not isinstance(img, np.ndarray):
        raise TypeError("The input needs to be a np.ndarray")

    dlib_detector = dlib.get_frontal_face_detector()
    faces = dlib_detector(img_as_ubyte(img), n_upsamples)

    corners = []
    for face in faces:
        x1, y1, x2, y2 = face.left(), face.top(), face.right(), face.bottom()
        top_left = (y1, x1)
        bottom_right = (y2, x2)
        corners.append((top_left, bottom_right))

    return corners, faces
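A hedged usage sketch for the detector above, assuming an RGB image loaded with scikit-image; the file name is only a placeholder.

from skimage import io

img = io.imread("portrait.jpg")                        # placeholder file name
corners, faces = face_rectangle(img, n_upsamples=1)
for top_left, bottom_right in corners:
    print("face from", top_left, "to", bottom_right)   # (row, column) coordinates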
Example 13: landmarks_68
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def landmarks_68(img, rectangle, model_path=None):
    """Predict 68 face landmarks.

    Parameters
    ----------
    img : np.ndarray
        Image of any dtype and number of channels.

    rectangle : dlib.rectangle
        Rectangle that represents the bounding box around a single face.

    model_path : str or pathlib.Path, default=None
        Path to where the pretrained model is located. If None, the model stored in `CACHE_FOLDER` is used.

    Returns
    -------
    lm_points : np.ndarray
        Array of shape `(68, 2)` where rows are different landmark points and the columns
        are x and y coordinates.

    original : dlib.full_object_detection
        Instance of ``dlib.full_object_detection``.
    """
    if model_path is None:
        model_path = CACHE_FOLDER / "shape_predictor_68_face_landmarks.dat"
        get_pretrained_68(model_path.parent)
    else:
        model_path = pathlib.Path(str(model_path))

    if not model_path.is_file():
        raise IOError("Invalid landmark model, {}".format(str(model_path)))

    lm_predictor = dlib.shape_predictor(str(model_path))
    original = lm_predictor(img_as_ubyte(img), rectangle)
    lm_points = np.array([[p.x, p.y] for p in original.parts()])

    return lm_points, original
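A hedged sketch of how the two helpers above might be chained, reusing the `img` loaded in the previous snippet: detect faces first, then predict landmarks for the first detection.

corners, faces = face_rectangle(img)
if faces:
    lm_points, detection = landmarks_68(img, faces[0])  # faces[0] is a dlib.rectangle
    print(lm_points.shape)                               # expected (68, 2): one (x, y) row per landmark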
Example 14: GetPoseS
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def GetPoseS(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes):
    """ Non batch wise pose estimation for video cap."""
    if cfg["cropping"]:
        ny, nx = checkcropping(cfg, cap)

    PredictedData = np.zeros(
        (nframes, dlc_cfg["num_outputs"] * 3 * len(dlc_cfg["all_joints_names"]))
    )
    pbar = tqdm(total=nframes)
    counter = 0
    step = max(10, int(nframes / 100))
    while cap.isOpened():
        if counter % step == 0:
            pbar.update(step)
        ret, frame = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if cfg["cropping"]:
                frame = img_as_ubyte(
                    frame[cfg["y1"] : cfg["y2"], cfg["x1"] : cfg["x2"]]
                )
            else:
                frame = img_as_ubyte(frame)
            pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
            PredictedData[counter, :] = pose.flatten()  # NOTE: thereby cfg['all_joints_names'] should be same order as bodyparts!
        else:
            nframes = counter
            break
        counter += 1

    pbar.close()
    return PredictedData, nframes
Example 15: update
# Required module import: from skimage import util [as alias]
# Or: from skimage.util import img_as_ubyte [as alias]
def update(self):
    """
    Updates the image with the current slider index
    """
    self.grab.Enable(True)
    self.grab.Bind(wx.EVT_BUTTON, self.grabFrame)
    self.figure, self.axes, self.canvas = self.image_panel.getfigure()
    self.vid.set(1, self.currFrame)
    ret, frame = self.vid.read()
    frame = img_as_ubyte(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    if ret:
        if self.cropping:
            self.coords = (
                self.cfg["x1"],
                self.cfg["x2"],
                self.cfg["y1"],
                self.cfg["y2"],
            )
            frame = frame[
                int(self.coords[2]) : int(self.coords[3]),
                int(self.coords[0]) : int(self.coords[1]),
                :,
            ]
        else:
            self.coords = None
        self.ax = self.axes.imshow(frame, cmap=self.colormap)
        self.axes.set_title(
            str(
                str(self.currFrame)
                + "/"
                + str(self.numberFrames - 1)
                + " "
                + self.filename
            )
        )
        self.figure.canvas.draw()
    else:
        print("Invalid frame")