This article collects typical usage examples of the Python method skimage.transform.estimate_transform: what the method does, how to call it, and what real-world uses look like. The hand-picked examples below may help; you can also read up on the containing module, skimage.transform, for related functionality.
Eight code examples of transform.estimate_transform are shown below, ordered by popularity.
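Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what estimate_transform returns in the common 'similarity' case: a transform object whose params matrix, scale, rotation, and translation can be inspected, and which can be passed directly to transform.warp or called on coordinates.

import numpy as np
from skimage import transform

# Three source points, and the same points scaled, rotated, and shifted.
src = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
angle = np.deg2rad(30)
R = np.array([[np.cos(angle), -np.sin(angle)],
              [np.sin(angle),  np.cos(angle)]])
dst = 2.0 * src.dot(R.T) + np.array([5.0, -3.0])

tform = transform.estimate_transform('similarity', src, dst)
print(tform.scale)        # ~2.0
print(tform.rotation)     # ~0.5236 rad (30 degrees)
print(tform.translation)  # ~[5.0, -3.0]
print(tform(src))         # applying the transform maps src onto dst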
Example 1: lfw_imgs
# Required import: from skimage import transform [as alias]
# Or: from skimage.transform import estimate_transform [as alias]
def lfw_imgs(alignment):
    if alignment == 'landmarks':
        dataset = dp.dataset.LFW('original')
        imgs = dataset.imgs
        landmarks = dataset.landmarks('68')
        n_landmarks = 68
        # Mean landmark positions, split into x and y rows: shape (2, 68).
        landmarks_mean = np.mean(landmarks, axis=0)
        landmarks_mean = np.array([landmarks_mean[:n_landmarks],
                                   landmarks_mean[n_landmarks:]])
        aligned_imgs = []
        for img, points in zip(imgs, landmarks):
            points = np.array([points[:n_landmarks], points[n_landmarks:]])
            # Similarity transform mapping the mean face onto this face.
            transf = transform.estimate_transform('similarity',
                                                  landmarks_mean.T, points.T)
            img = img / 255.
            img = transform.warp(img, transf, order=3)
            img = np.round(img * 255).astype(np.uint8)
            aligned_imgs.append(img)
        imgs = np.array(aligned_imgs)
    else:
        dataset = dp.dataset.LFW(alignment)
        imgs = dataset.imgs
    return imgs
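Note the direction of the estimate in this example: the transform is fit from the mean landmarks to each image's landmarks, not the other way around. That is because transform.warp treats the transform it is given as the inverse map: for each output pixel it asks where to sample in the input. A standalone sketch of the convention (not part of the example above):

import numpy as np
from skimage import transform

img = np.zeros((100, 100))
img[40:60, 40:60] = 1.0  # a bright square to move around

src_pts = np.array([[40, 40], [60, 40], [40, 60]], dtype=float)
dst_pts = src_pts + 20  # shift the square by (+20, +20)

t = transform.estimate_transform('similarity', src_pts, dst_pts)
# warp() interprets its transform argument as the output -> input map,
# so pass t.inverse to move content from src_pts to dst_pts:
moved = transform.warp(img, t.inverse)

Example 1 achieves the same effect by estimating mean -> points directly and passing that transform as-is.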
Example 2: fit
# Required import: from skimage import transform [as alias]
# Or: from skimage.transform import estimate_transform [as alias]
def fit(self, data):
    """
    Return the best 2D similarity transform from the points given in data.

    data: N sets of corresponding triangles;
        3 point indices for a triangle in the reference
        and the 3 indices for the matching triangle in the target,
        arranged in an (N, 3, 2) array.
    """
    d1, d2, d3 = data.shape
    # Flatten to 3N index pairs; column 0 indexes source, column 1 target.
    s, d = data.reshape(d1 * d2, d3).T
    approx_t = estimate_transform(
        "similarity", self.source[s], self.target[d]
    )
    return approx_t
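This fit method has the shape of a model-fitting hook for a RANSAC loop over matched triangles (it closely resembles astroalign's internal matcher). A hedged sketch of the companion error function such a loop would need, assuming self.source and self.target are (M, 2) coordinate arrays, using GeometricTransform.residuals from skimage:

import numpy as np

def get_error(self, data, approx_t):
    # Hypothetical companion to fit() above; same unpacking of (N, 3, 2) indices.
    d1, d2, d3 = data.shape
    s, d = data.reshape(d1 * d2, d3).T
    # Per-point Euclidean residuals under the candidate transform,
    # regrouped per triangle; score each triangle by its worst point.
    resid = approx_t.residuals(self.source[s], self.target[d]).reshape(d1, d2)
    return resid.max(axis=1)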
Example 3: test_find_transform_givensources
# Required import: from skimage import transform [as alias]
# Or: from skimage.transform import estimate_transform [as alias]
def test_find_transform_givensources(self):
    from skimage.transform import estimate_transform, matrix_transform
    source = np.array(
        [
            [1.4, 2.2],
            [5.3, 1.0],
            [3.7, 1.5],
            [10.1, 9.6],
            [1.3, 10.2],
            [7.1, 2.0],
        ]
    )
    nsrc = source.shape[0]
    scale = 1.5  # scaling parameter
    alpha = np.pi / 8.0  # rotation angle
    mm = scale * np.array(
        [[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]]
    )
    tx, ty = 2.0, 1.0  # translation parameters
    transl = np.array([nsrc * [tx], nsrc * [ty]])
    dest = (mm.dot(source.T) + transl).T
    t_true = estimate_transform("similarity", source, dest)
    # Disorder dest points so they don't match the order of source.
    np.random.shuffle(dest)
    t, (src_pts, dst_pts) = aa.find_transform(source, dest)
    self.assertLess(t_true.scale - t.scale, 1e-10)
    self.assertLess(t_true.rotation - t.rotation, 1e-10)
    self.assertLess(
        np.linalg.norm(t_true.translation - t.translation), 1e-10
    )
    self.assertEqual(src_pts.shape[0], dst_pts.shape[0])
    self.assertEqual(src_pts.shape[1], 2)
    self.assertEqual(dst_pts.shape[1], 2)
    dst_pts_test = matrix_transform(src_pts, t.params)
    self.assertLess(np.linalg.norm(dst_pts_test - dst_pts), 1e-10)
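Here aa is presumably astroalign: find_transform matches the two unordered point sets and returns the estimated transform together with the matched point pairs. The final assertion uses matrix_transform, which applies a 3x3 homogeneous matrix to (x, y) coordinates; it is equivalent to calling the transform object directly, as this small sketch shows:

import numpy as np
from skimage.transform import SimilarityTransform, matrix_transform

t = SimilarityTransform(scale=1.5, rotation=np.pi / 8, translation=(2.0, 1.0))
pts = np.array([[1.4, 2.2], [5.3, 1.0]])
# matrix_transform(pts, t.params) and t(pts) compute the same thing.
assert np.allclose(matrix_transform(pts, t.params), t(pts))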
Example 4: gen_data
# Required import: from skimage import transform [as alias]
# Or: from skimage.transform import estimate_transform [as alias]
def gen_data(name):
    reftracker = scio.loadmat('data/images_tracker.00047.mat')['tracker']
    desttracker = scio.loadmat('data/images_tracker/'+name+'.mat')['tracker']
    refpos = np.floor(np.mean(reftracker, 0))
    xxc, yyc = np.meshgrid(np.arange(1, 1801, dtype=int), np.arange(1, 2001, dtype=int))
    # Normalize the x and y channels.
    xxc = (xxc - 600 - refpos[0]) * 1.0 / 600
    yyc = (yyc - 600 - refpos[1]) * 1.0 / 600
    maskimg = Image.open('data/meanmask.png')
    maskc = np.array(maskimg, dtype=float)
    maskc = np.pad(maskc, (600, 600), 'minimum')
    # warp is an inverse transform, so src and dst must be reversed here.
    tform = transform.estimate_transform('affine', desttracker + 600, reftracker + 600)
    img_data = skio.imread('data/images_data/'+name+'.jpg')
    # Warp the normalized coordinate and mask channels into this image's frame,
    # then crop away the 600-pixel padding.
    warpedxx = transform.warp(xxc, tform, output_shape=xxc.shape)
    warpedyy = transform.warp(yyc, tform, output_shape=xxc.shape)
    warpedmask = transform.warp(maskc, tform, output_shape=xxc.shape)
    warpedxx = warpedxx[600:1400, 600:1200]
    warpedyy = warpedyy[600:1400, 600:1200]
    warpedmask = warpedmask[600:1400, 600:1200]
    img_h, img_w, _ = img_data.shape
    # Save the original mat: BGR, mean-subtracted, scaled to roughly [0, 1].
    mat = np.zeros((img_h, img_w, 3), dtype=float)
    mat[:, :, 0] = (img_data[:, :, 2] * 1.0 - 104.008) / 255
    mat[:, :, 1] = (img_data[:, :, 1] * 1.0 - 116.669) / 255
    mat[:, :, 2] = (img_data[:, :, 0] * 1.0 - 122.675) / 255
    scio.savemat('portraitFCN_data/' + name + '.mat', {'img': mat})
    mat_plus = np.zeros((img_h, img_w, 6), dtype=float)
    mat_plus[:, :, 0:3] = mat
    mat_plus[:, :, 3] = warpedxx
    mat_plus[:, :, 4] = warpedyy
    mat_plus[:, :, 5] = warpedmask
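The +600 offsets throughout gen_data come from padding: the mean mask is padded by 600 pixels on every side, so the tracker coordinates passed to estimate_transform must be shifted by the same amount to stay aligned with the padded arrays. A tiny sketch of what that np.pad call does:

import numpy as np

a = np.array([[5, 6], [7, 8]])
# Pad 600 entries on each side of each axis; mode 'minimum' fills with
# the minimum values along each axis being padded.
padded = np.pad(a, (600, 600), 'minimum')
print(padded.shape)      # (1202, 1202)
print(padded[600, 600])  # 5, the original top-left element, now offset by 600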
Example 5: estimate_coordinate_transform
# Required import: from skimage import transform [as alias]
# Or: from skimage.transform import estimate_transform [as alias]
def estimate_coordinate_transform(source, target, method, **method_kwargs):
    """Calculate a transformation from a source list of coordinates to a
    target list of coordinates.

    Parameters
    ----------
    source : Nx2 array
        (x, y) coordinate pairs from source image.
    target : Nx2 array
        (x, y) coordinate pairs from target image. Must be the same shape as
        `source`.
    method : string
        Method to use for transform estimation.
    **method_kwargs : optional
        Additional arguments specific to the particular method, for example
        `order` for a polynomial transform estimation.

    Returns
    -------
    transform : skimage.transform._geometric.GeometricTransform
        An skimage transform object.

    See Also
    --------
    skimage.transform.estimate_transform
    """
    return tf.estimate_transform(method, source, target, **method_kwargs)
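A hedged usage sketch for this wrapper (which simply forwards to estimate_transform, imported here as tf), fitting a second-order polynomial warp between two point sets; the point values are made up for illustration, and the function above is assumed to be in scope:

import numpy as np

source = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0],
                   [1.0, 1.0], [0.5, 0.5], [2.0, 1.0]])
# A mildly curved displacement that an order-2 polynomial can represent exactly.
target = source + 0.1 * source ** 2

t = estimate_coordinate_transform(source, target, 'polynomial', order=2)
print(np.allclose(t(source), target))  # should print True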
Example 6: check_if_ok
# Required import: from skimage import transform [as alias]
# Or: from skimage.transform import estimate_transform [as alias]
def check_if_ok(self, numstars):
    "Helper function with common test code for 3, 4, 5, and 6 stars"
    from skimage.transform import estimate_transform, matrix_transform
    if numstars > 6:
        raise NotImplementedError
    # x and y of stars in the ref frame (ints).
    self.star_refx = np.array([100, 120, 400, 400, 200, 200])[:numstars]
    self.star_refy = np.array([150, 200, 200, 320, 210, 350])[:numstars]
    self.num_stars = numstars
    # Fluxes of stars.
    self.star_f = np.array(numstars * [700.0])
    (
        self.image,
        self.image_ref,
        self.star_ref_pos,
        self.star_new_pos,
    ) = simulate_image_pair(
        shape=(self.h, self.w),
        translation=(self.x_offset, self.y_offset),
        rot_angle_deg=50.0,
        num_stars=self.num_stars,
        star_refx=self.star_refx,
        star_refy=self.star_refy,
        star_flux=self.star_f,
    )
    source = self.star_ref_pos
    dest = self.star_new_pos.copy()
    t_true = estimate_transform("similarity", source, dest)
    # Disorder dest points so they don't match the order of source.
    np.random.shuffle(dest)
    t, (src_pts, dst_pts) = aa.find_transform(source, dest)
    self.assertLess(t_true.scale - t.scale, 1e-10)
    self.assertLess(t_true.rotation - t.rotation, 1e-10)
    self.assertLess(
        np.linalg.norm(t_true.translation - t.translation), 1.0
    )
    self.assertEqual(src_pts.shape[0], dst_pts.shape[0])
    self.assertLessEqual(src_pts.shape[0], source.shape[0])
    self.assertEqual(src_pts.shape[1], 2)
    self.assertEqual(dst_pts.shape[1], 2)
    dst_pts_test = matrix_transform(src_pts, t.params)
    self.assertLess(np.linalg.norm(dst_pts_test - dst_pts), 1.0)
Example 7: process
# Required import: from skimage import transform [as alias]
# Or: from skimage.transform import estimate_transform [as alias]
def process(self, input, image_info=None):
    '''Process image with crop operation.

    Args:
        input: (h,w,3) array or str (image path). Image value range: 1~255.
        image_info (optional): the bounding box information of faces. If None, will use dlib to detect face.

    Returns:
        pos: the 3D position map. (256, 256, 3).
    '''
    if isinstance(input, str):
        try:
            image = imread(input)
        except IOError:
            print("error opening file: ", input)
            return None
    else:
        image = input
    if image.ndim < 3:
        # Grayscale input: replicate to three channels.
        image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
    if np.max(image_info.shape) > 4:  # key points: derive bounding box
        kpt = image_info
        if kpt.shape[0] > 3:
            kpt = kpt.T
        left = np.min(kpt[0, :]); right = np.max(kpt[0, :])
        top = np.min(kpt[1, :]); bottom = np.max(kpt[1, :])
    else:  # bounding box
        bbox = image_info
        left = bbox[0]; right = bbox[1]; top = bbox[2]; bottom = bbox[3]
    old_size = (right - left + bottom - top) / 2
    center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])
    size = int(old_size * 1.6)
    # Crop image: map three corners of the square crop onto the net input.
    src_pts = np.array([[center[0] - size / 2, center[1] - size / 2],
                        [center[0] - size / 2, center[1] + size / 2],
                        [center[0] + size / 2, center[1] - size / 2]])
    DST_PTS = np.array([[0, 0], [0, self.resolution_inp - 1], [self.resolution_inp - 1, 0]])
    tform = estimate_transform('similarity', src_pts, DST_PTS)
    image = image / 255.
    cropped_image = warp(image, tform.inverse, output_shape=(self.resolution_inp, self.resolution_inp))
    # Run our net.
    cropped_image = torch.from_numpy(cropped_image[np.newaxis, ...].transpose(0, 3, 1, 2).astype(np.float32)).cuda()
    cropped_pos = self.net_forward(cropped_image) * self.resolution_inp * 1.1
    # Restore: map the predicted position map back to the original image frame.
    cropped_vertices = np.reshape(cropped_pos, [-1, 3]).T
    z = cropped_vertices[2, :].copy() / tform.params[0, 0]
    cropped_vertices[2, :] = 1
    vertices = np.dot(np.linalg.inv(tform.params), cropped_vertices)
    vertices = np.vstack((vertices[:2, :], z))
    pos = np.reshape(vertices.T, [self.resolution_op, self.resolution_op, 3])
    return pos
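The restore step is worth unpacking. tform maps original-image (x, y) to crop (x, y), so applying np.linalg.inv(tform.params) to homogeneous [x, y, 1] vectors takes the predicted positions back to the original frame. The z coordinate is not part of the 2D transform; it is rescaled separately by dividing by tform.params[0, 0], which for this axis-aligned crop (no rotation) is exactly the similarity's scale factor, since a similarity matrix has the form [[s*cos r, -s*sin r, tx], [s*sin r, s*cos r, ty], [0, 0, 1]] and r = 0 here. A quick standalone check of that assumption:

import numpy as np
from skimage.transform import estimate_transform

src = np.array([[10.0, 20.0], [10.0, 120.0], [110.0, 20.0]])
dst = np.array([[0.0, 0.0], [0.0, 255.0], [255.0, 0.0]])
t = estimate_transform('similarity', src, dst)
# Axis-aligned crop: rotation is ~0, so params[0, 0] equals the scale.
print(np.isclose(t.rotation, 0.0), np.isclose(t.params[0, 0], t.scale))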
Example 8: process
# Required import: from skimage import transform [as alias]
# Or: from skimage.transform import estimate_transform [as alias]
def process(self, image, bbox):
    '''Process image with crop operation.

    Args:
        image: (h,w,3) array. Image value range: 1~255.
        bbox: face bounding box, indexed here as [left, top, right, bottom].

    Returns:
        pos: the 3D position map. (256, 256, 3).
    '''
    if image.ndim < 3:
        image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
    left = bbox[0]
    right = bbox[2]
    top = bbox[1]
    bottom = bbox[3]
    old_size = (right - left + bottom - top) / 2
    # Shift the crop center down slightly and enlarge it around the face.
    center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.14])
    size = int(old_size * 1.318)
    # Crop image: same three-corner similarity estimate as Example 7.
    src_pts = np.array([[center[0] - size / 2, center[1] - size / 2],
                        [center[0] - size / 2, center[1] + size / 2],
                        [center[0] + size / 2, center[1] - size / 2]])
    DST_PTS = np.array([[0, 0], [0, self.resolution_inp - 1], [self.resolution_inp - 1, 0]])
    tform = estimate_transform('similarity', src_pts, DST_PTS)
    image = image / 255.
    cropped_image = warp(image, tform.inverse, output_shape=(self.resolution_inp, self.resolution_inp))
    # Run our net.
    cropped_pos = self.net_forward(cropped_image)
    crop_pos = cropped_pos.copy()
    # Restore the position map to the original image frame (see Example 7).
    cropped_vertices = np.reshape(cropped_pos, [-1, 3]).T
    z = cropped_vertices[2, :].copy() / tform.params[0, 0]
    cropped_vertices[2, :] = 1
    vertices = np.dot(np.linalg.inv(tform.params), cropped_vertices)
    vertices = np.vstack((vertices[:2, :], z))
    pos = np.reshape(vertices.T, [self.resolution_op, self.resolution_op, 3])
    return pos