This article collects typical usage examples of the Python method scipy.ndimage.interpolation.affine_transform. If you are wondering what interpolation.affine_transform does, how to call it, or what working code using it looks like, the curated examples below should help. You can also explore further usage examples from the containing module, scipy.ndimage.interpolation.
The following shows 8 code examples of interpolation.affine_transform, sorted by popularity by default.
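As a quick orientation before the examples, here is a minimal, self-contained sketch of the call itself; the array and values are illustrative, and affine_transform is imported from scipy.ndimage directly (its non-deprecated location). The matrix/offset pair maps each output coordinate o to the input position matrix.dot(o) + offset:

import numpy as np
from scipy.ndimage import affine_transform

img = np.arange(25, dtype=float).reshape(5, 5)
# Identity matrix with offset (1, 0): output[i, j] = input[i + 1, j], i.e. shift up one row.
shifted = affine_transform(img, np.eye(2), offset=(1, 0), order=1, mode='constant', cval=0.0)
print(shifted)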
Example 1: random_rotation
# Required import: from scipy.ndimage import interpolation [as alias]
# Or: from scipy.ndimage.interpolation import affine_transform [as alias]
def random_rotation(volume, rotation):
    # Draw a random rotation angle (in radians) for each of the three axes.
    theta_x = np.pi / 180 * np.random.uniform(-rotation, rotation)
    theta_y = np.pi / 180 * np.random.uniform(-rotation, rotation)
    theta_z = np.pi / 180 * np.random.uniform(-rotation, rotation)
    rotation_matrix_x = np.array([[1, 0, 0],
                                  [0, np.cos(theta_x), -np.sin(theta_x)],
                                  [0, np.sin(theta_x), np.cos(theta_x)]])
    rotation_matrix_y = np.array([[np.cos(theta_y), 0, np.sin(theta_y)],
                                  [0, 1, 0],
                                  [-np.sin(theta_y), 0, np.cos(theta_y)]])
    rotation_matrix_z = np.array([[np.cos(theta_z), -np.sin(theta_z), 0],
                                  [np.sin(theta_z), np.cos(theta_z), 0],
                                  [0, 0, 1]])
    # Compose the three axis rotations into a single 3x3 transform matrix.
    transform_matrix = np.dot(np.dot(rotation_matrix_x, rotation_matrix_y), rotation_matrix_z)
    volume_rotated = affine_transform(volume, transform_matrix, mode='nearest')
    return volume_rotated
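A minimal usage sketch, assuming numpy as np and affine_transform are imported as listed in the comments above; the volume is illustrative:

import numpy as np

# Hypothetical 64x64x64 volume, rotated by random angles of up to +/-10 degrees per axis.
volume = np.random.rand(64, 64, 64)
rotated = random_rotation(volume, rotation=10)
print(rotated.shape)  # (64, 64, 64)

Note that because no offset is passed to affine_transform, the rotation pivots around index (0, 0, 0) rather than the volume centre; Example 5 below shows how to rotate about an arbitrary centre.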
Example 2: transform
# Required import: from scipy.ndimage import interpolation [as alias]
# Or: from scipy.ndimage.interpolation import affine_transform [as alias]
def transform(self, translation, theta, method='opencv'):
    """Create a new image by translating and rotating the current image.

    Parameters
    ----------
    translation : :obj:`numpy.ndarray` of float
        The XY translation vector.
    theta : float
        Rotation angle in radians, with positive meaning counter-clockwise.
    method : :obj:`str`
        Method to use for image transformations (opencv or scipy).

    Returns
    -------
    :obj:`Image`
        An image of the same type that has been rotated and translated.
    """
    theta = np.rad2deg(theta)
    trans_map = np.float32(
        [[1, 0, translation[1]], [0, 1, translation[0]]])
    rot_map = cv2.getRotationMatrix2D(
        (self.center[1], self.center[0]), theta, 1)
    # Lift both 2x3 maps to 3x3 homogeneous form and compose them into one affine map.
    trans_map_aff = np.r_[trans_map, [[0, 0, 1]]]
    rot_map_aff = np.r_[rot_map, [[0, 0, 1]]]
    full_map = rot_map_aff.dot(trans_map_aff)
    full_map = full_map[:2, :]
    if method == 'opencv':
        im_data_tf = cv2.warpAffine(
            self.data, full_map, (self.width, self.height),
            flags=cv2.INTER_NEAREST)
    else:
        # SciPy fallback: split the 2x3 map into a 2x2 matrix and an offset vector.
        im_data_tf = sni.affine_transform(self.data,
                                          matrix=full_map[:, :2],
                                          offset=full_map[:, 2],
                                          order=0)
    return type(self)(im_data_tf.astype(self.data.dtype),
                      frame=self._frame)
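A note on the two backends: cv2.warpAffine treats the 2x3 map as a forward source-to-destination transform (and inverts it internally), whereas SciPy's affine_transform treats matrix and offset as a mapping from output coordinates back into the input array. If you mix the two methods it is worth verifying that they agree for your transforms.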
Example 3: scale_to_h
# Required import: from scipy.ndimage import interpolation [as alias]
# Or: from scipy.ndimage.interpolation import affine_transform [as alias]
def scale_to_h(img, target_height, order=1, dtype=np.dtype('f'), cval=0):
    h, w = img.shape
    scale = target_height * 1.0 / h
    target_width = np.maximum(int(scale * w), 1)
    # affine_transform maps output coordinates to input coordinates, so dividing
    # the identity matrix by the scale factor resizes the image to output_shape.
    output = interpolation.affine_transform(
        1.0 * img,
        np.eye(2) / scale,
        order=order,
        output_shape=(target_height, target_width),
        mode='constant',
        cval=cval)
    output = np.array(output, dtype=dtype)
    return output
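A minimal usage sketch, assuming numpy as np and the scipy.ndimage interpolation module are imported as in the comments above; the input array is illustrative:

import numpy as np

# Hypothetical 48x200 grayscale text-line image, rescaled to a height of 32 pixels.
img = np.random.rand(48, 200).astype('f')
scaled = scale_to_h(img, 32)
print(scaled.shape)  # (32, 133) -- the width is scaled by the same factor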
Example 4: scale_to_h
# Required import: from scipy.ndimage import interpolation [as alias]
# Or: from scipy.ndimage.interpolation import affine_transform [as alias]
def scale_to_h(img, target_height, order=1, dtype=np.dtype('f'), cval=0):
    h, w = img.shape
    scale = target_height * 1.0 / h
    target_width = int(scale * w)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        # A 1-D matrix is interpreted by affine_transform as the diagonal of the
        # transform matrix, i.e. a per-axis scaling.
        output = interpolation.affine_transform(1.0 * img, np.ones(2) / scale,
                                                order=order,
                                                output_shape=(target_height,
                                                              target_width),
                                                mode='constant', cval=cval)
    output = np.array(output, dtype=dtype)
    return output
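The only substantive difference from Example 3 is the matrix argument: np.ones(2)/scale is one-dimensional, which affine_transform treats as the diagonal of the transform matrix (and handles with a faster separable resampling path), so the result is equivalent to np.eye(2)/scale. The warnings filter presumably silences the UserWarning SciPy emits about the changed handling of 1-D matrices since SciPy 0.18.0.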
Example 5: rotate
# Required import: from scipy.ndimage import interpolation [as alias]
# Or: from scipy.ndimage.interpolation import affine_transform [as alias]
def rotate(v, angle=None, rm=None, c1=None, c2=None, loc_r=None, siz2=None, default_val=float('NaN')):
    if angle is not None:
        assert rm is None
        angle = N.array(angle, dtype=float).flatten()
        rm = AA.rotation_matrix_zyz(angle)
    if rm is None:
        rm = N.eye(v.ndim)
    siz1 = N.array(v.shape, dtype=float)
    # Rotation centre in the input volume (defaults to the geometric centre).
    if c1 is None:
        c1 = (siz1 - 1) / 2.0
    else:
        c1 = c1.flatten()
        assert c1.shape == (3,)
    if siz2 is None:
        siz2 = siz1
    siz2 = N.array(siz2, dtype=float)
    # Rotation centre in the output volume, optionally shifted by loc_r.
    if c2 is None:
        c2 = (siz2 - 1) / 2.0
    else:
        c2 = c2.flatten()
        assert c2.shape == (3,)
    if loc_r is not None:
        loc_r = N.array(loc_r, dtype=float).flatten()
        assert loc_r.shape == (3,)
        c2 += loc_r
    # Offset chosen so that output coordinate c2 maps back onto input coordinate c1.
    c = -rm.dot(c2) + c1
    vr = SNI.affine_transform(input=v, matrix=rm, offset=c, output_shape=siz2.astype(int), cval=default_val)
    return vr
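A minimal usage sketch, assuming N is numpy and SNI is scipy.ndimage.interpolation as in the snippet; passing an explicit rotation matrix avoids the project-specific AA.rotation_matrix_zyz helper:

import numpy as N

v = N.random.rand(32, 32, 32)
# Explicit 90-degree rotation in the plane of the first two axes, about the volume centre.
rm = N.array([[0., -1., 0.],
              [1., 0., 0.],
              [0., 0., 1.]])
vr = rotate(v, rm=rm, default_val=0.0)
print(vr.shape)  # (32, 32, 32)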
Example 6: resize_center
# Required import: from scipy.ndimage import interpolation [as alias]
# Or: from scipy.ndimage.interpolation import affine_transform [as alias]
def resize_center(v, s, cval=float('NaN')):
    vs = N.array(v.shape, dtype=float)
    from scipy.ndimage import interpolation
    # Identity matrix plus an offset that keeps the original volume centred inside
    # the new shape s; any padding introduced is filled with cval.
    v1 = interpolation.affine_transform(input=v, matrix=N.eye(v.ndim), offset=(vs - s) / 2.0, output_shape=s, cval=cval)
    return v1
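A minimal sketch, assuming N is numpy and zero is an acceptable padding value:

import numpy as N

v = N.random.rand(20, 20, 20)
# Centre the 20^3 volume inside a 32^3 volume; the new border voxels get cval.
v_big = resize_center(v, s=(32, 32, 32), cval=0.0)
print(v_big.shape)  # (32, 32, 32)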
Example 7: augment
# Required import: from scipy.ndimage import interpolation [as alias]
# Or: from scipy.ndimage.interpolation import affine_transform [as alias]
def augment(images):
    pixels = images[0].shape[1]
    center = pixels / 2. - 0.5

    random_flip_x = P.AUGMENTATION_PARAMS['flip'] and np.random.randint(2) == 1
    random_flip_y = P.AUGMENTATION_PARAMS['flip'] and np.random.randint(2) == 1

    # Translation shift
    shift_x = np.random.uniform(*P.AUGMENTATION_PARAMS['translation_range'])
    shift_y = np.random.uniform(*P.AUGMENTATION_PARAMS['translation_range'])
    rotation_degrees = np.random.uniform(*P.AUGMENTATION_PARAMS['rotation_range'])
    zoom_factor = np.random.uniform(*P.AUGMENTATION_PARAMS['zoom_range'])
    #zoom_factor = 1 + (zoom_f/2-zoom_f*np.random.random())

    if CV2_AVAILABLE:
        # One shared affine map (rotation and zoom about the image centre, plus translation).
        M = cv2.getRotationMatrix2D((center, center), rotation_degrees, zoom_factor)
        M[0, 2] += shift_x
        M[1, 2] += shift_y

    for i in range(len(images)):
        image = images[i]
        if CV2_AVAILABLE:
            #image = image.transpose(1,2,0)
            image = cv2.warpAffine(image, M, (pixels, pixels))
            if random_flip_x:
                image = cv2.flip(image, 0)
            if random_flip_y:
                image = cv2.flip(image, 1)
            #image = image.transpose(2,0,1)
            images[i] = image
        else:
            # scipy.ndimage fallback: flip in place, rotate, crop/pad, then shift.
            if random_flip_x:
                #image = image.transpose(1,0)
                image[:, :] = image[::-1, :]
                #image = image.transpose(1,0)
            if random_flip_y:
                image = image.transpose(1, 0)
                image[:, :] = image[::-1, :]
                image = image.transpose(1, 0)
            rotate(image, rotation_degrees, reshape=False, output=image)
            #image2 = zoom(image, [zoom_factor,zoom_factor])
            image2 = crop_or_pad(image, pixels, -3000)
            shift(image2, [shift_x, shift_y], output=image)
            #affine_transform(image, np.array([[zoom_x,0], [0,zoom_x]]), output=image)
            #z = AffineTransform(scale=(2,2))
            #image = warp(image, z.params)
            images[i] = image
    return images
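A minimal sketch of driving this function, assuming P is the project's parameter module with an AUGMENTATION_PARAMS dict containing the keys read above; the values, image size, and the CV2_AVAILABLE flag are illustrative, and the scipy fallback additionally needs rotate, shift and crop_or_pad from the surrounding project:

import numpy as np

# Hypothetical configuration with the keys the function reads.
P.AUGMENTATION_PARAMS = {
    'flip': True,                   # enable random flips
    'translation_range': (-3, 3),   # shift in pixels
    'rotation_range': (-10, 10),    # rotation in degrees
    'zoom_range': (0.9, 1.1),       # zoom factor (used on the OpenCV path)
}

slices = [np.random.rand(512, 512).astype(np.float32) for _ in range(3)]
augmented = augment(slices)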
Example 8: distort_line
# Required import: from scipy.ndimage import interpolation [as alias]
# Or: from scipy.ndimage.interpolation import affine_transform [as alias]
def distort_line(im, distort=3.0, sigma=10, eps=0.03, delta=0.3):
    """
    Distorts a line image.

    Run BEFORE degrade_line as a white border of 5 pixels will be added.

    Args:
        im (PIL.Image): Input image
        distort (float): Amplitude of the smoothed geometric distortion field
        sigma (float): Gaussian smoothing of the distortion field
        eps (float): Scale of the random affine scale/shear perturbation
        delta (float): Scale of the random translation jitter

    Returns:
        PIL.Image in mode 'L'
    """
    w, h = im.size
    # XXX: determine correct output shape from transformation matrices instead
    # of guesstimating.
    logger.debug('Pasting source image into canvas')
    image = Image.new('L', (int(1.5*w), 4*h), 255)
    image.paste(im, (int((image.size[0] - w) / 2), int((image.size[1] - h) / 2)))
    line = pil2array(image.convert('L'))
    # shear in y direction with factor eps * randn(), scaling with 1 + eps *
    # randn() in x/y axis (all offset at d)
    logger.debug('Performing affine transformation')
    m = np.array([[1 + eps * np.random.randn(), 0.0], [eps * np.random.randn(), 1.0 + eps * np.random.randn()]])
    c = np.array([w/2.0, h/2])
    d = c - np.dot(m, c) + np.array([np.random.randn() * delta, np.random.randn() * delta])
    line = affine_transform(line, m, offset=d, order=1, mode='constant', cval=255)
    # Smoothed random displacement fields for the subsequent geometric distortion.
    hs = gaussian_filter(np.random.randn(4*h, int(1.5*w)), sigma)
    ws = gaussian_filter(np.random.randn(4*h, int(1.5*w)), sigma)
    hs *= distort/np.amax(hs)
    ws *= distort/np.amax(ws)

    def _f(p):
        return (p[0] + hs[p[0], p[1]], p[1] + ws[p[0], p[1]])

    logger.debug('Performing geometric transformation')
    im = array2pil(geometric_transform(line, _f, order=1, mode='nearest'))
    logger.debug('Cropping canvas to content box')
    im = im.crop(ImageOps.invert(im).getbbox())
    return im
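A minimal usage sketch, assuming the module-level names the function relies on (np, affine_transform, gaussian_filter, geometric_transform, pil2array, array2pil, Image, ImageOps, logger) are available in the surrounding module; the input image is illustrative:

from PIL import Image, ImageDraw

# Hypothetical white text-line image with some dark strokes drawn on it.
src = Image.new('L', (600, 48), 255)
ImageDraw.Draw(src).text((10, 15), 'affine_transform example', fill=0)
distorted = distort_line(src, distort=2.0, sigma=8)
distorted.save('distorted_line.png')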