This article collects typical usage examples of the Python class sklearn.feature_extraction.image.PatchExtractor. If you are unsure what PatchExtractor does or how to use it, the curated examples below should help.
The following presents 9 code examples of the PatchExtractor class, sorted by popularity.
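Before the examples, here is a minimal self-contained sketch of the class's basic API; the random image data is made up purely for illustration:

import numpy as np
from sklearn.feature_extraction.image import PatchExtractor

# a fake collection of five 64x64 RGB images, for illustration only
images = np.random.RandomState(0).rand(5, 64, 64, 3)

extr = PatchExtractor(patch_size=(8, 8), max_patches=10, random_state=0)
patches = extr.transform(images)
print(patches.shape)  # (50, 8, 8, 3): 10 random patches from each of 5 images

Note that several of the test examples below reference fixtures and helpers from scikit-learn's own test module (face_collection, orange_face, _make_images, lena_collection, plus assert_true/assert_equal from sklearn.utils.testing), so they only run inside that test suite.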
Example 1: test_patch_extractor_color
def test_patch_extractor_color():
    faces = _make_images(orange_face)
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8
    # with max_patches unset, every possible patch position is extracted
    expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
    extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
    patches = extr.transform(faces)
    assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
Example 2: test_patch_extractor_all_patches
def test_patch_extractor_all_patches():
    faces = face_collection
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8
    expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
    extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
    patches = extr.transform(faces)
    assert_true(patches.shape == (expected_n_patches, p_h, p_w))
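Examples 1 and 2 both compute the exhaustive patch count: a p_h x p_w window can place its top-left corner at (i_h - p_h + 1) * (i_w - p_w + 1) positions inside an i_h x i_w image, so with max_patches unset that is exactly how many patches come back per image. For instance, 8x8 patches in a 100x100 image give (100 - 8 + 1)**2 = 93 * 93 = 8649 patches per image.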
Example 3: test_patch_extractor_max_patches
def test_patch_extractor_max_patches():
    faces = face_collection
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8

    # an integer max_patches caps the number of patches per image
    max_patches = 100
    expected_n_patches = len(faces) * max_patches
    extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
                          random_state=0)
    patches = extr.transform(faces)
    assert patches.shape == (expected_n_patches, p_h, p_w)

    # a float max_patches is a fraction of all possible patches per image
    max_patches = 0.5
    expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
                                          * max_patches)
    extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
                          random_state=0)
    patches = extr.transform(faces)
    assert patches.shape == (expected_n_patches, p_h, p_w)
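As Example 3 shows, an integer max_patches is an absolute per-image cap, while a float in (0, 1) is interpreted as a fraction of the total number of patches the image could yield; that is why the second half of the test multiplies the exhaustive count by 0.5.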
Example 4: generate_data
# requires numpy as np, os, and scipy.misc's imread/imsave (removed in SciPy >= 1.2);
# get_img_filepaths, resize and vec2img are project-specific helpers not shown here
def generate_data(img_folder, max_patches=0.001):
    for fpath in get_img_filepaths(img_folder):
        print('Reading image', fpath)
        patch_extractor = PatchExtractor(patch_size=(32, 32),
                                         max_patches=max_patches)
        img_tensor = imread(fpath, mode='RGB')
        # shape: (row, col, channels)
        input_matrix = np.array([img_tensor])
        # shape: (1, row, col, channels)
        input_matrix = input_matrix / 255.0  # scale to [0, 1]; DNNs train faster on normalized inputs
        patches = patch_extractor.transform(input_matrix)
        # shape: (n_samples, row, col, channels)
        patches = np.rollaxis(patches, axis=3, start=1)
        # shape: (n_samples, channels, row, col)
        small_patches = np.array([resize(patch) for patch in patches])
        # shape: (n_samples, channels, max_x, max_y)
        patches = np.array([p.reshape(p.shape[0] * p.shape[1] * p.shape[2])
                            for p in patches])
        # shape: (n_samples, output_vector_size)
        if False:  # flip to True to dump the patches as images for debugging
            print("Shapes of tensors", small_patches.shape, patches.shape)
            for i, (small, big) in enumerate(zip(small_patches, patches)):
                small_img = np.rollaxis(small, axis=0, start=3)
                if not os.path.exists('debug'):
                    os.makedirs('debug')
                imsave('debug/small_patch_{}.jpg'.format(i), small_img)
                imsave('debug/big_patch_{}.jpg'.format(i), vec2img(big))
        yield small_patches, patches
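A sketch of how this generator might be consumed to assemble training matrices; generate_data depends on the project-specific helpers noted above, and the folder name here is hypothetical:

import numpy as np

small_batches, big_batches = [], []
for small, big in generate_data('training_images/'):  # hypothetical folder
    small_batches.append(small)
    big_batches.append(big)

# stack the per-image batches into single design matrices
X_small = np.concatenate(small_batches)
X_big = np.concatenate(big_batches)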
Example 5: convolutional_zca
import numpy as np
import theano.tensor as T
from theano import config, function, shared
from theano.tensor.nnet import conv2d
from sklearn.decomposition import PCA
from sklearn.feature_extraction.image import PatchExtractor

floatX = config.floatX


def convolutional_zca(input, patch_size=(9, 9), max_patches=int(1e5)):
    """Convolutional ZCA whitening, as presented by David Eigen in his PhD
    thesis "Predicting Images using Convolutional Networks: Visual Scene
    Understanding with Pixel Maps"
    (http://www.cs.nyu.edu/~deigen/deigen-thesis.pdf).

    From paragraph 8.4: a simple adaptation of ZCA to convolutional
    application is to find the ZCA whitening transformation for a sample of
    local image patches across the dataset, and then apply this transform to
    every patch in a larger image. The center pixel of each ZCA patch is then
    used to create the conv-ZCA output image. Applying local ZCA and
    selecting the center pixel can be combined into a single convolution
    kernel, giving the following algorithm (for RGB inputs and a 9x9 kernel):

    1. Sample 10M random 9x9 image patches (each with 3 colors).
    2. Perform PCA on these to get eigenvectors V and eigenvalues D.
    3. Optionally remove small eigenvalues, so V has shape [npca x 3 x 9 x 9].
    4. Construct the whitening kernel k: for each pair of colors (ci, cj), set
       k[j, i, :, :] = V[:, j, x0, y0]^T * D^{-1/2} * V[:, i, :, :],
       where (x0, y0) is the center pixel location (e.g. (5, 5) for a 9x9
       kernel).

    :param input: 4D tensor of shape [batch_size, rows, cols, channels]
    :param patch_size: size of the patches extracted from the dataset
    :param max_patches: max number of patches extracted from the dataset
    :return: conv-ZCA whitened dataset
    """
    # I don't know if it's correct or not... but it seems to work
    mean = np.mean(input, axis=(0, 1, 2))
    input -= mean  # center the data
    n_imgs, h, w, n_channels = input.shape
    # accept either an int or an explicit (height, width) tuple
    if isinstance(patch_size, int):
        patch_size = (patch_size, patch_size)
    patches = PatchExtractor(patch_size=patch_size,
                             max_patches=max_patches).transform(input)
    pca = PCA()
    pca.fit(patches.reshape(patches.shape[0], -1))

    # transpose the components into Theano convolution filter layout
    dim = (-1,) + patch_size + (n_channels,)
    V = shared(pca.components_.reshape(dim).
               transpose(0, 3, 1, 2).astype(input.dtype))
    D = T.nlinalg.diag(1. / np.sqrt(pca.explained_variance_))
    x_0 = int(np.floor(patch_size[0] / 2))
    y_0 = int(np.floor(patch_size[1] / 2))

    filter_shape = [n_channels, n_channels, patch_size[0], patch_size[1]]
    image_shape = [n_imgs, n_channels, h, w]
    kernel = T.zeros(filter_shape)
    VT = V.dimshuffle(2, 3, 1, 0)
    # V  : 243 x 3 x 9 x 9
    # VT : 9 x 9 x 3 x 243

    # build the whitening kernel, one (output, input) channel pair at a time
    for i in range(n_channels):
        for j in range(n_channels):
            a = T.dot(VT[x_0, y_0, j, :], D).reshape([1, -1])
            b = V[:, i, :, :].reshape([-1, patch_size[0] * patch_size[1]])
            c = T.dot(a, b).reshape([patch_size[0], patch_size[1]])
            kernel = T.set_subtensor(kernel[j, i, :, :], c)

    kernel = kernel.astype(floatX)
    input = input.astype(floatX)
    input_images = T.tensor4(dtype=floatX)
    conv_whitening = conv2d(input_images.dimshuffle((0, 3, 1, 2)),
                            kernel,
                            input_shape=image_shape,
                            filter_shape=filter_shape,
                            border_mode='full')
    # crop the 'full' convolution output back to the original spatial size
    s_crop = [(patch_size[0] - 1) // 2,
              (patch_size[1] - 1) // 2]
    conv_whitening = conv_whitening[:, :, s_crop[0]:-s_crop[0],
                                    s_crop[1]:-s_crop[1]]
    conv_whitening = conv_whitening.dimshuffle(0, 2, 3, 1)
    f_convZCA = function([input_images], conv_whitening)
    return f_convZCA(input)
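A minimal usage sketch, assuming Theano is installed along with the imports added above the function; the dataset is random data standing in for real images:

import numpy as np

# 20 fake 32x32 RGB images in [0, 1], for illustration only
dataset = np.random.RandomState(0).rand(20, 32, 32, 3).astype('float32')
whitened = convolutional_zca(dataset, patch_size=(5, 5), max_patches=1000)
print(whitened.shape)  # (20, 32, 32, 3): same shape, locally decorrelated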
Example 6: test_patch_extractor_max_patches_default
def test_patch_extractor_max_patches_default():
    faces = face_collection
    extr = PatchExtractor(max_patches=100, random_state=0)
    patches = extr.transform(faces)
    assert_equal(patches.shape, (len(faces) * 100, 19, 25))
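The patch shape (19, 25) comes from PatchExtractor's default: when patch_size is not given, scikit-learn sets it to one tenth of the image height and width, which suggests the test images here are roughly 190x250 pixels.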
Example 7: test_patch_extractor_fit
def test_patch_extractor_fit():
    faces = face_collection
    extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
    assert_true(extr == extr.fit(faces))
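This test relies on PatchExtractor being stateless: its fit method learns nothing from the data and simply returns the estimator unchanged, so extr.fit(faces) is the same object as extr.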
Example 8: extract_patches
def extract_patches(self, patch_size, max_patches=None, random_state=None):
    # split the total patch budget evenly across the images in the
    # collection; a budget of None (take all patches) passes through as-is
    per_image = (None if max_patches is None
                 else int(max_patches / self.num_images()))
    patch_extractor = PatchExtractor(patch_size=patch_size,
                                     max_patches=per_image,
                                     random_state=random_state)
    return patch_extractor.transform(self._images).astype(np.uint8)
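Example 8 is a method on an image-collection class; the sketch below supplies a minimal hypothetical context (the ImageCollection name and its _images/num_images members are assumptions inferred from the method body):

import numpy as np
from sklearn.feature_extraction.image import PatchExtractor

class ImageCollection:
    """Hypothetical container, inferred from the method's use of self."""

    def __init__(self, images):
        self._images = images  # (n_images, h, w[, channels]) uint8 array

    def num_images(self):
        return len(self._images)

    def extract_patches(self, patch_size, max_patches=None, random_state=None):
        # split the total patch budget evenly across the images
        per_image = (None if max_patches is None
                     else int(max_patches / self.num_images()))
        extractor = PatchExtractor(patch_size=patch_size,
                                   max_patches=per_image,
                                   random_state=random_state)
        return extractor.transform(self._images).astype(np.uint8)

images = np.random.RandomState(0).randint(0, 256, size=(10, 64, 64),
                                          dtype=np.uint8)
collection = ImageCollection(images)
patches = collection.extract_patches((8, 8), max_patches=1000, random_state=0)
print(patches.shape)  # (1000, 8, 8): 100 patches from each of the 10 images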
Example 9: test_patch_extractor_max_patches_default
# an older variant of Example 6, from a scikit-learn version whose test
# fixture was the lena image collection
def test_patch_extractor_max_patches_default():
    lenas = lena_collection
    extr = PatchExtractor(max_patches=100, random_state=0)
    patches = extr.transform(lenas)
    assert_equal(patches.shape, (len(lenas) * 100, 12, 12))