

Python image.extract_patches_2d Method Code Examples

This article collects typical usage examples of the Python method sklearn.feature_extraction.image.extract_patches_2d. If you are unsure what image.extract_patches_2d does, how to call it, or what working code that uses it looks like, the hand-picked examples below should help. You can also explore further usage examples from its containing module, sklearn.feature_extraction.image.


The following presents 7 code examples of the image.extract_patches_2d method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
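
Before the project-level examples, here is a minimal self-contained sketch of the basic call. The array and sizes below are illustrative only and are not taken from any of the projects shown later:

import numpy as np
from sklearn.feature_extraction.image import extract_patches_2d

img = np.arange(40 * 30, dtype=np.float32).reshape(40, 30)   # toy 40x30 "image"

# All overlapping 8x8 windows: (40 - 8 + 1) * (30 - 8 + 1) = 33 * 23 = 759 patches
patches = extract_patches_2d(img, (8, 8))
print(patches.shape)    # (759, 8, 8)

# With max_patches and random_state, a random subset of patches is drawn instead
crops = extract_patches_2d(img, (8, 8), max_patches=5, random_state=0)
print(crops.shape)      # (5, 8, 8)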

Example 1: make_patch_grid

# Required import: from sklearn.feature_extraction import image [as alias]
# Or: from sklearn.feature_extraction.image import extract_patches_2d [as alias]
def make_patch_grid(x, patch_size, patch_stride=1):
    '''x shape: (num_channels, rows, cols)'''
    x = x.transpose(2, 1, 0)  # to (cols, rows, channels) for extract_patches_2d
    patches = extract_patches_2d(x, (patch_size, patch_size))
    x_w, x_h, x_c = x.shape
    num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride)
    patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c))
    patches = patches.transpose((0, 1, 4, 2, 3))  # (rows, cols, channels, patch, patch)
    #patches = np.rollaxis(patches, -1, 2)
    return patches
Developer: awentzonline, Project: image-analogies, Lines of code: 12, Source file: patch_matcher.py
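
The helper _calc_patch_grid_dims is not shown in this snippet. A plausible reconstruction, assuming a simple stride-based grid (this is an illustration, not the project's actual code), would be:

def _calc_patch_grid_dims(shape, patch_size, patch_stride):
    # shape is the transposed image shape (x_w, x_h, x_c); with patch_stride=1
    # this matches the (x_w - patch_size + 1) x (x_h - patch_size + 1) layout
    # of the patches returned by extract_patches_2d.
    x_w, x_h, x_c = shape
    num_rows = 1 + (x_w - patch_size) // patch_stride
    num_cols = 1 + (x_h - patch_size) // patch_stride
    return num_rows, num_cols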

Example 2: preprocess

# Required import: from sklearn.feature_extraction import image [as alias]
# Or: from sklearn.feature_extraction.image import extract_patches_2d [as alias]
def preprocess(self, image):
    # extract a random crop from the image with the target width and height
    return extract_patches_2d(image, (self.width, self.height), max_patches=1)[0] 
Developer: mogoweb, Project: aiexamples, Lines of code: 5, Source file: patch_preprocessor.py
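
A standalone call equivalent to this random crop, on a dummy array with illustrative sizes, looks like the following. Note that extract_patches_2d expects patch_size as (height, width), so the argument order only matters when the target crop is not square:

import numpy as np
from sklearn.feature_extraction.image import extract_patches_2d

image = np.random.rand(256, 256, 3)                     # dummy RGB image
crop = extract_patches_2d(image, (227, 227), max_patches=1)[0]
print(crop.shape)                                       # (227, 227, 3)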

Example 3: iterate_cifar

# Required import: from sklearn.feature_extraction import image [as alias]
# Or: from sklearn.feature_extraction.image import extract_patches_2d [as alias]
def iterate_cifar(shapeInput, batch_size, shuffle=False, train=True):
    # Iterator over random patches of the CIFAR-10 data set.
    files = []
    if train:
        for j in range(1, 6):
            files.append('data_batch_' + str(j))
    else:
        files.append('test_batch')
    data_idxs = np.random.permutation(len(files))
    data = []
    labels = []
    for j in range(len(files)):
        data_idx = j
        if shuffle:
            data_idx = data_idxs[j]
        file = files[data_idx]
        batch = unpickle('C:\\Paul\\cifar-10-batches-py\\' + file)
        ls = batch['labels']
        idxs = np.random.permutation(len(batch['data']))
        for i in range(len(batch['data'])):
            if shuffle:
                idx = idxs[i]
            else:
                idx = i
            # Each CIFAR-10 row holds 3072 bytes: one 32x32 plane per channel (R, G, B).
            stackedArray = np.dstack((batch['data'][idx][0:1024].reshape(32, 32),
                                      batch['data'][idx][1024:1024 * 2].reshape(32, 32),
                                      batch['data'][idx][1024 * 2:1024 * 3].reshape(32, 32)))
            # Draw one random patch of the requested input shape from the image.
            patches = image.extract_patches_2d(stackedArray, (shapeInput[0], shapeInput[1]), max_patches=1)
            patches = patches.astype(np.float32) / 256.0
            data.append(patches)
            labels.append(ls[idx])
            if len(data) >= batch_size:
                array = np.asarray(data).reshape(-1, shapeInput[0] * shapeInput[1] * 3)
                data = []
                labels = []
                yield array
Developer: paulbertens, Project: rank-ordered-autoencoder, Lines of code: 41, Source file: DataLoader.py
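
The unpickle helper is not part of this snippet. The CIFAR-10 batch files are pickled dictionaries, and a typical loader, assuming the standard CIFAR-10 layout and decoding keys as strings so that the 'data' and 'labels' lookups above work, is:

import pickle

def unpickle(file_path):
    # Each CIFAR-10 batch is a pickled dict with 'data' (uint8 array of shape
    # N x 3072) and 'labels' (list of N ints); encoding='latin1' keeps the
    # keys as plain strings under Python 3.
    with open(file_path, 'rb') as fo:
        return pickle.load(fo, encoding='latin1')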

Example 4: predict_image

# Required import: from sklearn.feature_extraction import image [as alias]
# Or: from sklearn.feature_extraction.image import extract_patches_2d [as alias]
def predict_image(self, test_img, show=False):
        '''
        Predicts classes of the input image.
        INPUT   (1) str 'test_img': filepath to the image to predict on
                (2) bool 'show': True to display the results of the prediction, False to return the prediction
        OUTPUT  (1) if show == False: array of predicted pixel classes for the center 208 x 208 pixels
                (2) if show == True: displays the segmentation results
        '''
        imgs = io.imread(test_img).astype('float').reshape(5, 240, 240)
        plist = []

        # create patches from an entire slice (all but the last channel)
        for img in imgs[:-1]:
            if np.max(img) != 0:
                img /= np.max(img)
            p = extract_patches_2d(img, (33, 33))
            plist.append(p)
        # stack the four modalities into 4-channel patches
        patches = np.array(list(zip(np.array(plist[0]), np.array(plist[1]), np.array(plist[2]), np.array(plist[3]))))

        # predict classes of each pixel based on the model
        full_pred = self.model_comp.predict_classes(patches)
        fp1 = full_pred.reshape(208, 208)
        if show:
            io.imshow(fp1)
            plt.show()
        else:
            return fp1
Developer: naldeborgh7575, Project: brain_segmentation, Lines of code: 29, Source file: Segmentation_Models.py
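
The 208 x 208 output size follows from the patch geometry: a 240 x 240 slice yields (240 - 33 + 1)^2 = 208 * 208 = 43264 overlapping 33 x 33 patches, one per predicted pixel. A quick shape check, independent of any model:

import numpy as np
from sklearn.feature_extraction.image import extract_patches_2d

slice_2d = np.random.rand(240, 240)
patches = extract_patches_2d(slice_2d, (33, 33))
print(patches.shape)              # (43264, 33, 33); 43264 == 208 * 208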

Example 5: slice_to_patches

# Required import: from sklearn.feature_extraction import image [as alias]
# Or: from sklearn.feature_extraction.image import extract_patches_2d [as alias]
def slice_to_patches(self, filename):
        '''
        Converts an image to a list of patches with a stride length of 1. Use as input for image prediction.
        INPUT: str 'filename': path to the image to be converted to patches
        OUTPUT: array of patched versions of the input image.
        '''
        slices = io.imread(filename).astype('float').reshape(5, 240, 240)[:-1]
        plist = []
        for img in slices:
            if np.max(img) != 0:
                img /= np.max(img)
            # h and w (the patch height and width) are assumed to be defined
            # elsewhere in the class, e.g. as self.h and self.w.
            p = extract_patches_2d(img, (h, w))
            plist.append(p)
        return np.array(list(zip(np.array(plist[0]), np.array(plist[1]), np.array(plist[2]), np.array(plist[3]))))
Developer: naldeborgh7575, Project: brain_segmentation, Lines of code: 16, Source file: patch_library.py

Example 6: denoise

# Required import: from sklearn.feature_extraction import image [as alias]
# Or: from sklearn.feature_extraction.image import extract_patches_2d [as alias]
def denoise(self, image, sigma=3, multiplier=10, n_iter=15, patch_size=8, noise_gain=1.15):
        # promote values to super
        self.noise_gain = noise_gain
        self.sigma = sigma

        # error handling
        if image.shape[0] != image.shape[1]:
            raise ValueError("Image must be square!")

        # set initial values
        self.image = image
        self.multiplier = multiplier
        self.n_iter = n_iter
        self.patch_size = patch_size

        # compute further values
        self.image_size = image.shape[0]

        # prepare K-SVD
        patches = extract_patches_2d(self.image, (self.patch_size, self.patch_size))
        Y = np.array([p.reshape(self.patch_size**2) for p in patches]).T

        # iterate K-SVD
        for itr in range(self.n_iter):
            self.sparse_coding(Y)
            self.dictionary_update(Y)

        # reconstruct image
        # this was translated from the Matlab code in Michael Elads book
        # cf. Elad, M. (2010). Sparse and redundant representations:
        # from theory to applications in signal and image processing. New York: Springer.
        out = np.zeros(image.shape)
        weight = np.zeros(image.shape)
        logging.info("reconstructing")
        i = j = 0
        for k in range((self.image_size - self.patch_size + 1) ** 2):
            patch = np.reshape(np.matmul(self.dictionary.matrix, self.alphas[:, k]), (self.patch_size, self.patch_size))
            out[j:j + self.patch_size, i:i + self.patch_size] += patch
            weight[j:j + self.patch_size, i:i + self.patch_size] += 1
            if i < self.image_size - self.patch_size:
                i += 1
            else:
                i = 0
                j += 1
        out = np.divide(out + self.multiplier * self.image, weight + self.multiplier)
        return out, self.dictionary, self.alphas 
Developer: fubel, Project: sparselandtools, Lines of code: 49, Source file: denoising.py
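
The accumulation loop above averages the overlapping patch estimates before blending in the noisy image via multiplier. The plain overlap-averaging step has a scikit-learn counterpart, reconstruct_from_patches_2d; a sketch of that part alone, with random stand-in patches and without the multiplier blending (which would still need to be applied afterwards):

import numpy as np
from sklearn.feature_extraction.image import reconstruct_from_patches_2d

image_size, patch_size = 64, 8                       # illustrative sizes
n_patches = (image_size - patch_size + 1) ** 2
denoised_patches = np.random.rand(n_patches, patch_size, patch_size)

averaged = reconstruct_from_patches_2d(denoised_patches, (image_size, image_size))
print(averaged.shape)                                # (64, 64)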

Example 7: dir2tfrecords_cs

# Required import: from sklearn.feature_extraction import image [as alias]
# Or: from sklearn.feature_extraction.image import extract_patches_2d [as alias]
def dir2tfrecords_cs (data_dir, out_path, Phi, patch_size, patches_per_image, suffix):
    Phi = Phi.astype (np.float32)
    if isinstance (patch_size, int):
        patch_size = (patch_size, patch_size)

    if not out_path.endswith(".tfrecords"):
        out_path += ".tfrecords"
    writer = tf.python_io.TFRecordWriter (out_path)
    for fn in tqdm (glob.glob (os.path.join (data_dir, "*." + suffix))) :
        """Read images (and convert to grayscale)."""
        im = Image.open (fn)
        if im.mode == 'RGB':
            im = im.convert ('L')
        im = np.asarray (im)

        """Extract patches."""
        patches = extract_patches_2d (im, patch_size)
        perm = np.random.permutation (len (patches))
        patches = patches [perm [:patches_per_image]]

        """Vectorize patches."""
        fs = patches.reshape (len (patches), -1)

        """Demean and normalize."""
        fs = fs -  np.mean (fs, axis=1, keepdims=True)
        fs = (fs / 255.0).astype (np.float32)

        """Measure the signal using sensing matrix `Phi`."""
        ys = np.transpose (Phi.dot (np.transpose (fs)))

        """Write singals and measurements to tfrecords file."""
        for y, f in zip (ys, fs):
            yraw = y.tostring ()
            fraw = f.tostring ()
            example = tf.train.Example (features=tf.train.Features (
                feature={
                    'y': _bytes_feature (yraw),
                    'f': _bytes_feature (fraw)
                }
            ))

            writer.write (example.SerializeToString ())

    writer.close () 
Developer: TAMU-VITA, Project: ALISTA, Lines of code: 46, Source file: data.py
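
The _bytes_feature helper is referenced but not defined in this snippet. The conventional TensorFlow 1.x definition matching the tf.train.Example usage above (an assumption, since the project's own helper is not shown) is:

import tensorflow as tf

def _bytes_feature(value):
    # Wrap a raw byte string in a tf.train.Feature so it can be serialized
    # into a tf.train.Example.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))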


Note: The sklearn.feature_extraction.image.extract_patches_2d examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.