

Python skimage.transform Method Code Examples

This article collects typical code examples of the skimage.transform method in Python. If you are wondering what skimage.transform does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples from the skimage package.


The following presents 15 code examples of skimage.transform, sorted by popularity by default.

Example 1: resize

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,
           preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):
    """A wrapper for Scikit-Image resize().
    Scikit-Image generates warnings on every call to resize() if it doesn't
    receive the right parameters. The right parameters depend on the version
    of skimage. This solves the problem by using different parameters per
    version. And it provides a central place to control resizing defaults.
    """
    if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
        # New in 0.14: anti_aliasing. Default it to False for backward
        # compatibility with skimage 0.13.
        return skimage.transform.resize(
            image, output_shape,
            order=order, mode=mode, cval=cval, clip=clip,
            preserve_range=preserve_range, anti_aliasing=anti_aliasing,
            anti_aliasing_sigma=anti_aliasing_sigma)
    else:
        return skimage.transform.resize(
            image, output_shape,
            order=order, mode=mode, cval=cval, clip=clip,
            preserve_range=preserve_range) 
Developer: dataiku, Project: dataiku-contrib, Lines: 23, Source: utils.py
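For orientation, a minimal usage sketch of the wrapper above might look like the following; the dummy array and target shape are illustrative assumptions, not part of the original project.

import numpy as np

# A made-up 480x640 RGB image with values in [0, 1]
image = np.random.rand(480, 640, 3)

# Resize to 256x256; anti_aliasing is only forwarded when skimage >= 0.14
small = resize(image, (256, 256), preserve_range=True, anti_aliasing=True)
print(small.shape)  # (256, 256, 3)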

Example 2: set_weights

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def set_weights(net,model_file):
    '''
    Sets the parameters of the model using the weights stored in model_file
    Parameters
    ----------
    net: a Lasagne layer

    model_file: string
        path to the model file that contains the weights

    Returns
    -------
    None

    '''
    with open(model_file) as f:
        print('Load pretrained weights from %s...' % model_file)
        model = pickle.load(f)
    print('Set the weights...')
    lasagne.layers.set_all_param_values(net, model,trainable=True)


######## Below, there are several helper functions to transform (lists of) images into the right format  ###### 
Developer: Lasagne, Project: Recipes, Lines: 25, Source: c3d.py

Example 3: box_refinement_graph

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def box_refinement_graph(box, gt_box):
    """Compute refinement needed to transform box to gt_box.
    box and gt_box are [N, (y1, x1, y2, x2)]
    """
    box = tf.cast(box, tf.float32)
    gt_box = tf.cast(gt_box, tf.float32)

    height = box[:, 2] - box[:, 0]
    width = box[:, 3] - box[:, 1]
    center_y = box[:, 0] + 0.5 * height
    center_x = box[:, 1] + 0.5 * width

    gt_height = gt_box[:, 2] - gt_box[:, 0]
    gt_width = gt_box[:, 3] - gt_box[:, 1]
    gt_center_y = gt_box[:, 0] + 0.5 * gt_height
    gt_center_x = gt_box[:, 1] + 0.5 * gt_width

    dy = (gt_center_y - center_y) / height
    dx = (gt_center_x - center_x) / width
    dh = tf.log(gt_height / height)
    dw = tf.log(gt_width / width)

    result = tf.stack([dy, dx, dh, dw], axis=1)
    return result 
Developer: dataiku, Project: dataiku-contrib, Lines: 26, Source: utils.py

Example 4: rescale

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def rescale(img, input_height, input_width):
    # print("Original image shape:" + str(img.shape) + " --> it should be in H, W, C!")
    # print("Model's input shape is %dx%d") % (input_height, input_width)
    aspect = img.shape[1] / float(img.shape[0])
    # print("Orginal aspect ratio: " + str(aspect))
    if(aspect > 1):
        # landscape orientation - wide image
        res = int(aspect * input_height)
        imgScaled = skimage.transform.resize(
            img,
            (input_width, res),
            preserve_range=False)
    if(aspect < 1):
        # portrait orientation - tall image
        res = int(input_width / aspect)
        imgScaled = skimage.transform.resize(
            img,
            (res, input_height),
            preserve_range=False)
    if(aspect == 1):
        imgScaled = skimage.transform.resize(
            img,
            (input_width, input_height),
            preserve_range=False)
    return imgScaled 
Developer: facebookarchive, Project: tutorials, Lines: 27, Source: helpers.py
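As a hedged usage sketch, the helper above could be called like this; the array shape and the 227x227 model input are made-up values, not from the original tutorial.

import numpy as np

# A made-up landscape image: height 300, width 600, 3 channels
img = np.random.rand(300, 600, 3)

# Rescale so the image fits a hypothetical 227x227 model input
scaled = rescale(img, 227, 227)
print(scaled.shape)  # (227, 454, 3) for this wide input; the aspect ratio is preserved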

Example 5: __call__

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def __call__(self, sample):
        image, annots = sample['img'], sample['annot']

        rows, cols, cns = image.shape

        largest_side = max(rows, cols)

        scale = self.img_size / largest_side

        # resize the image with the computed scale
        image = skimage.transform.resize(image, (int(round(rows*scale)), int(round((cols*scale)))))
        rows, cols, cns = image.shape

        new_image = np.zeros((self.img_size, self.img_size, cns)).astype(np.float32)
        new_image[:rows, :cols, :] = image.astype(np.float32)

        annots[:, :4] *= scale

        return {'img': torch.from_numpy(new_image), 'annot': torch.from_numpy(annots), 'scale': scale} 
Developer: tristandb, Project: EfficientDet-PyTorch, Lines: 21, Source: dataloader.py

Example 6: resize

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,
           preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):
    """A wrapper for Scikit-Image resize().

    Scikit-Image generates warnings on every call to resize() if it doesn't
    receive the right parameters. The right parameters depend on the version
    of skimage. This solves the problem by using different parameters per
    version. And it provides a central place to control resizing defaults.
    """
    if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
        # New in 0.14: anti_aliasing. Default it to False for backward
        # compatibility with skimage 0.13.
        return skimage.transform.resize(
            image, output_shape,
            order=order, mode=mode, cval=cval, clip=clip,
            preserve_range=preserve_range, anti_aliasing=anti_aliasing,
            anti_aliasing_sigma=anti_aliasing_sigma)
    else:
        return skimage.transform.resize(
            image, output_shape,
            order=order, mode=mode, cval=cval, clip=clip,
            preserve_range=preserve_range) 
Developer: dmechea, Project: PanopticSegmentation, Lines: 24, Source: utils.py

Example 7: load_image_array

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def load_image_array(image_file, image_size):
	img = skimage.io.imread(image_file)
	# GRAYSCALE
	if len(img.shape) == 2:
		img_new = np.ndarray( (img.shape[0], img.shape[1], 3), dtype = 'uint8')
		img_new[:,:,0] = img
		img_new[:,:,1] = img
		img_new[:,:,2] = img
		img = img_new

	img_resized = skimage.transform.resize(img, (image_size, image_size))

	# FLIP HORIZONTAL WITH A PROBABILITY 0.5
	if random.random() > 0.5:
		img_resized = np.fliplr(img_resized)
	
	
	return img_resized.astype('float32') 
Developer: paarthneekhara, Project: text-to-image, Lines: 20, Source: image_processing.py
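A hypothetical call to the function above; 'photo.jpg' is a placeholder path, not a file from the original project.

img = load_image_array('photo.jpg', image_size=224)
print(img.shape, img.dtype)  # (224, 224, 3) float32, possibly flipped left-right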

Example 8: __init__

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def __init__(self, root_dir, set_name='train2017', transform=None):
        """
        Args:
            root_dir (string): COCO directory.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.root_dir = root_dir
        self.set_name = set_name
        self.transform = transform

        self.coco = COCO(os.path.join(self.root_dir, 'annotations',
                                      'instances_' + self.set_name + '.json'))
        self.image_ids = self.coco.getImgIds()

        self.load_classes() 
Developer: toandaominh1997, Project: EfficientDet.Pytorch, Lines: 18, Source: coco.py

Example 9: _convert_grid_indices_crs

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def _convert_grid_indices_crs(self, grid_indices, old_crs, new_crs):
        if _OLD_PYPROJ:
            x2, y2 = pyproj.transform(old_crs, new_crs, grid_indices[:,1],
                                    grid_indices[:,0])
        else:
            x2, y2 = pyproj.transform(old_crs, new_crs, grid_indices[:,1],
                                      grid_indices[:,0], errcheck=True,
                                      always_xy=True)
        yx2 = np.column_stack([y2, x2])
        return yx2

    # def _convert_outer_indices_crs(self, affine, shape, old_crs, new_crs):
    #     y1, x1 = self.grid_indices(affine=affine, shape=shape)
    #     lx, _ = pyproj.transform(old_crs, new_crs,
    #                               x1, np.repeat(y1[0], len(x1)))
    #     rx, _ = pyproj.transform(old_crs, new_crs,
    #                               x1, np.repeat(y1[-1], len(x1)))
    #     __, by = pyproj.transform(old_crs, new_crs,
    #                               np.repeat(x1[0], len(y1)), y1)
    #     __, uy = pyproj.transform(old_crs, new_crs,
    #                               np.repeat(x1[-1], len(y1)), y1)
    #     return by, uy, lx, rx 
Developer: mdbartos, Project: pysheds, Lines: 24, Source: grid.py

Example 10: load_img

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def load_img(path):
    """Returns a numpy array of an image specified by its path.
    
    Args:
        path: string representing the file path of the image to load
        
    Returns:
        resized_img: numpy array representing the loaded RGB image
        shape: the image shape
    """

    # Load image [height, width, depth]
    img = skimage.io.imread(path) / 255.0
    assert (0 <= img).all() and (img <= 1.0).all()

    # Crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    shape = list(img.shape)

    crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
    resized_img = skimage.transform.resize(crop_img, (shape[0], shape[1]))
    return resized_img, shape 
Developer: mohamedkeid, Project: Feed-Forward-Style-Transfer, Lines: 26, Source: helpers.py
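A hedged usage sketch of load_img(); the file name is a placeholder, not part of the original repository.

resized_img, shape = load_img('content.jpg')
print(shape)              # original [height, width, channels] before cropping
print(resized_img.shape)  # square center crop resized back to (height, width, channels)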

Example 11: __init__

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def __init__(self, root_dir, transform=None, loader = pil_loader):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        if transform == None :
            transform = torchvision.transforms.Compose([torchvision.transforms.Resize(224),
                                                        torchvision.transforms.RandomHorizontalFlip(p=0.5),
                                                        torchvision.transforms.RandomVerticalFlip(p=0.5),
                                                        torchvision.transforms.ToTensor()])
        self.root_dir = root_dir
        self.transform = transform
        self.loader = loader

        self.images = os.listdir(os.path.join(self.root_dir))

        self.image_class = np.array(pd.read_csv('val_details.txt', sep='\t')[['mage','class']]).astype('str')
        self.class_dic = {}
        for i in self.image_class :
            self.class_dic[i[0]]=i[1] 
Developer: SathwikTejaswi, Project: deep-ranking, Lines: 25, Source: test_embedding.py

Example 12: minimize_mask

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def minimize_mask(bbox, mask, mini_shape):
    """Resize masks to a smaller version to reduce memory load.
    Mini-masks can be resized back to image scale using expand_masks()

    See inspect_data.ipynb notebook for more details.
    """
    mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)
    for i in range(mask.shape[-1]):
        m = mask[:, :, i]
        y1, x1, y2, x2 = bbox[i][:4]
        m = m[y1:y2, x1:x2]
        if m.size == 0:
            raise Exception("Invalid bounding box with area of zero")
        # Resize with bilinear interpolation
        m = skimage.transform.resize(m, mini_shape, order=1, mode="constant")
        mini_mask[:, :, i] = np.around(m).astype(np.bool)
    return mini_mask 
Developer: sahibdhanjal, Project: Mask-RCNN-Pedestrian-Detection, Lines: 19, Source: utils.py

Example 13: expand_mask

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def expand_mask(bbox, mini_mask, image_shape):
    """Resizes mini masks back to image size. Reverses the change
    of minimize_mask().

    See inspect_data.ipynb notebook for more details.
    """
    mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)
    for i in range(mask.shape[-1]):
        m = mini_mask[:, :, i]
        y1, x1, y2, x2 = bbox[i][:4]
        h = y2 - y1
        w = x2 - x1
        # Resize with bilinear interpolation
        m = skimage.transform.resize(m, (h, w), order=1, mode="constant")
        mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool)
    return mask


# TODO: Build and use this function to reduce code duplication 
Developer: sahibdhanjal, Project: Mask-RCNN-Pedestrian-Detection, Lines: 21, Source: utils.py
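The two helpers above form a pair, so a hedged round-trip sketch may help; the mask, box, and shapes below are made-up test data. Note that the helpers call np.bool, which NumPy 1.24 and later no longer provide.

import numpy as np

# One 128x128 boolean mask and its bounding box in [N, (y1, x1, y2, x2)] form
mask = np.zeros((128, 128, 1), dtype=bool)
mask[20:80, 30:90, 0] = True
bbox = np.array([[20, 30, 80, 90]])

mini = minimize_mask(bbox, mask, (56, 56))        # shrink to a 56x56 mini-mask
restored = expand_mask(bbox, mini, (128, 128))    # grow it back to image scale
print(mini.shape, restored.shape)                 # (56, 56, 1) (128, 128, 1)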

Example 14: rgb2caffe

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def rgb2caffe(im, out_size=(128, 171)):
    '''
    Converts an RGB image to caffe format and downscales it as needed by C3D

    Parameters
    ----------
    im : numpy array
        an RGB image
    out_size : tuple of int
        target (height, width) to downscale to; defaults to (128, 171) as expected by C3D

    Returns
    -------
    a caffe image (channel,height, width) in BGR format

    '''
    im=np.copy(im)
    if len(im.shape)==2: # Make sure the image has 3 channels
        im = color.gray2rgb(im)

    h, w, _ = im.shape
    im = skimage.transform.resize(im, out_size, preserve_range=True)
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

    # Convert to BGR
    im = im[::-1, :, :]

    return np.array(im,theano.config.floatX) 
Developer: Lasagne, Project: Recipes, Lines: 29, Source: c3d.py

Example 15: box_refinement

# Required import: import skimage [as alias]
# Or: from skimage import transform [as alias]
def box_refinement(box, gt_box):
    """Compute refinement needed to transform box to gt_box.
    box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is
    assumed to be outside the box.
    """
    box = box.astype(np.float32)
    gt_box = gt_box.astype(np.float32)

    height = box[:, 2] - box[:, 0]
    width = box[:, 3] - box[:, 1]
    center_y = box[:, 0] + 0.5 * height
    center_x = box[:, 1] + 0.5 * width

    gt_height = gt_box[:, 2] - gt_box[:, 0]
    gt_width = gt_box[:, 3] - gt_box[:, 1]
    gt_center_y = gt_box[:, 0] + 0.5 * gt_height
    gt_center_x = gt_box[:, 1] + 0.5 * gt_width

    dy = (gt_center_y - center_y) / height
    dx = (gt_center_x - center_x) / width
    dh = np.log(gt_height / height)
    dw = np.log(gt_width / width)

    return np.stack([dy, dx, dh, dw], axis=1)


############################################################
#  Dataset
############################################################ 
Developer: dataiku, Project: dataiku-contrib, Lines: 31, Source: utils.py
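To make the formula concrete, here is a small worked example with a single made-up box pair (the numbers are illustrative, not from the original project).

import numpy as np

box = np.array([[10, 10, 50, 30]])    # height 40, width 20, center (30, 20)
gt_box = np.array([[12, 8, 52, 32]])  # height 40, width 24, center (32, 20)

deltas = box_refinement(box, gt_box)
# dy = (32 - 30) / 40 = 0.05
# dx = (20 - 20) / 20 = 0.0
# dh = log(40 / 40) = 0.0
# dw = log(24 / 20) ≈ 0.182
print(deltas)  # approximately [[0.05, 0.0, 0.0, 0.182]]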


Note: The skimage.transform examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project; do not reproduce without permission.