

Python io.imshow Method Code Examples

This article collects typical usage examples of the io.imshow method from Python's skimage.io module. If you are wondering what io.imshow does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples for the enclosing module, skimage.io.


The sections below present 11 code examples of the io.imshow method, sorted by popularity by default.
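Before the examples, here is a minimal sketch of the typical call pattern, using only public skimage.io functions; the file name photo.png is a placeholder:

from skimage import io

img = io.imread('photo.png')   # load an image as a NumPy array
io.imshow(img)                 # queue the image for display
io.show()                      # block and render the pending figure(s)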

Example 1: plot_n_image

# Required import: from skimage import io [as alias]
# Or: from skimage.io import imshow [as alias]
import numpy as np
import matplotlib.pyplot as plt


def plot_n_image(X, n):
    """Plot the first n images in X; n has to be a square number."""
    pic_size = int(np.sqrt(X.shape[1]))
    grid_size = int(np.sqrt(n))

    first_n_images = X[:n, :]

    fig, ax_array = plt.subplots(nrows=grid_size, ncols=grid_size,
                                 sharey=True, sharex=True, figsize=(8, 8))

    for r in range(grid_size):
        for c in range(grid_size):
            # each row of X is a flattened pic_size x pic_size image
            ax_array[r, c].imshow(first_n_images[grid_size * r + c].reshape((pic_size, pic_size)))
            plt.xticks(np.array([]))  # hide tick marks
            plt.yticks(np.array([]))
Developer: wdxtub | Project: deep-learning-note | Lines: 19 | Source file: 8_kmeans_pca.py
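A hypothetical call, for illustration only: X is assumed to be an (m, 1024) array whose rows are flattened 32x32 grayscale images, and the data here is random placeholder content rather than the source project's dataset.

import numpy as np
import matplotlib.pyplot as plt

X = np.random.rand(100, 1024)   # placeholder data: 100 flattened 32x32 images
plot_n_image(X, n=16)           # draws a 4x4 grid of the first 16 images
plt.show()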

Example 2: experiment_with_parameters

# Required import: from skimage import io [as alias]
# Or: from skimage.io import imshow [as alias]
import matplotlib.pyplot as plt
from scipy import misc  # misc.imread was removed in SciPy 1.2; skimage.io.imread is a drop-in replacement
from skimage import segmentation, color


def experiment_with_parameters():
    img = misc.imread("wheat.png")

    compactness_values = [30, 50, 70, 100, 200, 300, 500, 700, 1000]
    n_segments_values = [3, 4, 5, 6, 7, 8, 9, 10]

    # sweep the SLIC parameters and save one overlay image per combination
    for compactness_val in compactness_values:
        for n in n_segments_values:
            labels1 = segmentation.slic(img, compactness=compactness_val, n_segments=n)
            out1 = color.label2rgb(labels1, img, kind='overlay')

            fig, ax = plt.subplots()
            ax.imshow(out1, interpolation='nearest')
            ax.set_title("Compactness: {} | Segments: {}".format(compactness_val, n))
            plt.savefig("RAG/c{}_k{}.png".format(compactness_val, n))
            plt.close(fig)
Developer: oduwa | Project: Pic-Numero | Lines: 18 | Source file: RAG_threshold.py

Example 3: main

# Required import: from skimage import io [as alias]
# Or: from skimage.io import imshow [as alias]
import matplotlib.pyplot as plt
from scipy import misc
from skimage import segmentation, color, io
from skimage.future import graph  # moved to skimage.graph in newer releases


def main():
    img = misc.imread("wheat.png")

    # labels1 = segmentation.slic(img, compactness=100, n_segments=9)
    labels1 = segmentation.slic(img, compactness=50, n_segments=4)
    out1 = color.label2rgb(labels1, img, kind='overlay')
    print(labels1.shape)

    # merge SLIC regions whose mean colors fall within the given threshold
    g = graph.rag_mean_color(img, labels1)
    labels2 = graph.cut_threshold(labels1, g, 29)
    out2 = color.label2rgb(labels2, img, kind='overlay')

    # get roi
    # logicalIndex = (labels2 != 1)
    # gray = rgb2gray(img)
    # gray[logicalIndex] = 0

    plt.figure()
    io.imshow(out1)
    plt.figure()
    io.imshow(out2)
    io.show()
Developer: oduwa | Project: Pic-Numero | Lines: 25 | Source file: RAG_threshold.py

Example 4: get_sport_clip

# Required import: from skimage import io [as alias]
# Or: from skimage.io import imshow [as alias]
from glob import glob
from os.path import join

import numpy as np
import torch
from skimage import io
from skimage.transform import resize


def get_sport_clip(clip_name, verbose=True):
    """
    Loads a clip to be fed to C3D for classification.
    TODO: should I remove mean here?

    Parameters
    ----------
    clip_name: str
        the name of the clip (subfolder in 'data').
    verbose: bool
        if True, shows the unrolled clip (default is True).

    Returns
    -------
    Tensor
        a pytorch batch (n, ch, fr, h, w).
    """

    clip = sorted(glob(join('data', clip_name, '*.png')))
    clip = np.array([resize(io.imread(frame), output_shape=(112, 200), preserve_range=True) for frame in clip])
    clip = clip[:, :, 44:44+112, :]  # crop centrally

    if verbose:
        # unroll the 16 frames side by side into a single 112 x (16*112) image
        clip_img = np.reshape(clip.transpose(1, 0, 2, 3), (112, 16 * 112, 3))
        io.imshow(clip_img.astype(np.uint8))
        io.show()

    clip = clip.transpose(3, 0, 1, 2)  # ch, fr, h, w
    clip = np.expand_dims(clip, axis=0)  # batch axis
    clip = np.float32(clip)

    return torch.from_numpy(clip)
Developer: DavideA | Project: c3d-pytorch | Lines: 34 | Source file: predict.py

Example 5: imshow

# Required import: from skimage import io [as alias]
# Or: from skimage.io import imshow [as alias]
# iio is skimage.io; dtype.im2uint is a helper from the same project, not part of skimage
def imshow(image):
    """Show a [-1.0, 1.0] image."""
    iio.imshow(dtype.im2uint(image))
Developer: LynnHo | Project: DCGAN-LSGAN-WGAN-GP-DRAGAN-Tensorflow-2 | Lines: 5 | Source file: basic.py

Example 6: imshow

# Required import: from skimage import io [as alias]
# Or: from skimage.io import imshow [as alias]
# iio is skimage.io; im2float is a helper from the same project, not part of skimage
def imshow(image):
    """Show a [-1.0, 1.0] image."""
    iio.imshow(im2float(image))
Developer: LynnHo | Project: VAE-Tensorflow | Lines: 5 | Source file: basic.py
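The dtype.im2uint and im2float helpers used in Examples 5 and 6 come from those two repositories, not from skimage. A minimal sketch of what such conversions presumably do, namely mapping a [-1.0, 1.0] float image to uint8 or to a [0.0, 1.0] float image, might look like this (illustrative assumption, not the authors' exact code):

import numpy as np

def im2uint(image):
    """Map a [-1.0, 1.0] float image to uint8 in [0, 255]."""
    return ((np.clip(image, -1.0, 1.0) + 1.0) * 127.5).astype(np.uint8)

def im2float(image):
    """Map a [-1.0, 1.0] float image to a float image in [0.0, 1.0]."""
    return (np.clip(image, -1.0, 1.0) + 1.0) / 2.0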

Example 7: spectral_cluster

# Required import: from skimage import io [as alias]
# Or: from skimage.io import imshow [as alias]
import matplotlib.pyplot as plt
from scipy import misc
from skimage import segmentation, color


def spectral_cluster(filename, compactness_val=30, n=6):
    img = misc.imread(filename)
    labels1 = segmentation.slic(img, compactness=compactness_val, n_segments=n)
    out1 = color.label2rgb(labels1, img, kind='overlay',
                           colors=['red', 'green', 'blue', 'cyan', 'magenta', 'yellow'])

    fig, ax = plt.subplots()
    ax.imshow(out1, interpolation='nearest')
    ax.set_title("Compactness: {} | Segments: {}".format(compactness_val, n))
    plt.show()
Developer: oduwa | Project: Pic-Numero | Lines: 11 | Source file: RAG_threshold.py

Example 8: predict_image

# Required import: from skimage import io [as alias]
# Or: from skimage.io import imshow [as alias]
# Method excerpt from a segmentation model class; io is skimage.io and
# extract_patches_2d comes from sklearn.feature_extraction.image
def predict_image(self, test_img, show=False):
    '''
    Predicts classes of the input image.
    INPUT   (1) str 'test_img': filepath to image to predict on
            (2) bool 'show': True to show the results of prediction, False to return prediction
    OUTPUT  (1) if show == False: array of predicted pixel classes for the center 208 x 208 pixels
            (2) if show == True: displays segmentation results
    '''
    imgs = io.imread(test_img).astype('float').reshape(5, 240, 240)
    plist = []

    # create patches from an entire slice
    for img in imgs[:-1]:
        if np.max(img) != 0:
            img /= np.max(img)
        p = extract_patches_2d(img, (33, 33))
        plist.append(p)
    # stack the four modalities channel-wise for every patch (list() is needed on Python 3)
    patches = np.array(list(zip(np.array(plist[0]), np.array(plist[1]), np.array(plist[2]), np.array(plist[3]))))

    # predict classes of each pixel based on model
    # (a 240x240 slice yields 208x208 valid 33x33 patch centers, hence the reshape below)
    full_pred = self.model_comp.predict_classes(patches)
    fp1 = full_pred.reshape(208, 208)
    if show:
        io.imshow(fp1)
        plt.show()
    else:
        return fp1
Developer: naldeborgh7575 | Project: brain_segmentation | Lines: 29 | Source file: Segmentation_Models.py

Example 9: draw_points_on_img

# Required import: from skimage import io [as alias]
# Or: from skimage.io import imshow [as alias]
from skimage import draw


def draw_points_on_img(img, point_ver, point_hor, point_class):
    # blacken a small disc around every keypoint that is not of class 3
    for i in range(len(point_class)):
        if point_class[i] != 3:
            # draw.circle was renamed draw.disk in newer skimage releases
            rr, cc = draw.circle(point_ver[i], point_hor[i], 10, (256, 192))
            #draw.set_color(img, [rr, cc], [0., 0., 0.], alpha=5)
            img[rr, cc, :] = 0
    #io.imshow(img)
    #io.show()

    return img
Developer: VXallset | Project: deep-high-resolution-net.TensorFlow | Lines: 12 | Source file: dataset.py

Example 10: mytest

# Required import: from skimage import io [as alias]
# Or: from skimage.io import imshow [as alias]
import tensorflow as tf  # written for the TensorFlow 1.x queue-based input pipeline
from skimage import draw, io

# decode_tfrecord is defined elsewhere in the same dataset.py


def mytest():
    tfrecord_file = '../dataset/train.tfrecords'

    filename_queue = tf.train.string_input_producer([tfrecord_file], num_epochs=None)
    image_name, image, keypoints_ver, keypoints_hor, keypoints_class = decode_tfrecord(filename_queue)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            # while not coord.should_stop():
            for i in range(10):
                img_name, img, point_ver, point_hor, point_class = sess.run([image_name, image, keypoints_ver,
                                                                             keypoints_hor, keypoints_class])

                print(img_name, point_hor, point_ver, point_class)

                # blacken a small disc around every positive keypoint before displaying
                for j in range(len(point_class)):
                    if point_class[j] > 0:
                        rr, cc = draw.circle(point_ver[j], point_hor[j], 10, (256, 192))
                        img[rr, cc, :] = 0

                io.imshow(img)
                io.show()

        except tf.errors.OutOfRangeError:
            print('Done reading')
        finally:
            coord.request_stop()
Developer: VXallset | Project: deep-high-resolution-net.TensorFlow | Lines: 33 | Source file: dataset.py

Example 11: show_segmented_image

# Required import: from skimage import io [as alias]
# Or: from skimage.io import imshow [as alias]
# Method excerpt from the same segmentation model class; io is skimage.io,
# adjust_gamma is skimage.exposure.adjust_gamma and img_as_float is skimage.img_as_float
def show_segmented_image(self, test_img, modality='t1c', show=False):
    '''
    Creates an image of the original brain with a segmentation overlay.
    INPUT   (1) str 'test_img': filepath to test image for segmentation, including file extension
            (2) str 'modality': imaging modality to use as background. Defaults to t1c. Options: (flair, t1, t1c, t2)
            (3) bool 'show': if True, shows the output image. Defaults to False.
    OUTPUT  (1) if show is True, shows image of segmentation results
            (2) if show is False, returns segmented image
    '''
    modes = {'flair': 0, 't1': 1, 't1c': 2, 't2': 3}

    segmentation = self.predict_image(test_img, show=False)
    # pad the 208x208 prediction back to the full 240x240 slice
    img_mask = np.pad(segmentation, (16, 16), mode='edge')
    ones = np.argwhere(img_mask == 1)
    twos = np.argwhere(img_mask == 2)
    threes = np.argwhere(img_mask == 3)
    fours = np.argwhere(img_mask == 4)

    test_im = io.imread(test_img)
    test_back = test_im.reshape(5, 240, 240)[-2]
    # overlay = mark_boundaries(test_back, img_mask)
    gray_img = img_as_float(test_back)

    # adjust gamma of image
    image = adjust_gamma(color.gray2rgb(gray_img), 0.65)
    sliced_image = image.copy()
    red_multiplier = [1, 0.2, 0.2]
    yellow_multiplier = [1, 1, 0.25]
    green_multiplier = [0.35, 0.75, 0.25]
    blue_multiplier = [0, 0.25, 0.9]

    # change colors of segmented classes (range was xrange in the original Python 2 code)
    for i in range(len(ones)):
        sliced_image[ones[i][0]][ones[i][1]] = red_multiplier
    for i in range(len(twos)):
        sliced_image[twos[i][0]][twos[i][1]] = green_multiplier
    for i in range(len(threes)):
        sliced_image[threes[i][0]][threes[i][1]] = blue_multiplier
    for i in range(len(fours)):
        sliced_image[fours[i][0]][fours[i][1]] = yellow_multiplier

    if show:
        io.imshow(sliced_image)
        plt.show()

    else:
        return sliced_image
Developer: naldeborgh7575 | Project: brain_segmentation | Lines: 49 | Source file: Segmentation_Models.py


Note: The skimage.io.imshow examples in this article were collected by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code, and do not republish this article without permission.