

Python data.coffee function code examples

This article collects and summarizes typical usage examples of the Python function skimage.data.coffee. If you are wondering how exactly the coffee function is used, how to call it, or what real code using it looks like, the hand-picked examples below should help.


Fifteen code examples of the coffee function are shown below, sorted by popularity by default.
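Before the examples, a minimal usage sketch may help: skimage.data.coffee() takes no arguments and returns a built-in sample photograph of a cup of coffee as an RGB NumPy array, which is why it appears so often as a test image. Displaying it with matplotlib, as below, is just one common choice used here for illustration.

import matplotlib.pyplot as plt
from skimage import data

# Load the built-in sample photograph (an RGB uint8 array, roughly 400x600x3).
img = data.coffee()
print(img.shape, img.dtype)

# Show the image; any viewer would do, matplotlib is used here for illustration.
plt.imshow(img)
plt.axis('off')
plt.show()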

Example 1: test_i2v

def test_i2v():
    """Loads the i2v network and applies it to a test image.
    """
    with tf.Session() as sess:
        net = get_i2v_model()
        tf.import_graph_def(net['graph_def'], name='i2v')
        g = tf.get_default_graph()
        names = [op.name for op in g.get_operations()]
        x = g.get_tensor_by_name(names[0] + ':0')
        softmax = g.get_tensor_by_name(names[-3] + ':0')

        from skimage import data
        img = preprocess(data.coffee())[np.newaxis]
        res = np.squeeze(softmax.eval(feed_dict={x: img}))
        print([(res[idx], net['labels'][idx])
               for idx in res.argsort()[-5:][::-1]])

        """Let's visualize the network's gradient activation
        when backpropagated to the original input image.  This
        is effectively telling us which pixels contribute to the
        predicted class or given neuron"""
        pools = [name for name in names if 'pool' in name.split('/')[-1]]
        fig, axs = plt.subplots(1, len(pools))
        for pool_i, poolname in enumerate(pools):
            pool = g.get_tensor_by_name(poolname + ':0')
            pool.get_shape()
            neuron = tf.reduce_max(pool, 1)
            saliency = tf.gradients(neuron, x)
            neuron_idx = tf.arg_max(pool, 1)
            this_res = sess.run([saliency[0], neuron_idx],
                                feed_dict={x: img})

            grad = this_res[0][0] / np.max(np.abs(this_res[0]))
            axs[pool_i].imshow((grad * 128 + 128).astype(np.uint8))
            axs[pool_i].set_title(poolname)
Developer: Arn-O, Project: kadenze-deep-creative-apps, Lines: 35, Source: i2v.py

Example 2: test_random_enhance_any_color

def test_random_enhance_any_color():
    image = data.coffee()

    for i in xrange(10):
        enhanced = random_enhance_color(image,_seed=42)
        assert (image != enhanced).any()
        assert (image.shape == enhanced.shape)
        assert (image.sum() < enhanced.sum())
Developer: psteinb, Project: 20150925-scads, Lines: 8, Source: test_bootstrap_utils.py

Example 3: color_transformation

def color_transformation():
    # Color transformation: brighten and darken the image
    image=data.coffee()
    brighter=np.uint8(image*0.5+255*0.5)
    darker=np.uint8(image*0.5)
    io.imshow(brighter)
    io.show()
    io.imshow(darker)
    io.show()
Developer: xingnix, Project: learning, Lines: 9, Source: colorimage.py

Example 4: getImage

	def getImage(self,params):
		sigma = float(params['sigma'])
		r = float(params['red'])
		g = float(params['green'])
		b = float(params['blue'])
		image = data.coffee()
		new_image = filter.gaussian_filter(image, sigma=sigma, multichannel=True)
		new_image[:,:,0] = r*new_image[:,:,0]
		new_image[:,:,1] = g*new_image[:,:,1]
		new_image[:,:,2] = b*new_image[:,:,2]
		return new_image
Developer: 40a, Project: spyre, Lines: 11, Source: image_editor.py
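Note that this snippet relies on the old skimage.filter module name; in current scikit-image the module is skimage.filters, gaussian_filter has become gaussian, and the multichannel flag was later replaced by channel_axis. A rough modern equivalent of the filtering line (assuming scikit-image >= 0.19):

from skimage import filters

# Same Gaussian blur on a recent scikit-image release.
new_image = filters.gaussian(image, sigma=sigma, channel_axis=-1)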

Example 5: main

def main():
    """Load image, collect pixels, cluster, create segment images, plot."""
    # load image
    img_rgb = data.coffee()
    img_rgb = misc.imresize(img_rgb, (256, 256)) / 255.0
    img = color.rgb2hsv(img_rgb)
    height, width, channels = img.shape
    print("Image shape is: ", img.shape)

    # collect pixels as tuples of (r, g, b, y, x)
    print("Collecting pixels...")
    pixels = []
    for y in range(height):
        for x in range(width):
            pixel = img[y, x, ...]
            pixels.append([pixel[0], pixel[1], pixel[2], (y / height) * 2.0, (x / width) * 2.0])
    pixels = np.array(pixels)
    print("Found %d pixels to cluster" % (len(pixels)))

    # cluster the pixels using mean shift
    print("Clustering...")
    bandwidth = estimate_bandwidth(pixels, quantile=0.05, n_samples=500)
    clusterer = MeanShift(bandwidth=bandwidth, bin_seeding=True)
    labels = clusterer.fit_predict(pixels)

    # process labels generated during clustering
    labels_unique = set(labels)
    labels_counts = [(lu, len([l for l in labels if l == lu])) for lu in labels_unique]
    labels_unique = sorted(list(labels_unique), key=lambda l: labels_counts[l], reverse=True)
    nb_clusters = len(labels_unique)
    print("Found %d clusters" % (nb_clusters))
    print(labels.shape)

    print("Creating images of segments...")
    img_segments = [np.copy(img_rgb) * 0.25 for label in labels_unique]

    for y in range(height):
        for x in range(width):
            pixel_idx = (y * width) + x
            label = labels[pixel_idx]
            img_segments[label][y, x, 0] = 1.0

    print("Plotting...")
    images = [img_rgb]
    titles = ["Image"]
    for i in range(min(8, nb_clusters)):
        images.append(img_segments[i])
        titles.append("Segment %d" % (i))

    plot_images(images, titles)
Developer: aleju, Project: computer-vision-algorithms, Lines: 50, Source: mean_shift_segmentation.py

Example 6: test_write_rgb

def test_write_rgb(tmpdir_factory):
    img = coffee()
    filename = str(tmpdir_factory.mktemp("write").join("rgb_img.tif"))
    with Tiff(filename, "w") as handle:
        handle.write(img, method="tile")
    with Tiff(filename) as handle:
        data = handle[:]
        assert np.all(img == data[:, :, :3])

    with Tiff(filename, "w") as handle:
        handle.write(img, method="scanline")
    with Tiff(filename) as handle:
        data = handle[:]
        assert np.all(img == data[:, :, :3])
Developer: FZJ-INM1-BDA, Project: pytiff, Lines: 14, Source: test_write.py

Example 7: run

def run(dict,canload=0):
    import os.path
    if 'fname' in dict:
        filename=dict['fname']
    else:
        print("No filename given")
        exit(1)
    print("\n",filename,"============================================","\n")
    plt.ion()
    G=hamiltonian.GaussGreen(dict['ell'],0)
    no_steps=dict['no_steps']
    if isinstance(no_steps, list):
        ODE=diffeo.MultiShoot(G,1)
    else:
        ODE=diffeo.Shoot(G)  # use single shooting
    #
    ODE.set_no_steps(dict['no_steps'])
    ODE.set_landmarks(dict['landmarks_n'])
    ODE.solve()
    # plot warp
    plot_setup()
    plt.axis('equal')
    ODE.plot_warp()
    plt.savefig(filename+'warp.pdf',bbox_inches='tight')
    #
    # load test image
    #image = data.checkerboard()
    image = data.coffee()
    #
    # apply warp to image
    new_image=ODE.warp(image)
    # plotting and save to png
    plot_setup()
    plt.close()
    fig, (ax0, ax1) = plt.subplots(1, 2,
                                   figsize=(8, 3),
                                   sharex=True,
                                   sharey=True,
                                   subplot_kw={'adjustable':'box-forced'}
                                   )
    ax0.imshow(image, cmap=plt.cm.gray, interpolation='none')
    mpl.image.imsave('orig_image.png',image,cmap=plt.cm.gray)
    ax0.axis('off')
    #
    ax1.imshow(new_image, cmap=plt.cm.gray, interpolation='none')
    mpl.image.imsave('new_image.png',new_image,cmap=plt.cm.gray)
    ax1.axis('off')
    plt.show()
    print("finished.")
Developer: tonyshardlow, Project: reg_sde, Lines: 49, Source: run_warp.py

Example 8: test_minsize

def test_minsize():
    # single-channel:
    img = data.coins()[20:168, 0:128]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(img, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        assert_greater(counts.min() + 1, min_size)
    # multi-channel:
    coffee = data.coffee()[::4, ::4]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(coffee, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        assert_greater(counts.min() + 1, min_size)
Developer: Cadair, Project: scikit-image, Lines: 15, Source: test_felzenszwalb.py

Example 9: test_minsize

def test_minsize():
    # single-channel:
    img = data.coins()[20:168,0:128]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(img, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        assert_greater(counts.min() + 1, min_size)
    # multi-channel:
    coffee = data.coffee()[::4, ::4]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(coffee, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        # the construction doesn't guarantee min_size is respected
        # after intersecting the segmentations for the colors
        assert_greater(np.mean(counts) + 1, min_size)
Developer: AceHao, Project: scikit-image, Lines: 17, Source: test_felzenszwalb.py

Example 10: _build_expected_output

    def _build_expected_output(self):
        funcs = (grey.erosion, grey.dilation, grey.opening, grey.closing,
                 grey.white_tophat, grey.black_tophat)
        selems_2D = (selem.square, selem.diamond,
                     selem.disk, selem.star)

        with expected_warnings(['Possible precision loss']):
            image = img_as_ubyte(transform.downscale_local_mean(
                color.rgb2gray(data.coffee()), (20, 20)))

        output = {}
        for n in range(1, 4):
            for strel in selems_2D:
                for func in funcs:
                    key = '{0}_{1}_{2}'.format(strel.__name__, n, func.__name__)
                    output[key] = func(image, strel(n))

        return output
Developer: AbdealiJK, Project: scikit-image, Lines: 18, Source: test_grey.py

Example 11: match_histograms

If the images have multiple channels, the matching is done independently for each
channel, as long as the number of channels is equal in the input image and the reference.

Histogram matching can be used as a lightweight normalisation for image
processing, such as feature matching, especially in circumstances where the
images have been taken from different sources or in different conditions (i.e.
lighting).
"""

import matplotlib.pyplot as plt

from skimage import data
from skimage import exposure
from skimage.transform import match_histograms

reference = data.coffee()
image = data.chelsea()

matched = match_histograms(image, reference)

fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),
                                    sharex=True, sharey=True)
for aa in (ax1, ax2, ax3):
    aa.set_axis_off()

ax1.imshow(image)
ax1.set_title('Source')
ax2.imshow(reference)
ax2.set_title('Reference')
ax3.imshow(matched)
ax3.set_title('Matched')
Developer: anntzer, Project: scikit-image, Lines: 31, Source: plot_histogram_matching.py

Example 12: merge_hierarchical

    This method computes the mean color of `dst`.

    Parameters
    ----------
    graph : RAG
        The graph under consideration.
    src, dst : int
        The vertices in `graph` to be merged.
    """
    graph.node[dst]['total color'] += graph.node[src]['total color']
    graph.node[dst]['pixel count'] += graph.node[src]['pixel count']
    graph.node[dst]['mean color'] = (graph.node[dst]['total color'] /
                                     graph.node[dst]['pixel count'])


img = data.coffee()
labels = segmentation.slic(img, compactness=30, n_segments=400)
g = graph.rag_mean_color(img, labels)

labels2 = graph.merge_hierarchical(labels, g, thresh=35, rag_copy=False,
                                   in_place_merge=True,
                                   merge_func=merge_mean_color,
                                   weight_func=_weight_mean_color)

g2 = graph.rag_mean_color(img, labels2)

out = color.label2rgb(labels2, img, kind='avg')
out = segmentation.mark_boundaries(out, labels2, (0, 0, 0))
io.imshow(out)
io.show()
Developer: AbdealiJK, Project: scikit-image, Lines: 30, Source: plot_rag_merge.py
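The excerpt above starts in the middle of merge_mean_color's docstring: the imports, the enclosing def merge_mean_color(graph, src, dst) line, and the _weight_mean_color callback passed to merge_hierarchical are not shown. A hedged reconstruction of those missing pieces, based only on the names used in the excerpt (the exact bodies in the original plot_rag_merge.py may differ):

from skimage import data, io, segmentation, color
from skimage.future import graph
import numpy as np


def _weight_mean_color(graph, src, dst, n):
    # Assumed callback: weight the edge between `dst` and its neighbour `n`
    # by the Euclidean distance between their mean colors, returned in the
    # attribute-dict form that merge_hierarchical expects.
    diff = graph.node[dst]['mean color'] - graph.node[n]['mean color']
    return {'weight': np.linalg.norm(diff)}

The docstring and the three assignment lines at the top of the excerpt would then form the body of merge_mean_color.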

Example 13: rgb2hsv

A simple segmentation of the image can then be effectively performed by a mere
thresholding of the HSV channels.

.. [1] https://en.wikipedia.org/wiki/HSL_and_HSV

"""

##############################################################################
# We first load the RGB image and extract the Hue and Value channels:

import matplotlib.pyplot as plt

from skimage import data
from skimage.color import rgb2hsv

rgb_img = data.coffee()
hsv_img = rgb2hsv(rgb_img)
hue_img = hsv_img[:, :, 0]
value_img = hsv_img[:, :, 2]

fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(8, 2))

ax0.imshow(rgb_img)
ax0.set_title("RGB image")
ax0.axis('off')
ax1.imshow(hue_img, cmap='hsv')
ax1.set_title("Hue channel")
ax1.axis('off')
ax2.imshow(value_img)
ax2.set_title("Value channel")
ax2.axis('off')
Developer: ThomasWalter, Project: scikit-image, Lines: 31, Source: plot_rgb_to_hsv.py
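The excerpt ends before the thresholding step that its introductory text promises. A minimal sketch of that idea, continuing from the hue_img and rgb_img variables above; the cut-off value is an assumption chosen for illustration and is not the one used in the original gallery script:

# Keep only pixels with a low hue, which roughly isolates the
# reddish-brown tones of the cup in the coffee image.
hue_threshold = 0.1              # assumed value, tune as needed
mask = hue_img < hue_threshold

segmented = rgb_img.copy()
segmented[~mask] = 0             # black out everything outside the mask

plt.imshow(segmented)
plt.axis('off')
plt.show()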

Example 14: hough_ellipse

References
----------
.. [1] Xie, Yonghong, and Qiang Ji. "A new efficient ellipse detection
       method." Pattern Recognition, 2002. Proceedings. 16th International
       Conference on. Vol. 2. IEEE, 2002
"""

import matplotlib.pyplot as plt

from skimage import data, filter, color
from skimage.transform import hough_ellipse
from skimage.draw import ellipse_perimeter

# Load picture, convert to grayscale and detect edges
image_rgb = data.coffee()[0:220, 160:420]
image_gray = color.rgb2gray(image_rgb)
edges = filter.canny(image_gray, sigma=2.0,
                     low_threshold=0.55, high_threshold=0.8)

# Perform a Hough Transform
# The accuracy corresponds to the bin size of a major axis.
# The value is chosen in order to get a single high accumulator.
# The threshold eliminates low accumulators
result = hough_ellipse(edges, accuracy=20, threshold=250,
                       min_size=100, max_size=120)
result.sort(order='accumulator')

# Estimated parameters for the ellipse
best = result[-1]
yc = int(best[1])
Developer: acfyfe, Project: scikit-image, Lines: 30, Source: plot_circular_elliptical_hough_transform.py
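The excerpt is cut off right after reading the first ellipse parameter. In the full gallery script the remaining parameters are extracted the same way and the detected ellipse is drawn back onto the picture; a hedged sketch of that continuation, assuming the (accumulator, yc, xc, a, b, orientation) field order documented for hough_ellipse:

best = list(result[-1])
yc, xc, a, b = [int(round(x)) for x in best[1:5]]
orientation = best[5]

# Draw the estimated ellipse onto the original colour image.
cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)
image_rgb[cy, cx] = (0, 0, 255)

plt.imshow(image_rgb)
plt.axis('off')
plt.show()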

Example 15: GoogLeNet smoke tests

from skimage.data import coffee, camera
from sklearn_theano.feature_extraction import (
    GoogLeNetTransformer, GoogLeNetClassifier)
import numpy as np
from nose import SkipTest
import os

co = coffee().astype(np.float32)
ca = camera().astype(np.float32)[:, :, np.newaxis] * np.ones((1, 1, 3),
                                                             dtype='float32')


def test_googlenet_transformer():
    """smoke test for googlenet transformer"""
    if os.environ.get('CI', None) is not None:
        raise SkipTest("Skipping heavy data loading on CI")
    t = GoogLeNetTransformer()

    t.transform(co)
    t.transform(ca)


def test_googlenet_classifier():
    """smoke test for googlenet classifier"""
    if os.environ.get('CI', None) is not None:
        raise SkipTest("Skipping heavy data loading on CI")
    c = GoogLeNetClassifier()

    c.predict(co)
    c.predict(ca)
Developer: Faruk-Ahmed, Project: sklearn-theano, Lines: 30, Source: test_googlenet.py


Note: The skimage.data.coffee function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please follow the corresponding project licenses when distributing or using the code, and do not republish without permission.