

Python transform.hough_line function code examples

This article collects typical usage examples of the Python function skimage.transform.hough_line. If you are wondering exactly what hough_line does, how to call it, or what its use looks like in practice, the hand-picked code examples below should help.


A total of 15 hough_line code examples are shown below, ordered by popularity by default.
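Before the examples, here is a minimal sketch of the basic call pattern. The toy image and variable names are illustrative only and are not taken from any of the projects below; hough_line accepts a 2-D image (and optionally an array of angles in radians) and returns the vote accumulator, the tested angles, and the line distances.

import numpy as np
from skimage import transform

# Toy binary image containing a single vertical line at column 50
image = np.zeros((100, 100), dtype=bool)
image[:, 50] = True

# Vote accumulator, tested angles (radians), distances (pixels)
hspace, angles, dists = transform.hough_line(image)

# hough_line_peaks extracts the strongest lines from the accumulator
_, peak_angles, peak_dists = transform.hough_line_peaks(hspace, angles, dists)
print(peak_angles, peak_dists)  # roughly: angle 0.0 rad, distance 50.0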

Example 1: test_hough_line_bad_input

def test_hough_line_bad_input():
    img = np.zeros(100)
    img[10] = 1

    # Expected error, img must be 2D
    with testing.raises(ValueError):
        transform.hough_line(img, np.linspace(0, 360, 10))
Author: Cadair, Project: scikit-image, Lines of code: 7, Source file: test_hough_transform.py

Example 2: compareImages

def compareImages():
    image1 = Image.open("../Images/image5.png")
    image2 = Image.open("../Images/image5_bold.png")

    # Blur both images to suppress noise, then convert them to 2D arrays
    blurredImage1 = image1.filter(ImageFilter.GaussianBlur(radius=2))
    imageData1 = toMatrix(blurredImage1, blurredImage1.size[1], blurredImage1.size[0])
    blurredImage2 = image2.filter(ImageFilter.GaussianBlur(radius=2))
    imageData2 = toMatrix(blurredImage2, blurredImage2.size[1], blurredImage2.size[0])

    # Hough transform of each image: accumulator, tested angles, distances
    h1, theta1, d1 = hough_line(imageData1)
    h2, theta2, d2 = hough_line(imageData2)
Author: tjohnson314, Project: GraphReader, Lines of code: 11, Source file: learner.py
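The snippet above is cut off before the two transforms are actually compared. One plausible continuation, assuming the intent is to compare the dominant line orientations of the two images (this sketch is not part of the original GraphReader project):

from skimage.transform import hough_line_peaks

# Dominant line angles detected in each image
_, angles1, _ = hough_line_peaks(h1, theta1, d1)
_, angles2, _ = hough_line_peaks(h2, theta2, d2)

# Compare the detected orientations, e.g. by their mean angle
print(abs(angles1.mean() - angles2.mean()))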

Example 3: hough_detect

def hough_detect(binary_maps, vote_thresh=12):
    """ Use the Hough detection method to detect lines in the data.
    With enough lines, you can fill in the wave front."""
    detection = []
    print("Performing hough transform on binary maps...")
    for img in binary_maps:
        # Perform the hough transform on each of the difference maps
        transform, theta, d = hough_line(img.data)

        # Filter the hough transform results and find the best lines in the
        # data.  Keep detections that exceed the Hough vote threshold.
        indices = (transform > vote_thresh).nonzero()
        distances = d[indices[0]]
        theta = theta[indices[1]]

        # Perform the inverse transform to get a series of rectangular
        # images that show where the wavefront is.
        # Create an empty map with the same shape and metadata as the
        # original difference map; the detected lines will be summed into it.
        invTransform = sunpy.map.Map(np.zeros(img.data.shape), img.meta)
        
        # Add up all the detected lines over each other.  The idea behind
        # adding up all the lines on top of each other is that pixels that
        # have larger number of detections are more likely to be in the
        # wavefront.  Note that we are using the Hough transform - which is used
        # to detect lines - to detect and fill in a region.  You might see this
        # as an abuse of the Hough transform!
        for i in range(len(indices[1])):
            nextLine = htLine(distances[i], theta[i], np.zeros(shape=img.data.shape))
            invTransform = invTransform + nextLine

        detection.append(invTransform)

    return detection
Author: sunpy, Project: eitwave, Lines of code: 34, Source file: aware_utils.py
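The htLine helper used above is defined elsewhere in the eitwave project and is not shown in this snippet. For illustration only, a hedged reimplementation that rasterises the line described by a Hough (distance, angle) pair into an image could look like this (the project's actual version may differ):

import numpy as np

def htLine(distance, angle, img):
    # Mark every pixel (x, y) satisfying the Hough line equation
    #     distance = x * cos(angle) + y * sin(angle)
    # where x is the column index and y is the row index.
    ny, nx = img.shape
    if abs(np.sin(angle)) > 1e-8:
        # Solve for the row y at every column x
        for x in range(nx):
            y = int(round((distance - x * np.cos(angle)) / np.sin(angle)))
            if 0 <= y < ny:
                img[y, x] = 1
    else:
        # sin(angle) ~ 0: the line is vertical, so the column is fixed
        x = int(round(distance / np.cos(angle)))
        if 0 <= x < nx:
            img[:, x] = 1
    return img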

Example 4: test_hough_line_peaks_num

def test_hough_line_peaks_num():
    img = np.zeros((100, 100), dtype=np.bool_)
    img[:, 30] = True
    img[:, 40] = True
    hspace, angles, dists = tf.hough_line(img)
    assert len(tf.hough_line_peaks(hspace, angles, dists, min_distance=0,
                                   min_angle=0, num_peaks=1)[0]) == 1
Author: noahstier, Project: scikit-image, Lines of code: 7, Source file: test_hough_transform.py

Example 5: igs1_measure_hough

def igs1_measure_hough(realization,model,redshift=1.0,big_fiducial_set=False,threshold=0.1,bins=np.linspace(0.0,0.0014,50)):


	"""
	Measures all the statistical descriptors of a convergence map as indicated by the index instance
	
	"""

	logging.debug("Processing {0}".format(model.getNames(realization,z=redshift,big_fiducial_set=big_fiducial_set,kind="convergence")))

	#Load the map
	conv_map = model.load(realization,z=redshift,big_fiducial_set=big_fiducial_set,kind="convergence")

	#Log to user
	logging.debug("Measuring hough histograms...")

	#Compute the hough transform
	linmap = conv_map.data > np.random.rand(*conv_map.data.shape) * threshold
	out,angle,d = hough_line(linmap)

	#Compute the histogram of the hough transform map
	h,b = np.histogram(out.flatten()*1.0/linmap.sum(),bins=bins)

	#Return
	return h
Author: apetri, Project: AstroVision, Lines of code: 25, Source file: measure_all_features.py

Example 6: houghLine

 def houghLine(img2d):
     "gray input"
     med_filter = ndimg.median_filter(img2d, size = (5,5))
     edges = filter.sobel(med_filter/255.)
     [H,theta,distances] = transform.hough_line(edges);
     imgsize = float(len(theta)*len(distances))
     return H.sum()/imgsize
Author: yigong, Project: AY250, Lines of code: 7, Source file: run_final_classifier.py

Example 7: calculate

 def calculate(self, image: np.ndarray, disk_size: int=9,
               mean_threshold: int=100, min_object_size: int=750) -> float:
     # Find edges that have a strong vertical direction
     vertical_edges = sobel_v(image)
     # Separate out the areas where there is a large amount of vertically-oriented stuff
     segmentation = self._segment_edge_areas(vertical_edges, disk_size, mean_threshold, min_object_size)
     # Draw a line that follows the center of the segments at each point, which should be roughly vertical
     # We should expect this to give us four approximately-vertical lines, possibly with many gaps in
     # each line
     skeletons = skeletonize(segmentation)
     # Use the Hough transform to get the closest lines that approximate those four lines
     hough = transform.hough_line(skeletons, np.arange(-constants.FIFTEEN_DEGREES_IN_RADIANS,
                                                       constants.FIFTEEN_DEGREES_IN_RADIANS,
                                                       0.0001))
     # Create a list of the angles (in radians) of all of the lines the Hough transform produced, with 0.0
     # being completely vertical
     # These angles correspond to the angles of the four sides of the channels, which we need to
     # correct for
     angles = [angle for _, angle, dist in zip(*transform.hough_line_peaks(*hough))]
     if not angles:
         raise ValueError("Image rotation could not be calculated. Check the images to see if they're weird.")
     else:
         # Get the average angle and convert it to degrees
         offset = sum(angles) / len(angles) * 180.0 / math.pi
         if offset > constants.ACCEPTABLE_SKEW_THRESHOLD:
             log.warn("Image is heavily skewed. Check that the images are valid.")
         return offset
Author: jimrybarski, Project: fylm_critic, Lines of code: 27, Source file: rotate.py

Example 8: calculate_rotation

def calculate_rotation(image):
    # sometimes we snag corners, by cropping the left and right 10% of the image we focus only on the
    # vertical bars formed by the structure
    height, width = image.shape
    crop = int(width * 0.1)
    cropped_image = image[:, crop: width - crop]
    # Find edges that have a strong vertical direction
    vertical_edges = sobel_v(cropped_image)
    # Separate out the areas where there is a large amount of vertically-oriented stuff
    segmentation = segment_edge_areas(vertical_edges)
    # Draw a line that follows the center of the segments at each point, which should be roughly vertical
    # We should expect this to give us four approximately-vertical lines, possibly with many gaps in
    # each line
    skeletons = skeletonize(segmentation)
    # Use the Hough transform to get the closest lines that approximate those four lines
    hough = transform.hough_line(skeletons, np.arange(-constants.FIFTEEN_DEGREES_IN_RADIANS,
                                                      constants.FIFTEEN_DEGREES_IN_RADIANS,
                                                      0.0001))
    # Create a list of the angles (in radians) of all of the lines the Hough transform produced, with 0.0
    # being completely vertical
    # These angles correspond to the angles of the four sides of the channels, which we need to
    # correct for
    angles = [angle for _, angle, dist in zip(*transform.hough_line_peaks(*hough))]
    if not angles:
        raise ValueError("Image rotation could not be calculated. Check the images to see if they're weird.")
    else:
        # Get the average angle and convert it to degrees
        offset = sum(angles) / len(angles) * 180.0 / math.pi
        if offset > constants.ACCEPTABLE_SKEW_THRESHOLD:
            log.warn("Image is heavily skewed. Check that the images are valid.")
        return offset
Author: jimrybarski, Project: fylm_critic, Lines of code: 31, Source file: tiff_to_hdf5.py
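The offset returned by calculate_rotation is a skew angle in degrees. A caller could correct the image with skimage's rotate roughly as follows; the file name and the sign of the rotation are assumptions for illustration and are not taken from the fylm_critic project:

from skimage import io, transform

raw = io.imread("frame_00.tif")            # hypothetical grayscale input frame
offset = calculate_rotation(raw)           # skew in degrees, from the example above
deskewed = transform.rotate(raw, -offset)  # rotate back by the measured skew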

Example 9: test_hough_line_angles

def test_hough_line_angles():
    img = np.zeros((10, 10))
    img[0, 0] = 1

    out, angles, d = tf.hough_line(img, np.linspace(0, 360, 10))

    assert_equal(len(angles), 10)
Author: noahstier, Project: scikit-image, Lines of code: 7, Source file: test_hough_transform.py

Example 10: test_hough_line_peaks_zero_input

def test_hough_line_peaks_zero_input():
    # Test to make sure empty input doesn't cause a failure
    img = np.zeros((100, 100), dtype='uint8')
    theta = np.linspace(0, np.pi, 100)
    hspace, angles, dists = transform.hough_line(img, theta)
    h, a, d = transform.hough_line_peaks(hspace, angles, dists)
    assert_equal(a, np.array([]))
Author: Cadair, Project: scikit-image, Lines of code: 7, Source file: test_hough_transform.py

Example 11: test_hough_line_peaks_num

def test_hough_line_peaks_num():
    img = np.zeros((100, 100), dtype=np.bool_)
    img[:, 30] = True
    img[:, 40] = True
    hspace, angles, dists = tf.hough_line(img)
    with expected_warnings(['`background`']):
        assert len(tf.hough_line_peaks(hspace, angles, dists, min_distance=0,
                                       min_angle=0, num_peaks=1)[0]) == 1
Author: MartinSavc, Project: scikit-image, Lines of code: 8, Source file: test_hough_transform.py

Example 12: test_hough_line_peaks_angle

def test_hough_line_peaks_angle():
    img = np.zeros((100, 100), dtype=np.bool_)
    img[:, 0] = True
    img[0, :] = True

    hspace, angles, dists = tf.hough_line(img)
    assert len(tf.hough_line_peaks(hspace, angles, dists, min_angle=45)[0]) == 2
    assert len(tf.hough_line_peaks(hspace, angles, dists, min_angle=90)[0]) == 1

    theta = np.linspace(0, np.pi, 100)
    hspace, angles, dists = tf.hough_line(img, theta)
    assert len(tf.hough_line_peaks(hspace, angles, dists, min_angle=45)[0]) == 2
    assert len(tf.hough_line_peaks(hspace, angles, dists, min_angle=90)[0]) == 1

    theta = np.linspace(np.pi / 3, 4. / 3 * np.pi, 100)
    hspace, angles, dists = tf.hough_line(img, theta)
    assert len(tf.hough_line_peaks(hspace, angles, dists, min_angle=45)[0]) == 2
    assert len(tf.hough_line_peaks(hspace, angles, dists, min_angle=90)[0]) == 1
Author: ChrisBeaumont, Project: scikit-image, Lines of code: 18, Source file: test_hough_transform.py

Example 13: test_hough_line_peaks_dist

def test_hough_line_peaks_dist():
    img = np.zeros((100, 100), dtype=np.bool_)
    img[:, 30] = True
    img[:, 40] = True
    hspace, angles, dists = transform.hough_line(img)
    assert len(transform.hough_line_peaks(hspace, angles, dists,
                                          min_distance=5)[0]) == 2
    assert len(transform.hough_line_peaks(hspace, angles, dists,
                                          min_distance=15)[0]) == 1
Author: Cadair, Project: scikit-image, Lines of code: 9, Source file: test_hough_transform.py

Example 14: align_with_boarder

def align_with_boarder(image, sigma=1):

    edges = ft.canny(image, sigma=sigma)
    # edges = abs(fil.sobel_v(image))

    h, theta, d = tf.hough_line(edges)

    a, rot_angle, c = tf.hough_line_peaks(h, theta, d, min_distance=0)
    image = rotate(image, np.rad2deg(rot_angle[0]))

    return image
Author: MK8J, Project: PV_analysis, Lines of code: 11, Source file: util.py

Example 15: test_ideal_tfr

 def test_ideal_tfr(self):
     """Test if the ideal TFR can be found using the instantaneous frequency
     laws."""
     _, iflaw1 = fmlin(128, 0.0, 0.2)
     _, iflaw2 = fmlin(128, 0.3, 0.5)
     iflaws = np.c_[iflaw1, iflaw2].T
     tfr, _, _ = pproc.ideal_tfr(iflaws)
     tfr[tfr == 1] = 255
     tfr = tfr.astype(np.uint8)
     hspace, angles, dists = hough_line(tfr)
     for x in hough_line_peaks(hspace, angles, dists):
         self.assertEqual(len(x), 2)
Author: fmarrabal, Project: pytftb, Lines of code: 12, Source file: test_postprocessing.py


Note: The skimage.transform.hough_line examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.