

Python numpy.int0 Function Code Examples

This article collects typical usage examples of the numpy.int0 function in Python. If you have been wondering exactly what int0 does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.

The following shows 15 code examples of the int0 function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
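
Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what numpy.int0 itself does: it is an alias for numpy.intp, and when applied to a scalar or an array-like of floats it truncates the values toward zero and returns integers. This is why it appears so often next to cv2.boxPoints / cv2.cv.BoxPoints in the examples that follow, which return float corner coordinates that must become integer pixel coordinates before drawing. Note that newer NumPy releases deprecate the int0 alias, so numpy.intp is the forward-compatible spelling.

import numpy as np

# Float corners such as those returned by cv2.boxPoints (values invented for illustration)
corners = np.array([[10.7, 20.2], [99.9, 20.2], [99.9, 55.5], [10.7, 55.5]])

pixels = np.int0(corners)   # same as corners.astype(np.intp): truncates toward zero
print(pixels)
# [[10 20]
#  [99 20]
#  [99 55]
#  [10 55]]
print(pixels.dtype)         # int64 on typical 64-bit builds (np.intp)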

Example 1: colour_norm

def colour_norm(img):
	sum_img = np.int0(img[:,:,0]) + \
			np.int0(img[:,:,1]) + \
			np.int0(img[:,:,2])
	sum_img = np.dstack([sum_img, sum_img, sum_img])
	img = ((255 * img.astype("int64")) / (sum_img + 1)).astype("uint8")
	return img
Developer: shraman-biswas, Project: edge_detection, Lines: 7, Source: main.py

Example 2: __getCentroid

    def __getCentroid(self, mask):
        """ Calculate the centroid of the object. """
        x, y = mask.nonzero()
        x = np.int0(x.mean())
        y = np.int0(y.mean())
        centroid = (x, y)
        return centroid
Developer: 356255531, Project: poppyProject, Lines: 7, Source: CVAlgorithm.py

Example 3: estimate_bbox

def estimate_bbox(cnt, img):
	# calculate bounding box
	rect = cv2.minAreaRect(cnt)
	bbox = cv2.boxPoints(rect)
	bbox = np.int0(bbox)
	#cv2.drawContours(img, [bbox], 0, (0,255,0), 2)

	# rotate bounding box to get a vertical rectangle
	M = cv2.getRotationMatrix2D(rect[0], rect[2], 1)
	pts = np.ones((4, 3))
	pts[:,:-1] = bbox
	bbox_rot = np.int0(np.dot(pts, M.T))

	# resize bounding box to cover the whole document
	bbox_rot[0][0] -= 15
	bbox_rot[0][1] += 120
	bbox_rot[1][0] -= 15
	bbox_rot[2][0] += 5
	bbox_rot[3][0] += 5
	bbox_rot[3][1] += 120

	# rotate back bounding box to original orientation
	p = (bbox_rot[1][0], bbox_rot[1][1])
	M = cv2.getRotationMatrix2D(p, -rect[2], 1)
	pts = np.ones((4, 3))
	pts[:,:-1] = bbox_rot
	bbox = np.int0(np.dot(pts, M.T))
	return bbox
Developer: shraman-biswas, Project: edge_detection, Lines: 28, Source: main.py

Example 4: find_first_transmitters

def find_first_transmitters(contours):
    rects = []
    boxs = []
    for contour in contours:
        rect = cv2.minAreaRect(contour)
        if cv2.contourArea(contour) < 100000:  # arbitrary
            continue
        else:
            # find center
            box = cv2.cv.BoxPoints(rect)
            box = numpy.int0(box)
            box = rot_box(box)
            box = numpy.int0(box)
            rects.append(rect)
            boxs.append(box)
    number_of_transmitters = len(rects)
    centers = []

    for i in range(number_of_transmitters):
        # TODO: refine the center-of-mass calculation (depends on the box point ordering)
        x = [p[0] for p in boxs[i]]
        y = [p[1] for p in boxs[i]]
        center = (sum(y) / 4, sum(x) / 4)
        centers.append(center)

    return rects, boxs, centers, number_of_transmitters
Developer: glfpes, Project: VLP, Lines: 26, Source: box_fft.py

Example 5: measure_target_width_on_segment

    def measure_target_width_on_segment(self, pt1, pt2):
        """
        Given the line segment L defined by 2d points pt1 and pt2 from a camera
        frame, find pt3 and pt4, the nearest points to pt1 and pt2 on L that
        are masked according to self.mask8. Then calculate the distance D
        between the 3d points pt5 and pt6 in self.xyz which correspond to
        pt3 and pt4.
        return pt3, pt4, D, fx, fy,
            where 
                pt3 = (x, y)
                pt4 = (x, y)
                fx is the function f(distance from pt3 on L) = x
                fy is the function f(distance from pt3 on L) = y
        If anything goes wrong, return None
        """
        from scipy.interpolate import interp1d

        dist2d = distance(pt1, pt2)
        interpx = interp1d([0, dist2d], [pt1[0], pt2[0]])
        interpy = interp1d([0, dist2d], [pt1[1], pt2[1]])
        t = numpy.linspace(0, int(dist2d), int(dist2d)+1)
        xs = numpy.int0(interpx(t))
        ys = numpy.int0(interpy(t))
        ixs, = self.mask8[ys, xs].nonzero()
        if len(ixs) >= 2:
            x1 = xs[ixs[0]]
            y1 = ys[ixs[0]]
            x2 = xs[ixs[-1]]
            y2 = ys[ixs[-1]]
            xyz1 = self.xyz[:, y1, x1]
            xyz2 = self.xyz[:, y2, x2]
            dist3d = distance(xyz1, xyz2)
            interpx2 = lambda d: (x2-x1)*d/dist2d + x1
            interpy2 = lambda d: (y2-y1)*d/dist2d + y1
            return (x1, y1), (x2, y2), dist3d, interpx2, interpy2
Developer: Retro3223, Project: 2016-vision, Lines: 35, Source: vision_processing.py

Example 6: ChannelValidity

def ChannelValidity(fileName):
    """
    A function to examine the data from different channels of a tetrode stored in Neuralynx ntt file.
    """ 
    try:
        ntt = mmap_ntt_file(fileName)
        nttUp = True
    except:
        nttUp = False
    if nttUp and ntt.size > 1:
        RndIdx = np.random.randint(ntt.size-1,size=100)
        sample = np.array(ntt['waveforms'][RndIdx])
        chV = np.array([])
        ChannelValidity = np.array([])
        for item in sample:
            chV = np.append(chV,np.array([item[:,ii].sum() for ii in range(4)]))
        chV = chV.reshape(chV.size // 4, 4)  # integer division so this also works under Python 3
        ChannelValidity = np.append(ChannelValidity,[chV[:,jj].sum() for jj in range(4)])
        for ii in range(4):
            if np.abs(ChannelValidity)[ii] > 10:
                ChannelValidity[ii] = 1
            else:
                ChannelValidity[ii] = 0
        return np.int0(ChannelValidity)
    else:
        return np.int0([0,0,0,0])
Developer: gvrlab, Project: ntt2ntt, Lines: 26, Source: scriptWriter.py

Example 7: find_pipe

    def find_pipe(self, img):
        rows, cols = img.shape[:2]

        blur = cv2.GaussianBlur(img, (5, 5), 0)

        hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)

        mask = cv2.inRange(hsv, ORANGE_MIN, ORANGE_MAX)

        bmask = cv2.GaussianBlur(mask, (5, 5), 0)

        contours, _ = cv2.findContours(bmask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        blank_img = np.zeros((rows, cols), np.uint8)

        if contours:
            # sort contours by area (greatest --> least)
            contours = sorted(contours, key=cv2.contourArea, reverse=True)[:1]
            cnt = contours[0]  # contour with greatest area
            if cv2.contourArea(cnt) > 1000:  # this value will change based on our depth/the depth of the pool
                rect = cv2.minAreaRect(cnt)   # find bounding rectangle of min area (including rotation)
                box = cv2.cv.BoxPoints(rect)  # get corner coordinates of that rectangle
                box = np.int0(box)            # convert coordinates to ints

                # draw minAreaRect around pipe
                cv2.drawContours(blank_img, [box], 0, (255, 255, 255), -1)

                # get all coordinates (y,x) of pipe
                why, whx = np.where(blank_img)
                # align coordinates --> (x,y)
                wh = np.array([whx, why])

                # estimate covariance matrix and get corresponding eigenvectors
                cov = np.cov(wh)
                eig_vals, eig_vects = np.linalg.eig(cov)

                # use index of max eigenvalue to find max eigenvector
                i = np.argmax(eig_vals)
                max_eigv = eig_vects[:, i] * np.sqrt(eig_vals[i])

                # flip indices to find min eigenvector
                min_eigv = eig_vects[:, 1 - i] * np.sqrt(eig_vals[1 - i])

                # define center of pipe
                center = np.average(wh, axis=1)

                # define vertical vector (sub's current direction)
                vert_vect = np.array([0, -1 * np.int0(center[1])])

                # calculate angle between vertical and max eigenvector
                num = np.dot(max_eigv, vert_vect)
                denom = np.linalg.norm(max_eigv) * np.linalg.norm(vert_vect)
                angle_rad = np.arccos(num / denom)

                quaternion = transformations.quaternion_from_euler(0.0, 0.0, angle_rad)

                return [center[0], center[1], None], [quaternion[0], quaternion[1], quaternion[2], quaternion[3]]

            else:
                return None
Developer: guojiyao, Project: Sub8, Lines: 60, Source: follow_orange_pipes.py

Example 8: get_centroids

def get_centroids (contours, frame):
	centres = []
	if contours:
		for i in range(len(contours)):
			moments = cv2.moments(contours[i])
			centres.append((int(moments['m10']/moments['m00']), int(moments['m01']/moments['m00'])))
		
			if i>0:                
				dist = calculateDistance(centres[i-1][0],centres[i-1][1],centres[i][0],centres[i][1])
				area=cv2.contourArea(contours[i])
				prevarea=cv2.contourArea(contours[i-1])
				if dist < 120:                    
					if area > prevarea:
						rect = cv2.minAreaRect(contours[i])
						box = cv2.boxPoints(rect)
						box = np.int0(box)
						print(box)
						frame = cv2.drawContours(frame,[box],0,(0,0,255),2)
					else :
						rect = cv2.minAreaRect(contours[i-1])
						box = cv2.boxPoints(rect)
						box = np.int0(box)
						print(box)
						frame = cv2.drawContours(frame,[box],0,(0,0,255),2)
			else:
 	
				rect = cv2.minAreaRect(contours[i])
				box = cv2.boxPoints(rect)
				box = np.int0(box)
				frame = cv2.drawContours(frame,[box],0,(0,0,255),2)
				print(box)
	return centres, frame
Developer: EyeTechPae, Project: EyeTech, Lines: 32, Source: d5_1.py

Example 9: draw_walls

    def draw_walls(self):
        left_wall_points = np.array([self.transform(point) for point in self.left_wall_points])
        right_wall_points = np.array([self.transform(point) for point in self.right_wall_points])

        rect = cv2.minAreaRect(left_wall_points[:,:2].astype(np.float32))
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(self.grid, [box], 0, 128, -1)

        rect = cv2.minAreaRect(right_wall_points[:,:2].astype(np.float32))
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(self.grid, [box], 0, 128, -1)

        # So I don't have to comment a bunch of stuff out for debugging
        dont_display = True
        if dont_display:
            return

        # Bob Ross it up (just for display)
        left_f, right_f = self.transform(self.left_f), self.transform(self.right_f)
        left_b, right_b = self.transform(self.left_b), self.transform(self.right_b)

        boat = self.transform(self.boat_pos)
        target = self.transform(self.target)

        cv2.circle(self.grid, tuple(boat[:2].astype(np.int32)), 8, 255)
        cv2.circle(self.grid, tuple(target[:2].astype(np.int32)), 15, 255)
        cv2.circle(self.grid, tuple(self.transform(self.mid_point)[:2].astype(np.int32)), 5, 255)
        cv2.circle(self.grid, tuple(left_f[:2].astype(np.int32)), 10, 255)
        cv2.circle(self.grid, tuple(right_f[:2].astype(np.int32)), 10, 255)
        cv2.circle(self.grid, tuple(left_b[:2].astype(np.int32)), 3, 125)
        cv2.circle(self.grid, tuple(right_b[:2].astype(np.int32)), 3, 128)
        cv2.imshow("test", self.grid)
        cv2.waitKey(0)
Developer: uf-mil, Project: Navigator, Lines: 35, Source: start_gate.py

Example 10: track_by_camshif

  def track_by_camshif(self, frame, contour):
    area = cv2.contourArea(contour)
    rect = cv2.minAreaRect(contour)
    (vx, vy), (x, y), angle = rect
    vx, vy, x, y = np.int0((vx, vy, x, y))

    roi = frame[vy:(vy+y), vx:(x+vx)]
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    #roi = cv2.cvtColor(roi, cv2.COLOR_BGR2LAB)

    # compute a HSV histogram for the ROI and store the
    # bounding box
    roiHist = cv2.calcHist([roi], [0], None, [16], [0, 180])
    roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
    roiBox = (vx, vy, x, y)
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while(1):
      ret, current_frame = self.camera.read()
      current_image = cv2.resize(current_frame,(self.frame_width, self.frame_height), interpolation=cv2.INTER_LINEAR)
      self.custom_wait_key('origin_frame', current_image, current_image)

      hsv = cv2.cvtColor(current_image, cv2.COLOR_BGR2HSV)
      backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)

      # apply cam shift to the back projection, convert the
      # points to a bounding box, and then draw them
      (r, roiBox) = cv2.CamShift(backProj, roiBox, termination)
      pts = np.int0(cv2.cv.BoxPoints(r))
      cv2.polylines(current_image, [pts], True, (0, 255, 0), 2)
      self.custom_wait_key('frame', current_image, current_image)
Developer: lexuszhi1990, Project: jetsontk1, Lines: 31, Source: image_processor.py

Example 11: draw_box

def draw_box(largest_contour, img):
        ## Find the box encompassing the largest red blob
        rect = cv2.minAreaRect(largest_contour)
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        box = np.int0(box)
        cv2.drawContours(img,[box], 0, (0, 0, 255), 2)
        return img
Developer: pmaddi, Project: IGVC2014, Lines: 8, Source: line_detection.py

Example 12: sliceImg

def sliceImg(img, slice_part=None, color=(255, 255, 255)):
    if slice_part is not None:
        h, w = img.shape[:2]
        if isinstance(slice_part[0], float):
            cv2.rectangle(img, (np.int0(slice_part[0] * w), 0), (np.int0((slice_part[1]) * w), h), (255, 255, 255), -1)
        else:
            for part in slice_part:
                cv2.rectangle(img, (np.int0(part[0] * w), 0), (np.int0((part[1]) * w), h), (255, 255, 255), -1)
Developer: zhangyeyong, Project: homeWork, Lines: 8, Source: contourU.py

Example 13: posterize

def posterize(image, level):
	indices = np.arange(0,256)
	divider = np.linspace(0,255,level+1)[1]
	quantiz = np.int0(np.linspace(0,255,level))
	color_levels = np.clip(np.int0(indices/divider),0,level-1)
	palette = quantiz[color_levels]
	img2 = palette[image]
	img2 = cv2.convertScaleAbs(img2)
	return img2
Developer: jefarrell, Project: Topography.py, Lines: 9, Source: topo.py

Example 14: motion_all

def motion_all(event):
    if event.inaxes == ax0:
        yprof.set_xdata(np.int0(yind.clip(event.xdata,event.xdata)))
        xprof.set_ydata(np.int0(xind.clip(event.ydata,event.ydata)))
        pvert.set_xdata(img[:, int(event.xdata)])  # np.int was just the builtin int (alias removed in newer NumPy)
        pvertc.set_ydata(np.int0(yind.clip(event.ydata, event.ydata)))
        phorz.set_ydata(img[int(event.ydata), :])
        phorzc.set_xdata(np.int0(xind.clip(event.xdata,event.xdata)))
        fig.canvas.draw_idle()
Developer: tschad, Project: schadpy, Lines: 9, Source: imgprofiles_old.py

Example 15: posterization

def posterization(n, img):
    indices = np.arange(0,256)   # List of all colors 
    divider = np.linspace(0,255,n+1)[1] # we get a divider
    quantiz = np.int0(np.linspace(0,255,n)) # we get quantization colors
    color_levels = np.clip(np.int0(indices/divider),0,n-1) # color levels 0,1,2..
    palette = quantiz[color_levels] # Creating the palette

    img = palette[img]  # Applying palette on image
    return cv2.convertScaleAbs(img) # Converting image back to uint8
Developer: bdolenc, Project: 15secOfFame, Lines: 9, Source: 15sec_photo.py


Note: The numpy.int0 examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not republish without permission.