

Python cv2.COLOR_BGR2GRAY Attribute Code Examples

This article collects typical usage examples of the cv2.COLOR_BGR2GRAY attribute in Python. If you are wondering what cv2.COLOR_BGR2GRAY does, how to use it, or want to see it in real code, the curated examples below should help. You can also explore other usage examples from the cv2 module.


Below are 15 code examples of the cv2.COLOR_BGR2GRAY attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
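As a quick orientation before the examples: cv2.COLOR_BGR2GRAY is an integer conversion code passed to cv2.cvtColor, telling it to collapse a 3-channel BGR image (OpenCV's default channel order) into a single-channel grayscale image. A minimal sketch, with a placeholder file name:

import cv2

img = cv2.imread("input.jpg")  # placeholder path; OpenCV loads images in BGR order
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print(gray.shape)  # single channel: (height, width)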

Example 1: main

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def main():
	imagePath = "img.jpg"
	
	img = cv2.imread(imagePath)
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
	
	generate_histogram(gray)
	
	cv2.imwrite("before.jpg", gray)

	gray = cv2.equalizeHist(gray)
	
	generate_histogram(gray)
	
	cv2.imwrite("after.jpg",gray)
	
	return 0 
Developer ID: felipecorrea, Project: pedestrian-haar-based-detector, Lines: 19, Source: histcomparison.py
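generate_histogram is defined elsewhere in the author's project. A purely hypothetical stand-in, computing and plotting a 256-bin intensity histogram with cv2.calcHist, might look like this:

import cv2
from matplotlib import pyplot as plt

def generate_histogram(gray):
	# Hypothetical helper: plot a 256-bin histogram of a grayscale image
	hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
	plt.plot(hist)
	plt.xlim([0, 256])
	plt.show()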

Example 2: prediction

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def prediction(self, image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.GaussianBlur(image, (21, 21), 0)
        if self.avg is None:
            self.avg = image.copy().astype(float)
        cv2.accumulateWeighted(image, self.avg, 0.5)
        frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
        thresh = cv2.threshold(
                frameDelta, DELTA_THRESH, 255,
                cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(
                thresh.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        self.avg = image.copy().astype(float)
        return cnts 
Developer ID: cristianpb, Project: object-detection, Lines: 19, Source: motion.py
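DELTA_THRESH and the Detector_Motion class live in the original project. The snippet below is a self-contained sketch of the same running-average motion-mask idea, with an assumed threshold value and a local webcam:

import cv2

DELTA_THRESH = 5  # assumed value; the original project defines its own constant

cap = cv2.VideoCapture(0)
avg = None
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if avg is None:
        avg = gray.astype(float)
    cv2.accumulateWeighted(gray, avg, 0.5)  # running-average background model
    delta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    thresh = cv2.threshold(delta, DELTA_THRESH, 255, cv2.THRESH_BINARY)[1]
    cv2.imshow("motion mask", thresh)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()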

Example 3: movement

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def movement(mat_1, mat_2):
    mat_1_gray     = cv2.cvtColor(mat_1.copy(), cv2.COLOR_BGR2GRAY)
    mat_1_gray     = cv2.blur(mat_1_gray, (blur1, blur1))
    _, mat_1_gray  = cv2.threshold(mat_1_gray, 100, 255, 0)
    mat_2_gray     = cv2.cvtColor(mat_2.copy(), cv2.COLOR_BGR2GRAY)
    mat_2_gray     = cv2.blur(mat_2_gray, (blur1, blur1))
    _, mat_2_gray  = cv2.threshold(mat_2_gray, 100, 255, 0)
    # XOR the two binarized frames: differing pixels mark motion
    mat_2_gray     = cv2.bitwise_xor(mat_1_gray, mat_2_gray)
    mat_2_gray     = cv2.blur(mat_2_gray, (blur2, blur2))
    _, mat_2_gray  = cv2.threshold(mat_2_gray, 70, 255, 0)
    mat_2_gray     = cv2.erode(mat_2_gray, np.ones((erodeval, erodeval)))
    mat_2_gray     = cv2.dilate(mat_2_gray, np.ones((4, 4)))
    # OpenCV 3.x returns (image, contours, hierarchy); on OpenCV 4.x use:
    # contours, _ = cv2.findContours(...)
    _, contours, __ = cv2.findContours(mat_2_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return len(contours) > 0  # True if any movement was detected


# Pedestrian recognition thread
Developer ID: PiSimo, Project: PiCamNN, Lines: 20, Source: picam.py
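movement reads blur1, blur2, and erodeval from module scope. A hedged driver loop, with assumed tuning values, could feed it consecutive frames like this:

import cv2
import numpy as np

blur1, blur2, erodeval = 9, 5, 7  # assumed values; picam.py defines its own

cap = cv2.VideoCapture(0)
ret, prev = cap.read()
while ret:
    ret, frame = cap.read()
    if not ret:
        break
    if movement(prev, frame):  # movement() as defined above
        print("movement detected")
    prev = frame
cap.release()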

Example 4: _augment

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def _augment(self, img, r):
        old_dtype = img.dtype

        if img.ndim == 3:
            if self.rgb is not None:
                m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY
                grey = cv2.cvtColor(img.astype('float32'), m)
                mean = np.mean(grey)
            else:
                mean = np.mean(img, axis=(0, 1), keepdims=True)
        else:
            mean = np.mean(img)

        img = img * r + mean * (1 - r)
        if self.clip or old_dtype == np.uint8:
            img = np.clip(img, 0, 255)
        return img.astype(old_dtype) 
Developer ID: tensorpack, Project: dataflow, Lines: 19, Source: imgproc.py
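_augment belongs to one of tensorpack's image-processing augmentors. Stripped of the class machinery, the underlying idea — blending each pixel toward the image mean, so r > 1 boosts contrast and r < 1 flattens it — can be sketched as a standalone function (an illustration, not the library's public API):

import cv2
import numpy as np

def adjust_contrast(img, r, use_grayscale_mean=True):
    # Blend pixels toward the mean: out = img * r + mean * (1 - r)
    old_dtype = img.dtype
    if use_grayscale_mean and img.ndim == 3:
        grey = cv2.cvtColor(img.astype("float32"), cv2.COLOR_BGR2GRAY)
        mean = np.mean(grey)
    else:
        mean = np.mean(img, axis=(0, 1), keepdims=True)
    out = img * r + mean * (1 - r)
    return np.clip(out, 0, 255).astype(old_dtype)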

Example 5: getPeakFeatures

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def getPeakFeatures():
    net = DecafNet()

    features = numpy.zeros((number_sequences,feature_length))
    labels = numpy.zeros((number_sequences,1))
    counter = 0
    # Maybe sort them
    for participant in os.listdir(os.path.join(data_dir,image_dir)):
        for sequence in os.listdir(os.path.join(data_dir,image_dir, participant)):
            if sequence != ".DS_Store":
                image_files = sorted(os.listdir(os.path.join(data_dir,image_dir, participant,sequence)))
                image_file = image_files[-1]
                print(counter, image_file)
                imarray = cv2.imread(os.path.join(data_dir,image_dir, participant,sequence,image_file))
                imarray = cv2.cvtColor(imarray,cv2.COLOR_BGR2GRAY)
                scores = net.classify(imarray, center_only=True)
                features[counter] = net.feature(feature_level)#.flatten()
                label_file = open(os.path.join(data_dir,label_dir, participant,sequence,image_file[:-4]+"_emotion.txt"))
                labels[counter] = eval(label_file.read())
                label_file.close()
                counter += 1

    numpy.save("featuresPeak5",features)
    numpy.save("labelsPeak5",labels) 
Developer ID: Zebreu, Project: ConvolutionalEmotion, Lines: 26, Source: emotionclassification.py

Example 6: test_motion

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def test_motion():
    image = cv2.imread("./imgs/image.jpeg")
    print(image.shape)

    detector = Detector_Motion()

    image2 = cv2.imread("./imgs/image_box.jpg")
    print(image2.shape)
    assert image.shape == image2.shape
    image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    image2 = cv2.GaussianBlur(image2, (21, 21), 0)
    detector.avg = image2.astype(float)

    output = detector.prediction(image)
    df = detector.filter_prediction(output, image)
    image = detector.draw_boxes(image, df)
    print(df)
    assert df.shape[0] == 1

    cv2.imwrite("./imgs/outputcv.jpg", image) 
Developer ID: cristianpb, Project: object-detection, Lines: 22, Source: test_detection.py

Example 7: live_undistort

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def live_undistort(camera, camera_matrix, distortion_coefficients):
    """ Using a given calibration matrix, display the distorted, undistorted, and cropped frame"""
    scaled_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
        camera_matrix, distortion_coefficients, camera.size, 1, camera.size
    )
    while True:
        ret, frame = camera.cap.read()
        assert ret
        distorted_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        undistorted_frame = cv2.undistort(
            distorted_frame, camera_matrix, distortion_coefficients, None, scaled_camera_matrix,
        )
        roi_x, roi_y, roi_w, roi_h = roi
        cropped_frame = undistorted_frame[roi_y : roi_y + roi_h, roi_x : roi_x + roi_w]
        cv2.imshow("distorted %s" % (distorted_frame.shape,), distorted_frame)
        cv2.imshow("undistorted %s" % (undistorted_frame.shape,), undistorted_frame)
        cv2.imshow("cropped %s" % (cropped_frame.shape,), cropped_frame)
        cv2.waitKey(10) 
Developer ID: notkarol, Project: derplearning, Lines: 20, Source: calibrate_camera.py
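The camera argument is expected to expose .cap (a cv2.VideoCapture) and .size (the frame dimensions passed to cv2.getOptimalNewCameraMatrix). A minimal stand-in consistent with that interface — hypothetical, since the project ships its own camera class:

import cv2

class Camera:
    """Hypothetical minimal wrapper matching the interface used above."""
    def __init__(self, index=0):
        self.cap = cv2.VideoCapture(index)
        width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.size = (width, height)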

Example 8: test_heatmaps

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def test_heatmaps(heatmaps, img, i):
    heatmaps = heatmaps.numpy()
    heatmaps = heatmaps[:, :64, :]
    heatmaps = heatmaps.transpose(1, 2, 0)
    print('heatmap inside shape is', heatmaps.shape)
    img = img.numpy()
    img = img.transpose(1, 2, 0)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    heatmaps = cv2.resize(heatmaps, (0, 0), fx=4, fy=4)
    for j in range(0, 16):
        heatmap = heatmaps[:, :, j]
        heatmap = heatmap.reshape((256, 256, 1))
        heatmapimg = np.array(heatmap * 255, dtype=np.uint8)
        heatmap = cv2.applyColorMap(heatmapimg, cv2.COLORMAP_JET)
        heatmap = heatmap / 255
        plt.imshow(img)
        plt.imshow(heatmap, alpha=0.5)
        plt.show()
        # plt.savefig('hmtestpadh36' + str(i) + js[j] + '.png')
Developer ID: Naman-ntc, Project: 3D-HourGlass-Network, Lines: 27, Source: my.py

Example 9: test

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def test():
	"""
	Read the pickle file on disk, apply undistortion to an image,
	and show the original and undistorted images side by side.
	"""
	print("Reading the pickle file...")
	pickle_file = open("./camera_cal.p", "rb")
	dist_pickle = pickle.load(pickle_file)
	mtx = dist_pickle["mtx"]
	dist = dist_pickle["dist"]
	pickle_file.close()

	print("Reading the sample image...")
	img = cv2.imread('corners_founded/corners_found13.jpg')
	img_size = (img.shape[1], img.shape[0])
	dst = cv2.undistort(img, mtx, dist, None, mtx)

	# dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
	# Visualize undistortion
	print("Visualizing the result...")
	f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
	ax1.imshow(img), ax1.set_title('Original Image', fontsize=15)
	ax2.imshow(dst), ax2.set_title('Undistorted Image', fontsize=15)
	plt.show()
Developer ID: ChengZhongShen, Project: Advanced_Lane_Lines, Lines: 26, Source: camera_calibration.py
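The camera_cal.p pickle is produced by the project's calibration step. A hedged sketch of the standard cv2.calibrateCamera flow that would yield the "mtx" and "dist" keys read above — objpoints and imgpoints are assumed to have been collected with cv2.findChessboardCorners:

import pickle
import cv2

def save_calibration(objpoints, imgpoints, img_size, path="./camera_cal.p"):
	# objpoints: 3-D chessboard corner coordinates; imgpoints: their 2-D
	# detections; img_size: (width, height) of the calibration images
	ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
		objpoints, imgpoints, img_size, None, None)
	with open(path, "wb") as f:
		pickle.dump({"mtx": mtx, "dist": dist}, f)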

Example 10: search

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def search(self, query_keypoints, query_descriptors):
		# Initialize the dictionary of results
		results = {}

		# Loop over the book cover images
		for path in self.cover_paths:
			# Load the cover image, convert it to grayscale, and extract its keypoints and descriptors
			cover = cv2.imread(path)
			gray = cv2.cvtColor(cover, cv2.COLOR_BGR2GRAY)
			(keypoints, descriptors) = self.descriptor.describe(gray)

			# Determine the number of matched, inlier keypoints, and update the results
			score = self.match(query_keypoints, query_descriptors, keypoints, descriptors)
			results[path] = score

		# If matches were found, sort them
		if len(results) > 0:
			results = sorted([(v, k) for (k, v) in results.items() if v > 0], reverse=True)

		# Return the results
		return results 
Developer ID: hsSam, Project: PracticalPythonAndOpenCV_CaseStudies, Lines: 23, Source: covermatcher.py

Example 11: get_face

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def get_face(detector, image, cpu=False):
    if cpu:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        try:
            box = detector(image)[0]
            x1 = box.left()
            y1 = box.top()
            x2 = box.right()
            y2 = box.bottom()
            return [x1, y1, x2, y2]
        except IndexError:  # detector found no faces
            return None
    else:
        image = cv2.resize(image, None, fx=0.5, fy=0.5)
        box = detector.detect_from_image(image)[0]
        if box is None:
            return None
        return (2*box[:4]).astype(int) 
Developer ID: kwea123, Project: VTuber_Unity, Lines: 20, Source: demo.py
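On the CPU path, the box.left()/.top()/.right()/.bottom() accessors match dlib's rectangle API, so the detector is presumably dlib's HOG-based frontal face detector. A hedged construction and call:

import cv2
import dlib

detector = dlib.get_frontal_face_detector()  # assumption: dlib on the CPU path

image = cv2.imread("face.jpg")  # placeholder path
print(get_face(detector, image, cpu=True))  # [x1, y1, x2, y2], or None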

Example 12: detect_face

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def detect_face(img_path, cc_path='../files/haarcascade_frontalface_default.xml'):
    """
    Detect the face from the image, return colored face
    """

    cc = cv2.CascadeClassifier(os.path.abspath(cc_path))
    img_path = os.path.abspath(img_path)
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = cc.detectMultiScale(gray, 1.3, 5)
    roi_color = None

    if len(faces) == 0:
        # logging.exception is meant for except blocks; plain error logging fits here
        logging.error(img_path + ': No face found')
    else:
        x, y, w, h = faces[0]
        _h, _w = compute_size(h, w)
        roi_color = img[y - _h:y + h + _h, x - _w:x + w + _w]

    return roi_color 
Developer ID: lehgtrung, Project: face-search, Lines: 23, Source: face_detect.py
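compute_size is an external helper; from its use it evidently returns vertical and horizontal margins for padding the face crop. A hypothetical stand-in that pads by a fixed fraction of the box size:

def compute_size(h, w, margin=0.25):
    # Hypothetical helper: margins proportional to the detected box
    return int(h * margin), int(w * margin)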

Example 13: find_blank_rows_h

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def find_blank_rows_h(image):

    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Binarize: ink pixels (dark) -> 1, background (light) -> 0
    im_bw = np.zeros(gray_image.shape)
    im_bw[gray_image > 127] = 0
    im_bw[gray_image <= 127] = 1

    # Ink count per row, then its running total (equivalent to np.cumsum)
    row_sum = np.sum(im_bw, axis=1)

    cum_sum = np.zeros(row_sum.shape)
    cum_sum[0] = row_sum[0]

    for i, s in enumerate(row_sum[1:]):  # s, not sum, to avoid shadowing the built-in
        cum_sum[i + 1] = cum_sum[i] + s

    blank_rows = []
    for i, _ in enumerate(cum_sum):
        if is_blank(cum_sum, i):
            blank_rows.append(i)

    return blank_rows

# check n last rows 
Developer ID: MaliParag, Project: ScanSSD, Lines: 27, Source: stitch_patches_page.py
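is_blank is defined elsewhere in ScanSSD; given that it is applied to the cumulative row sum, it presumably tests whether the ink count stays flat in a window around row i. A hypothetical sketch:

def is_blank(cum_sum, i, window=5):
    # Hypothetical helper: no dark pixels accumulate near row i
    lo = max(i - window, 0)
    hi = min(i + window, len(cum_sum) - 1)
    return cum_sum[hi] - cum_sum[lo] == 0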

Example 14: coherence_filter

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
    h, w = img.shape[:2]

    for i in range(iter_n):  # was xrange, which is Python 2 only
        print(i)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        m = gvv < 0

        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    print('done')
    return img 
Developer ID: makelove, Project: OpenCV-Python-Tutorial, Lines: 26, Source: coherence.py

Example 15: main

# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2GRAY [as alias]
def main():
	# Image and cascade paths
	imagePath = "test3.jpg"
	cascPath = "cascades/haarcascade_pedestrian.xml"

	pplCascade = cv2.CascadeClassifier(cascPath)
	image = cv2.imread(imagePath)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

	gray = normalize_grayimage(gray)

	pedestrians = pplCascade.detectMultiScale(
		gray,
		scaleFactor=1.2,
		minNeighbors=10,
		minSize=(32, 96),
		flags=cv2.CASCADE_SCALE_IMAGE  # cv2.cv.CV_HAAR_SCALE_IMAGE is OpenCV 2.x only
	)

	print("Found {0} ppl!".format(len(pedestrians)))

	# Draw a rectangle around each detected pedestrian
	for (x, y, w, h) in pedestrians:
		cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)

	cv2.imwrite("saida.jpg", image)
	cv2.imshow("Ppl found", image)
	cv2.waitKey(0)

	return 0
Developer ID: felipecorrea, Project: pedestrian-haar-based-detector, Lines: 32, Source: detect.py


Note: The cv2.COLOR_BGR2GRAY attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before using or redistributing the code; do not reproduce this article without permission.