

Python cv2.COLOR_BGR2RGBA Attribute Code Examples

This article collects typical usage examples of the cv2.COLOR_BGR2RGBA attribute in Python. If you are wondering what exactly cv2.COLOR_BGR2RGBA does, how to use it, or where to find usage examples, the hand-picked attribute code examples here may help. You can also explore further usage examples from the cv2 module that this attribute belongs to.


Seven code examples of the cv2.COLOR_BGR2RGBA attribute are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
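
Before diving into the examples, here is a minimal sketch of what the attribute does on its own (the file name sample.jpg is only a placeholder): cv2.COLOR_BGR2RGBA is passed to cv2.cvtColor to turn OpenCV's default 3-channel BGR image into a 4-channel RGBA image, with the alpha channel filled in as fully opaque. That RGBA layout is what the PIL/ImageTk display code in the examples below expects.

import cv2

# OpenCV loads images as 3-channel arrays in BGR channel order
bgr = cv2.imread('sample.jpg')  # placeholder path

# COLOR_BGR2RGBA swaps the R and B channels and appends an opaque alpha channel
rgba = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGBA)

print(bgr.shape, rgba.shape)  # e.g. (480, 640, 3) and (480, 640, 4)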

Example 1: getLicensePlateNumber

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGBA [as alias]
def getLicensePlateNumber(filer):
	try:
		js = api.recognize_file(filer, secret_key, country, recognize_vehicle=recognize_vehicle, state=state, return_image=return_image, topn=topn, prewarp=prewarp)

		js=js.to_dict()
		#js=list(str(js))
		X1=js['results'][0]['coordinates'][0]['x']
		Y1=js['results'][0]['coordinates'][0]['y']
		X2=js['results'][0]['coordinates'][2]['x']
		Y2=js['results'][0]['coordinates'][2]['y']
		img=cv2.imread(filer)
		rimg=img[Y1:Y2,X1:X2]
		frame3=rimg
		img3 = Image.fromarray(frame3)
		w,h=img3.size
		asprto=w/h
		frame3=cv2.resize(frame3,(150,int(150/asprto)))
		cv2image3 = cv2.cvtColor(frame3, cv2.COLOR_BGR2RGBA)
		img3 = Image.fromarray(cv2image3)
		imgtk3 = ImageTk.PhotoImage(image=img3)
		display4.imgtk = imgtk3  # keep a reference so Tkinter does not garbage-collect the image
		display4.configure(image=imgtk3)  # show the cropped plate image in display4
		display5.configure(text=js['results'][0]['plate'])
	except ApiException as e:
		print("Exception: \n", e)
Developer: ShreyAmbesh | Project: Traffic-Rule-Violation-Detection-System | Lines: 27 | Source: VehicleMoniter.py

Example 2: checkRedLightCrossed

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGBA [as alias]
def checkRedLightCrossed(img):
	global count
	for v in vehicles:
		if v.crossed==False and len(v.points)>=2:
			x1,y1=v.points[0]
			x2,y2=v.points[-1]
			if y1>yl3 and y2<yl3:
				count+=1
				v.crossed=True
				bimg=img[int(v.rect[1]):int(v.rect[1]+v.rect[3]), int(v.rect[0]):int(v.rect[0]+v.rect[2])]
				frame2=bimg
				img2 = Image.fromarray(frame2)
				w,h=img2.size
				asprto=w/h
				frame2=cv2.resize(frame2,(250,int(250/asprto)))
				cv2image2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGBA)
				img2 = Image.fromarray(cv2image2)
				imgtk2 = ImageTk.PhotoImage(image=img2)
				display2.imgtk = imgtk2  # keep a reference so Tkinter does not garbage-collect the image
				display2.configure(image=imgtk2)  # show the offending vehicle in display2
				#cv2.imshow('BROKE',bimg)
				name='Rule Breakers/culprit'+str(time.time())+'.jpg'
				cv2.imwrite(name,bimg)
				
				
				tstop = threading.Event()
				thread = threading.Thread(target=getLicensePlateNumber, args=(name,))
				thread.daemon = True
				thread.start()
				

				#cv2.imwrite('culprit.png',bimg)
	#display3.configure(text=count) 
Developer: ShreyAmbesh | Project: Traffic-Rule-Violation-Detection-System | Lines: 35 | Source: VehicleMoniter.py

Example 3: checkSpeed

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGBA [as alias]
def checkSpeed(ftime,img):
	for v in vehicles:
		if v.speedChecked==False and len(v.points)>=2:
			x1,y1=v.points[0]
			x2,y2=v.points[-1]
			if y2<yl1 and y2>yl3 and v.entered==False:
				v.enterTime=ftime
				v.entered=True
			elif y2<yl3 and y2>yl5 and v.exited==False:
				v.exitTime=ftime
				v.exited=True
				v.speedChecked=True
				speed=60/(v.exitTime-v.enterTime)
				print(speed)
				bimg=img[int(v.rect[1]):int(v.rect[1]+v.rect[3]), int(v.rect[0]):int(v.rect[0]+v.rect[2])]
				frame2=bimg
				img2 = Image.fromarray(frame2)
				w,h=img2.size
				asprto=w/h
				frame2=cv2.resize(frame2,(250,int(250/asprto)))
				cv2image2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGBA)
				img2 = Image.fromarray(cv2image2)
				imgtk2 = ImageTk.PhotoImage(image=img2)
				display2.imgtk = imgtk2  # keep a reference so Tkinter does not garbage-collect the image
				display2.configure(image=imgtk2)  # show the speeding vehicle in display2
				display3.configure(text=str(speed)[:5]+'Km/hr')
				if speed>60:
					
					#cv2.imshow('BROKE',bimg)
					name='Rule Breakers/culprit'+str(time.time())+'.jpg'
					cv2.imwrite(name,bimg)
					tstop = threading.Event()
					thread = threading.Thread(target=getLicensePlateNumber, args=(name,))
					thread.daemon = True
					thread.start() 
Developer: ShreyAmbesh | Project: Traffic-Rule-Violation-Detection-System | Lines: 37 | Source: VehicleMoniter.py

Example 4: convert_channel_from_bgr

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGBA [as alias]
def convert_channel_from_bgr(img, num_channels):
        if num_channels in [0, 1]:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            if num_channels == 1:
                img = img[..., np.newaxis]

            return img

        elif num_channels in [-1, 3]:  # BGR => RGB
            return cv2.cvtColor(img, cv2.COLOR_BGR2RGB, dst=img)

        elif num_channels == 4:  # BGR => RGBA
            return cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)

        raise ValueError("num_channels must be [-1, 0, 1, 3, 4]") 
Developer: sony | Project: nnabla | Lines: 17 | Source: cv2_backend.py
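
A quick usage sketch for the convert_channel_from_bgr helper from Example 4 (the file name sample.jpg is a placeholder, and calling the helper at module level is an illustrative assumption; in the nnabla code base it lives inside the cv2 backend):

import cv2

bgr = cv2.imread('sample.jpg')           # placeholder path, (h, w, 3) BGR array
rgba = convert_channel_from_bgr(bgr, 4)  # (h, w, 4) RGBA via cv2.COLOR_BGR2RGBA
gray = convert_channel_from_bgr(bgr, 1)  # (h, w, 1) single-channel grayscale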

Example 5: _build_final_image

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGBA [as alias]
def _build_final_image(self, image):
        """See upper class description.
        """
        if self._overlay_image:
            overlay = cv2.cvtColor(cv2.imread(self._overlay_image, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGBA)
            overlay, _, _ = self._image_resize_keep_ratio(overlay, self.width, self.height, True)

            # Fix the overlay. Why do we have to do this? If we don't, pixels are marked
            # as opaque when they shouldn't be. See:
            # https://www.pyimagesearch.com/2016/04/25/watermarking-images-with-opencv-and-python
            RR, GG, BB, A = cv2.split(overlay)
            RR = cv2.bitwise_and(RR, RR, mask=A)
            GG = cv2.bitwise_and(GG, GG, mask=A)
            BB = cv2.bitwise_and(BB, BB, mask=A)
            overlay = cv2.merge([RR, GG, BB, A])

            # Add an extra dimension to the image (i.e., the alpha transparency)
            if image.shape[2] == 3:
                image = cv2.cvtColor(image, cv2.COLOR_RGB2RGBA)

            # Now create a mask of overlay and create its inverse mask also
            img2gray = cv2.cvtColor(overlay, cv2.COLOR_RGB2GRAY)
            _ret, mask = cv2.threshold(img2gray, 30, 255, cv2.THRESH_BINARY)
            mask_inv = cv2.bitwise_not(mask)
            # Now black-out the area of overlay in ROI (ie image)
            img1_bg = cv2.bitwise_and(image, image, mask=mask_inv)
            # Take only region of overlay from overlay image
            img2_fg = cv2.bitwise_and(overlay, overlay, mask=mask)
            # Generate the main image
            image = cv2.add(img1_bg, img2_fg)
            # Remove alpha dimension
            image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)

        return Image.fromarray(image) 
Developer: pibooth | Project: pibooth | Lines: 36 | Source: factory.py

Example 6: extractSkin

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGBA [as alias]
def extractSkin(image):
    # Taking a copy of the image
    img =  image.copy()
    # Converting from BGR Colours Space to HSV
    img =  cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
  
    # Defining HSV thresholds
    lower_threshold = np.array([0, 48, 80], dtype=np.uint8)
    upper_threshold = np.array([20, 255, 255], dtype=np.uint8)
  
    # Single-channel mask, denoting the presence of colours within the specified thresholds
    skinMask = cv2.inRange(img,lower_threshold,upper_threshold)
  
    # Cleaning up mask using Gaussian Filter
    skinMask = cv2.GaussianBlur(skinMask,(3,3),0)

    # Extracting skin from the threshold mask
    skin  =  cv2.bitwise_and(img,img,mask=skinMask)
  
    # Converting the image back to BGR colour space
    img = cv2.cvtColor(skin,cv2.COLOR_HSV2BGR)

    # The BGR to RGBA conversion was observed to give a more appropriate colour tint than OpenCV's colormask options
    # The added alpha channel is meant to make black pixels transparent for overlapping (WIP)
    img_a = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)
    
    # Return the Skin image
    return img_a 
Developer: scalability4all | Project: voice-enabled-chatbot | Lines: 30 | Source: blue_filter.py

Example 7: main

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGBA [as alias]
def main(sess=sesser):
	'''global masterframe
	global started'''
	if True:
		fTime=time.time()
		_,image_np=cap.read(0)
		#image_np = imutils.resize(image_np, width=400)

		# Input and output Tensors for detection_graph are defined outside this function


		# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
		image_np_expanded = np.expand_dims(image_np, axis=0)
		# Actual detection.
		(boxes, scores, classes, num) = sess.run(
			[detection_boxes, detection_scores, detection_classes, num_detections],
			feed_dict={image_tensor: image_np_expanded})


		# Visualization of the results of a detection.
		img=image_np
		imgF,coords=vis_util.visualize_boxes_and_labels_on_image_array(
			image_np,
			np.squeeze(boxes),
			np.squeeze(classes).astype(np.int32),
			np.squeeze(scores),
			category_index,
			use_normalized_coordinates=True,
			line_thickness=2)

		matchVehicles(coords,im_width,im_height,imgF)
		checkRedLightCrossed(imgF)
		checkSpeed(fTime,img)
		for v in vehicles:
			if v.getTracking()==True:

				for p in v.getPoints():
					cv2.circle(image_np,p,3,(200,150,75),6)

			#print(ymin*im_height,xmin*im_width,ymax*im_height,xmax*im_width)
			#cv2.rectangle(image_np,(int(xmin*im_width),int(ymin*im_height)),(int(xmax*im_width),int(ymax*im_height)),(255,0,0),2)
		cv2.line(image_np, (int(xl1),int(yl1)), (int(xl2),int(yl2)), (0,255,0),3)
		cv2.line(image_np, (int(xl3),int(yl3)), (int(xl4),int(yl4)), (0,0,255),3)
		cv2.line(image_np, (int(xl5),int(yl5)), (int(xl6),int(yl6)), (255,0,0),3)
		VideoFileOutput.write(image_np)
		#print('yola')
		frame=cv2.resize(image_np,(1020,647))
		cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
		img = Image.fromarray(cv2image)
		imgtk = ImageTk.PhotoImage(image=img)
		display1.imgtk = imgtk #Shows frame for display 1
		display1.configure(image=imgtk)
	window.after(1, main) 
Developer: ShreyAmbesh | Project: Traffic-Rule-Violation-Detection-System | Lines: 55 | Source: VehicleMoniter.py


Note: The cv2.COLOR_BGR2RGBA attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; the copyright of the source code belongs to the original authors. Please refer to each project's License when distributing or using the code, and do not reproduce this article without permission.