This article collects typical usage examples of the cv2.COLOR_BGR2RGB attribute in Python. If you are wondering what cv2.COLOR_BGR2RGB is for or how to use it, the curated examples below may help. You can also explore further usage examples of the cv2 module.
Below are 15 code examples of cv2.COLOR_BGR2RGB, sorted by popularity by default.
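Before the examples, here is a minimal sketch of the common pattern (the file name 'input.jpg' is a hypothetical placeholder): OpenCV loads images in BGR channel order, so cv2.COLOR_BGR2RGB is passed to cv2.cvtColor before handing the image to libraries that expect RGB, such as Matplotlib, PIL, or most deep-learning frameworks.
import cv2

# cv2.imread returns pixel data in BGR order ('input.jpg' is a placeholder path)
img_bgr = cv2.imread('input.jpg')
# Reorder the channels to RGB before passing the image to RGB-based libraries
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)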
Example 1: worker
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def worker(input_q, output_q):
    # Load a (frozen) TensorFlow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        # Frames arrive from OpenCV in BGR order; convert to RGB for the detector.
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output_q.put(detect_objects(frame_rgb, sess, detection_graph))

    fps.stop()
    sess.close()
Example 2: worker
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def worker(input_q, output_q):
    fps = FPS().start()
    while True:
        myprint("update start ", time.time())
        fps.update()
        myprint("update end ", time.time())
        # global lock
        # if lock.acquire():
        #     lock.release()
        frame = input_q.get()
        myprint("out queue {} and input queue size {} after input_q get".format(output_q.qsize(), input_q.qsize()), time.time())
        myprint("out queue {} and input queue size {} after lock release".format(output_q.qsize(), input_q.qsize()), time.time())
        myprint("face process start", time.time())
        # frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        out_frame = face_process(frame)
        myprint("out queue {} and input queue size {}".format(output_q.qsize(), input_q.qsize()), time.time())
        output_q.put(out_frame)
        myprint("out queue {} and input queue size {}".format(output_q.qsize(), input_q.qsize()), time.time())

    fps.stop()
Example 3: __getitem__
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def __getitem__(self, index, to_tensor=True):
    fn = self.image_fns[index]
    # Read the image and convert from OpenCV's BGR to RGB.
    img = cv2.cvtColor(cv2.imread(fn, 1), cv2.COLOR_BGR2RGB)
    img, pad_top, pad_left = KuzushijiDataset.pad_to_ratio(img, ratio=1.5)
    h, w = img.shape[:2]
    # print(h / w, pad_left, pad_top)
    assert img.ndim == 3

    scaled_imgs = []
    for scale in self.scales:
        h_scale = int(scale * self.height)
        w_scale = int(scale * self.width)
        simg = cv2.resize(img, (w_scale, h_scale))

        if to_tensor:
            assert simg.ndim == 3, simg.ndim
            simg = simg.transpose((2, 0, 1))  # HWC -> CHW
            simg = th.from_numpy(simg.copy())

        scaled_imgs.append(simg)

    return scaled_imgs + [fn]
Example 4: loop2
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def loop2(self, text, w=1280, h=720):
    cap = cv2.VideoCapture(int(text))
    cap.set(6, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
    global capnum2
    capnum2 = int(text)
    cap.set(3, w)
    cap.set(4, h)
    global update2
    update2 = 1
    global shotmark2

    while update2 == 1:
        ret, frame = cap.read()
        if shotmark2 == 1:
            fn = self.lineEdit.text()
            name = "photo/2_" + fn + "video.jpg"
            if os.path.exists(name):
                name = "photo/2_" + fn + "video" + str(int(time.time())) + ".jpg"
            cv2.imwrite(name, frame)
            shotmark2 = 0
        # Convert to RGB for display in the GUI widget.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        self.original2_image.updateImage(frame)
        # cap.release()

    cv_img_rgb = np.zeros((700, 700, 3))
    self.original2_image.updateImage(cv_img_rgb)
Example 5: get_imgtk
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def get_imgtk(self, img_bgr):
    # Convert BGR to RGB so PIL/Tkinter render the colors correctly.
    img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
    im = Image.fromarray(img)
    imgtk = ImageTk.PhotoImage(image=im)
    wide = imgtk.width()
    high = imgtk.height()
    if wide > self.viewwide or high > self.viewhigh:
        wide_factor = self.viewwide / wide
        high_factor = self.viewhigh / high
        factor = min(wide_factor, high_factor)

        wide = int(wide * factor)
        if wide <= 0:
            wide = 1
        high = int(high * factor)
        if high <= 0:
            high = 1
        im = im.resize((wide, high), Image.ANTIALIAS)
        imgtk = ImageTk.PhotoImage(image=im)
    return imgtk
Example 6: show_roi
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def show_roi(self, r, roi, color):
    if r:
        roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
        roi = Image.fromarray(roi)
        self.imgtk_roi = ImageTk.PhotoImage(image=roi)
        self.roi_ctl.configure(image=self.imgtk_roi, state='enable')
        self.r_ctl.configure(text=str(r))
        self.update_time = time.time()
        try:
            c = self.color_transform[color]
            self.color_ctl.configure(text=c[0], background=c[1], state='enable')
        except KeyError:
            self.color_ctl.configure(state='disabled')
    elif self.update_time + 8 < time.time():
        self.roi_ctl.configure(state='disabled')
        self.r_ctl.configure(text="")
        self.color_ctl.configure(state='disabled')
Example 7: get_input
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def get_input(self, face_img):
    ret = self.detector.detect_face(face_img, det_type=self.args.det)
    if ret is None:
        return None
    bbox, points = ret
    if bbox.shape[0] == 0:
        return None
    bbox = bbox[0, 0:4]
    points = points[0, :].reshape((2, 5)).T
    # print(bbox)
    # print(points)
    nimg = face_preprocess.preprocess(face_img, bbox, points, image_size='112,112')
    # Convert the aligned crop to RGB and lay it out as CHW for the model.
    nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
    aligned = np.transpose(nimg, (2, 0, 1))
    input_blob = np.expand_dims(aligned, axis=0)
    data = mx.nd.array(input_blob)
    db = mx.io.DataBatch(data=(data,))
    return db
Example 8: get
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def get(self, img):
    assert self.param_file and self.model
    assert img.shape[2] == 3 and img.shape[0:2] == self.image_size
    # Convert to RGB, then to NCHW layout expected by the MXNet model.
    data = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    data = np.transpose(data, (2, 0, 1))
    data = np.expand_dims(data, axis=0)
    data = mx.nd.array(data)
    db = mx.io.DataBatch(data=(data,))
    self.model.forward(db, is_train=False)
    ret = self.model.get_outputs()[0].asnumpy()
    g = ret[:, 0:2].flatten()
    gender = np.argmax(g)
    a = ret[:, 2:202].reshape((100, 2))
    a = np.argmax(a, axis=1)
    age = int(sum(a))
    return gender, age
Example 9: convert_to_img
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def convert_to_img(self):
    def to_img(i):
        # The original snippet passed the cv2.COLOR_BGR2RGB constant to imwrite,
        # which is not an image. Here the flat pixel row is reshaped back to an
        # image and converted before saving (assumes square, 3-channel images).
        side = int(np.sqrt(raw_data_shape[1] // 3))
        img = self.raw_data[i].reshape((side, side, 3))
        cv2.imwrite('imgHQ%05d.png' % i, cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        return True

    raw_data_shape = self.raw_data.shape  # (N, H * W * C)

    try:
        assert os.path.exists(self.save_file_name)
    except AssertionError:
        print("[-] There's no %s :(" % self.save_file_name)
        print("[*] Make directory at %s... " % self.save_file_name)
        os.mkdir(self.save_file_name)

    ii = [i for i in range(raw_data_shape[0])]

    pool = Pool(self.n_threads)
    print(pool.map(to_img, ii))
Example 10: debug_visualize
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def debug_visualize(self, data_yx_min, data_yx_max, yx_min, yx_max, c, tp, path):
    canvas = cv2.imread(path)
    canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)  # RGB for Matplotlib display
    size = np.reshape(np.array(canvas.shape[:2], np.float32), [1, 2])
    data_yx_min, data_yx_max, yx_min, yx_max = (np.reshape(t.cpu().numpy(), [-1, 2]) * size for t in (data_yx_min, data_yx_max, yx_min, yx_max))
    canvas = self.draw_bbox(canvas, data_yx_min, data_yx_max, colors=['g'])
    canvas = self.draw_bbox(canvas, *(a[tp] for a in (yx_min, yx_max)), colors=['w'])
    fp = ~tp
    canvas = self.draw_bbox(canvas, *(a[fp] for a in (yx_min, yx_max)), colors=['k'])
    fig = plt.figure()
    ax = fig.gca()
    ax.imshow(canvas)
    ax.set_title('tp=%d, fp=%d' % (np.sum(tp), np.sum(fp)))
    fig.canvas.set_window_title(self.category[c] + ': ' + path)
    plt.show()
    plt.close(fig)
Example 11: detect
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def detect(self, img):
    img_h, img_w, _ = img.shape
    inputs = cv2.resize(img, (self.image_size, self.image_size))
    # Convert to RGB and normalize pixel values to [-1, 1].
    inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
    inputs = (inputs / 255.0) * 2.0 - 1.0
    inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))

    result = self.detect_from_cvmat(inputs)[0]

    # Scale box coordinates back to the original image size.
    for i in range(len(result)):
        result[i][1] *= (1.0 * img_w / self.image_size)
        result[i][2] *= (1.0 * img_h / self.image_size)
        result[i][3] *= (1.0 * img_w / self.image_size)
        result[i][4] *= (1.0 * img_h / self.image_size)

    return result
Example 12: describeAllJpegsInPath
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def describeAllJpegsInPath(self, path, batch_size, verbose=False):
    ''' returns a list of descriptors '''
    jpeg_paths = sorted(glob.glob(os.path.join(path, '*.jpg')))
    descs = []
    for batch_offset in range(0, len(jpeg_paths), batch_size):
        images = []
        for i in range(batch_offset, batch_offset + batch_size):
            if i == len(jpeg_paths):
                break
            if verbose:
                print('%d/%d' % (i, len(jpeg_paths)))
            if self.is_grayscale:
                image = cv2.imread(jpeg_paths[i], cv2.IMREAD_GRAYSCALE)
                images.append(np.expand_dims(
                    np.expand_dims(image, axis=0), axis=-1))
            else:
                image = cv2.imread(jpeg_paths[i])
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                images.append(np.expand_dims(image, axis=0))
        batch = np.concatenate(images, 0)
        descs = descs + list(self.sess.run(
            self.net_out, feed_dict={self.tf_batch: batch}))
    return descs
Example 13: imread_uint
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def imread_uint(path, n_channels=3):
    # input: path
    # output: HxWx3 (RGB or GGG), or HxWx1 (G)
    if n_channels == 1:
        img = cv2.imread(path, 0)  # cv2.IMREAD_GRAYSCALE
        img = np.expand_dims(img, axis=2)  # HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or G
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # GGG
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB
    return img
Example 14: show_who_in_image
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def show_who_in_image(self, path, get_face: bool = True, show: bool = True, turn_rgb: bool = True):
    min_im, image, all_frames = self.detect_which(path, get_face)
    for (confidance, who), frame in zip(min_im, all_frames):
        color = self.colors[who]
        x1, x2, y1, y2 = frame
        cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
        cv2.putText(image, f"{who}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA)  # -{round(float(confidance), 2)}
    if turn_rgb:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    if show:
        cv2.imshow("a", image)
        cv2.waitKey(0)
    return image
Example 15: show_who_in_image
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2RGB [as alias]
def show_who_in_image(self, path, get_face: bool = True, show: bool = True, turn_rgb: bool = True):
    min_im, image, all_frames = self.index_image(path, get_face)
    for (confidance, who), frame in zip(min_im, all_frames):
        try:
            color = self.colors[str(who)]
            x1, x2, y1, y2 = frame
            cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
            cv2.putText(image, f"id: {str(who)}- conf:{abs(round(float(confidance), 2))}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA)  # -{round(float(confidance), 2)}
        except KeyError:
            continue
    if turn_rgb:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    if show:
        cv2.imshow("a", image)
        cv2.waitKey(1)
    return image, min_im, all_frames