

Python cv2.WINDOW_NORMAL Attribute Code Examples

This article collects typical usage examples of the cv2.WINDOW_NORMAL attribute in Python. If you are wondering what cv2.WINDOW_NORMAL is for, how to use it, or want to see it in real code, the examples selected below should help. You can also explore further usage examples from the cv2 module.


The following presents 15 code examples of the cv2.WINDOW_NORMAL attribute, sorted by popularity by default.
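Before the examples, here is a minimal self-contained sketch (not taken from any of the projects below) of what cv2.WINDOW_NORMAL does: unlike the default cv2.WINDOW_AUTOSIZE, a window created with this flag can be resized by the user or programmatically with cv2.resizeWindow.

import numpy as np
import cv2

# Placeholder image; any loaded frame would work the same way.
img = np.zeros((480, 640, 3), dtype=np.uint8)

# WINDOW_NORMAL makes the window user-resizable and lets resizeWindow take effect.
cv2.namedWindow("resizable", cv2.WINDOW_NORMAL)
cv2.resizeWindow("resizable", 320, 240)
cv2.imshow("resizable", img)
cv2.waitKey(0)
cv2.destroyAllWindows()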

Example 1: createFigureAndSlider

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def createFigureAndSlider(name, state_dim):
    """
    Create a window for the latent space visualization, and another for the sliders that control it
    :param name: name of model (str)
    :param state_dim: (int)
    :return:
    """
    # opencv gui setup
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, 500, 500)
    cv2.namedWindow('slider for ' + name)
    # add a slider for each component of the latent space
    for i in range(state_dim):
        # the sliders MUST range from 0 to max, so we set max to 100 and start at 50
        # so that subtracting 50 and dividing by 10 gives [-5, 5] for each component
        cv2.createTrackbar(str(i), 'slider for ' + name, 50, 100, (lambda a: None)) 
Author: araffin, Project: srl-zoo, Lines: 18, Source: enjoy_latent.py
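The comments in the snippet above describe the slider mapping: a trackbar value in [0, 100] is shifted by 50 and divided by 10, giving a latent component in [-5, 5]. As an illustration only (this helper is hypothetical and not part of enjoy_latent.py), the readback could look like this:

# Hypothetical helper: read each slider and apply the (value - 50) / 10 mapping
# described above, yielding one latent component in [-5, 5] per dimension.
def read_latent_vector(name, state_dim):
    return [(cv2.getTrackbarPos(str(i), 'slider for ' + name) - 50) / 10.0
            for i in range(state_dim)]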

Example 2: run

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def run(self):
        window_name = "Olympe Streaming Example"
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        main_thread = next(
            filter(lambda t: t.name == "MainThread", threading.enumerate())
        )
        while main_thread.is_alive():
            with self.flush_queue_lock:
                try:
                    yuv_frame = self.frame_queue.get(timeout=0.01)
                except queue.Empty:
                    continue
                try:
                    self.show_yuv_frame(window_name, yuv_frame)
                except Exception:
                    # We have to keep popping frames from the queue even if
                    # we fail to show one of them
                    traceback.print_exc()
                finally:
                    # Don't forget to unref the yuv frame. We don't want to
                    # starve the video buffer pool
                    yuv_frame.unref()
        cv2.destroyWindow(window_name) 
Author: Parrot-Developers, Project: olympe, Lines: 25, Source: streaming.py

Example 3: prepare

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def prepare(self):
    if self.save_folder and not os.path.exists(self.save_folder):
      try:
        os.makedirs(self.save_folder)
      except OSError:
        assert os.path.exists(self.save_folder),\
            "Error creating "+self.save_folder
    self.cam = Camera.classes[self.camera]()
    self.cam.open(**self.cam_kwargs)
    config = DISConfig(self.cam)
    config.main()
    self.bbox = config.box
    t,img0 = self.cam.get_image()
    self.correl = DIS(img0,bbox=self.bbox)
    if self.show_image:
      try:
        flags = cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO
      except AttributeError:
        flags = cv2.WINDOW_NORMAL
      cv2.namedWindow("DISCorrel",flags)
    self.loops = 0
    self.last_fps_print = 0
    self.last_fps_loops = 0 
Author: LaboratoireMecaniqueLille, Project: crappy, Lines: 25, Source: discorrel.py

Example 4: prepare

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def prepare(self):
    if self.save_folder and not os.path.exists(self.save_folder):
      try:
        os.makedirs(self.save_folder)
      except OSError:
        assert os.path.exists(self.save_folder),\
            "Error creating "+self.save_folder
    self.cam = Camera.classes[self.camera]()
    self.cam.open(**self.cam_kwargs)
    self.ve = VE(**self.ve_kwargs)
    config = VE_config(self.cam,self.ve)
    config.main()
    self.ve.start_tracking()
    if self.show_image:
      try:
        flags = cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO
      except AttributeError:
        flags = cv2.WINDOW_NORMAL
      cv2.namedWindow("Videoextenso",flags)
    self.loops = 0
    self.last_fps_print = 0
    self.last_fps_loops = 0 
Author: LaboratoireMecaniqueLille, Project: crappy, Lines: 24, Source: videoExtenso.py

Example 5: _init_trackbars

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def _init_trackbars(self):
        trackbars_window_name = "hsv settings"
        cv2.namedWindow(trackbars_window_name, cv2.WINDOW_NORMAL)

        # HSV Lower Bound
        h_min_trackbar = _Trackbar("H min", trackbars_window_name, 0, 255)
        s_min_trackbar = _Trackbar("S min", trackbars_window_name, 0, 255)
        v_min_trackbar = _Trackbar("V min", trackbars_window_name, 0, 255)

        # HSV Upper Bound
        h_max_trackbar = _Trackbar("H max", trackbars_window_name, 255, 255)
        s_max_trackbar = _Trackbar("S max", trackbars_window_name, 255, 255)
        v_max_trackbar = _Trackbar("V max", trackbars_window_name, 255, 255)

        # Kernel for morphology
        kernel_x = _Trackbar("kernel x", trackbars_window_name, 0, 30)
        kernel_y = _Trackbar("kernel y", trackbars_window_name, 0, 30)

        self._trackbars = [h_min_trackbar, s_min_trackbar, v_min_trackbar, h_max_trackbar, s_max_trackbar,
                           v_max_trackbar, kernel_x, kernel_y] 
Author: gaborvecsei, Project: Color-Tracker, Lines: 22, Source: color_range_detector.py
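The _Trackbar class used above is defined elsewhere in the Color-Tracker project and is not included in this snippet. Purely as an illustration, assuming the constructor signature implied by the calls above (name, window name, initial value, maximum value), a minimal wrapper around cv2.createTrackbar might look like this:

class _Trackbar:
    """Hypothetical minimal wrapper; signature inferred from the usage above."""

    def __init__(self, name, window_name, initial_value, max_value):
        self.name = name
        self.window_name = window_name
        # OpenCV requires a callback; a no-op lambda is enough when values are polled.
        cv2.createTrackbar(name, window_name, initial_value, max_value, lambda value: None)

    @property
    def value(self):
        # Poll the current slider position when building the HSV mask.
        return cv2.getTrackbarPos(self.name, self.window_name)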

Example 6: cvCaptureVideo

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)

    if not capture.isOpened():
        raise IOError("Failed to open the camera")

    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)

    while True:
        ret, image = capture.read()

        if not ret:
            continue

        cv2.imshow("Capture", image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()


# Webcam capture and display using Matplotlib
Author: tody411, Project: PyIntroduction, Lines: 26, Source: video_capture.py
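The trailing comment suggests the source file continues with a Matplotlib-based capture function that is not reproduced here. As a hedged sketch only (not the original code from PyIntroduction), such a loop might look like this:

import cv2
import matplotlib.pyplot as plt

def matplotCaptureVideo():
    capture = cv2.VideoCapture(0)
    if not capture.isOpened():
        raise IOError("Failed to open the camera")

    fig, ax = plt.subplots()
    image_handle = None

    # Keep reading frames until the user closes the figure window.
    while plt.fignum_exists(fig.number):
        ret, frame = capture.read()
        if not ret:
            continue
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # Matplotlib expects RGB
        if image_handle is None:
            image_handle = ax.imshow(rgb)
        else:
            image_handle.set_data(rgb)
        plt.pause(0.01)  # let the GUI event loop redraw

    capture.release()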

Example 7: main

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def main():
    image = data.astronaut()

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", image)
    cv2.waitKey(TIME_PER_STEP)

    # for value in cycle(np.arange(-255, 255, VAL_PER_STEP)):
    for value in np.arange(-255, 255, VAL_PER_STEP):
        aug = iaa.AddToHueAndSaturation(value=value)
        img_aug = aug.augment_image(image)
        img_aug = iaa.pad(img_aug, bottom=40)
        img_aug = ia.draw_text(img_aug, x=0, y=img_aug.shape[0]-38, text="value=%d" % (value,), size=30)

        cv2.imshow("aug", img_aug)
        cv2.waitKey(TIME_PER_STEP)

    images_aug = iaa.AddToHueAndSaturation(value=(-255, 255), per_channel=True).augment_images([image] * 64)
    ia.imshow(ia.draw_grid(images_aug))

    image = ia.quokka_square((128, 128))
    images_aug = []
    images_aug.extend(iaa.AddToHue().augment_images([image] * 10))
    images_aug.extend(iaa.AddToSaturation().augment_images([image] * 10))
    ia.imshow(ia.draw_grid(images_aug, rows=2)) 
Author: aleju, Project: imgaug, Lines: 27, Source: check_add_to_hue_and_saturation.py

Example 8: main

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def main():
    image = data.astronaut()[..., ::-1]  # rgb2bgr
    print(image.shape)

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", image)
    cv2.waitKey(TIME_PER_STEP)

    for n_segments in cycle(reversed(np.arange(1, 200, SEGMENTS_PER_STEP))):
        aug = iaa.Superpixels(p_replace=0.75, n_segments=n_segments)
        time_start = time.time()
        img_aug = aug.augment_image(image)
        print("augmented %d in %.4fs" % (n_segments, time.time() - time_start))
        img_aug = ia.draw_text(img_aug, x=5, y=5, text="%d" % (n_segments,))

        cv2.imshow("aug", img_aug)
        cv2.waitKey(TIME_PER_STEP) 
Author: aleju, Project: imgaug, Lines: 19, Source: check_superpixels.py

Example 9: main

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def main():
    image = data.astronaut()

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", image)
    cv2.waitKey(TIME_PER_STEP)

    height, width = image.shape[0], image.shape[1]
    center_x = width // 2
    center_y = height // 2
    r = int(min(image.shape[0], image.shape[1]) / 3)

    for deg in cycle(np.arange(0, 360, DEG_PER_STEP)):
        rad = np.deg2rad(deg-90)
        point_x = int(center_x + r * np.cos(rad))
        point_y = int(center_y + r * np.sin(rad))

        direction = deg / 360
        aug = iaa.DirectedEdgeDetect(alpha=1.0, direction=direction)
        img_aug = aug.augment_image(image)
        img_aug[point_y-POINT_SIZE:point_y+POINT_SIZE+1, point_x-POINT_SIZE:point_x+POINT_SIZE+1, :] =\
            np.array([0, 255, 0])

        cv2.imshow("aug", img_aug)
        cv2.waitKey(TIME_PER_STEP) 
Author: aleju, Project: imgaug, Lines: 27, Source: check_directed_edge_detect.py

Example 10: main_WithColorspace

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def main_WithColorspace():
    image = data.astronaut()
    print("image shape:", image.shape)

    aug = WithColorspace(
        from_colorspace="RGB",
        to_colorspace="HSV",
        children=WithChannels(0, Add(50))
    )

    aug_no_colorspace = WithChannels(0, Add(50))

    img_show = np.hstack([
        image,
        aug.augment_image(image),
        aug_no_colorspace.augment_image(image)
    ])

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", img_show[..., ::-1])
    cv2.waitKey(TIME_PER_STEP) 
Author: liuguiyangnwpu, Project: DL.EyeSight, Lines: 23, Source: check_color.py

Example 11: main

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def main():
    signal.signal(signal.SIGINT, signal_handler)
    try:
        drone.moveCamera(tilt=-50, pan=0)
        drone.videoEnable()

        cap = cv2.VideoCapture('./bebop.sdp')

        # Create the window with cv2.WINDOW_NORMAL before the first imshow;
        # otherwise imshow creates it with the default AUTOSIZE flag.
        cv2.namedWindow('img', cv2.WINDOW_NORMAL)

        while True:
            ret, img = cap.read()
            if ret:
                cv2.imshow('img', img)
                cv2.waitKey(1)

            drone.update()

        sys.exit(0)
    except TypeError:
        pass 
Author: robotika, Project: katarina, Lines: 22, Source: stream.py

Example 12: main

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create the generator
    generator = create_generator(args)

    # create the display window
    cv2.namedWindow('Image', cv2.WINDOW_NORMAL)

    if args.loop:
        while run(generator, args):
            pass
    else:
        run(generator, args) 
Author: i-pan, Project: kaggle-rsna18, Lines: 19, Source: debug.py

Example 13: demo

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def demo():
    import os
    from vizer.draw import draw_boxes

    yolo = YOLOv3("cfg/yolo_v3.cfg", "weight/yolov3.weights", "cfg/coco.names")
    print("yolo.size =", yolo.size)
    root = "./demo"
    resdir = os.path.join(root, "results")
    os.makedirs(resdir, exist_ok=True)
    files = [os.path.join(root, file) for file in os.listdir(root) if file.endswith('.jpg')]
    files.sort()
    for filename in files:
        img = cv2.imread(filename)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        bbox, cls_conf, cls_ids = yolo(img)

        if bbox is not None:
            img = draw_boxes(img, bbox, cls_ids, cls_conf, class_name_map=yolo.class_names)
        # save results
        cv2.imwrite(os.path.join(resdir, os.path.basename(filename)), img[:, :, (2, 1, 0)])
        # imshow
        # cv2.namedWindow("yolo", cv2.WINDOW_NORMAL)
        # cv2.resizeWindow("yolo", 600,600)
        # cv2.imshow("yolo",res[:,:,(2,1,0)])
        # cv2.waitKey(0) 
Author: ZQPei, Project: deep_sort_pytorch, Lines: 27, Source: detector.py

Example 14: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def __init__(self, cfg, args, video_path):
        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!", UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            self.vdo = cv2.VideoCapture()
        self.detector = build_detector(cfg, use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names 
Author: ZQPei, Project: deep_sort_pytorch, Lines: 24, Source: yolov3_deepsort.py

Example 15: show_img_cb

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_NORMAL [as alias]
def show_img_cb(self, event):
    try:
        cv2.namedWindow("RGB_Image", cv2.WINDOW_NORMAL)
        cv2.moveWindow("RGB_Image", 25, 75)

        cv2.namedWindow("Processed_Image", cv2.WINDOW_NORMAL)
        cv2.moveWindow("Processed_Image", 500, 75)

        # And one for the depth image
        cv2.namedWindow("Depth_Image", cv2.WINDOW_NORMAL)
        cv2.moveWindow("Depth_Image", 950, 75)

        cv2.imshow("RGB_Image", self.frame)
        cv2.imshow("Processed_Image", self.display_image)
        cv2.imshow("Depth_Image", self.depth_display_image)
        cv2.waitKey(3)
    except Exception:
        pass
Author: PacktPublishing, Project: Learning-Robotics-using-Python-Second-Edition, Lines: 23, Source: cv_bridge_demo.py


Note: The cv2.WINDOW_NORMAL attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code; do not reproduce without permission.