

Python cv2.cvtColor Method Code Examples

This article collects and summarizes typical usage examples of the cv2.cv2.cvtColor method in Python. If you have been wondering how exactly to use cv2.cvtColor, how to call it, or what working examples look like, the selected code examples below may help. You can also explore further usage examples of the cv2.cv2 module the method belongs to.


The following presents 7 code examples of the cv2.cvtColor method, sorted by popularity by default.
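Before the project examples, here is a minimal, self-contained sketch of the basic call pattern (it is not taken from any of the projects below; the dummy array is only for illustration):

import numpy as np
from cv2 import cv2  # the examples below import OpenCV this way

# A dummy 100x100 BGR image; in practice this would come from cv2.imread() or a video capture.
bgr = np.zeros((100, 100, 3), dtype=np.uint8)

rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)    # reorder channels, e.g. before displaying with matplotlib
gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)  # collapse to a single channel
print(rgb.shape, gray.shape)  # (100, 100, 3) (100, 100)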

Example 1: show

# Required module import: from cv2 import cv2 [as alias]
# Or: from cv2.cv2 import cvtColor [as alias]
def show(self):

        self.fig, self.ax = plt.subplots(1, figsize=(11, 8.5))
        _, frame = self.capture.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        self.ax.imshow(frame)

        toggle_selector.RS = RectangleSelector(
            self.ax, self.line_select_callback,
            drawtype='box', useblit=True,
            button=[1], minspanx=5, minspany=5,
            spancoords='pixels', interactive=True,
        )
        self.fig.canvas.mpl_connect('key_press_event', toggle_selector)
        self.fig.canvas.mpl_connect('key_press_event', self.onkeypress)
        plt.tight_layout()
        plt.show()


    # The keybindings attached to the LabelPlot. 
Developer ID: jpnaterer, Project: smashscan, Lines of code: 22, Source file: label.py

Example 2: show_frame

# Required module import: from cv2 import cv2 [as alias]
# Or: from cv2.cv2 import cvtColor [as alias]
def show_frame(self):
        self.capture.set(cv2.CAP_PROP_POS_FRAMES, self.current_frame_num)
        _, frame = self.capture.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        self.ax.imshow(frame)

        if self.is_recording:
            current_width = self.br_record[0] - self.tl_record[0]
            current_height = self.br_record[1] - self.tl_record[1]
            self.rect_patch = patches.Rectangle(self.tl_record,
                current_width, current_height,
                linewidth=1, edgecolor='r', facecolor='none')
            self.ax.add_patch(self.rect_patch)

        plt.draw()


    # Function called when left mouse button is clicked and released. 
Developer ID: jpnaterer, Project: smashscan, Lines of code: 20, Source file: label.py
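Examples 1 and 2 convert each captured frame from OpenCV's native BGR channel order to RGB before handing it to matplotlib, because Axes.imshow interprets a 3-channel array as RGB; without the conversion the red and blue channels appear swapped. A stripped-down sketch of that pattern outside the labeling class (the video file name is hypothetical):

import matplotlib.pyplot as plt
from cv2 import cv2

capture = cv2.VideoCapture('match.mp4')  # hypothetical input video
ok, frame = capture.read()
if ok:
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # BGR -> RGB for matplotlib
    plt.imshow(frame_rgb)
    plt.show()
capture.release()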

Example 3: _morphological_process

# Required module import: from cv2 import cv2 [as alias]
# Or: from cv2.cv2 import cvtColor [as alias]
def _morphological_process(image, kernel_size=5):
        """

        :param image:
        :param kernel_size:
        :return:
        """
        if image.dtype != np.uint8:
            image = np.array(image, np.uint8)
        if len(image.shape) == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size))

        # the close operation fills small holes
        closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=1)

        return closing 
Developer ID: stesha2016, Project: lanenet-enet-hnet, Lines of code: 20, Source file: lanenet_postprocess.py
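Here cvtColor is used defensively: if the input still has three channels, it is collapsed to grayscale so that the closing operates on a single-channel image. A rough usage sketch (the mask below is synthetic, not from lanenet_postprocess.py):

import numpy as np
from cv2 import cv2

# Synthetic 3-channel mask with a small hole that the close operation should fill.
mask = np.zeros((64, 64, 3), dtype=np.uint8)
mask[20:40, 20:40] = 255
mask[29:31, 29:31] = 0

closed = _morphological_process(mask, kernel_size=5)
print(closed.shape, closed.dtype)  # (64, 64) uint8 -- single channel after the BGR2GRAY conversion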

Example 4: execute

# Required module import: from cv2 import cv2 [as alias]
# Or: from cv2.cv2 import cvtColor [as alias]
def execute(self,data,batch_size):
          sess=self.output['sess']
          x=self.output['x']
          y_=self.output['y_']
          decoder = data_utils.TextFeatureIO()
          ret=[]
          for i in range(batch_size):
              image = Image.open(data[i])
              image = cv2.cvtColor(np.asarray(image),cv2.COLOR_RGB2BGR)
              image = cv2.resize(image, (config.cfg.TRAIN.width, 32))
              image = np.expand_dims(image, axis=0).astype(np.float32)
              preds = sess.run(y_, feed_dict={x:image})
              preds = decoder.writer.sparse_tensor_to_str(preds[0])[0]+'\n'
              ret.append(preds)
          return ret 
Developer ID: ucloud, Project: uai-sdk, Lines of code: 17, Source file: ocr_inference.py

Example 5: execute

# Required module import: from cv2 import cv2 [as alias]
# Or: from cv2.cv2 import cvtColor [as alias]
def execute(self,data,batch_size):
          sess=self.output['sess']
          x=self.output['x']
          y=self.output['y']
          decoder = data_utils.TextFeatureIO()
          ret=[]
          for i in range(batch_size):
              image = Image.open(data[i])
              image = cv2.cvtColor(np.asarray(image),cv2.COLOR_RGB2BGR)
              image = cv2.resize(image, (100, 32))
              image = np.expand_dims(image, axis=0).astype(np.float32)
              preds = sess.run(y, feed_dict={x:image})
              preds = decoder.writer.sparse_tensor_to_str(preds[0])[0]+'\n'
              ret.append(preds)
          return ret 
Developer ID: ucloud, Project: uai-sdk, Lines of code: 17, Source file: inference.py
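Examples 4 and 5 go in the other direction: Image.open yields an RGB image, so np.asarray produces an RGB array, and cv2.COLOR_RGB2BGR rearranges it into the BGR order that the OpenCV-based preprocessing and the model expect. The conversion step on its own, as a rough sketch (the file name is hypothetical):

import numpy as np
from PIL import Image
from cv2 import cv2

pil_image = Image.open('sample.png').convert('RGB')         # hypothetical input file
bgr = cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2BGR)
bgr = cv2.resize(bgr, (100, 32))                            # (width, height) expected by the recognizer
batch = np.expand_dims(bgr, axis=0).astype(np.float32)      # shape (1, 32, 100, 3)
print(batch.shape)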

Example 6: _connect_components_analysis

# Required module import: from cv2 import cv2 [as alias]
# Or: from cv2.cv2 import cvtColor [as alias]
def _connect_components_analysis(image):
        """

        :param image:
        :return:
        """
        if len(image.shape) == 3:
            gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        else:
            gray_image = image

        return cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S) 
Developer ID: stesha2016, Project: lanenet-enet-hnet, Lines of code: 14, Source file: lanenet_postprocess.py
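cv2.connectedComponentsWithStats requires a single-channel 8-bit input, which is why a 3-channel image is routed through cv2.COLOR_BGR2GRAY first. A small sketch of consuming its four return values (the mask is synthetic):

import numpy as np
from cv2 import cv2

mask = np.zeros((64, 64), dtype=np.uint8)
mask[5:15, 5:15] = 255
mask[30:50, 30:50] = 255

num_labels, labels, stats, centroids = _connect_components_analysis(mask)
# Label 0 is the background, so two blobs give num_labels == 3.
for label in range(1, num_labels):
    x, y, w, h, area = stats[label]
    print(label, (x, y, w, h), area, centroids[label])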

Example 7: __whatlike_filter_image

# Required module import: from cv2 import cv2 [as alias]
# Or: from cv2.cv2 import cvtColor [as alias]
def __whatlike_filter_image(self, image):
        """
        Apply the weighted hat-like filter to a single image.
        :param image:
        :return:
        """
        if image is None:
            raise ValueError('Image data is invalid')
        if image.shape[2] == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        assert image.shape[0] == image.shape[1] == self.__cfg.ROI.TOP_CROP_HEIGHT

        # initialize the weight hat like filter
        whatlikefilter = filter_util.WHatLikeFilter([self.__cfg.TEST.HAT_LIKE_FILTER_WINDOW_HEIGHT,
                                                     self.__cfg.TEST.HAT_LIKE_FILTER_WINDOW_WIDTH])

        # set the input tensor
        input_tensor = tf.placeholder(dtype=tf.float32, shape=[1, self.__cfg.ROI.TOP_CROP_WIDTH,
                                                               self.__cfg.ROI.TOP_CROP_WIDTH, 1], name='Input_Image')
        input_image = image[np.newaxis, :, :, np.newaxis]

        # set sess config
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = self.__cfg.TEST.GPU_MEMORY_FRACTION
        config.gpu_options.allow_growth = self.__cfg.TEST.TF_ALLOW_GROWTH

        sess = tf.Session(config=config)

        with sess.as_default():

            init = tf.global_variables_initializer()
            sess.run(init)

            filter_result = sess.run(whatlikefilter.filter(img=input_tensor), feed_dict={input_tensor: input_image})

        return filter_result 
Developer ID: MaybeShewill-CV, Project: DVCNN_Lane_Detection, Lines of code: 39, Source file: extract_roi.py
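One detail worth noting: this example guards the conversion with image.shape[2] == 3, which assumes a 3-dimensional array and raises an IndexError for an already-grayscale 2-D input, whereas Examples 3 and 6 use the more defensive len(image.shape) == 3 check. A tiny sketch of the safer variant (synthetic arrays only):

import numpy as np
from cv2 import cv2

def to_gray(image):
    # Safe for both 2-D (already grayscale) and 3-D (BGR) inputs.
    if len(image.shape) == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return image

color = np.zeros((32, 32, 3), dtype=np.uint8)
gray = np.zeros((32, 32), dtype=np.uint8)
print(to_gray(color).shape, to_gray(gray).shape)  # (32, 32) (32, 32)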


Note: The cv2.cv2.cvtColor method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and the source code copyright remains with the original authors; please refer to each project's license before distributing or using the code. Do not reproduce without permission.