

Python cv2.imdecode Method Code Examples

This article collects typical usage examples of the cv2.imdecode method in Python. If you are wondering what cv2.imdecode does, how to call it, or what real-world uses look like, the hand-picked code examples below should help. You can also explore further usage examples from the cv2 module to which the method belongs.


A total of 15 code examples of the cv2.imdecode method are shown below, sorted by popularity by default.
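Before the collected examples, here is a minimal sketch of the typical cv2.imdecode call: raw encoded bytes (read here from a hypothetical local JPEG) are wrapped in a 1-D uint8 array and decoded into a BGR image. Note that np.fromstring, which several of the examples below use for this step, is deprecated for binary data; np.frombuffer is the current equivalent.

import cv2
import numpy as np

# Read the encoded bytes of an image file; the path is hypothetical.
with open('example.jpg', 'rb') as f:
    raw = f.read()

# Wrap the bytes in a 1-D uint8 array and let OpenCV decode them.
buf = np.frombuffer(raw, dtype=np.uint8)
img = cv2.imdecode(buf, cv2.IMREAD_COLOR)  # H x W x 3 BGR ndarray, or None if decoding fails

if img is None:
    raise ValueError('cv2.imdecode could not decode the buffer')
print(img.shape)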

Example 1: image

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def image(self, captcha_str):
        """
        Generate a greyscale captcha image representing number string

        Parameters
        ----------
        captcha_str: str
            string of characters for the captcha image

        Returns
        -------
        numpy.ndarray
            Generated greyscale image in np.ndarray float type with values normalized to [0, 1]
        """
        img = self.captcha.generate(captcha_str)
        img = np.fromstring(img.getvalue(), dtype='uint8')
        img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (self.h, self.w))
        img = img.transpose(1, 0)
        img = np.multiply(img, 1 / 255.0)
        return img 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 23, Source: captcha_generator.py

Example 2: frames

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def frames():
        with PiCamera() as camera:
            camera.rotation = int(str(os.environ['CAMERA_ROTATION']))
            stream = io.BytesIO()
            for _ in camera.capture_continuous(stream, 'jpeg',
                                               use_video_port=True):
                # return current frame
                stream.seek(0)
                _stream = stream.getvalue()
                data = np.fromstring(_stream, dtype=np.uint8)
                img = cv2.imdecode(data, 1)
                yield img

                # reset stream for next frame
                stream.seek(0)
                stream.truncate() 
Developer: cristianpb, Project: object-detection, Lines: 18, Source: camera_pi.py

Example 3: imread

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def imread(filename, flags=cv2.IMREAD_COLOR):
    global _im_zfile
    path = filename
    pos_at = path.find('@')
    if pos_at == -1:
        print("character '@' is not found from the given path '%s'"%(path))
        assert 0
    path_zip = path[0: pos_at]
    path_img = path[pos_at + 2:]
    if not os.path.isfile(path_zip):
        print("zip file '%s' is not found"%(path_zip))
        assert 0
    for i in range(len(_im_zfile)):
        if _im_zfile[i]['path'] == path_zip:
            data = _im_zfile[i]['zipfile'].read(path_img)
            return cv2.imdecode(np.frombuffer(data, np.uint8), flags)

    _im_zfile.append({
        'path': path_zip,
        'zipfile': zipfile.ZipFile(path_zip, 'r')
    })
    data = _im_zfile[-1]['zipfile'].read(path_img)

    return cv2.imdecode(np.frombuffer(data, np.uint8), flags) 
Developer: facebookresearch, Project: PoseWarper, Lines: 26, Source: zipreader.py
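A hedged usage sketch for the zip-aware imread above: judging from the slice path[pos_at + 2:], the expected path format is '<archive>.zip@/<member path>'; the archive and member names here are hypothetical.

# Hypothetical call; images.zip must exist and contain train/000001.jpg.
img = imread('/data/images.zip@/train/000001.jpg', cv2.IMREAD_COLOR)
print(img.shape)  # decoded BGR ndarray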

Example 4: imdecode

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def imdecode(str_img, flag=1):
    """Decode image from str buffer.
    Wrapper for cv2.imdecode that uses mx.nd.NDArray

    Parameters
    ----------
    str_img : str
        str buffer read from image file
    flag : int
        same as flag for cv2.imdecode
    Returns
    -------
    img : NDArray
        decoded image in (width, height, channels)
        with BGR color channel order
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXCVImdecode(ctypes.c_char_p(str_img),
                                 mx_uint(len(str_img)),
                                 flag, ctypes.byref(hdl)))
    return mx.nd.NDArray(hdl) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 23, Source: opencv.py
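A minimal hedged usage sketch for the MXNet imdecode wrapper above; it assumes the surrounding module's ctypes helpers (_LIB, check_call, NDArrayHandle, mx_uint) are available as in the source file, and the file path is hypothetical.

# Read raw JPEG bytes and decode them into an mx.nd.NDArray (BGR channel order).
with open('example.jpg', 'rb') as f:
    buf = f.read()
nd_img = imdecode(buf, flag=1)
print(nd_img.shape)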

Example 5: next

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def next(self):
        """Move iterator position forward"""
        batch = mx.nd.zeros((self.batch_size, self.size[1], self.size[0], 3))
        i = self.cur
        for i in range(self.cur, min(len(self.list), self.cur+self.batch_size)):
            str_img = open(self.root + self.list[i] + '.jpg', 'rb').read()
            img = imdecode(str_img, 1)
            img, _ = random_crop(img, self.size)
            batch[i - self.cur] = img
        batch = mx.nd.transpose(batch, axes=(0, 3, 1, 2))
        ret = mx.io.DataBatch(data=[batch],
                              label=[],
                              pad=self.batch_size-(i-self.cur),
                              index=None)
        self.cur = i
        return ret 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 18, Source: opencv.py

Example 6: test_lmdb_train

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def test_lmdb_train(db, augs, batch):
    ds = LMDBData(db, shuffle=False)
    ds = LocallyShuffleData(ds, 50000)
    ds = MultiProcessRunner(ds, 5000, 1)
    return ds

    ds = LMDBDataPoint(ds)

    def f(x):
        return cv2.imdecode(x, cv2.IMREAD_COLOR)
    ds = MapDataComponent(ds, f, 0)
    ds = AugmentImageComponent(ds, augs)

    ds = BatchData(ds, batch, use_list=True)
    # ds = PlasmaPutData(ds)
    ds = MultiProcessRunnerZMQ(ds, 40, hwm=80)
    # ds = PlasmaGetData(ds)
    return ds 
Developer: tensorpack, Project: benchmarks, Lines: 20, Source: benchmark-dataflow.py

Example 7: test_lmdb_inference

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def test_lmdb_inference(db, augs, batch):
    ds = LMDBData(db, shuffle=False)
    # ds = LocallyShuffleData(ds, 50000)

    augs = AugmentorList(augs)

    def mapper(data):
        im, label = loads(data[1])
        im = cv2.imdecode(im, cv2.IMREAD_COLOR)
        im = augs.augment(im)
        return im, label

    ds = MultiProcessMapData(ds, 40, mapper,
                             buffer_size=200)
    # ds = MultiThreadMapData(ds, 40, mapper, buffer_size=2000)

    ds = BatchData(ds, batch)
    ds = MultiProcessRunnerZMQ(ds, 1)
    return ds 
Developer: tensorpack, Project: benchmarks, Lines: 21, Source: benchmark-dataflow.py

Example 8: get_data

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def get_data(self):
        idxs = np.arange(len(self.train_list))
        if self.shuffle:
            self.rng.shuffle(idxs)

        caches = {}
        for i, k in enumerate(idxs):
            path = self.train_list[k]
            label = self.lb_list[k]

            if i % self.preload == 0:
                try:
                    caches = ILSVRCTenth._read_tenth_batch(self.train_list[idxs[i:i+self.preload]])
                except Exception as e:
                    logging.warning('tenth local cache failed, err=%s' % str(e))

            content = caches.get(path, '')
            if not content:
                content = ILSVRCTenth._read_tenth(path)

            img = cv2.imdecode(np.fromstring(content, dtype=np.uint8), cv2.IMREAD_COLOR)
            yield [img, label] 
Developer: ildoonet, Project: tf-lcnn, Lines: 24, Source: data_feeder.py

Example 9: get_frame

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def get_frame(self) -> bytearray:
        try:
            self.sockt.send(str.encode(self.url))
            data = b''
            while True:
                try:
                    r = self.sockt.recv(90456)
                    if len(r) == 0:
                        break
                    a = r.find(b'END!')
                    if a != -1:
                        data += r[:a]
                        break
                    data += r
                except Exception as e:
                    print(e)
                    continue
            nparr = numpy.fromstring(data, numpy.uint8)
            frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
            return frame
        except Exception as e:
            print(e) 
Developer: Benehiko, Project: ReolinkCameraAPI, Lines: 24, Source: RtspClient.py

Example 10: motion_blur

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def motion_blur(x, severity=1):
    c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][severity - 1]

    output = BytesIO()
    x.save(output, format='PNG')
    x = MotionImage(blob=output.getvalue())

    x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))

    x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),
                     cv2.IMREAD_UNCHANGED)

    if x.shape != (224, 224):
        return np.clip(x[..., [2, 1, 0]], 0, 255)  # BGR to RGB
    else:  # greyscale to RGB
        return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255) 
Developer: hendrycks, Project: robustness, Lines: 18, Source: corruptions.py
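A hedged usage sketch for motion_blur above: the function expects a PIL image (it calls x.save) and relies on the source module's MotionImage helper built on Wand/ImageMagick; the input file name and severity level are illustrative only.

from PIL import Image

# Load a 224x224 RGB image and apply severity-3 motion blur; returns an ndarray clipped to [0, 255].
pil_img = Image.open('sample_224.png').convert('RGB')
blurred = motion_blur(pil_img, severity=3)
print(blurred.shape)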

Example 11: motion_blur

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def motion_blur(x, severity=1):
    c = [(10,1), (10,1.5), (10,2), (10,2.5), (12,3)][severity - 1]

    output = BytesIO()
    x.save(output, format='PNG')
    x = MotionImage(blob=output.getvalue())

    x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))

    x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),
                     cv2.IMREAD_UNCHANGED)

    if x.shape != (64, 64):
        return np.clip(x[..., [2, 1, 0]], 0, 255)  # BGR to RGB
    else:  # greyscale to RGB
        return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255) 
Developer: hendrycks, Project: robustness, Lines: 18, Source: make_tinyimagenet_c.py

Example 12: motion_blur

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def motion_blur(x, severity=1):
    c = [(12,4), (17,6), (17, 9), (17,13), (22,16)][severity - 1]

    output = BytesIO()
    x.save(output, format='PNG')
    x = MotionImage(blob=output.getvalue())

    x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))

    x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),
                     cv2.IMREAD_UNCHANGED)

    if x.shape != (299, 299):
        return np.clip(x[..., [2, 1, 0]], 0, 255)  # BGR to RGB
    else:  # greyscale to RGB
        return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255) 
Developer: hendrycks, Project: robustness, Lines: 18, Source: make_imagenet_c_inception.py

Example 13: consumer

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def consumer(self, result_wrapper):
        if len(result_wrapper.results) != 1:
            logger.error('Got %d results from server',
                         len(result_wrapper.results))
            return

        result = result_wrapper.results[0]
        if result.payload_type != gabriel_pb2.PayloadType.IMAGE:
            type_name = gabriel_pb2.PayloadType.Name(result.payload_type)
            logger.error('Got result of type %s', type_name)
            return

        np_data = np.fromstring(result.payload, dtype=np.uint8)
        frame = cv2.imdecode(np_data, cv2.IMREAD_COLOR)

        self._consume_frame(frame, result_wrapper.extras) 
Developer: cmusatyalab, Project: gabriel, Lines: 18, Source: opencv_adapter.py

Example 14: capture

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def capture(self, method=FROM_SHELL) -> Union[np.ndarray, None]:
        """
        Capture the screen.

        :return: a cv2 image as numpy ndarray
        """
        if method == self.FROM_SHELL:
            self.logger.debug('Capturing screen from shell...')
            img = self.__run_cmd(['shell', 'screencap -p'], raw=True)
            img = self.__png_sanitize(img)
            img = np.frombuffer(img, np.uint8)
            img = cv.imdecode(img, cv.IMREAD_COLOR)
            return img
        elif method == self.SDCARD_PULL:
            self.logger.debug('Capturing screen from sdcard pull...')
            self.__run_cmd(['shell', 'screencap -p /sdcard/sc.png'])
            self.__run_cmd(['pull', '/sdcard/sc.png', './sc.png'])
            img = cv.imread('./sc.png', cv.IMREAD_COLOR)
            return img
        else:
            self.logger.error('Unsupported screen capturing method.')
            return None 
Developer: will7101, Project: fgo-bot, Lines: 24, Source: device.py

Example 15: create_image

# Required import: import cv2 [as alias]
# Or: from cv2 import imdecode [as alias]
def create_image(self, buffer):
        # FIXME: opencv doesn't support gifs, even worse, the library
        # segfaults when trying to decode a gif. An exception is a
        # less drastic measure.
        try:
            if FORMATS[self.extension] == 'GIF':
                raise ValueError("opencv doesn't support gifs")
        except KeyError:
            pass

        img = cv2.imdecode(np.frombuffer(buffer, np.uint8), -1)
        if FORMATS[self.extension] == 'JPEG':
            self.exif = None
            try:
                info = JpegFile.fromString(buffer).get_exif()
                if info:
                    self.exif = info.data
                    self.exif_marker = info.marker
            except Exception:
                pass
        return img 
Developer: thumbor, Project: opencv-engine, Lines: 23, Source: engine_cv3.py


Note: the cv2.imdecode examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors; please consult each project's License before redistributing or using the code. Do not reproduce this article without permission.