

Python cv2.transpose Method Code Examples

This article collects typical usage examples of the cv2.transpose method in Python. If you are wondering what cv2.transpose does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the cv2 module, to which this method belongs.


The following presents 15 code examples of the cv2.transpose method, ordered by popularity by default.
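Before the repository examples, here is a minimal sketch (not taken from any of the projects below; the array size is arbitrary) of what cv2.transpose does on its own: it swaps an image's rows and columns, turning an H x W x C array into a W x H x C array, which is the building block for the 90° rotation tricks used in several examples.

import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy H x W x C image
out = cv2.transpose(img)                       # swap rows and columns
print(img.shape, out.shape)                    # (480, 640, 3) (640, 480, 3)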

Example 1: load_bin

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def load_bin(path, image_size):
    try:
        with open(path, 'rb') as f:
            bins, issame_list = pickle.load(f)  # py2
    except UnicodeDecodeError as e:
        with open(path, 'rb') as f:
            bins, issame_list = pickle.load(f, encoding='bytes')  # py3
    data_list = []
    for flip in [0, 1]:
        data = nd.empty((len(issame_list) * 2, 3, image_size[0], image_size[1]))
        data_list.append(data)
    for i in range(len(issame_list) * 2):
        _bin = bins[i]
        img = mx.image.imdecode(_bin)
        if img.shape[1] != image_size[0]:
            img = mx.image.resize_short(img, image_size[0])
        img = nd.transpose(img, axes=(2, 0, 1))
        for flip in [0, 1]:
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            data_list[flip][i][:] = img
        if i % 1000 == 0:
            print('loading bin', i)
    print(data_list[0].shape)
    return (data_list, issame_list) 
Author: 944284742, Project: 1.FaceRecognition, Lines: 27, Source: verification.py

Example 2: load_bin

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def load_bin(path, image_size):
  try:
    with open(path, 'rb') as f:
      bins, issame_list = pickle.load(f) #py2
  except UnicodeDecodeError as e:
    with open(path, 'rb') as f:
      bins, issame_list = pickle.load(f, encoding='bytes') #py3
  data_list = []
  for flip in [0,1]:
    data = nd.empty((len(issame_list)*2, 3, image_size[0], image_size[1]))
    data_list.append(data)
  for i in range(len(issame_list)*2):
    _bin = bins[i]
    img = mx.image.imdecode(_bin)
    if img.shape[1]!=image_size[0]:
      img = mx.image.resize_short(img, image_size[0])
    img = nd.transpose(img, axes=(2, 0, 1))
    for flip in [0,1]:
      if flip==1:
        img = mx.ndarray.flip(data=img, axis=2)
      data_list[flip][i][:] = img
    if i%1000==0:
      print('loading bin', i)
  print(data_list[0].shape)
  return (data_list, issame_list) 
Author: deepinsight, Project: insightface, Lines: 27, Source: verification.py

Example 3: load_bin

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def load_bin(path, image_size):
  bins, issame_list = pickle.load(open(path, 'rb'))
  data_list = []
  for flip in [0,1]:
    data = nd.empty((len(issame_list)*2, 3, image_size[0], image_size[1]))
    data_list.append(data)
  for i in xrange(len(issame_list)*2):
    _bin = bins[i]
    img = mx.image.imdecode(_bin)
    if img.shape[1]!=image_size[0]:
      img = mx.image.resize_short(img, image_size[0])
    img = nd.transpose(img, axes=(2, 0, 1))
    for flip in [0,1]:
      if flip==1:
        img = mx.ndarray.flip(data=img, axis=2)
      data_list[flip][i][:] = img
    if i%1000==0:
      print('loading bin', i)
  print(data_list[0].shape)
  return (data_list, issame_list) 
Author: deepinsight, Project: insightface, Lines: 22, Source: verification.py

Example 4: rotate

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def rotate(img, angle=90, clockwise=True):
    """
        函數使圖片可順時針或逆時針旋轉90、180、270度.
        默認clockwise=True:順時針旋轉
    """

    def count_clock_rotate(img):
        # rotate 90° counterclockwise
        rows, cols = img.shape[:2]
        rotate_img = np.zeros((cols, rows))
        rotate_img = cv2.transpose(img)
        rotate_img = cv2.flip(rotate_img, 0)
        return rotate_img

    # convert the requested rotation into a number of 90° counterclockwise turns:
    counter_rotate_time = (4 - angle / 90) % 4 if clockwise else (angle / 90) % 4
    for i in range(int(counter_rotate_time)):
        img = count_clock_rotate(img)

    return img 
Author: AirtestProject, Project: Airtest, Lines: 22, Source: aircv.py
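A hedged usage sketch for the rotate helper above (not part of the Airtest source; the input array is a random placeholder, and the rotate function from Example 4 is assumed to be in scope with its imports). It also checks that transpose followed by a vertical flip really is a 90° counterclockwise turn by comparing against np.rot90.

import cv2
import numpy as np

img = np.random.randint(0, 256, (120, 200, 3), dtype=np.uint8)  # placeholder image

# transpose followed by a vertical flip == 90° counterclockwise rotation
ccw = cv2.flip(cv2.transpose(img), 0)
assert np.array_equal(ccw, np.rot90(img))

# the helper above reduces any multiple of 90° to repeated counterclockwise turns
assert np.array_equal(rotate(img, 90, clockwise=False), ccw)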

Example 5: get_test_aug

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def get_test_aug(factor):
    if not factor or factor == 1:
        return [
            [False, False, False]]
    elif factor == 4:
        # transpose, v-flip, h-flip
        return [
            [False, False, False],
            [False, False, True],
            [False, True, False],
            [True, True, True]]
    elif factor == 8:
        # return list of all combinations of flips and transpose
        return ((1 & np.arange(0, 8)[:, np.newaxis] // 2**np.arange(2, -1, -1)) > 0).tolist()
    else:
        print('Invalid augmentation factor')
        return [
            [False, False, False]] 
Author: rwightman, Project: pytorch-planet-amazon, Lines: 20, Source: dataset.py

Example 6: _centre_crop_and_transform

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def _centre_crop_and_transform(self, input_img, scale=1.0, trans=False, vflip=False, hflip=False):
        h, w = input_img.shape[:2]
        cx = w // 2
        cy = h // 2
        crop_w, crop_h = utils.calc_crop_size(self.img_size[0], self.img_size[1], scale=scale)
        input_img = utils.crop_center(input_img, cx, cy, crop_w, crop_h)
        if trans:
            input_img = cv2.transpose(input_img)
        if hflip or vflip:
            if hflip and vflip:
                c = -1
            else:
                c = 0 if vflip else 1
            input_img = cv2.flip(input_img, flipCode=c)
        if scale != 1.0:
            input_img = cv2.resize(input_img, self.img_size, interpolation=cv2.INTER_LINEAR)
        return input_img 
Author: rwightman, Project: pytorch-planet-amazon, Lines: 19, Source: dataset.py

Example 7: load_bin

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def load_bin(path, image_size):
  bins, issame_list = pickle.load(open(path, 'rb'), encoding='bytes')
  data_list = []
  for flip in [0,1]:
    data = nd.empty((len(issame_list)*2, 3, image_size[0], image_size[1]))
    data_list.append(data)
  for i in range(len(issame_list)*2):
    _bin = bins[i]
    img = mx.image.imdecode(_bin)
    img = nd.transpose(img, axes=(2, 0, 1))
    for flip in [0,1]:
      if flip==1:
        img = mx.ndarray.flip(data=img, axis=2)
      data_list[flip][i][:] = img
    if i%1000==0:
      print('loading bin', i)
  print(data_list[0].shape)
  return (data_list, issame_list) 
Author: bleakie, Project: MaskInsightface, Lines: 20, Source: verification.py

Example 8: apply_image

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def apply_image(self, img):
        ret = cv2.transpose(img)
        if img.ndim == 3 and ret.ndim == 2:
            ret = ret[:, :, np.newaxis]
        return ret 
Author: tensorpack, Project: dataflow, Lines: 7, Source: transform.py

Example 9: preprocessor

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def preprocessor(img, imgSize, enhance=False, dataAugmentation=False):
    "put img into target img of size imgSize, transpose for TF and normalize gray-values"

    # there are damaged files in IAM dataset - just use black image instead
    if img is None:
        img = np.zeros([imgSize[1], imgSize[0]]) # (64,800)
        print("Image None!")

    # increase dataset size by applying random stretches to the images
    if dataAugmentation:
        stretch = (random.random() - 0.5)  # -0.5 .. +0.5
        wStretched = max(int(img.shape[1] * (1 + stretch)), 1)  # random width, but at least 1
        img = cv2.resize(img, (wStretched, img.shape[0]))  # stretch horizontally by factor 0.5 .. 1.5

    if enhance: # only if the line text has low contrast and line width is thin
        # increase contrast
        pxmin = np.min(img)
        pxmax = np.max(img)
        imgContrast = (img - pxmin) / (pxmax - pxmin) * 255
        # increase line width (optional)
        kernel = np.ones((3, 3), np.uint8)
        img = cv2.erode(imgContrast, kernel, iterations=1) # increase linewidth

    # create target image and copy sample image into it
    (wt, ht) = imgSize
    (h, w) = img.shape
    fx = w / wt
    fy = h / ht
    f = max(fx, fy)
    newSize = (max(min(wt, int(w / f)), 1),
               max(min(ht, int(h / f)), 1))  # scale according to f (result at least 1 and at most wt or ht)
    img = cv2.resize(img, newSize, interpolation=cv2.INTER_CUBIC)  # INTER_CUBIC interpolation best approximates the original pixels
                                                                   # see https://stackoverflow.com/a/57503843/7338066
    target = np.ones([ht, wt]) * 255  # shape=(64,800)
    target[0:newSize[1], 0:newSize[0]] = img

    # transpose for TF
    img = cv2.transpose(target)

    return img 
Author: sushant097, Project: Handwritten-Line-Text-Recognition-using-Deep-Learning-with-Tensorflow, Lines: 42, Source: SamplePreprocessor.py
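A hedged usage sketch for the preprocessor above (not part of the original repository; the grayscale input and the (800, 64) target size are illustrative, and the preprocessor function from Example 9 is assumed to be in scope with its imports). Note that the returned array is transposed, so its shape is (wt, ht).

import cv2
import numpy as np

gray = np.random.randint(0, 256, (120, 900), dtype=np.uint8)  # placeholder grayscale text-line image
out = preprocessor(gray, imgSize=(800, 64), enhance=False, dataAugmentation=False)
print(out.shape)  # (800, 64): width-major after cv2.transpose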

Example 10: rotate

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def rotate(self, degrees):
        # see http://stackoverflow.com/a/23990392
        if degrees == 90:
            self.image = cv2.transpose(self.image)
            cv2.flip(self.image, 0, self.image)
        elif degrees == 180:
            cv2.flip(self.image, -1, self.image)
        elif degrees == 270:
            self.image = cv2.transpose(self.image)
            cv2.flip(self.image, 1, self.image)
        else:
            # see http://stackoverflow.com/a/37347070
            # one pixel glitch seems to happen with 90/180/270
            # degrees pictures in this algorithm if you check
            # the typical github.com/recurser/exif-orientation-examples
            # but the above transpose/flip algorithm is working fine
            # for those cases already
            width, height = self.size
            image_center = (width / 2, height / 2)
            rot_mat = cv2.getRotationMatrix2D(image_center, degrees, 1.0)

            abs_cos = abs(rot_mat[0, 0])
            abs_sin = abs(rot_mat[0, 1])
            bound_w = int((height * abs_sin) + (width * abs_cos))
            bound_h = int((height * abs_cos) + (width * abs_sin))

            rot_mat[0, 2] += ((bound_w / 2) - image_center[0])
            rot_mat[1, 2] += ((bound_h / 2) - image_center[1])

            self.image = cv2.warpAffine(self.image, rot_mat, (bound_w, bound_h)) 
Author: thumbor, Project: opencv-engine, Lines: 32, Source: engine_cv3.py

Example 11: set_input

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def set_input(img):
    if type(img) == list:
        img = np.stack(img, axis=0)
    else:
        img = img[np.newaxis, :, :, :]
    img = img.transpose((0, 3, 1, 2))
    return torch.FloatTensor(img) 
Author: siriusdemon, Project: pytorch-PCN, Lines: 9, Source: pcn.py

Example 12: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def detect(img, imgPad, nets):
    img180 = cv2.flip(imgPad, 0)
    img90 = cv2.transpose(imgPad)
    imgNeg90 = cv2.flip(img90, 0)

    winlist = stage1(img, imgPad, nets[0], classThreshold_[0])
    winlist = NMS(winlist, True, nmsThreshold_[0])
    winlist = stage2(imgPad, img180, nets[1], classThreshold_[1], 24, winlist)
    winlist = NMS(winlist, True, nmsThreshold_[1])
    winlist = stage3(imgPad, img180, img90, imgNeg90, nets[2], classThreshold_[2], 48, winlist)
    winlist = NMS(winlist, False, nmsThreshold_[2])
    winlist = deleteFP(winlist)
    return winlist 
Author: siriusdemon, Project: pytorch-PCN, Lines: 15, Source: pcn.py

Example 13: cv2rotateimage

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def cv2rotateimage(image, angle):
  """Efficient rotation if 90 degrees rotations, slow otherwise.

  Not a tensorflow function, using cv2 and scipy on numpy arrays.

  Args:
    image: a numpy array with shape [height, width, channels].
    angle: the rotation angle in degrees in the range [-180, 180].
  Returns:
    The rotated image.
  """
  # Limit angle to [-180, 180] degrees.
  assert angle <= 180 and angle >= -180
  if angle == 0:
    return image
  # Efficient rotations.
  if angle == -90:
    image = cv2.transpose(image)
    image = cv2.flip(image, 0)
  elif angle == 90:
    image = cv2.transpose(image)
    image = cv2.flip(image, 1)
  elif angle == 180 or angle == -180:
    image = cv2.flip(image, 0)
    image = cv2.flip(image, 1)
  else:  # Slow rotation.
    image = ndimage.interpolation.rotate(image, angle)
  return image 
Author: rky0930, Project: yolo_v2, Lines: 30, Source: preprocessing.py
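As a side note, not taken from the repository above: OpenCV 3.2 and later also expose cv2.rotate for exact 90°/180° turns. A minimal sketch checking it against the transpose-plus-flip pairs used in the fast path above:

import cv2
import numpy as np

image = np.random.randint(0, 256, (90, 160, 3), dtype=np.uint8)  # placeholder image

# 90° clockwise == transpose + horizontal flip
assert np.array_equal(cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE),
                      cv2.flip(cv2.transpose(image), 1))

# 90° counterclockwise == transpose + vertical flip
assert np.array_equal(cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE),
                      cv2.flip(cv2.transpose(image), 0))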

Example 14: load_shape

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def load_shape(filename, load_triangles = False):
    mesh = plyfile.PlyData.read(filename)
    # convert vertices to numpy array
    vertices = np.transpose(np.vstack((mesh['vertex']['x'],mesh['vertex']['y'],mesh['vertex']['z'])))
    # get triangles
    if load_triangles:
        tridata = mesh['face'].data['vertex_indices']
        triangles = plyfile.make2d(tridata)
        return vertices, triangles
    return vertices 
Author: haixpham, Project: end2end_AU_speech, Lines: 12, Source: ShapeUtils2.py

Example 15: transform_shape

# Required module: import cv2 [as alias]
# Or: from cv2 import transpose [as alias]
def transform_shape(shape, R=None, T=None):
    ret_shape = np.copy(shape)
    if R is not None:
        ret_shape = ret_shape @ R.transpose()
    if T is not None:
        ret_shape = np.add(ret_shape, T)
    return ret_shape 
Author: haixpham, Project: end2end_AU_speech, Lines: 9, Source: ShapeUtils2.py


Note: The cv2.transpose method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.