

Python cv2.CV_LOAD_IMAGE_GRAYSCALE Attribute Code Examples

This article collects typical usage examples of the cv2.CV_LOAD_IMAGE_GRAYSCALE attribute in Python. If you are wondering what cv2.CV_LOAD_IMAGE_GRAYSCALE is for, how to use it, or want to see it in real code, the selected examples below should help. You can also explore further usage examples from the cv2 module.


The following presents 15 code examples of the cv2.CV_LOAD_IMAGE_GRAYSCALE attribute, sorted by popularity by default.
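
Note that CV_LOAD_IMAGE_GRAYSCALE is an OpenCV 2.x name: it was removed from the cv2 module in OpenCV 3.0, where the equivalent flag is cv2.IMREAD_GRAYSCALE. As a minimal compatibility sketch (not taken from any of the examples below; the image path is a placeholder), code that has to run on both generations can fall back like this:

import cv2

# Prefer the OpenCV 3+ flag; fall back to the 2.x constant when it is present.
try:
    GRAYSCALE_FLAG = cv2.IMREAD_GRAYSCALE
except AttributeError:
    GRAYSCALE_FLAG = cv2.CV_LOAD_IMAGE_GRAYSCALE

img = cv2.imread('some_image.jpg', GRAYSCALE_FLAG)  # placeholder path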

Example 1: E

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def E():
    data = getDataFromTxt(TXT)
    error = np.zeros((len(data), 3))
    for i in range(len(data)):
        imgPath, bbox, landmarkGt = data[i]
        landmarkGt = landmarkGt[2:, :]
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)

        landmarkP = NM(img, bbox)

        # real landmark
        landmarkP = bbox.reprojectLandmark(landmarkP)
        landmarkGt = bbox.reprojectLandmark(landmarkGt)
        error[i] = evaluateError(landmarkGt, landmarkP, bbox)
    return error 
Developer ID: luoyetx, Project: deep-landmark, Lines of code: 19, Source file: NM.py

Example 2: E

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def E():
    data = getDataFromTxt(TXT)
    error = np.zeros((len(data), 3))
    for i in range(len(data)):
        imgPath, bbox, landmarkGt = data[i]
        landmarkGt = landmarkGt[:3, :]
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)

        landmarkP = EN(img, bbox)

        # real landmark
        landmarkP = bbox.reprojectLandmark(landmarkP)
        landmarkGt = bbox.reprojectLandmark(landmarkGt)
        error[i] = evaluateError(landmarkGt, landmarkP, bbox)
    return error 
Developer ID: luoyetx, Project: deep-landmark, Lines of code: 19, Source file: EN.py

Example 3: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def detect(self, obj, event):
        # First, reset image, in case of previous detections:
        active_handle = self.get_active('Media')
        media = self.dbstate.db.get_media_from_handle(active_handle)
        self.load_image(media)
        min_face_size = (50, 50)  # FIXME: get from setting
        self.cv_image = cv2.LoadImage(self.full_path,
                                      cv2.CV_LOAD_IMAGE_GRAYSCALE)
        o_width, o_height = self.cv_image.width, self.cv_image.height
        cv2.EqualizeHist(self.cv_image, self.cv_image)
        cascade = cv2.Load(HAARCASCADE_PATH)
        faces = cv2.HaarDetectObjects(self.cv_image, cascade,
                                      cv2.CreateMemStorage(0),
                                      1.2, 2, cv2.CV_HAAR_DO_CANNY_PRUNING,
                                      min_face_size)
        references = self.find_references()
        rects = []
        o_width, o_height = [float(t) for t in (self.cv_image.width,
                                                self.cv_image.height)]
        for ((x, y, width, height), neighbors) in faces:
            # percentages:
            rects.append((x / o_width, y / o_height,
                          width / o_width, height / o_height))
        self.draw_rectangles(rects, references) 
Developer ID: gramps-project, Project: addons-source, Lines of code: 26, Source file: FaceDetection.py
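
This example calls the legacy cv-style names (LoadImage, EqualizeHist, Load, HaarDetectObjects, CreateMemStorage) through the cv2 namespace, which modern OpenCV builds no longer expose. Below is a rough sketch of the equivalent detection step using the current cv2.CascadeClassifier API; it is an illustration only, not the addon's code, and it assumes HAARCASCADE_PATH points to a valid Haar cascade XML file.

import cv2

def detect_faces(full_path, haarcascade_path, min_face_size=(50, 50)):
    # Load directly as grayscale (cv2.IMREAD_GRAYSCALE replaces CV_LOAD_IMAGE_GRAYSCALE).
    gray = cv2.imread(full_path, cv2.IMREAD_GRAYSCALE)
    gray = cv2.equalizeHist(gray)
    cascade = cv2.CascadeClassifier(haarcascade_path)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=2,
                                     minSize=min_face_size)
    o_height, o_width = [float(t) for t in gray.shape[:2]]
    # Convert pixel boxes to rectangles expressed as fractions of the image size.
    return [(x / o_width, y / o_height, w / o_width, h / o_height)
            for (x, y, w, h) in faces]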

Example 4: format_image

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def format_image(image):
  if len(image.shape) > 2 and image.shape[2] == 3:
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
  else:
    image = cv2.imdecode(image, cv2.CV_LOAD_IMAGE_GRAYSCALE)
  faces = cascade_classifier.detectMultiScale(
      image,
      scaleFactor = 1.3 ,
      minNeighbors = 5
  )
  if not len(faces) > 0:
    return None
  max_area_face = faces[0]
  for face in faces:
    if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
      max_area_face = face
  face = max_area_face
  image = image[face[1]:(face[1] + face[3]), face[0]:(face[0] + face[2])]
  try:
    image = cv2.resize(image, (48,48), interpolation = cv2.INTER_CUBIC) / 255.
  except Exception:
    print("[+] Problem during resize")
    return None
  return image 
Developer ID: nimish1512, Project: Emotion-recognition-and-prediction, Lines of code: 26, Source file: run.py

Example 5: load_heatmaps

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def load_heatmaps(self, load_dir):

        heatmaps = []
        for step_i in range(self.step_total-self.step_total_offset):
            try:
                temp = cv2.imread(
                    '{}/{}.jpg'.format(
                        load_dir,
                        step_i,
                    ),
                    cv2.CV_LOAD_IMAGE_GRAYSCALE,
                )
                temp = cv2.resize(temp,(self.heatmap_width, self.heatmap_height))
                temp = temp / 255.0
                heatmaps += [temp]
            except Exception as e:
                raise Exception(Exception,":",e) 
Developer ID: YuhangSong, Project: DHP, Lines of code: 19, Source file: envs.py

Example 6: read_image

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def read_image(img_path, **kwargs):
  mode = kwargs.get('mode', 'rgb')
  layout = kwargs.get('layout', 'HWC')
  if mode=='gray':
    img = cv2.imread(img_path, cv2.CV_LOAD_IMAGE_GRAYSCALE)
  else:
    img = cv2.imread(img_path, cv2.CV_LOAD_IMAGE_COLOR)
    if mode=='rgb':
      #print('to rgb')
      img = img[...,::-1]
    if layout=='CHW':
      img = np.transpose(img, (2,0,1))
  return img 
Developer ID: deepinsight, Project: insightface, Lines of code: 15, Source file: face_preprocess.py

Example 7: generate

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def generate(ftxt, mode, argument=False):
    """
        Generate Training Data for LEVEL-3
        mode = train or test
    """
    data = getDataFromTxt(ftxt)

    trainData = defaultdict(lambda: dict(patches=[], landmarks=[]))
    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)

        landmarkPs = randomShiftWithArgument(landmarkGt, 0.01)
        if not argument:
            landmarkPs = [landmarkPs[0]]

        for landmarkP in landmarkPs:
            for idx, name, padding in types:
                patch, patch_bbox = getPatch(img, bbox, landmarkP[idx], padding)
                patch = cv2.resize(patch, (15, 15))
                patch = patch.reshape((1, 15, 15))
                trainData[name]['patches'].append(patch)
                _ = patch_bbox.project(bbox.reproject(landmarkGt[idx]))
                trainData[name]['landmarks'].append(_)

    for idx, name, padding in types:
        logger('writing training data of %s'%name)
        patches = np.asarray(trainData[name]['patches'])
        landmarks = np.asarray(trainData[name]['landmarks'])
        patches = processImage(patches)

        shuffle_in_unison_scary(patches, landmarks)

        with h5py.File('train/3_%s/%s.h5'%(name, mode), 'w') as h5:
            h5['data'] = patches.astype(np.float32)
            h5['landmark'] = landmarks.astype(np.float32)
        with open('train/3_%s/%s.txt'%(name, mode), 'w') as fd:
            fd.write('train/3_%s/%s.h5'%(name, mode)) 
Developer ID: luoyetx, Project: deep-landmark, Lines of code: 41, Source file: level3.py

Example 8: generate

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def generate(ftxt, mode, argument=False):
    """
        Generate Training Data for LEVEL-2
        mode = train or test
    """
    data = getDataFromTxt(ftxt)

    trainData = defaultdict(lambda: dict(patches=[], landmarks=[]))
    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)

        landmarkPs = randomShiftWithArgument(landmarkGt, 0.05)
        if not argument:
            landmarkPs = [landmarkPs[0]]

        for landmarkP in landmarkPs:
            for idx, name, padding in types:
                patch, patch_bbox = getPatch(img, bbox, landmarkP[idx], padding)
                patch = cv2.resize(patch, (15, 15))
                patch = patch.reshape((1, 15, 15))
                trainData[name]['patches'].append(patch)
                _ = patch_bbox.project(bbox.reproject(landmarkGt[idx]))
                trainData[name]['landmarks'].append(_)

    for idx, name, padding in types:
        logger('writing training data of %s'%name)
        patches = np.asarray(trainData[name]['patches'])
        landmarks = np.asarray(trainData[name]['landmarks'])
        patches = processImage(patches)

        shuffle_in_unison_scary(patches, landmarks)

        with h5py.File('train/2_%s/%s.h5'%(name, mode), 'w') as h5:
            h5['data'] = patches.astype(np.float32)
            h5['landmark'] = landmarks.astype(np.float32)
        with open('train/2_%s/%s.txt'%(name, mode), 'w') as fd:
            fd.write('train/2_%s/%s.h5'%(name, mode)) 
Developer ID: luoyetx, Project: deep-landmark, Lines of code: 41, Source file: level2.py

Example 9: E

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def E(level=1):
    if level == 0:
        from common import level1 as P
        P = partial(P, FOnly=True) # higher-order function; here we only test the LEVEL-1 F CNN
    elif level == 1:
        from common import level1 as P
    elif level == 2:
        from common import level2 as P
    else:
        from common import level3 as P

    data = getDataFromTxt(TXT)
    error = np.zeros((len(data), 5))
    for i in range(len(data)):
        imgPath, bbox, landmarkGt = data[i]
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)

        landmarkP = P(img, bbox)

        # real landmark
        landmarkP = bbox.reprojectLandmark(landmarkP)
        landmarkGt = bbox.reprojectLandmark(landmarkGt)
        error[i] = evaluateError(landmarkGt, landmarkP, bbox)
    return error 
Developer ID: luoyetx, Project: deep-landmark, Lines of code: 28, Source file: test.py

Example 10: generate_bg

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def generate_bg(num_bg_images):
    found = False
    while not found:
        fname = "bgs/{:08d}.jpg".format(random.randint(0, num_bg_images - 1))
        bg = cv2.imread(fname, cv2.CV_LOAD_IMAGE_GRAYSCALE) / 255.
        if (bg.shape[1] >= OUTPUT_SHAPE[1] and
            bg.shape[0] >= OUTPUT_SHAPE[0]):
            found = True

    x = random.randint(0, bg.shape[1] - OUTPUT_SHAPE[1])
    y = random.randint(0, bg.shape[0] - OUTPUT_SHAPE[0])
    bg = bg[y:y + OUTPUT_SHAPE[0], x:x + OUTPUT_SHAPE[1]]

    return bg 
Developer ID: matthewearl, Project: deep-anpr, Lines of code: 16, Source file: gen.py

Example 11: im_from_file

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def im_from_file(f):
    a = numpy.asarray(bytearray(f.read()), dtype=numpy.uint8)
    return cv2.imdecode(a, cv2.CV_LOAD_IMAGE_GRAYSCALE) 
Developer ID: matthewearl, Project: deep-anpr, Lines of code: 5, Source file: extractbgs.py
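
A brief usage sketch for this helper (the path below is a placeholder; any binary file-like object works, for example a plain file or a tarfile member):

with open('bgs/00000000.jpg', 'rb') as f:  # placeholder path
    gray = im_from_file(f)
print(gray.shape)  # (height, width) of the decoded grayscale image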

Example 12: get_word_image

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def get_word_image(self, gray_scale=True):
        col_type = None
        if gray_scale:
            col_type = cv2.CV_LOAD_IMAGE_GRAYSCALE
        else:
            col_type = cv2.CV_LOAD_IMAGE_COLOR
        
        # load the image
        ul = self.bounding_box['upperLeft']
        wh = self.bounding_box['widthHeight']
        img = cv2.imread(self.image_path, col_type)
        if not np.all(self.bounding_box['widthHeight'] == -1):
            img = img[ul[1]:ul[1]+wh[1], ul[0]:ul[0]+wh[0]]
        return img 
Developer ID: ssudholt, Project: phocnet, Lines of code: 16, Source file: word_container.py

Example 13: main

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def main(img_dir, output_dir, pretrained_phocnet, deploy_proto, min_image_width_height, gpu_id):
	logging_format = '[%(asctime)-19s, %(name)s, %(levelname)s] %(message)s'
	logging.basicConfig(level=logging.INFO,
                        format=logging_format)
	logger = logging.getLogger('Predict PHOCs')
	
	if gpu_id is None:
		caffe.set_mode_cpu()
	else:
		caffe.set_mode_gpu()
		caffe.set_device(gpu_id)
	
	logger.info('Loading PHOCNet...')
	phocnet = caffe.Net(deploy_proto, caffe.TEST, weights=pretrained_phocnet)
	
	# find all images in the supplied dir
	logger.info('Found %d word images to process', len(os.listdir(img_dir)))
	word_img_list = [cv2.imread(os.path.join(img_dir, filename), cv2.CV_LOAD_IMAGE_GRAYSCALE) 
					 for filename in sorted(os.listdir(img_dir)) if filename not in ['.', '..']]
	# push images through the PHOCNet
	logger.info('Predicting PHOCs...')
	predicted_phocs = net_output_for_word_image_list(phocnet=phocnet, word_img_list=word_img_list, 
													min_img_width_height=min_image_width_height)
	# save everything
	logger.info('Saving...')
	np.save(os.path.join(output_dir, 'predicted_phocs.npy'), predicted_phocs)
	logger.info('Finished') 
Developer ID: ssudholt, Project: phocnet, Lines of code: 29, Source file: predict_phocs.py

Example 14: get_full_size_labels

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def get_full_size_labels(self, img_ids, timespan=None):
    """Get full sized labels."""
    if timespan is None:
      timespan = self.get_default_timespan()
    with h5py.File(self.h5_fname, 'r') as h5f:
      num_ex = len(img_ids)
      y_full = []
      for kk, ii in enumerate(img_ids):
        key = self.get_str_id(ii)
        data_group = h5f[key]
        if 'label_segmentation_full_size' in data_group:
          y_gt_group = data_group['label_segmentation_full_size']
          num_obj = len(y_gt_group.keys())
          y_full_kk = None
          for jj in xrange(min(num_obj, timespan)):
            y_full_jj_str = y_gt_group['{:02d}'.format(jj)][:]
            y_full_jj = cv2.imdecode(
                y_full_jj_str, cv2.CV_LOAD_IMAGE_GRAYSCALE).astype('float32')
            if y_full_kk is None:
              y_full_kk = np.zeros(
                  [timespan, y_full_jj.shape[0], y_full_jj.shape[1]])
            y_full_kk[jj] = y_full_jj
          y_full.append(y_full_kk)
        else:
          y_full.append(np.zeros([timespan] + list(data_group['orig_size'][:])))
    return y_full 
Developer ID: renmengye, Project: rec-attend-public, Lines of code: 28, Source file: ins_seg_dataset.py

Example 15: read_pgm

# Required module: import cv2 [as alias]
# Or: from cv2 import CV_LOAD_IMAGE_GRAYSCALE [as alias]
def read_pgm(filename):
    img1 = cv2.imread(filename, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    h, w = img1.shape[:2]
    vis0 = np.zeros((h,w), np.float32)
    vis0[:h, :w] = img1
    return vis0
            

# This method is used to read cover and stego images.
# We assume that stego images may have been steganographically embedded with different keys (in practice this seems to be inefficient...).
Developer ID: rcouturier, Project: steganalysis_with_CNN_and_SRM, Lines of code: 12, Source file: conv_stego20.py


Note: The cv2.CV_LOAD_IMAGE_GRAYSCALE attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.