

Python cv2.normalize Method Code Examples


This article collects typical usage examples of the Python method cv2.normalize. If you have been wondering how exactly cv2.normalize works, how to call it, or what a working cv2.normalize example looks like, the curated code samples below may help. You can also explore further usage examples from the cv2 module the method belongs to.


Below are 15 code examples of cv2.normalize, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
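
Before the examples, here is a minimal, hedged sketch of the most common call pattern (the array img is illustrative and not taken from any of the projects below): scale an array to the 0-255 range with min-max normalization and convert it to 8-bit.

import cv2
import numpy as np

img = np.random.rand(4, 4).astype(np.float32)  # illustrative input array
out = cv2.normalize(img, None, alpha=0, beta=255,
                    norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
print(out.min(), out.max())  # 0 255 for any non-constant input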

Example 1: proc_oflow

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def proc_oflow(images):
  h, w = images.shape[-3:-1]

  processed_images = []
  for image in images:
    hsv = np.zeros((h, w, 3), dtype=np.uint8)
    hsv[:, :, 0] = 255
    hsv[:, :, 1] = 255

    mag, ang = cv2.cartToPolar(image[..., 0], image[..., 1])
    hsv[..., 0] = ang*180/np.pi/2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)

    processed_image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    processed_images.append(processed_image)

  return np.stack(processed_images) 
Developer ID: google, Project: graph_distillation, Lines of code: 19, Source file: imgproc.py
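
A hedged usage sketch for proc_oflow above (the flow stack is synthetic, shaped (num_frames, h, w, 2) with the dx/dy channels the function expects; none of these names come from the original project):

import numpy as np

flows = np.random.randn(4, 240, 320, 2).astype(np.float32)  # e.g. Farneback flow fields
bgr = proc_oflow(flows)
print(bgr.shape)  # (4, 240, 320, 3), uint8 BGR visualizations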

Example 2: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def __init__(self, parent, capture, fps=24):
        wx.Panel.__init__(self, parent)
                
        self.capture = capture
        ret, frame = self.capture.read()


        sal = mr_sal.saliency(frame)
        sal = cv2.resize(sal,(320,240)).astype(sp.uint8)
        sal = cv2.normalize(sal, None, 0, 255, cv2.NORM_MINMAX)
        outsal = cv2.applyColorMap(sal,cv2.COLORMAP_HSV)
        self.bmp = wx.BitmapFromBuffer(320,240, outsal.astype(sp.uint8))

        self.timer = wx.Timer(self)
        self.timer.Start(1000./fps)

        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_TIMER, self.NextFrame) 
Developer ID: ruanxiang, Project: mr_saliency, Lines of code: 20, Source file: live_demo.py

Example 3: normalize

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def normalize(
    src: List[int],
    alpha: int = None,
    beta: int = None,
    norm_type: int = None,
    dtype: int = None,
    mask: List[int] = None,
) -> List[int]:
    """
    Normalizes arrays
    """
    src = src.astype("float")
    dst = np.zeros(src.shape)  # Output image array

    parameters = {k: v for k, v in locals().items() if v is not None}

    cv2.normalize(**parameters)
    return dst 
Developer ID: intel, Project: dffml, Lines of code: 20, Source file: operations.py
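
A hedged usage sketch of the wrapper above (the input array and parameter values are illustrative); only the arguments that are not None are forwarded to cv2.normalize:

import cv2
import numpy as np

img = np.random.randint(0, 1000, (8, 8))
out = normalize(img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
print(out.min(), out.max())  # roughly 0.0 and 255.0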

Example 4: _update_mean_shift_bookkeeping

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def _update_mean_shift_bookkeeping(self, frame, box_grouped):
        """Preprocess all valid bounding boxes for mean-shift tracking

            This method preprocesses all relevant bounding boxes (those that
            have been detected by both mean-shift tracking and saliency) for
            the next mean-shift step.

            :param frame: current RGB input frame
            :param box_grouped: list of bounding boxes
        """
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        self.object_roi = []
        self.object_box = []
        for box in box_grouped:
            (x, y, w, h) = box
            hsv_roi = hsv[y:y + h, x:x + w]
            mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                               np.array((180., 255., 255.)))
            roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
            cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

            self.object_roi.append(roi_hist)
            self.object_box.append(box) 
Developer ID: PacktPublishing, Project: OpenCV-Computer-Vision-Projects-with-Python, Lines of code: 26, Source file: tracking.py

Example 5: calculate_roi_hist

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def calculate_roi_hist(self, frame):
    """Calculates region of interest histogram.

    Args:
      frame: The np.array image frame to calculate ROI histogram for.
    """
    (x, y, w, h) = self.box
    roi = frame[y:y + h, x:x + w]

    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                       np.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0, 1], mask, [180, 255],
                            [0, 180, 0, 255])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    self.roi_hist = roi_hist

  # Run this every frame 
Developer ID: google, Project: automl-video-ondevice, Lines of code: 20, Source file: camshift_object_tracker.py
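
The trailing comment "Run this every frame" refers to the tracking update that consumes this histogram. A hedged sketch of that step (frame, self.box and term_crit are illustrative names, not taken from the original tracker):

hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
back_proj = cv2.calcBackProject([hsv], [0, 1], self.roi_hist, [0, 180, 0, 255], 1)  # same channels/ranges as the histogram
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
track_box, self.box = cv2.CamShift(back_proj, self.box, term_crit)  # updated search window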

Example 6: load_frames

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def load_frames(file_path, resize_to=224.0):
    # Saved numpy files should be read in with format (time, height, width, channel)
    frames = np.load(file_path)
    t, h, w, c = frames.shape

    # Resize and scale images for the network structure
    #TODO: maybe use opencv to normalize the image
    #frames = cv.normalize(frames, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)
    frames_out = []
    need_resize = False
    if w < resize_to or h < resize_to:
        d = resize_to - min(w, h)
        sc = 1 + d / min(w, h)
        need_resize = True
    for i in range(t):
        img = frames[i, :, :, :]
        if need_resize:
            img = cv.resize(img, dsize=(0, 0), fx=sc, fy=sc)
        img = (img / 255.) * 2 - 1
        frames_out.append(img)
    return np.asarray(frames_out, dtype=np.float32) 
Developer ID: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines of code: 23, Source file: smoke_video_dataset_cp.py

Example 7: compute_dense_optical_flow

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def compute_dense_optical_flow(prev_image, current_image):
  old_shape = current_image.shape
  prev_image_gray = cv2.cvtColor(prev_image, cv2.COLOR_BGR2GRAY)
  current_image_gray = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)
  assert current_image.shape == old_shape
  hsv = np.zeros_like(prev_image)
  hsv[..., 1] = 255
  flow = None
  flow = cv2.calcOpticalFlowFarneback(prev=prev_image_gray,
                                      next=current_image_gray, flow=flow,
                                      pyr_scale=0.8, levels=15, winsize=5,
                                      iterations=10, poly_n=5, poly_sigma=0,
                                      flags=10)

  mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
  hsv[..., 0] = ang * 180 / np.pi / 2
  hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
  return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) 
Developer ID: ferreirafabio, Project: video2tfrecord, Lines of code: 20, Source file: video2tfrecord.py

Example 8: main

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def main():
    src = cv2.imread('src.jpg', cv2.IMREAD_GRAYSCALE)
    tpl = cv2.imread('tpl.jpg', cv2.IMREAD_GRAYSCALE)
    result = cv2.matchTemplate(src, tpl, cv2.TM_CCOEFF_NORMED)
    result = cv2.normalize(result, dst=None, alpha=0, beta=1,
                           norm_type=cv2.NORM_MINMAX, dtype=-1)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
    matchLoc = maxLoc
    draw1 = cv2.rectangle(
        src, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    draw2 = cv2.rectangle(
        result, matchLoc, (matchLoc[0] + tpl.shape[1], matchLoc[1] + tpl.shape[0]), 0, 2, 8, 0)
    cv2.imshow('draw1', draw1)
    cv2.imshow('draw2', draw2)
    cv2.waitKey(0)
    print(src.shape)
    print(tpl.shape)
    print(result.shape)
    print(matchLoc)
Developer ID: cynricfu, Project: dual-fisheye-video-stitching, Lines of code: 21, Source file: template_matching.py

Example 9: capture_histogram

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def capture_histogram(path_of_sample):

    # read the image
    color = cv2.imread(path_of_sample)

    # convert to HSV
    color_hsv = cv2.cvtColor(color, cv2.COLOR_BGR2HSV)

    # compute the histogram
    object_hist = cv2.calcHist([color_hsv],      # image
                               [0, 1],           # channels
                               None,             # no mask
                               [180, 256],       # size of histogram
                               [0, 180, 0, 256]  # channel values
                               )

    # min max normalization
    cv2.normalize(object_hist, object_hist, 0, 255, cv2.NORM_MINMAX)

    return object_hist 
Developer ID: PacktPublishing, Project: Hands-On-Machine-Learning-with-OpenCV-4, Lines of code: 22, Source file: object_detection_using_color.py

Example 10: generate_target

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def generate_target(object_file, target_name):
    border = 20
    size = [960, 720]

    foreground = cv2.imread(object_file, cv2.IMREAD_UNCHANGED)
    if foreground is None:
        return False
    cv2.normalize(foreground, foreground, 0, 255, cv2.NORM_MINMAX)
    foreground = foreground.astype(numpy.uint8)

    ratio = numpy.amin(numpy.divide(
            numpy.subtract(size, [2*border, 2*border]), foreground.shape[0:2]))
    foreground_size = numpy.floor(numpy.multiply(foreground.shape[0:2], ratio)).astype(int)
    foreground = cv2.resize(foreground, (foreground_size[1], foreground_size[0]))
    foreground = image_fill(foreground,size,[0,0,0,0])

    cv2.imwrite(target_name, foreground) 
Developer ID: eti-p-doray, Project: unet-gan-matting, Lines of code: 19, Source file: combine.py

Example 11: get_blur_im

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def get_blur_im(self):
        """downscale and blur the image"""
        # preprocess image
        dwnscl_factor = 4; # Hydra images' shape is divisible by 4
        blr_sigma = 17; # blur the image a bit, seems to work better
        new_shape = (self.img.shape[1]//dwnscl_factor, # as x,y, not row,columns
                     self.img.shape[0]//dwnscl_factor)

        try:
            dwn_gray_im = cv2.resize(self.img, new_shape)
        except:
            pdb.set_trace()
        # apply blurring
        blur_im = cv2.GaussianBlur(dwn_gray_im, (blr_sigma,blr_sigma),0)
        # normalise between 0 and 255
        blur_im = cv2.normalize(blur_im, None, alpha=0, beta=255,
                                norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        return blur_im 
Developer ID: ver228, Project: tierpsy-tracker, Lines of code: 20, Source file: FOVMultiWellsSplitter.py

Example 12: updateROIs

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def updateROIs(self):
        #useful for resizing events
        if self.Ifull.size == 0:
            self.twoViews.cleanCanvas()
        else:
            cur = self.ui.tabWidget.currentIndex()
            if cur == self.tab_keys['mask']:
                I1, I2 = self.Ifull, self.Imask
            elif cur == self.tab_keys['bgnd']:
                I1 = self.Ifull
                I2 = np.zeros_like(self.IsubtrB)
                cv2.normalize(self.IsubtrB,I2,0,255,cv2.NORM_MINMAX)
            else:
                I1, I2 = self.Ifull, self.Ifull

            qimage_roi1 = self._numpy2qimage(I1)
            qimage_roi2 = self._numpy2qimage(I2)
            self.twoViews.setPixmap(qimage_roi1, qimage_roi2) 
Developer ID: ver228, Project: tierpsy-tracker, Lines of code: 20, Source file: GetMaskParams.py

Example 13: normalize_nn

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def normalize_nn(transM, sigma=1):
    """
    Normalize transition matrix using gaussian weighing
    Input:
        transM: (k,k)
        sigma: var=sigma^2 of gaussian weight between elements
    Output: transM: (k,k)
    """
    # Make weights Gaussian and normalize
    k = transM.shape[0]
    transM[np.nonzero(transM)] = np.exp(
        -np.square(transM[np.nonzero(transM)]) / sigma**2)
    transM[np.arange(k), np.arange(k)] = 1.
    normalization = np.dot(transM, np.ones(k))
    # This is inefficient, bottom line is better ..
    # transM = np.dot(np.diag(1. / normalization), transM)
    transM = (1. / normalization).reshape((-1, 1)) * transM
    return transM 
Developer ID: pathak22, Project: videoseg, Lines of code: 20, Source file: nlc.py
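
A hedged usage sketch for normalize_nn (the small transition matrix is illustrative; zeros mark non-neighbours). Note that the function modifies its argument in place before the final scaling, hence the copy:

import numpy as np

transM = np.array([[0.0, 0.5, 0.0],
                   [0.5, 0.0, 1.2],
                   [0.0, 1.2, 0.0]])
W = normalize_nn(transM.copy(), sigma=1)
print(W.sum(axis=1))  # each row sums to 1 after normalization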

Example 14: consensus_vote

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def consensus_vote(votes, transM, frameEnd, iters):
    """
    Perform iterative consensus voting
    """
    sTime = time.time()
    for t in range(iters):
        votes = np.dot(transM, votes)
        # normalize per frame
        for i in range(frameEnd.shape[0]):
            currStartF = 1 + frameEnd[i - 1] if i > 0 else 0
            currEndF = frameEnd[i]
            frameVotes = np.max(votes[currStartF:1 + currEndF])
            votes[currStartF:1 + currEndF] /= frameVotes + (frameVotes <= 0)
    eTime = time.time()
    print('Consensus voting finished: %.2f s' % (eTime - sTime))
    return votes 
Developer ID: pathak22, Project: videoseg, Lines of code: 18, Source file: nlc.py

Example 15: addModelHistogram

# Required module: import cv2 [as alias]
# Or: from cv2 import normalize [as alias]
def addModelHistogram(self, model_frame, name=''):
        """Add the histogram to internal container. If the name of the object
           is already present then replace that histogram with a new one.

        @param model_frame the frame to add to the model, its histogram
            is obtained and saved in internal list.
        @param name a string representing the name of the model.
            If nothing is specified then the name will be the index of the element.
        """
        if(self.hist_type=='HSV'): model_frame = cv2.cvtColor(model_frame, cv2.COLOR_BGR2HSV)
        elif(self.hist_type=='GRAY'): model_frame = cv2.cvtColor(model_frame, cv2.COLOR_BGR2GRAY)
        elif(self.hist_type=='RGB'): model_frame = cv2.cvtColor(model_frame, cv2.COLOR_BGR2RGB)
        hist = cv2.calcHist([model_frame], self.channels, None, self.hist_size, self.hist_range)
        hist = cv2.normalize(hist, hist).flatten()
        if name == '': name = str(len(self.model_list))
        if name not in self.name_list:
            self.model_list.append(hist)
            self.name_list.append(name)
        else:
            for i in range(len(self.name_list)):
                if self.name_list[i] == name:
                    self.model_list[i] = hist
                    break 
Developer ID: mpatacchiola, Project: deepgaze, Lines of code: 25, Source file: color_classification.py


Note: The cv2.normalize examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and the source code copyright belongs to the original authors; please consult the corresponding project's License before distributing or using it. Do not reproduce without permission.