

Python cv2.calcOpticalFlowFarneback Method Code Examples

This page collects typical usage examples of the cv2.calcOpticalFlowFarneback method in Python. If you are wondering what cv2.calcOpticalFlowFarneback does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples for the cv2 module it belongs to.


The sections below present 15 code examples of the cv2.calcOpticalFlowFarneback method, ordered by popularity.
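Before the individual projects, a minimal self-contained sketch of the call itself may help. Everything in it is illustrative rather than taken from any project below: the input frames are synthetic (a blurred random image and a copy shifted three pixels to the right), and the parameter values simply mirror the 0.5, 3, 15, 3, 5, 1.2, 0 combination that several of the examples below use.

import cv2
import numpy as np

# Synthetic test pair: a smooth random texture and a copy shifted
# 3 pixels to the right, so the true flow is roughly (dx=3, dy=0).
prev_gray = cv2.GaussianBlur(
    np.random.randint(0, 256, (240, 320), dtype=np.uint8), (0, 0), 3)
next_gray = np.roll(prev_gray, shift=3, axis=1)

# Dense Farneback optical flow: returns an (H, W, 2) float32 array with
# the per-pixel (dx, dy) displacement from prev_gray to next_gray.
flow = cv2.calcOpticalFlowFarneback(
    prev_gray, next_gray, None,
    pyr_scale=0.5,   # each pyramid level is half the size of the previous one
    levels=3,        # number of pyramid levels
    winsize=15,      # averaging window size
    iterations=3,    # iterations at each pyramid level
    poly_n=5,        # pixel neighborhood for the polynomial expansion
    poly_sigma=1.2,  # Gaussian sigma for the polynomial expansion
    flags=0)

mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
print(flow.shape, "median dx:", float(np.median(flow[..., 0])))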

Example 1: compute_dense_optical_flow

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def compute_dense_optical_flow(prev_image, current_image):
  old_shape = current_image.shape
  prev_image_gray = cv2.cvtColor(prev_image, cv2.COLOR_BGR2GRAY)
  current_image_gray = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)
  assert current_image.shape == old_shape
  hsv = np.zeros_like(prev_image)
  hsv[..., 1] = 255
  flow = None
  flow = cv2.calcOpticalFlowFarneback(prev=prev_image_gray,
                                      next=current_image_gray, flow=flow,
                                      pyr_scale=0.8, levels=15, winsize=5,
                                      iterations=10, poly_n=5, poly_sigma=0,
                                      flags=10)

  mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
  hsv[..., 0] = ang * 180 / np.pi / 2
  hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
  return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) 
Author: ferreirafabio, Project: video2tfrecord, Lines: 20, Source: video2tfrecord.py

Example 2: compute_opticalflow

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def compute_opticalflow(prev_image, cur_image, args):
  prev_gray = cv2.cvtColor(prev_image, cv2.COLOR_RGB2GRAY)
  cur_gray  = cv2.cvtColor(cur_image, cv2.COLOR_RGB2GRAY)
  pyr_scale = args.pyr_scale
  pyr_levels = args.pyr_levels
  winsize = args.winsize
  iterations = args.iterations
  poly_n = args.poly_n
  poly_sigma = args.poly_sigma
  flow = cv2.calcOpticalFlowFarneback(prev_gray, cur_gray, flow=None,
                                      pyr_scale=pyr_scale,
                                      levels=pyr_levels,
                                      iterations=iterations,
                                      winsize=winsize,
                                      poly_n=poly_n,
                                      poly_sigma=poly_sigma,
                                      flags=0)
  return flow 
Author: linjieyangsc, Project: video_seg, Lines: 20, Source: image_util.py

Example 3: run_farneback

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def run_farneback(frames):
    try:
        return cv2.calcOpticalFlowFarneback(
            frames[0], frames[1],
            # options, defaults
            None,  # output
            0.5,  # pyr_scale, 0.5
            10,  # levels, 3
            min(frames[0].shape[:2]) // 5,  # winsize, 15
            10,  # iterations, 3
            7,  # poly_n, 5
            1.5,  # poly_sigma, 1.2
            cv2.OPTFLOW_FARNEBACK_GAUSSIAN,  # flags, 0
        )
    except cv2.error:
        return None 
Author: facebookresearch, Project: DetectAndTrack, Lines: 18, Source: tracking_engine.py

Example 4: extract_optical_flow

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def extract_optical_flow(fn, n_frames=34):
    img = dd.image.load(fn)
    if img.shape != (128*34, 128, 3):
        return []
    frames = np.array_split(img, 34, axis=0)
    grayscale_frames = [fr.mean(-1) for fr in frames]
    mags = []
    skip_frames = np.random.randint(34 - n_frames + 1)
    middle_frame = frames[np.random.randint(skip_frames, skip_frames+n_frames)]
    im0 = grayscale_frames[skip_frames]
    for f in range(1+skip_frames, 1+skip_frames+n_frames-1):
        im1 = grayscale_frames[f]
        flow = cv2.calcOpticalFlowFarneback(im0, im1,
                    None, # flow
                    0.5, # pyr_scale
                    3, # levels
                    np.random.randint(3, 20), # winsize
                    3, #iterations
                    5, #poly_n 
                    1.2, #poly_sigma
                    0 # flags
        )
        mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
        mags.append(mag)
        im0 = im1
    mag = np.sum(mags, 0)
    mag = mag.clip(min=0)
    #norm_mag = np.tanh(mag * 10000)
    norm_mag = (mag - mag.min()) / (mag.max() - mag.min() + 1e-5)
    outputs = []
    outputs.append((middle_frame, norm_mag))
    return outputs 
Author: gustavla, Project: self-supervision, Lines: 34, Source: video_jpeg_rolls_flow_saliency.py

Example 5: denseOpticalFlow

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def denseOpticalFlow():
    # use 0 for webcam capturing
    # cap = cv2.VideoCapture(0)

    cap = cv2.VideoCapture('test/Pedestrian overpass.mp4')
    ret, frame1 = cap.read()
    prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[...,1] = 255

    while(1):
        ret, frame2 = cap.read()
        next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])

        hsv[...,0] = ang*180/np.pi/2
        hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)

        # print(np.sum(mag[100:300, 100:300]))
        if (np.sum(mag)> 100000):
            print('motion detected')

        bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
        cv2.imshow('frame2',bgr)

        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        elif k == ord('s'):
            cv2.imwrite('opticalfb.png',frame2)
            cv2.imwrite('opticalhsv.png',bgr)
        prvs = next

    cap.release()
    cv2.destroyAllWindows() 
Author: sahibdhanjal, Project: Mask-RCNN-Pedestrian-Detection, Lines: 38, Source: opticalFlow.py

Example 6: extract_optical_flow

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def extract_optical_flow(fn, times, frames=8, scale_factor=1.0):
    cap = cv2.VideoCapture(fn)
    n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    outputs = []
    if n_frames < frames * 2:
        return outputs

    def resize(im):
        if scale_factor != 1.0:
            new_size = (int(im.shape[1] * scale_factor), int(im.shape[0] * scale_factor))
            return cv2.resize(im, new_size, interpolation=cv2.INTER_LINEAR)
        else:
            return im

    for t in times:
        cap.set(cv2.CAP_PROP_POS_FRAMES, min(t * n_frames, n_frames - 1 - frames))
        ret, frame0 = cap.read()
        im0 = resize(cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY))
        mags = []
        middle_frame = frame0
        for f in range(frames - 1):
            ret, frame1 = cap.read()
            if f == frames // 2:
                middle_frame = frame1
            im1 = resize(cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY))
            flow = cv2.calcOpticalFlowFarneback(im0, im1,
                        None, 0.5, 3, 15, 3, 5, 1.2, 0)
            mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
            mags.append(mag)
            im0 = im1
        mag = np.sum(mags, 0)
        mag = mag.clip(min=0)
        norm_mag = (mag - mag.min()) / (mag.max() - mag.min() + 1e-5)
        x = middle_frame[..., ::-1].astype(np.float32) / 255
        outputs.append((x, norm_mag))
    return outputs 
Author: gustavla, Project: self-supervision, Lines: 38, Source: video_avi_flow_saliency.py

Example 7: MFMGetFM

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def MFMGetFM(self, src):
        # convert scale
        I8U = np.uint8(255 * src)
        cv2.waitKey(10)
        # calculating optical flows
        if self.prev_frame is not None:
            farne_pyr_scale = pySaliencyMapDefs.farne_pyr_scale
            farne_levels = pySaliencyMapDefs.farne_levels
            farne_winsize = pySaliencyMapDefs.farne_winsize
            farne_iterations = pySaliencyMapDefs.farne_iterations
            farne_poly_n = pySaliencyMapDefs.farne_poly_n
            farne_poly_sigma = pySaliencyMapDefs.farne_poly_sigma
            farne_flags = pySaliencyMapDefs.farne_flags
            flow = cv2.calcOpticalFlowFarneback(
                prev=self.prev_frame,
                next=I8U,
                pyr_scale=farne_pyr_scale,
                levels=farne_levels,
                winsize=farne_winsize,
                iterations=farne_iterations,
                poly_n=farne_poly_n,
                poly_sigma=farne_poly_sigma,
                flags=farne_flags,
                flow=None
            )
            flowx = flow[..., 0]
            flowy = flow[..., 1]
        else:
            flowx = np.zeros(I8U.shape)
            flowy = np.zeros(I8U.shape)
        # create Gaussian pyramids
        dst_x = self.FMGaussianPyrCSD(flowx)
        dst_y = self.FMGaussianPyrCSD(flowy)
        # update the current frame
        self.prev_frame = np.uint8(I8U)
        # return
        return dst_x, dst_y

    # conspicuity maps
    # standard range normalization 
Author: tyarkoni, Project: pliers, Lines: 42, Source: pySaliencyMap.py

Example 8: optic_flow

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def optic_flow(mov, tmpl, nflows):
    """ optic flow computation using farneback """
    window = int(1 / 0.2) # window size
    nframes, Ly, Lx = mov.shape
    mov = mov.astype(np.float32)
    mov = np.reshape(mov[:int(np.floor(nframes/window)*window),:,:],
                                  (-1,window,Ly,Lx)).mean(axis=1)

    mov = mov[np.random.permutation(mov.shape[0])[:min(nflows,mov.shape[0])], :, :]

    pyr_scale=.5
    levels=3
    winsize=100
    iterations=15
    poly_n=5
    poly_sigma=1.2 / 5
    flags=0

    nframes, Ly, Lx = mov.shape
    norms = np.zeros((nframes,))
    flows = np.zeros((nframes,Ly,Lx,2))

    for n in range(nframes):
        flow = cv2.calcOpticalFlowFarneback(
            tmpl, mov[n,:,:], None, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)

        flows[n,:,:,:] = flow
        norms[n] = ((flow**2).sum()) ** 0.5

    return flows, norms 
Author: MouseLand, Project: suite2p, Lines: 32, Source: metrics.py

Example 9: get_direction

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def get_direction(self, frame1, frame2, show=False):
        frame1 = cv2.resize(frame1, (self.width, self.height))
        frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        frame2 = cv2.resize(frame2, (self.width, self.height))
        frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

        flow = cv2.calcOpticalFlowFarneback(frame1[self.height_start:self.height_end],
                                            frame2[self.height_start:self.height_end], None, 0.5, 3, 15, 1, 5, 1.2, 0)
        flow_avg = np.median(flow, axis=(0, 1))  # [x, y]

        move_x = -1 * flow_avg[0]
        move_y = -1 * flow_avg[1]

        if show:
            hsv = np.zeros((self.height_end - self.height_start, self.width, 3))
            hsv[...,1] = 255
            mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            hsv[..., 0] = ang * 180 / np.pi / 2
            hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
            bgr = cv2.cvtColor(np.array(hsv).astype(np.uint8), cv2.COLOR_HSV2BGR)

            cv2.imshow('opt_flow', bgr)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print('User Interrupted')
                exit(1)

        return move_x, move_y 
Author: YoongiKim, Project: Walk-Assistant, Lines: 29, Source: opt_flow.py

Example 10: _calc_optical_flow

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def _calc_optical_flow(prev, next_):
    flow = cv2.calcOpticalFlowFarneback(prev, next_, flow=None, pyr_scale=0.5, levels=3, winsize=15, iterations=3,
                                        poly_n=5, poly_sigma=1.2, flags=0)
    return flow 
Author: woodfrog, Project: ActionRecognition, Lines: 6, Source: OF_utils.py

Example 11: dense_flow

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def dense_flow(image):
    global prvs
    next = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)

    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    prvs = next
    return image 
Author: charlielito, Project: snapchat-filters-opencv, Lines: 15, Source: dense_optflow.py

Example 12: next

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def next(self, arImage:np.array) -> np.array:

        # first?
        if self.arPrev.shape == (1,1): return self.first(arImage)

        # get image in black&white
        arCurrent = cv2.cvtColor(arImage, cv2.COLOR_BGR2GRAY)

        if self.sAlgorithm == "tvl1":
            arFlow = self.oTVL1.calc(self.arPrev, arCurrent, None)
        elif self.sAlgorithm == "farnback":
            arFlow = cv2.calcOpticalFlowFarneback(self.arPrev, arCurrent, flow=None, 
                pyr_scale=0.5, levels=1, winsize=15, iterations=2, poly_n=5, poly_sigma=1.1, flags=0)
        else: raise ValueError("Unknown optical flow type")

        # only 2 dims
        arFlow = arFlow[:, :, 0:2]

        # truncate to +/-15.0, then rescale to [-1.0, 1.0]
        arFlow[arFlow > self.fBound] = self.fBound 
        arFlow[arFlow < -self.fBound] = -self.fBound
        arFlow = arFlow / self.fBound

        if self.bThirdChannel:
            # add third empty channel
            arFlow = np.concatenate((arFlow, self.arZeros), axis=2) 

        self.arPrev = arCurrent

        return arFlow 
Author: FrederikSchorr, Project: sign-language, Lines: 32, Source: opticalflow.py

Example 13: processFrame

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def processFrame(self, frame, distance=None, timestep=1):
        '''
        Processes one image frame, returning summed X,Y flow and frame.

        Optional inputs are:

          distance - distance in meters to image (focal length) for returning flow in meters per second
          timestep - time step in seconds for returning flow in meters per second
        '''

        frame2 = cv2.resize(frame, self.size)
 
        gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

        xsum, ysum = 0,0

        xvel, yvel = 0,0

        flow = None
        
        if not self.prev_gray is None:

            flow = cv2.calcOpticalFlowFarneback(self.prev_gray, gray, flow, pyr_scale=0.5, levels=5, winsize=13, iterations=10, poly_n=5, poly_sigma=1.1, flags=0) 

            for y in range(0, flow.shape[0], self.move_step):

                for x in range(0, flow.shape[1], self.move_step):

                    fx, fy = flow[y, x]
                    xsum += fx
                    ysum += fy

                    cv2.line(frame2, (x,y), (int(x+fx),int(y+fy)), self.mv_color_bgr)
                    cv2.circle(frame2, (x,y), 1, self.mv_color_bgr, -1)

            # Default to system time if no timestep
            curr_time = time.time()
            if not timestep:
                timestep = (curr_time - self.prev_time) if self.prev_time else 1
            self.prev_time = curr_time

            xvel = self._get_velocity(flow, xsum, flow.shape[1], distance, timestep)
            yvel = self._get_velocity(flow, ysum, flow.shape[0], distance, timestep)

        self.prev_gray = gray

        if self.window_name:
            cv2.imshow(self.window_name, frame2)
            if cv2.waitKey(1) & 0x000000FF== 27: # ESC
                return None
        
        # Return x,y velocities and new image with flow lines
        return  xvel, yvel, frame2 
Author: simondlevy, Project: OpenCV-Python-Hacks, Lines: 55, Source: __init__.py

Example 14: extract_optical_flow

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def extract_optical_flow(fn, times, frames=8, scale_factor=1.0):
    cap = cv2.VideoCapture(fn)
    if not cap.isOpened():
        return []
    n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    outputs = []
    if n_frames < frames * 2:
        return outputs

    def resize(im):
        if scale_factor != 1.0:
            new_size = (int(im.shape[1] * scale_factor), int(im.shape[0] * scale_factor))
            return cv2.resize(im, new_size, interpolation=cv2.INTER_LINEAR)
        else:
            return im

    for t in times:
        cap.set(cv2.CAP_PROP_POS_FRAMES, min(t * n_frames, n_frames - 1 - frames))
        ret, frame0 = cap.read()
        im0 = resize(cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY))
        mags = []
        middle_frame = frame0
        flows = []
        for f in range(frames - 1):
            ret, frame1 = cap.read()
            if f == frames // 2:
                middle_frame = frame1
            im1 = resize(cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY))
            flow = cv2.calcOpticalFlowFarneback(im0, im1,
                        None,
                        0.5, # py_scale
                        8,   # levels
                        int(40 * scale_factor),  # winsize
                        10,   # iterations
                        5,  # poly_n
                        1.1, # poly_sigma
                        cv2.OPTFLOW_FARNEBACK_GAUSSIAN)
            #mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
            #mags.append(mag)
            flows.append(flow)
            im0 = im1
        flow = (np.mean(flows, 0) / 100).clip(-1, 1)

        #flow = np.mean(flows, 0)
        #flow /= (flow.mean() * 5 + 1e-5)
        #flow = flow.clip(-1, 1)
        #flows = flows / (np.mean(flows, 0, keepdims=True) + 1e-5)
        x = middle_frame[..., ::-1].astype(np.float32) / 255
        outputs.append((x, flow))
    return outputs 
Author: gustavla, Project: self-supervision, Lines: 52, Source: video_avi_flow.py

Example 15: run_parameter

# Required module: import cv2 [as alias]
# Or: from cv2 import calcOpticalFlowFarneback [as alias]
def run_parameter(config_item):
    prev_img        = cv2.imread(config_item["files"]["prevImg"])
    curr_img        = cv2.imread(config_item["files"]["currImg"])
    flow_method     = config_item["parameter"]["flow_method"]
    estimate_base   = config_item["files"]["estimatepath"]  + "/"
    
    if os.path.exists(estimate_base) == False:
       os.makedirs(estimate_base)

    if os.path.exists(config_item["files"]["estflow"]):
        return
    #  compute optical flow
    if  flow_method.find("dual") >= 0:
        dual_proc = cv2.DualTVL1OpticalFlow_create(config_item["parameter"]["tau"],
                                                   config_item["parameter"]["lambda"],
                                                   config_item["parameter"]["theta"],
                                                   config_item["parameter"]["nscales"],
                                                   config_item["parameter"]["warps"])
        est_flow = np.zeros(shape=(prev_img.shape[0], prev_img.shape[1],2), dtype=np.float32)
        dual_proc.calc(cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY), cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY), est_flow)
    #
    elif flow_method.find("farneback") >= 0:
        est_flow = cv2.calcOpticalFlowFarneback(cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY),
                                                cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY),
                                                None, 0.5, 3, 15, 3, 5, 1.2, 0)
    elif flow_method.find("plk") >= 0:
        prev_pts = list()
        for r in range(prev_img.shape[0]):
            for c in range(prev_img.shape[1]):
                prev_pts.append((c,r))
        prev_pts = np.array(prev_pts, dtype=np.float32)
        curr_pts, st, err = cv2.calcOpticalFlowPyrLK(cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY),
                                                cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY),
                                               prev_pts, None,
                                               winSize=(21,21), maxLevel=3, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.001))
        est_flow = np.zeros(shape=(prev_img.shape[0], prev_img.shape[1],2), dtype=np.float32)
        n = 0
        flow_pts = curr_pts - prev_pts
        for r in range(prev_img.shape[0]):
            for c in range(prev_img.shape[1]):
                est_flow[r, c, :] = flow_pts[n,:]
                n = n + 1
    #here alternative optical flow methods can be applied
    #
    else:
        raise ValueError("flow method has not been implemented")

    ut.writeFlowFile(config_item["files"]["estflow"], est_flow)
    ut.drawFlowField(config_item["files"]["estflow"][:-3] + "png", est_flow)
    print("Done -> ", config_item["files"]["estflow"]) 
Author: tsenst, Project: CrowdFlow, Lines: 52, Source: opticalflow_estimate.py


Note: The cv2.calcOpticalFlowFarneback examples in this article were compiled by 純淨天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets are excerpted from open-source projects contributed by their original authors; copyright remains with those authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce this article without permission.