

Python cv2.multiply Method Code Examples

This article collects typical usage examples of the cv2.multiply method in Python. If you are wondering what exactly cv2.multiply does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the cv2 module, where this method lives.


Below are 15 code examples of cv2.multiply, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
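
Before diving into the examples, it helps to know that cv2.multiply computes a per-element product and saturates the result at the destination dtype's range, unlike plain NumPy multiplication on uint8 arrays, which wraps around. A minimal sketch (array values are illustrative):

import cv2
import numpy as np

a = np.array([[10, 100, 200]], np.uint8)
b = np.array([[2, 2, 2]], np.uint8)

print(cv2.multiply(a, b))             # [[ 20 200 255]] -- 200*2=400 saturates to 255
print(cv2.multiply(a, b, scale=0.5))  # [[ 10 100 200]] -- optional extra scale factor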

Example 1: imnormalize_

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def imnormalize_(img, mean, std, to_rgb=True):
    """Inplace normalize an image with mean and std.

    Args:
        img (ndarray): Image to be normalized.
        mean (ndarray): The mean to be used for normalize.
        std (ndarray): The std to be used for normalize.
        to_rgb (bool): Whether to convert to rgb.

    Returns:
        ndarray: The normalized image.
    """
    # cv2 inplace normalization does not accept uint8
    assert img.dtype != np.uint8
    mean = np.float64(mean.reshape(1, -1))
    stdinv = 1 / np.float64(std.reshape(1, -1))
    if to_rgb:
        cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)  # inplace
    cv2.subtract(img, mean, img)  # inplace
    cv2.multiply(img, stdinv, img)  # inplace
    return img 
Developer: open-mmlab, Project: mmcv, Lines: 23, Source: photometric.py
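
A hedged usage sketch for the excerpt above: the image is synthetic, and the mean/std values are the familiar ImageNet statistics (an assumption for illustration; any arrays of matching shape work).

import numpy as np

img = (np.random.rand(32, 32, 3) * 255).astype(np.float32)  # BGR float image; uint8 is rejected by the assert
mean = np.array([123.675, 116.28, 103.53])
std = np.array([58.395, 57.12, 57.375])

out = imnormalize_(img, mean, std, to_rgb=True)  # modifies img in place and returns it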

Example 2: alpha_blend

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def alpha_blend(background_, foreground_, mask_):
    background = background_.copy()
    foreground = foreground_.copy()
    mask = mask_.copy()

    background = background.astype(float)
    foreground = foreground.astype(float)
    mask = mask.astype(float) / 255
    foreground = cv2.multiply(mask, foreground)
    background = cv2.multiply(1.0 - mask, background)
    image = cv2.add(foreground, background)

    return image 
Developer: zerofox-oss, Project: deepstar, Lines: 15, Source: cv.py
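
A possible invocation with synthetic inputs; note that mask_ must match the images in shape, including the channel count, because cv2.multiply requires operands of the same size and type:

import cv2
import numpy as np

background = np.full((100, 100, 3), 255, np.uint8)   # white canvas
foreground = np.zeros((100, 100, 3), np.uint8)
foreground[:] = (0, 0, 255)                          # red in BGR order
mask = np.zeros((100, 100, 3), np.uint8)
cv2.circle(mask, (50, 50), 30, (255, 255, 255), -1)  # blend only inside the circle

blended = alpha_blend(background, foreground, mask).astype(np.uint8)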

Example 3: imdenormalize

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def imdenormalize(img, mean, std, to_bgr=True):
    assert img.dtype != np.uint8
    mean = mean.reshape(1, -1).astype(np.float64)
    std = std.reshape(1, -1).astype(np.float64)
    img = cv2.multiply(img, std)  # make a copy
    cv2.add(img, mean, img)  # inplace
    if to_bgr:
        cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img)  # inplace
    return img 
Developer: open-mmlab, Project: mmcv, Lines: 11, Source: photometric.py
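
Examples 1 and 3 are inverses of each other, so a round trip should reproduce the input up to float32 rounding. A quick check, assuming both functions above are in scope and using the same illustrative statistics as before:

import numpy as np

img = (np.random.rand(8, 8, 3) * 255).astype(np.float32)
mean = np.array([123.675, 116.28, 103.53])
std = np.array([58.395, 57.12, 57.375])

restored = imdenormalize(imnormalize_(img.copy(), mean, std, to_rgb=True), mean, std, to_bgr=True)
assert np.allclose(restored, img, atol=1e-2)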

Example 4: _annotate

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def _annotate(self, im : np.array, annotations : list) -> np.array:
        '''
        - Arguments:
            - im: np.array of shape (h, w, 3)
            - annotations: a list with 3 entries:
                - masks: np.array of shape (nb_masks, h, w)
                - classes: np.array of shape (nb_masks, )
                - scores: np.array of shape (nb_masks, )
        
        - Returns:
            - annotated_im: image with the visual annotations embedded in it.
        '''
        masks = annotations[0]
        classes = annotations[1]
        
        to_return = im.copy().astype(float)

        # TODO: Add border to masks
        # TODO: Think about how to solve issue of overlapping masks
        # TODO: Add class names to masks

        for idx, mask in enumerate(masks):
            alpha = cv2.merge((mask, mask, mask))
            alpha = alpha.astype(float)
            alpha[alpha == 1.0] = self._transparency

            #1. Mask foreground
            foreground = np.zeros_like(to_return, dtype = float)
            foreground[:] = self.colors[int(classes[idx]) % len(self.colors)]
            foreground = cv2.multiply(alpha, foreground)
            
            #2. Image background
            background = cv2.multiply(1.0 - alpha, to_return)
            to_return = cv2.add(foreground, background)
        
        return to_return.astype(np.uint8) 
Developer: videoflow, Project: videoflow, Lines: 38, Source: annotators.py

Example 5: _mask

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def _mask(self, im : np.array, mask : np.array) -> np.array:
        if mask.shape[:2] != im.shape[:2]:
            raise ValueError("`mask` does not have same dimensions as `im`")
        im = im.astype(float)
        alpha = cv2.merge((mask, mask, mask))
        masked = cv2.multiply(im, alpha)
        return masked.astype(np.uint8) 
Developer: videoflow, Project: videoflow, Lines: 9, Source: transformers.py
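
The method appears to assume a single-channel mask whose values are already 0.0/1.0 after the float cast, so the multiply either keeps or zeroes each pixel. A standalone sketch of the same step:

import cv2
import numpy as np

im = np.random.randint(0, 256, (100, 100, 3), np.uint8)
mask = np.zeros((100, 100), float)
mask[25:75, 25:75] = 1.0                       # keep only the central square

alpha = cv2.merge((mask, mask, mask))          # replicate the mask across 3 channels
masked = cv2.multiply(im.astype(float), alpha).astype(np.uint8)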

Example 6: _find_blobs

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def _find_blobs(self, im, scoring_fun):
        grey = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        rad = int(self._adaptive_med_rad * im.shape[1])
        if rad % 2 == 0:
            rad += 1

        med = np.median(grey)
        scale = 255 / med
        cv2.multiply(grey, scale, dst=grey)
        bin = np.copy(grey)
        score_map = np.zeros_like(bin)
        for t in range(0, 255,5):
            cv2.threshold(grey, t, 255,cv2.THRESH_BINARY_INV,bin)
            if np.count_nonzero(bin) > 0.7 * im.shape[0] * im.shape[1]:
                continue
            if CV_VERSION == 3:
                _, contours, h = cv2.findContours(bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            else:
                contours, h = cv2.findContours(bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            bin.fill(0)
            for c in contours:
                score = scoring_fun(c, im)
                if score >0:
                    cv2.drawContours(bin,[c],0,score,-1)
            cv2.add(bin, score_map,score_map)
        return score_map 
Developer: gilestrolab, Project: ethoscope, Lines: 29, Source: target_roi_builder.py

Example 7: _pre_process_input_minimal

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def _pre_process_input_minimal(self, img, mask, t, darker_fg=True):
        blur_rad = int(self._object_expected_size * np.max(img.shape) / 2.0)

        if blur_rad % 2 == 0:
            blur_rad += 1

        if self._buff_grey is None:
            self._buff_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            if mask is None:
                mask = np.ones_like(self._buff_grey) * 255

        cv2.cvtColor(img,cv2.COLOR_BGR2GRAY, self._buff_grey)
        # cv2.imshow("dbg",self._buff_grey)
        cv2.GaussianBlur(self._buff_grey,(blur_rad,blur_rad),1.2, self._buff_grey)
        if darker_fg:
            cv2.subtract(255, self._buff_grey, self._buff_grey)

        # normalise so the mean intensity of the masked region maps to 128
        mean = cv2.mean(self._buff_grey, mask)
        scale = 128. / mean[0]
        cv2.multiply(self._buff_grey, scale, dst=self._buff_grey)

        if mask is not None:
            cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)
            return self._buff_grey 
Developer: gilestrolab, Project: ethoscope, Lines: 30, Source: adaptive_bg_tracker.py
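
Examples 6 and 7 share one idea: compute a reference statistic (the median, or the masked mean) and use cv2.multiply with a scalar so that statistic lands on a fixed grey level, with bright pixels clipping at 255 instead of wrapping. Reduced to its core, on a synthetic image:

import cv2
import numpy as np

grey = np.random.randint(0, 256, (100, 100), np.uint8)

mean = cv2.mean(grey)[0]             # cv2.mean returns a 4-tuple; take channel 0
scale = 128.0 / mean
cv2.multiply(grey, scale, dst=grey)  # in place; values above 255 saturate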

Example 8: changeBrightness

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def changeBrightness(self, frame = None, coefficient = None):
		"""
		Change the brightness of a frame.
		Args:
			frame: A tensor that contains an image.
			coefficient: A float that scales the brightness of the image.
									Defaults to a random float in the range [0, 2).
		Returns:
			A tensor with its brightness property changed.
		"""
		# Assertions
		if (self.assertion.assertNumpyType(frame) == False):
			raise ValueError("Frame has to be a numpy array.")
		if (len(frame.shape) == 3):
			channels = 3
		elif (len(frame.shape) == 2):
			channels = 1
		else:
			raise Exception("ERROR: Frame has to be either 1 or 3 channels.")
		if (coefficient is None):
			coefficient = np.random.rand()*2
		if (type(coefficient) != float):
			raise TypeError("ERROR: Coefficient parameter has to be of type float.")
		# Change brightness
		if (channels == 3):
			for i in range(channels):
				frame[:, :, i] = cv2.multiply(frame[:, :, i], coefficient)
		elif (channels == 1):
			frame[:, :] = cv2.multiply(frame[:, :], coefficient)
		# Force cast in case of overflow
		if (not (frame.dtype == np.uint8)):
			print("WARNING: Image is not dtype uint8. Forcing type.")
			frame = frame.astype(np.uint8)
		return frame 
Developer: lozuwa, Project: impy, Lines: 36, Source: ColorAugmenters.py
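
The per-channel loop above is deliberate: in OpenCV's arithmetic functions, a bare Python number is treated as a 4-element scalar padded with zeros, so a single cv2.multiply call on a 3-channel image would multiply two of the channels by zero. The loop also keeps OpenCV's saturating arithmetic, which NumPy's uint8 does not provide. A small demonstration with illustrative values:

import cv2
import numpy as np

row = np.array([[100, 200, 250]], np.uint8)   # single-channel row

print(cv2.multiply(row, 2.0))  # [[200 255 255]] -- saturates at 255
print(row * 2)                 # [[200 144 244]] -- NumPy uint8 wraps around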

Example 9: ut_generate_grassland_mask

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
# (this excerpt uses the alias: import cv2 as cv)
def ut_generate_grassland_mask():
    # An example of generating a soft mask for grassland segmentation
    import scipy.io as sio

    index = 16 - 1   # image index from 1
    data = sio.loadmat('../../data/UoT_soccer/train_val.mat')
    annotation = data['annotation']
    homo = annotation[0][index][1]  # ground truth homography

    # step 1: generate a 'hard' grass mask
    template_h = 74
    template_w = 115
    template_im = np.ones((template_h, template_w, 1), dtype=np.uint8) * 255

    grass_mask = IouUtil.homography_warp(homo, template_im, (1280, 720), 0)
    cv.imshow('grass mask', grass_mask)
    cv.waitKey(0)

    # step 2: generate a 'soft' grass mask
    dist_threshold = 30  # change this value to change mask boundary
    _, binary_im = cv.threshold(grass_mask, 10, 255, cv.THRESH_BINARY_INV)

    dist_im = cv.distanceTransform(binary_im, cv.DIST_L2, cv.DIST_MASK_PRECISE)

    dist_im[dist_im > dist_threshold] = dist_threshold
    soft_mask = 1.0 - dist_im / dist_threshold  # normalize to [0, 1]

    cv.imshow('soft mask', soft_mask)
    cv.waitKey(0)

    # step 3: soft mask on the original image
    stacked_mask = np.stack((soft_mask,) * 3, axis=-1)
    im = cv.imread('../../data/16.jpg')
    soft_im = cv.multiply(stacked_mask, im.astype(np.float32)).astype(np.uint8)
    cv.imshow('soft masked image', soft_im)
    cv.waitKey(0) 
Developer: lood339, Project: SCCvSD, Lines: 38, Source: iou_util.py
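
The soft-mask step is independent of the soccer data: threshold the hard mask, take a distance transform, clamp it, and invert. A compact sketch with a synthetic binary mask standing in for the warped template:

import cv2 as cv
import numpy as np

hard_mask = np.zeros((120, 160), np.uint8)
hard_mask[40:80, 40:120] = 255                 # stand-in for the warped grass mask

dist_threshold = 30
_, binary_im = cv.threshold(hard_mask, 10, 255, cv.THRESH_BINARY_INV)
dist_im = cv.distanceTransform(binary_im, cv.DIST_L2, cv.DIST_MASK_PRECISE)
dist_im[dist_im > dist_threshold] = dist_threshold
soft_mask = 1.0 - dist_im / dist_threshold     # 1.0 inside the mask, fading to 0.0 outside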

Example 10: get_training_data

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
# (this excerpt also uses numpy, time, and the project's random_transform / random_warp helpers)
def get_training_data( images,landmarks,batch_size):
  while 1:
    indices = numpy.random.choice(range(0,images.shape[0]),size=batch_size,replace=True)
    for i,index in enumerate(indices):
      image = images[index]
      seed  = int(time.time())
      image = random_transform( image, seed, **random_transform_args )
      closest = ( numpy.mean(numpy.square(landmarks[index]-landmarks),axis=(1,2)) ).argsort()[1:20]
      closest = numpy.random.choice(closest, 6, replace=False)
      closestMerged = numpy.dstack([  
                                      cv2.resize( random_transform( images[closest[0]][:,:,:3] ,seed, **random_transform_args) , (64,64)), 
                                      cv2.resize( random_transform( images[closest[1]][:,:,:3] ,seed, **random_transform_args) , (64,64)), 
                                      cv2.resize( random_transform( images[closest[2]][:,:,:3] ,seed, **random_transform_args) , (64,64)),
                                      cv2.resize( random_transform( images[closest[3]][:,:,:3] ,seed, **random_transform_args) , (64,64)),
                                      cv2.resize( random_transform( images[closest[4]][:,:,:3] ,seed, **random_transform_args) , (64,64)),
                                      cv2.resize( random_transform( images[closest[5]][:,:,:3] ,seed, **random_transform_args) , (64,64)),
                                    ])

      if i == 0:
          warped_images  = numpy.empty( (batch_size,)  + (64,64,3),   image.dtype )
          example_images = numpy.empty( (batch_size,)  + (64,64,18),  image.dtype )
          target_images  = numpy.empty( (batch_size,)  + (128,128,3), image.dtype )
          mask_images    = numpy.empty( (batch_size,)  + (128,128,1), image.dtype )

      warped_image =  random_warp( image[:,:,:3] )

      warped_image =  cv2.GaussianBlur( warped_image,(91,91),0 )

      image_mask = image[:,:,3].reshape((image.shape[0],image.shape[1],1)) * numpy.ones((image.shape[0],image.shape[1],3)).astype(float)


      foreground = cv2.multiply(image_mask, warped_image.astype(float))
      background = cv2.multiply(1.0 - image_mask, image[:,:,:3].astype(float))

      warped_image = numpy.add(background,foreground)

      warped_image = cv2.resize(warped_image,(64,64))

      warped_images[i]  = warped_image
      example_images[i] = closestMerged
      target_images[i]  = cv2.resize( image[:,:,:3], (128,128) )
      mask_images[i]    = cv2.resize( image[:,:,3], (128,128) ).reshape((128,128,1))
    yield warped_images,example_images,target_images,mask_images 
Developer: dfaker, Project: df, Lines: 45, Source: exampleTrainer.py

Example 11: _blend_alpha_uint8_elementwise_

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def _blend_alpha_uint8_elementwise_(image_fg, image_bg, alphas):
    betas = 1.0 - alphas

    is_2d = (alphas.ndim == 2 or alphas.shape[2] == 1)
    area = image_fg.shape[0] * image_fg.shape[1]
    if is_2d and area >= 64*64:
        if alphas.ndim == 3:
            alphas = alphas[:, :, 0]
            betas = betas[:, :, 0]

        result = []
        for c in range(image_fg.shape[2]):
            image_fg_mul = image_fg[:, :, c]
            image_bg_mul = image_bg[:, :, c]
            image_fg_mul = cv2.multiply(image_fg_mul, alphas, dtype=cv2.CV_8U)
            image_bg_mul = cv2.multiply(image_bg_mul, betas, dtype=cv2.CV_8U)
            image_fg_mul = cv2.add(image_fg_mul, image_bg_mul, dst=image_fg_mul)
            result.append(image_fg_mul)

        image_blend = _merge_channels(result, image_fg.ndim == 3)
        return image_blend
    else:
        if alphas.ndim == 2:
            alphas = alphas[..., np.newaxis]
            betas = betas[..., np.newaxis]
        if alphas.shape[2] != image_fg.shape[2]:
            alphas = np.tile(alphas, (1, 1, image_fg.shape[2]))
            betas = np.tile(betas, (1, 1, image_fg.shape[2]))

        alphas = alphas.ravel()
        betas = betas.ravel()
        input_shape = image_fg.shape

        image_fg_mul = image_fg.ravel()
        image_bg_mul = image_bg.ravel()
        image_fg_mul = cv2.multiply(
            image_fg_mul, alphas, dtype=cv2.CV_8U, dst=image_fg_mul
        )
        image_bg_mul = cv2.multiply(
            image_bg_mul, betas, dtype=cv2.CV_8U, dst=image_bg_mul
        )

        image_fg_mul = cv2.add(image_fg_mul, image_bg_mul, dst=image_fg_mul)

        return image_fg_mul.reshape(input_shape)


# Added in 0.5.0.
# (Extracted from blend_alpha().) 
Developer: aleju, Project: imgaug, Lines: 51, Source: blend.py
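
The fast path above leans on cv2.multiply accepting a float alpha array against a uint8 channel when an explicit output depth is given. Boiled down, with illustrative shapes and values:

import cv2
import numpy as np

fg = np.full((64, 64), 200, np.uint8)
bg = np.full((64, 64), 50, np.uint8)
alpha = np.full((64, 64), 0.25)                           # float64 weights in [0, 1]

fg_part = cv2.multiply(fg, alpha, dtype=cv2.CV_8U)        # fg * alpha, rounded to uint8
bg_part = cv2.multiply(bg, 1.0 - alpha, dtype=cv2.CV_8U)  # bg * (1 - alpha)
blended = cv2.add(fg_part, bg_part)                       # ~0.25*200 + 0.75*50 = 87.5 per pixel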

Example 12: update

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def update(self, img_t, t, fg_mask=None):
        dt = float(t - self.last_t)
        if dt < 0:
            # raise EthoscopeException("Negative time interval between two consecutive frames")
            raise NoPositionError("Negative time interval between two consecutive frames")

        # clip the half life to possible value:
        self._current_half_life = np.clip(self._current_half_life, self._min_half_life, self._max_half_life)

        # ensure preallocated buffers exist. otherwise, initialise them
        if self._bg_mean is None:
            self._bg_mean = img_t.astype(np.float32)
            # self._bg_sd = np.zeros_like(img_t)
            # self._bg_sd.fill(128)

        if self._buff_alpha_matrix is None:
            self._buff_alpha_matrix = np.ones_like(img_t,dtype = np.float32)

        # the learning rate, alpha, is an exponential function of the half-life;
        # it corresponds to how much the present frame should contribute to the background

        lam =  np.log(2)/self._current_half_life
        # how much the current frame should be accounted for
        alpha = 1 - np.exp(-lam * dt)

        # set up a matrix of learning rates; it is 0 wherever the foreground mask is true
        self._buff_alpha_matrix.fill(alpha)
        if fg_mask is not None:
            cv2.dilate(fg_mask,None,fg_mask)
            cv2.subtract(self._buff_alpha_matrix, self._buff_alpha_matrix, self._buff_alpha_matrix, mask=fg_mask)


        if self._buff_invert_alpha_mat is None:
            self._buff_invert_alpha_mat = 1 - self._buff_alpha_matrix
        else:
            np.subtract(1, self._buff_alpha_matrix, self._buff_invert_alpha_mat)

        np.multiply(self._buff_alpha_matrix, img_t, self._buff_alpha_matrix)
        np.multiply(self._buff_invert_alpha_mat, self._bg_mean, self._buff_invert_alpha_mat)
        np.add(self._buff_alpha_matrix, self._buff_invert_alpha_mat, self._bg_mean)

        self.last_t = t 
Developer: gilestrolab, Project: ethoscope, Lines: 44, Source: adaptive_bg_tracker.py
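
The weight given to the incoming frame follows from the half-life: lam = ln(2) / half_life, so after one half-life of elapsed time the old background's influence has decayed by half. The same arithmetic with scalars, under assumed time units:

import numpy as np

half_life = 10.0                       # assumed time units
dt = 1.0                               # interval since the previous frame
lam = np.log(2) / half_life
alpha = 1 - np.exp(-lam * dt)          # ~0.067: weight of the current frame

bg, pixel = 100.0, 150.0
bg = alpha * pixel + (1 - alpha) * bg  # background creeps toward the new frame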

Example 13: _pre_process_input

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def _pre_process_input(self, img, mask, t):

        blur_rad = int(self._object_expected_size * np.max(img.shape) * 2.0)
        if blur_rad % 2 == 0:
            blur_rad += 1


        if self._buff_grey is None:
            self._buff_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            self._buff_grey_blurred = np.empty_like(self._buff_grey)
            # self._buff_grey_blurred = np.empty_like(self._buff_grey)
            if mask is None:
                mask = np.ones_like(self._buff_grey) * 255

            mask_conv = cv2.blur(mask,(blur_rad, blur_rad))

            self._buff_convolved_mask  = (1/255.0 *  mask_conv.astype(np.float32))


        cv2.cvtColor(img,cv2.COLOR_BGR2GRAY, self._buff_grey)

        hist = cv2.calcHist([self._buff_grey], [0], None, [256], [0,255]).ravel()
        hist = np.convolve(hist, [1] * 3)
        mode =  np.argmax(hist)

        self._smooth_mode.append(mode)
        self._smooth_mode_tstamp.append(t)

        if len(self._smooth_mode_tstamp) >2 and self._smooth_mode_tstamp[-1] - self._smooth_mode_tstamp[0] > self._smooth_mode_window_dt:
            self._smooth_mode.popleft()
            self._smooth_mode_tstamp.popleft()


        mode = np.mean(list(self._smooth_mode))
        scale = 128. / mode

        # cv2.GaussianBlur(self._buff_grey,(5,5), 1.5,self._buff_grey)

        cv2.multiply(self._buff_grey, scale, dst = self._buff_grey)


        cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)

        cv2.blur(self._buff_grey,(blur_rad, blur_rad), self._buff_grey_blurred)
        #fixme could be optimised
        self._buff_grey_blurred = (self._buff_grey_blurred / self._buff_convolved_mask).astype(np.uint8)


        cv2.absdiff(self._buff_grey, self._buff_grey_blurred, self._buff_grey)

        if mask is not None:
            cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)
            return self._buff_grey 
Developer: gilestrolab, Project: ethoscope, Lines: 55, Source: adaptive_bg_tracker.py

Example 14: separate_background

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def separate_background(self, data):
        if "predictions" not in data:
            return

        predictions = data["predictions"]
        if "instances" not in predictions:
            return

        instances = predictions["instances"]
        if not instances.has("pred_masks"):
            return

        # Sum up all the instance masks
        mask = instances.pred_masks.cpu().sum(0) >= 1
        mask = mask.numpy().astype("uint8")*255
        # Create 3-channels mask
        mask = np.stack([mask, mask, mask], axis=2)

        # Apply a slight blur to the mask to soften edges
        mask = cv2.GaussianBlur(mask, self.me_kernel, 0)

        # Take the foreground input image
        foreground = data["image"]

        # Create a Gaussian blur for the background image
        background = cv2.GaussianBlur(foreground, self.bg_kernel, 0)

        if self.desaturate:
            # Convert background into grayscale
            background = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)

            # convert single channel grayscale image to 3-channel grayscale image
            background = cv2.cvtColor(background, cv2.COLOR_GRAY2RGB)

        # Convert uint8 to float
        foreground = foreground.astype(float)
        background = background.astype(float)

        # Normalize the alpha mask to keep intensity between 0 and 1
        mask = mask.astype(float)/255.0

        # Multiply the foreground with the mask
        foreground = cv2.multiply(foreground, mask)

        # Multiply the background with ( 1 - mask )
        background = cv2.multiply(background, 1.0 - mask)

        # Add the masked foreground and background
        dst_image = cv2.add(foreground, background)

        # Return a normalized output image for display
        data[self.dst] = dst_image.astype("uint8") 
Developer: jagin, Project: detectron2-pipeline, Lines: 54, Source: separate_background.py

Example 15: sharpening

# Required module: import cv2 [as alias]
# Or: from cv2 import multiply [as alias]
def sharpening(self, frame = None, weight = None):
		"""
		Sharpens an image using the following system:
		frame = I(x, y, d)
		gray_frame(xi, yi) = 0.299*R(xi, yi) + 0.587*G(xi, yi) + 0.114*B(xi, yi)  (cv2 BGR2GRAY weights)
		hff_kernel = [[-1,-1,-1],[-1,8,-1],[-1,-1,-1]]
		edges(x, y) = hff_kernel * gray_frame
		weight = 2.0
		sharpened(x, y, di) = (edges x weight) + frame(x, y, di)
		Args:
			frame: A tensor that contains an image.
			weight: A float that contains the weight coefficient.
		Returns:
			A sharpened tensor.
		"""
		# Assertions
		if (self.assertion.assertNumpyType(frame) == False):
			raise ValueError("Frame has to be a numpy array.")
		if (len(frame.shape) == 3):
			channels = 3
		elif (len(frame.shape) == 2):
			channels = 1
		else:
			raise ValueError("Frame not understood.")
		if (weight is None):
			weight = 2.0
		if (type(weight) != float):
			raise TypeError("ERROR: Weight has to be a float.")
		# Local variables
		hff_kernel = np.array([[-1,-1,-1],[-1,8,-1],[-1,-1,-1]])
		# Logic
		if (channels == 3):
			gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			edges = cv2.filter2D(gray_frame, -1, hff_kernel)
			edges = cv2.multiply(edges, weight)
			sharpened = np.zeros(frame.shape, np.uint8)
			for i in range(channels):
				sharpened[:, :, i] = cv2.add(frame[:, :, i], edges)
		else:
			edges = cv2.filter2D(frame, -1, hff_kernel)
			edges = cv2.multiply(edges, weight)
			sharpened = cv2.add(frame, edges)
		if (not (sharpened.dtype == np.uint8)):
			print("WARNING: Image is not dtype uint8. Forcing type.")
			sharpened = sharpened.astype(np.uint8)
		return sharpened 
Developer: lozuwa, Project: impy, Lines: 48, Source: ColorAugmenters.py
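
Stripped of the class plumbing, the pipeline is: grayscale, high-frequency filter, scale the edge map with cv2.multiply (saturating at 255), then add it back to every channel. A standalone sketch on a synthetic image:

import cv2
import numpy as np

frame = np.random.randint(0, 256, (100, 100, 3), np.uint8)        # stand-in BGR image
hff_kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], np.float32)

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
edges = cv2.filter2D(gray, -1, hff_kernel)
edges = cv2.multiply(edges, 2.0)                                  # weight = 2.0; clips at 255

sharpened = np.zeros_like(frame)
for i in range(3):
    sharpened[:, :, i] = cv2.add(frame[:, :, i], edges)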


Note: The cv2.multiply method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by the community; copyright of the source code belongs to the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this page without permission.