

Python Image.blend Method Code Examples

This article collects typical usage examples of the Python method PIL.Image.blend. If you are wondering what exactly Image.blend does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the other usage examples for the PIL.Image module.


Below are 15 code examples of Image.blend, drawn from open-source projects and sorted by popularity by default.
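As a quick orientation before the collected examples: Image.blend(im1, im2, alpha) interpolates two images of the same mode and size, computing out = im1 * (1.0 - alpha) + im2 * alpha. Here is a minimal, self-contained sketch; the file names are placeholders, not taken from any of the projects below.

from PIL import Image

# Placeholder paths; substitute your own images.
im1 = Image.open("a.jpg").convert("RGB")
im2 = Image.open("b.jpg").convert("RGB").resize(im1.size)  # modes and sizes must match

half = Image.blend(im1, im2, 0.5)         # even 50/50 mix
mostly_im1 = Image.blend(im1, im2, 0.1)   # 90% im1, 10% im2
half.save("blended.jpg")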

Example 1: __call__

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def __call__(self, img_dict):
        
        if np.random.rand() < self.p:
            data_get_func = img_dict['meta']['get_item_func']
            curr_idx = img_dict['meta']['idx']
            max_idx = img_dict['meta']['max_idx']

            other_idx = np.random.randint(0, max_idx)
            data4augm = data_get_func(other_idx)
            while (curr_idx == other_idx) or (self.same_label and data4augm['label'] != img_dict['label']):
                other_idx = np.random.randint(0, max_idx)
                data4augm = data_get_func(other_idx)

            alpha = np.random.rand()

            keys = ['rgb', 'depth', 'ir']
            for key in keys:
                img_dict[key] = Image.blend(data4augm[key].resize(img_dict[key].size),
                                            img_dict[key],
                                            alpha=alpha)
            if not self.same_label:
                img_dict['label'] = alpha * img_dict['label'] + (1 - alpha) * data4augm['label']
    
        return img_dict 
Developer: AlexanderParkin, Project: ChaLearn_liveness_challenge, Lines: 26, Source: transforms.py

Example 2: _apply_basic

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def _apply_basic(self, img, mixing_weights, m):
        # This is a literal adaptation of the paper/official implementation, without normalizations
        # and PIL <-> Numpy conversions between every op. It is still quite CPU-heavy compared to
        # typical augmentation transforms; it could use a GPU / Kornia implementation.
        img_shape = img.size[0], img.size[1], len(img.getbands())
        mixed = np.zeros(img_shape, dtype=np.float32)
        for mw in mixing_weights:
            depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
            ops = np.random.choice(self.ops, depth, replace=True)
            img_aug = img  # no ops are in-place, deep copy not necessary
            for op in ops:
                img_aug = op(img_aug)
            mixed += mw * np.asarray(img_aug, dtype=np.float32)
        np.clip(mixed, 0, 255., out=mixed)
        mixed = Image.fromarray(mixed.astype(np.uint8))
        return Image.blend(img, mixed, m) 
Developer: rwightman, Project: pytorch-image-models, Lines: 18, Source: auto_augment.py

Example 3: blend_images_np

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def blend_images_np(image, image2, alpha=0.5):
    """Blends image2 onto an image.
    Args:
      image: uint8 numpy array with shape (img_height, img_width, 3)
      image2: a uint8 numpy array of shape (img_height, img_width) whose
        values are either 0 or 1.
      alpha: transparency value between 0 and 1 (default: 0.5).
    Raises:
      ValueError: On incorrect data type or shape for image or image2.
    """
    if image.dtype != np.uint8:
        raise ValueError('`image` not of type np.uint8')
    if image2.dtype != np.uint8:
        raise ValueError('`image2` not of type np.uint8')
    if image.shape[:2] != image2.shape[:2]:
        raise ValueError('The image has spatial dimensions %s but the image2 has '
                         'dimensions %s' % (image.shape[:2], image2.shape[:2]))
    pil_image = Image.fromarray(image)
    pil_image2 = Image.fromarray(image2)

    pil_image = Image.blend(pil_image, pil_image2, alpha)
    np.copyto(image, np.array(pil_image.convert('RGB')))
    return image 
Developer: jhu-lcsr, Project: costar_plan, Lines: 26, Source: block_stacking_reader.py

Example 4: blend_images_np

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def blend_images_np(image, image2, alpha=0.5):
    """Blends image2 onto an image.
    Args:
      image: uint8 numpy array with shape (img_height, img_width, 3)
      image2: a uint8 numpy array of shape (img_height, img_width) whose
        values are either 0 or 1.
      alpha: transparency value between 0 and 1 (default: 0.5).
    Raises:
      ValueError: On incorrect data type or shape for image or image2.
    """
    if image.dtype != np.uint8:
        raise ValueError('`image` not of type np.uint8')
    if image2.dtype != np.uint8:
        raise ValueError('`image2` not of type np.uint8')
    if image.shape[:2] != image2.shape:
        raise ValueError('The image has spatial dimensions %s but the image2 has '
                         'dimensions %s' % (image.shape[:2], image2.shape))
    pil_image = Image.fromarray(image)
    pil_image2 = Image.fromarray(image2)

    pil_image = Image.blend(pil_image, pil_image2, alpha)
    np.copyto(image, np.array(pil_image.convert('RGB')))
    return image 
Developer: jhu-lcsr, Project: costar_plan, Lines: 26, Source: inception_preprocessing.py

Example 5: _eval_prediction

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def _eval_prediction(self, eval_source, eval_target, seg_predictions, threshold=-1.0):
        self.sess.run([self.placeholder_init_op],
                      feed_dict={self.image_placeholder: eval_source, self.training_mode: False})
        score_predictions, seg_predictions = self.sess.run([self.score_predictions, seg_predictions])

        print('Predicted score is {}'.format(score_predictions[0]))

        eval_image = io.imread(eval_source)
        mask = np.where(seg_predictions[0] > threshold, 255, 0)
        mask = np.expand_dims(mask, axis=2).astype(np.uint8)
        mask = cv2.resize(mask, (eval_image.shape[1], eval_image.shape[0]))
        mask = Image.fromarray(mask)
        mask = mask.convert('RGB')

        eval_image = Image.fromarray(eval_image)
        eval_image = eval_image.convert('RGB')

        target_img = Image.blend(eval_image, mask, 0.5)
        target_img.save(eval_target)

        print('Image with the mask applied stored at {}'.format(eval_target)) 
Developer: aby2s, Project: sharpmask, Lines: 23, Source: sharpmask.py

Example 6: save_prediction_image

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def save_prediction_image(_, panoptic_pred, img_info, out_dir, colors, num_stuff):
    msk, cat, obj, iscrowd = panoptic_pred

    img = Image.open(img_info["abs_path"])

    # Prepare folders and paths
    folder, img_name = path.split(img_info["rel_path"])
    img_name, _ = path.splitext(img_name)
    out_dir = path.join(out_dir, folder)
    ensure_dir(out_dir)
    out_path = path.join(out_dir, img_name + ".jpg")

    # Render semantic
    sem = cat[msk].numpy()
    crowd = iscrowd[msk].numpy()
    sem[crowd == 1] = 255

    sem_img = Image.fromarray(colors[sem])
    sem_img = sem_img.resize(img_info["original_size"][::-1])

    # Render contours
    is_background = (sem < num_stuff) | (sem == 255)
    msk = msk.numpy()
    msk[is_background] = 0

    contours = find_boundaries(msk, mode="outer", background=0).astype(np.uint8) * 255
    contours = dilation(contours)

    contours = np.expand_dims(contours, -1).repeat(4, -1)
    contours_img = Image.fromarray(contours, mode="RGBA")
    contours_img = contours_img.resize(img_info["original_size"][::-1])

    # Compose final image and save
    out = Image.blend(img, sem_img, 0.5).convert(mode="RGBA")
    out = Image.alpha_composite(out, contours_img)
    out.convert(mode="RGB").save(out_path) 
Developer: mapillary, Project: seamseg, Lines: 38, Source: test_panoptic.py

Example 7: draw_boxes_with_label_and_scores

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def draw_boxes_with_label_and_scores(img_array, boxes, labels, scores):

    img_array = img_array + np.array(cfgs.PIXEL_MEAN)
    img_array = img_array.astype(np.float32)  # astype returns a new array; assign it
    boxes = boxes.astype(np.int64)
    labels = labels.astype(np.int32)
    img_array = np.array(img_array * 255 / np.max(img_array), dtype=np.uint8)

    img_obj = Image.fromarray(img_array)
    raw_img_obj = img_obj.copy()

    draw_obj = ImageDraw.Draw(img_obj)
    num_of_objs = 0
    for box, a_label, a_score in zip(boxes, labels, scores):

        if a_label != NOT_DRAW_BOXES:
            num_of_objs += 1
            draw_a_rectangel_in_img(draw_obj, box, color=STANDARD_COLORS[a_label], width=3)
            if a_label == ONLY_DRAW_BOXES:  # -1
                continue
            elif a_label == ONLY_DRAW_BOXES_WITH_SCORES:  # -2
                only_draw_scores(draw_obj, box, a_score, color='White')
                continue
            else:
                draw_label_with_scores(draw_obj, box, a_label, a_score, color='White')

    out_img_obj = Image.blend(raw_img_obj, img_obj, alpha=0.7)

    return np.array(out_img_obj) 
Developer: DetectionTeamUCAS, Project: R2CNN_Faster-RCNN_Tensorflow, Lines: 31, Source: draw_box_in_img.py

Example 8: draw_boxes_with_label_and_scores

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def draw_boxes_with_label_and_scores(img_array, boxes, labels, scores, method, is_csl=False, in_graph=True):
    if in_graph:
        if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
            img_array = (img_array * np.array(cfgs.PIXEL_STD) + np.array(cfgs.PIXEL_MEAN_)) * 255
        else:
            img_array = img_array + np.array(cfgs.PIXEL_MEAN)
    img_array = img_array.astype(np.float32)  # astype returns a new array; assign it
    boxes = boxes.astype(np.int64)
    labels = labels.astype(np.int32)
    img_array = np.array(img_array * 255 / np.max(img_array), dtype=np.uint8)

    img_obj = Image.fromarray(img_array)
    raw_img_obj = img_obj.copy()

    draw_obj = ImageDraw.Draw(img_obj)
    num_of_objs = 0

    for box, a_label, a_score in zip(boxes, labels, scores):

        if a_label != NOT_DRAW_BOXES:
            num_of_objs += 1
            draw_a_rectangel_in_img(draw_obj, box, color=STANDARD_COLORS[a_label], width=3, method=method)
            if a_label == ONLY_DRAW_BOXES:  # -1
                continue
            elif a_label == ONLY_DRAW_BOXES_WITH_SCORES:  # -2
                only_draw_scores(draw_obj, box, a_score, color='White')
            else:
                if is_csl:
                    draw_label_with_scores_csl(draw_obj, box, a_label, a_score, color='White')
                else:
                    draw_label_with_scores(draw_obj, box, a_label, a_score, color='White')

    out_img_obj = Image.blend(raw_img_obj, img_obj, alpha=0.7)

    return np.array(out_img_obj) 
Developer: Thinklab-SJTU, Project: R3Det_Tensorflow, Lines: 37, Source: draw_box_in_img.py

Example 9: draw_boxes

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def draw_boxes(img_array, boxes, labels, scores, color, method, is_csl=False, in_graph=True):
    if in_graph:
        if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
            img_array = (img_array * np.array(cfgs.PIXEL_STD) + np.array(cfgs.PIXEL_MEAN_)) * 255
        else:
            img_array = img_array + np.array(cfgs.PIXEL_MEAN)
    img_array = img_array.astype(np.float32)  # astype returns a new array; assign it
    boxes = boxes.astype(np.int64)
    labels = labels.astype(np.int32)
    img_array = np.array(img_array * 255 / np.max(img_array), dtype=np.uint8)

    img_obj = Image.fromarray(img_array)
    raw_img_obj = img_obj.copy()

    draw_obj = ImageDraw.Draw(img_obj)
    num_of_objs = 0
    for box, a_label, a_score in zip(boxes, labels, scores):

        if a_label != NOT_DRAW_BOXES:
            num_of_objs += 1
            draw_a_rectangel_in_img(draw_obj, box, color=color, width=3, method=method)
            # draw_a_rectangel_in_img(draw_obj, box, color=STANDARD_COLORS[1], width=3, method=method)
            if a_label == ONLY_DRAW_BOXES:  # -1
                continue
            elif a_label == ONLY_DRAW_BOXES_WITH_SCORES:  # -2
                only_draw_scores(draw_obj, box, a_score, color='White')
            else:
                if is_csl:
                    draw_label_with_scores_csl(draw_obj, box, a_label, a_score, color='White')
                else:
                    draw_label_with_scores(draw_obj, box, a_label, a_score, color='White')

    out_img_obj = Image.blend(raw_img_obj, img_obj, alpha=0.7)

    return np.array(out_img_obj) 
Developer: Thinklab-SJTU, Project: R3Det_Tensorflow, Lines: 37, Source: draw_box_in_img.py

Example 10: mask_image

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def mask_image(img, mask, opacity=1.00, bg=False):
    """
        - img (PIL)
        - mask (PIL)
        - opacity (float) (default: 1.00)
        - bg (bool): if True, keep the background and black out the masked region (default: False)
    Returns a PIL image.
    """
    blank = Image.new('RGB', img.size, color=0)
    if bg:
        masked_image = Image.composite(blank, img, mask)
    else:
        masked_image = Image.composite(img, blank, mask)
    if opacity < 1:
        masked_image = Image.blend(img, masked_image, opacity)
    return masked_image 
Developer: MattKleinsmith, Project: pbt, Lines: 17, Source: utils.py

Example 11: Blend_TwoImages

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def Blend_TwoImages(image1, image2, ratio=0.5):
    # Load up the first and second demo images
#    image1 = Image.open("demo3_1.jpg")
#    image2 = Image.open("demo3_2.jpg")
    if (image1 is None) or (image2 is None): return None

    # Create a new image which is the blend of image1 and image2.
    # The ratio parameter gives the weight of image2; 0.5 is the half-way blend.
    images1And2 = Image.blend(image1, image2, ratio)

    # Save the resulting blend as a file
#    images1And2.save("demo3_3.jpg")
    return images1And2 
Developer: dan59314, Project: MNIST-Deep-Learning, Lines: 15, Source: RvMediaUtility.py

Example 12: draw_boxes_with_label_and_scores

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def draw_boxes_with_label_and_scores(img_array, boxes, labels, scores, method, in_graph=True):
    if in_graph:
        if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
            img_array = (img_array * np.array(cfgs.PIXEL_STD) + np.array(cfgs.PIXEL_MEAN_)) * 255
        else:
            img_array = img_array + np.array(cfgs.PIXEL_MEAN)
    img_array = img_array.astype(np.float32)  # astype returns a new array; assign it
    boxes = boxes.astype(np.int64)
    labels = labels.astype(np.int32)
    img_array = np.array(img_array * 255 / np.max(img_array), dtype=np.uint8)

    img_obj = Image.fromarray(img_array)
    raw_img_obj = img_obj.copy()

    draw_obj = ImageDraw.Draw(img_obj)
    num_of_objs = 0
    for box, a_label, a_score in zip(boxes, labels, scores):

        if a_label != NOT_DRAW_BOXES:
            num_of_objs += 1
            draw_a_rectangel_in_img(draw_obj, box, color=STANDARD_COLORS[a_label], width=3, method=method)
            if a_label == ONLY_DRAW_BOXES:  # -1
                continue
            elif a_label == ONLY_DRAW_BOXES_WITH_SCORES:  # -2
                only_draw_scores(draw_obj, box, a_score, color='White')
                continue
            else:
                draw_label_with_scores(draw_obj, box, a_label, a_score, color='White')

    out_img_obj = Image.blend(raw_img_obj, img_obj, alpha=0.7)

    return np.array(out_img_obj) 
Developer: DetectionTeamUCAS, Project: RetinaNet_Tensorflow_Rotation, Lines: 34, Source: draw_box_in_img.py

Example 13: draw_boxes

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def draw_boxes(img_array, boxes, labels, scores, color, method, in_graph=True):
    if in_graph:
        if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
            img_array = (img_array * np.array(cfgs.PIXEL_STD) + np.array(cfgs.PIXEL_MEAN_)) * 255
        else:
            img_array = img_array + np.array(cfgs.PIXEL_MEAN)
    img_array = img_array.astype(np.float32)  # astype returns a new array; assign it
    boxes = boxes.astype(np.int64)
    labels = labels.astype(np.int32)
    img_array = np.array(img_array * 255 / np.max(img_array), dtype=np.uint8)

    img_obj = Image.fromarray(img_array)
    raw_img_obj = img_obj.copy()

    draw_obj = ImageDraw.Draw(img_obj)
    num_of_objs = 0
    for box, a_label, a_score in zip(boxes, labels, scores):

        if a_label != NOT_DRAW_BOXES:
            num_of_objs += 1
            draw_a_rectangel_in_img(draw_obj, box, color=color, width=3, method=method)
            # draw_a_rectangel_in_img(draw_obj, box, color=STANDARD_COLORS[1], width=3, method=method)
            if a_label == ONLY_DRAW_BOXES:  # -1
                continue
            elif a_label == ONLY_DRAW_BOXES_WITH_SCORES:  # -2
                only_draw_scores(draw_obj, box, a_score, color='White')
                continue
            else:
                draw_label_with_scores(draw_obj, box, a_label, a_score, color='White')

    out_img_obj = Image.blend(raw_img_obj, img_obj, alpha=0.7)

    return np.array(out_img_obj) 
Developer: DetectionTeamUCAS, Project: RetinaNet_Tensorflow_Rotation, Lines: 35, Source: draw_box_in_img.py

Example 14: _enhance_increasing_level_to_arg

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def _enhance_increasing_level_to_arg(level, _hparams):
    # The 'no change' level is 1.0; moving away from it toward 0.0 or 2.0 increases the
    # enhancement blend. Resulting range: [0.1, 1.9].
    level = (level / _MAX_LEVEL) * .9
    level = 1.0 + _randomly_negate(level)
    return level, 
Developer: rwightman, Project: pytorch-image-models, Lines: 8, Source: auto_augment.py

Example 15: _apply_blended

# Required import: from PIL import Image [as alias]
# Alternatively: from PIL.Image import blend [as alias]
def _apply_blended(self, img, mixing_weights, m):
        # This is my first crack at implementing a slightly faster mixed augmentation. Instead
        # of accumulating the mix for each chain in a Numpy array and then blending with the
        # original, it recomputes the blending coefficients and applies one PIL image blend per chain.
        # TODO the results appear in the right ballpark but they differ by more than rounding.
        img_orig = img.copy()
        ws = self._calc_blended_weights(mixing_weights, m)
        for w in ws:
            depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
            ops = np.random.choice(self.ops, depth, replace=True)
            img_aug = img_orig  # no ops are in-place, deep copy not necessary
            for op in ops:
                img_aug = op(img_aug)
            img = Image.blend(img, img_aug, w)
        return img 
Developer: rwightman, Project: pytorch-image-models, Lines: 17, Source: auto_augment.py


Note: The PIL.Image.blend examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not republish without permission.