

Python misc.imshow Method Code Examples

This article collects and summarizes typical usage examples of the scipy.misc.imshow method in Python. If you have been wondering what exactly misc.imshow does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples of scipy.misc, the module the method belongs to.


The following presents 15 code examples of the misc.imshow method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
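Before the examples, a minimal self-contained sketch of the call itself may be useful (this snippet is illustrative and not taken from any of the projects below). Note that scipy.misc.imshow relies on Pillow plus an external viewer command, and it was deprecated in SciPy 1.0 and removed in later releases, so it only runs against older SciPy versions:

import numpy as np
from scipy import misc  # requires an old SciPy release that still ships misc.imshow

# Build a simple test image: a white square on a black background.
image = np.zeros((128, 128, 3), dtype=np.uint8)
image[32:96, 32:96, :] = 255

# Hands the array to an external viewer; the viewer command is taken from the
# SCIPY_PIL_IMAGE_VIEWER environment variable and defaults to "see".
misc.imshow(image)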

Example 1: get_car_image_plate_number

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def get_car_image_plate_number(image_path, image_name):
  
	img = Image(cv2.imread(image_path,0), image_name)
	l_carsR = getCarsFromImage(img.img, carClassifier)
	for carR in l_carsR:
		car = Car(img.img, carR, plateCassifier)
		car.setPlateText(processPlateText(car, net))
		img.addCar(car)
	
	for car in img.cars:
		car.draw()
		if(not car.isPlateEmpty()):
			plate_number = car.plateText
		# imshow(car.carImg)
		x, y, w, h = car.carR.x, car.carR.y, car.carR.w, car.carR.h

	color_image = imread(image_path)
	return color_image[y:y+h, x:x+w], plate_number 
Developer: dalmia, Project: WannaPark, Lines of code: 20, Source file: secure_camera.py
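The return statement above crops the detected car out of the full-color frame with plain NumPy slicing. A self-contained sketch of that cropping pattern, using a made-up frame and rectangle for illustration:

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for the color image
x, y, w, h = 100, 50, 200, 120                   # bounding rectangle of the car

car_crop = frame[y:y+h, x:x+w]                   # index rows (y) first, then columns (x)
print(car_crop.shape)                            # -> (120, 200, 3)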

Example 2: show_grid

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def show_grid(images, rows=None, cols=None):
    """
    Converts the input images to a grid image and shows it in a new window.

    This function wraps around scipy.misc.imshow(), which requires the
    external `see <image>` command to be installed. On Windows systems, it
    usually is not.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        See `draw_grid()`.

    rows : None or int, optional(default=None)
        See `draw_grid()`.

    cols : None or int, optional(default=None)
        See `draw_grid()`.

    """
    grid = draw_grid(images, rows=rows, cols=cols)
    misc.imshow(grid) 
Developer: liuguiyangnwpu, Project: DL.EyeSight, Lines of code: 24, Source file: utils.py
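Because scipy.misc.imshow() shells out to an external viewer, a common workaround on systems without the `see` command is either to point it at a different viewer or to display the array with matplotlib instead. A hedged sketch of both options (the viewer name "eog" and the helper function are illustrative, not part of the projects above):

import os
import numpy as np
import matplotlib.pyplot as plt

# Option 1: tell scipy.misc.imshow which viewer command to invoke
# (must be set before calling misc.imshow; "eog" is just an example viewer).
os.environ["SCIPY_PIL_IMAGE_VIEWER"] = "eog"

# Option 2: bypass scipy.misc.imshow and show the array with matplotlib.
def show_image(arr):
    """Display an (H, W) or (H, W, 3) uint8 array in a matplotlib window."""
    arr = np.asarray(arr)
    plt.imshow(arr, cmap=None if arr.ndim == 3 else "gray")
    plt.axis("off")
    plt.show()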

Example 3: main

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def main():
    image = data.astronaut()
    print("image shape:", image.shape)

    aug = iaa.WithColorspace(
        from_colorspace="RGB",
        to_colorspace="HSV",
        children=iaa.WithChannels(0, iaa.Add(50))
    )

    aug_no_colorspace = iaa.WithChannels(0, iaa.Add(50))

    img_show = np.hstack([
        image,
        aug.augment_image(image),
        aug_no_colorspace.augment_image(image)
    ])
    misc.imshow(img_show) 
Developer: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines of code: 20, Source file: check_withcolorspace.py

Example 4: show_distributions_grid

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def show_distributions_grid(params, rows=None, cols=None, graph_sizes=(350, 350), sample_sizes=None, titles=None):
    misc.imshow(
        draw_distributions_grid(
            params,
            graph_sizes=graph_sizes,
            sample_sizes=sample_sizes,
            rows=rows,
            cols=cols,
            titles=titles
        )
    ) 
Developer: liuguiyangnwpu, Project: DL.EyeSight, Lines of code: 13, Source file: parameter.py

Example 5: main

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def main():
    image = data.astronaut()
    image = ia.imresize_single_image(image, (HEIGHT, WIDTH))

    kps = []
    for y in range(NB_ROWS):
        ycoord = BB_Y1 + int(y * (BB_Y2 - BB_Y1) / (NB_COLS - 1))
        for x in range(NB_COLS):
            xcoord = BB_X1 + int(x * (BB_X2 - BB_X1) / (NB_ROWS - 1))
            kp = (xcoord, ycoord)
            kps.append(kp)
    kps = set(kps)
    kps = [ia.Keypoint(x=xcoord, y=ycoord) for (xcoord, ycoord) in kps]
    kps = ia.KeypointsOnImage(kps, shape=image.shape)

    bb = ia.BoundingBox(x1=BB_X1, x2=BB_X2, y1=BB_Y1, y2=BB_Y2)
    bbs = ia.BoundingBoxesOnImage([bb], shape=image.shape)

    seq = iaa.Affine(rotate=45)
    seq_det = seq.to_deterministic()
    image_aug = seq_det.augment_image(image)
    kps_aug = seq_det.augment_keypoints([kps])[0]
    bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]

    image_before = np.copy(image)
    image_before = kps.draw_on_image(image_before)
    image_before = bbs.draw_on_image(image_before)

    image_after = np.copy(image_aug)
    image_after = kps_aug.draw_on_image(image_after)
    image_after = bbs_aug.draw_on_image(image_after)

    misc.imshow(np.hstack([image_before, image_after]))
    misc.imsave("bb_aug.jpg", np.hstack([image_before, image_after])) 
Developer: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines of code: 36, Source file: check_bb_augmentation.py

Example 6: _draw_samples_iteration

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def _draw_samples_iteration(self, h, w, seed, upscale_method):
        maxlen = max(h, w)
        size_px_max = self.size_px_max.draw_sample(random_state=ia.new_random_state(seed))
        if maxlen > size_px_max:
            downscale_factor = size_px_max / maxlen
            h_small = int(h * downscale_factor)
            w_small = int(w * downscale_factor)
        else:
            h_small = h
            w_small = w

        # don't go below Hx1 or 1xW
        h_small = max(h_small, 1)
        w_small = max(w_small, 1)

        generator = OpenSimplex(seed=seed)
        noise = np.zeros((h_small, w_small), dtype=np.float32)
        for y in sm.xrange(h_small):
            for x in sm.xrange(w_small):
                noise[y, x] = generator.noise2d(y=y, x=x)
        noise_0to1 = (noise + 0.5) / 2

        if noise_0to1.shape != (h, w):
            noise_0to1_uint8 = (noise_0to1 * 255).astype(np.uint8)
            noise_0to1_3d = np.tile(noise_0to1_uint8[..., np.newaxis], (1, 1, 3))
            noise_0to1 = ia.imresize_single_image(noise_0to1_3d, (h, w), interpolation=upscale_method)
            noise_0to1 = (noise_0to1[..., 0] / 255.0).astype(np.float32)

        #from scipy import misc
        #print(noise_0to1.shape, h_small, w_small, self.size_percent, self.size_px_max, maxlen)
        #misc.imshow((noise_0to1 * 255).astype(np.uint8))

        return noise_0to1 
Developer: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines of code: 35, Source file: parameters.py

Example 7: example_keypoints

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def example_keypoints():
    print("Example: Keypoints")
    import imgaug as ia
    from imgaug import augmenters as iaa
    from scipy import misc
    import random
    images = np.random.randint(0, 50, (4, 128, 128, 3), dtype=np.uint8)

    # Generate random keypoints.
    # The augmenters expect a list of imgaug.KeypointsOnImage.
    keypoints_on_images = []
    for image in images:
        height, width = image.shape[0:2]
        keypoints = []
        for _ in range(4):
            x = random.randint(0, width-1)
            y = random.randint(0, height-1)
            keypoints.append(ia.Keypoint(x=x, y=y))
        keypoints_on_images.append(ia.KeypointsOnImage(keypoints, shape=image.shape))

    seq = iaa.Sequential([iaa.GaussianBlur((0, 3.0)), iaa.Affine(scale=(0.5, 0.7))])
    seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start

    # augment keypoints and images
    images_aug = seq_det.augment_images(images)
    keypoints_aug = seq_det.augment_keypoints(keypoints_on_images)

    # Example code to show each image and print the new keypoints coordinates
    for img_idx, (image_before, image_after, keypoints_before, keypoints_after) in enumerate(zip(images, images_aug, keypoints_on_images, keypoints_aug)):
        image_before = keypoints_before.draw_on_image(image_before)
        image_after = keypoints_after.draw_on_image(image_after)
        misc.imshow(np.concatenate((image_before, image_after), axis=1)) # before and after
        for kp_idx, keypoint in enumerate(keypoints_after.keypoints):
            keypoint_old = keypoints_on_images[img_idx].keypoints[kp_idx]
            x_old, y_old = keypoint_old.x, keypoint_old.y
            x_new, y_new = keypoint.x, keypoint.y
            print("[Keypoints for image #%d] before aug: x=%d y=%d | after aug: x=%d y=%d" % (img_idx, x_old, y_old, x_new, y_new)) 
Developer: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines of code: 39, Source file: test_readme_examples.py

Example 8: example_background_augment_batches

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def example_background_augment_batches():
    print("Example: Background Augmentation via augment_batches()")
    import imgaug as ia
    from imgaug import augmenters as iaa
    import numpy as np
    from skimage import data

    # Number of batches and batch size for this example
    nb_batches = 10
    batch_size = 32

    # Example augmentation sequence to run in the background
    augseq = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.CoarseDropout(p=0.1, size_percent=0.1)
    ])

    # For simplicity, we use the same image here many times
    astronaut = data.astronaut()
    astronaut = ia.imresize_single_image(astronaut, (64, 64))

    # Make batches out of the example image (here: 10 batches, each 32 times
    # the example image)
    batches = []
    for _ in range(nb_batches):
        batches.append(
            np.array(
                [astronaut for _ in range(batch_size)],
                dtype=np.uint8
            )
        )

    # Show the augmented images.
    # Note that augment_batches() returns a generator.
    for images_aug in augseq.augment_batches(batches, background=True):
        misc.imshow(ia.draw_grid(images_aug, cols=8)) 
Developer: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines of code: 38, Source file: test_readme_examples.py

Example 9: is_route_advisor_visible

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def is_route_advisor_visible(self, scr, threshold=2):
        ra = self.get_route_advisor_image(scr)
        #misc.imshow(ra)
        #print("ra_shape", ra.shape)
        #assert ra.shape == (9, 3)
        #ra1d = np.average(ra, axis=2)
        ra_rows = np.average(ra, axis=1)
        #print("ra_rows.shape", ra_rows.shape)
        #print("ra_rows", ra_rows)
        expected = np.array([[ 25.33766234,  22.92207792,  21.94805195],
                    [ 31.79220779,  29.50649351,  28.58441558],
                    [ 70.32467532,  68.96103896,  68.32467532],
                    [ 63.51948052,  61.97402597,  61.2987013 ],
                    [ 66.20779221,  64.72727273,  64.14285714],
                    [ 64.12987013,  62.51948052,  62.01298701],
                    [ 60.61038961,  58.94805195,  58.20779221],
                    [ 65.31168831,  63.74025974,  63.12987013],
                    [ 18.18181818,  15.66233766,  14.51948052]], dtype=np.float32)

        #print("expected", ra_rows)
        #print("diff", ra_rows - expected)

        # evade brightness differences
        observed_normalized = ra_rows - np.average(ra_rows)
        expected_normalized = expected - np.average(expected)

        #print("observed_normalized", observed_normalized)
        #print("expected_normalized", expected_normalized)

        dist = np.abs(observed_normalized - expected_normalized)
        dist_avg = np.average(dist)
        #print("dist", dist)
        #print("dist_avg", dist_avg)
        return dist_avg < threshold

    # quite close scores even for some non-paused images 
Developer: aleju, Project: self-driving-truck, Lines of code: 38, Source file: ets2window.py

Example 10: is_offence_shown

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def is_offence_shown(self, scr, threshold=0.97):
        time_start = time.time()
        y1 = 584
        y2 = 591 + 1
        x1 = 1119
        x2 = 1180 + 1
        offence_area = scr[y1:y2, x1:x2, :]
        x, y, score = util.template_match(needle=self.offence_ff_image, haystack=offence_area)
        time_req = time.time() - time_start
        #print("in %.4fs" % (time_req,))
        #print("is_offence_shown", x, y, score)
        #misc.imshow(offence_area)
        return score >= threshold 
Developer: aleju, Project: self-driving-truck, Lines of code: 15, Source file: ets2window.py

Example 11: main

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def main():
    nb_rows = 8
    nb_cols = 8
    h, w = (128, 128)
    sample_size = 128

    noise_gens = [
        iap.SimplexNoise(),
        iap.FrequencyNoise(exponent=-4, size_px_max=sample_size, upscale_method="cubic"),
        iap.FrequencyNoise(exponent=-2, size_px_max=sample_size, upscale_method="cubic"),
        iap.FrequencyNoise(exponent=0, size_px_max=sample_size, upscale_method="cubic"),
        iap.FrequencyNoise(exponent=2, size_px_max=sample_size, upscale_method="cubic"),
        iap.FrequencyNoise(exponent=4, size_px_max=sample_size, upscale_method="cubic"),
        iap.FrequencyNoise(exponent=(-4, 4), size_px_max=(4, sample_size), upscale_method=["nearest", "linear", "cubic"]),
        iap.IterativeNoiseAggregator(
            other_param=iap.FrequencyNoise(exponent=(-4, 4), size_px_max=(4, sample_size), upscale_method=["nearest", "linear", "cubic"]),
            iterations=(1, 3),
            aggregation_method=["max", "avg"]
        ),
        iap.IterativeNoiseAggregator(
            other_param=iap.Sigmoid(
                iap.FrequencyNoise(exponent=(-4, 4), size_px_max=(4, sample_size), upscale_method=["nearest", "linear", "cubic"]),
                threshold=(-10, 10),
                activated=0.33,
                mul=20,
                add=-10
            ),
            iterations=(1, 3),
            aggregation_method=["max", "avg"]
        )
    ]

    samples = [[] for _ in range(len(noise_gens))]
    for _ in range(nb_rows * nb_cols):
        for i, noise_gen in enumerate(noise_gens):
            samples[i].append(noise_gen.draw_samples((h, w)))

    rows = [np.hstack(row) for row in samples]
    grid = np.vstack(rows)
    misc.imshow((grid*255).astype(np.uint8))

    images = [ia.quokka_square(size=(128, 128)) for _ in range(16)]
    seqs = [
        iaa.SimplexNoiseAlpha(first=iaa.EdgeDetect(1.0)),
        iaa.SimplexNoiseAlpha(first=iaa.EdgeDetect(1.0), per_channel=True),
        iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0)),
        iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0), per_channel=True)
    ]
    images_aug = []

    for seq in seqs:
        images_aug.append(np.hstack(seq.augment_images(images)))
    images_aug = np.vstack(images_aug)
    misc.imshow(images_aug) 
Developer: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines of code: 56, Source file: check_noise.py

Example 12: main

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def main():
    image = data.astronaut()
    image = ia.imresize_single_image(image, (128, 128))

    images = []
    params = [
        (0.25, 0.25),
        (1.0, 0.25),
        (2.0, 0.25),
        (3.0, 0.25),
        (0.25, 0.50),
        (1.0, 0.50),
        (2.0, 0.50),
        (3.0, 0.50),
        (0.25, 0.75),
        (1.0, 0.75),
        (2.0, 0.75),
        (3.0, 0.75)
    ]

    for (alpha, sigma) in params:
        images_row = []
        seqs_row = [
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="constant", cval=0, order=0),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="constant", cval=128, order=0),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="constant", cval=255, order=0),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="constant", cval=0, order=1),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="constant", cval=128, order=1),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="constant", cval=255, order=1),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="constant", cval=0, order=3),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="constant", cval=128, order=3),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="constant", cval=255, order=3),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="nearest", order=0),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="nearest", order=1),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="nearest", order=2),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="nearest", order=3),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="reflect", order=0),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="reflect", order=1),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="reflect", order=2),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="reflect", order=3),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="wrap", order=0),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="wrap", order=1),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="wrap", order=2),
            iaa.ElasticTransformation(alpha=alpha, sigma=sigma, mode="wrap", order=3)
        ]

        for seq in seqs_row:
            images_row.append(
                seq.augment_image(image)
            )

        images.append(np.hstack(images_row))

    misc.imshow(np.vstack(images))
    misc.imsave("elastic_transformations.jpg", np.vstack(images)) 
Developer: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines of code: 57, Source file: check_elastic_transformation.py

Example 13: main

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def main():
    augseq = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.CoarseDropout(p=0.1, size_percent=0.1)
    ])

    print("------------------")
    print("augseq.augment_batches(batches, background=True)")
    print("------------------")
    batches = list(load_images())
    batches_aug = augseq.augment_batches(batches, background=True)
    images_aug = []
    keypoints_aug = []
    for batch_aug in batches_aug:
        images_aug.append(batch_aug.images_aug)
        keypoints_aug.append(batch_aug.keypoints_aug)
    misc.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("augseq.augment_batches(batches, background=True) -> only images")
    print("------------------")
    batches = list(load_images())
    batches = [batch.images for batch in batches]
    batches_aug = augseq.augment_batches(batches, background=True)
    images_aug = []
    keypoints_aug = None
    for batch_aug in batches_aug:
        images_aug.append(batch_aug)
    misc.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("BackgroundAugmenter")
    print("------------------")
    batch_loader = ia.BatchLoader(load_images)
    bg_augmenter = ia.BackgroundAugmenter(batch_loader, augseq)
    images_aug = []
    keypoints_aug = []
    while True:
        print("Next batch...")
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        images_aug.append(batch.images_aug)
        keypoints_aug.append(batch.keypoints_aug)
    misc.imshow(draw_grid(images_aug, keypoints_aug)) 
Developer: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines of code: 48, Source file: check_background_augmentation.py

Example 14: main

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def main():
    image = ia.quokka(size=0.5)
    kps = [ia.KeypointsOnImage(
        [ia.Keypoint(x=245, y=203), ia.Keypoint(x=365, y=195), ia.Keypoint(x=313, y=269)],
        shape=(image.shape[0]*2, image.shape[1]*2)
    )]
    kps[0] = kps[0].on(image.shape)
    print("image shape:", image.shape)

    augs = [
        iaa.PerspectiveTransform(scale=0.01, name="pt001", keep_size=True),
        iaa.PerspectiveTransform(scale=0.1, name="pt01", keep_size=True),
        iaa.PerspectiveTransform(scale=0.2, name="pt02", keep_size=True),
        iaa.PerspectiveTransform(scale=0.3, name="pt03", keep_size=True),
        iaa.PerspectiveTransform(scale=(0, 0.3), name="pt00to03", keep_size=True)
    ]

    print("original", image.shape)
    misc.imshow(kps[0].draw_on_image(image))

    print("-----------------")
    print("Random aug per image")
    print("-----------------")
    for aug in augs:
        images_aug = []
        for _ in range(16):
            aug_det = aug.to_deterministic()
            img_aug = aug_det.augment_image(image)
            kps_aug = aug_det.augment_keypoints(kps)[0]
            img_aug_kps = kps_aug.draw_on_image(img_aug)
            img_aug_kps = np.pad(img_aug_kps, ((1, 1), (1, 1), (0, 0)), mode="constant", constant_values=255)
            #print(aug.name, img_aug_kps.shape, img_aug_kps.shape[1]/img_aug_kps.shape[0])
            images_aug.append(img_aug_kps)
            #misc.imshow(img_aug_kps)
        print(aug.name)
        misc.imshow(ia.draw_grid(images_aug))

    print("----------------")
    print("6 channels")
    print("----------------")
    image6 = np.dstack([image, image])
    image6_aug = augs[1].augment_image(image6)
    misc.imshow(
        np.hstack([image6_aug[..., 0:3], image6_aug[..., 3:6]])
    ) 
Developer: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines of code: 47, Source file: check_perspective_transform.py

Example 15: main

# Required module: from scipy import misc [as alias]
# Or: from scipy.misc import imshow [as alias]
def main():
    image = ia.quokka(size=0.5)
    print(image.shape)
    kps = [
        ia.KeypointsOnImage(
            [
                ia.Keypoint(x=123, y=102),
                ia.Keypoint(x=182, y=98),
                ia.Keypoint(x=155, y=134),

                #ia.Keypoint(x=255, y=213),
                #ia.Keypoint(x=375, y=205),
                #ia.Keypoint(x=323, y=279),

                #ia.Keypoint(x=265, y=223),
                #ia.Keypoint(x=385, y=215),
                #ia.Keypoint(x=333, y=289),

                #ia.Keypoint(x=275, y=233),
                #ia.Keypoint(x=395, y=225),
                #ia.Keypoint(x=343, y=299),

                ia.Keypoint(x=-20, y=20)
            ],
            shape=(image.shape[0], image.shape[1])
        )
    ]
    #kps[0] = kps[0].on(image.shape)
    print("image shape:", image.shape)

    augs = [
        #iaa.PiecewiseAffine(scale=0),
        iaa.PiecewiseAffine(scale=0.05),
        iaa.PiecewiseAffine(scale=0.1),
        iaa.PiecewiseAffine(scale=0.2)
    ]

    #print("original", image.shape)
    misc.imshow(kps[0].draw_on_image(image))

    print("-----------------")
    print("Random aug per image")
    print("-----------------")
    for aug in augs:
        images_aug = []
        for _ in range(16):
            aug_det = aug.to_deterministic()
            img_aug = aug_det.augment_image(image)
            kps_aug = aug_det.augment_keypoints(kps)[0]
            #img_aug_kps = kps_aug.draw_on_image(img_aug)
            img_aug_kps = keypoints_draw_on_image(kps_aug, img_aug)
            img_aug_kps = np.pad(img_aug_kps, ((1, 1), (1, 1), (0, 0)), mode="constant", constant_values=255)
            #print(aug.name, img_aug_kps.shape, img_aug_kps.shape[1]/img_aug_kps.shape[0])
            images_aug.append(img_aug_kps)
            #misc.imshow(img_aug_kps)
        print(aug.name)
        misc.imshow(ia.draw_grid(images_aug)) 
Developer: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines of code: 59, Source file: check_piecewise_affine.py


Note: The scipy.misc.imshow examples in this article were compiled by 纯净天空 from open-source code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.