

Python augmenters.Crop Method Code Examples

This article collects typical usage examples of the Python method imgaug.augmenters.Crop. If you are unsure what augmenters.Crop does, how to call it, or what real-world code using it looks like, the hand-picked examples below should help. You can also explore further usage examples from the containing module, imgaug.augmenters.


The following presents 15 code examples of the augmenters.Crop method, sorted by popularity by default.
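Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below) illustrating the two parameterizations of augmenters.Crop that recur throughout this page: px crops an absolute number of pixels from each side, percent crops a relative fraction of height/width, and keep_size controls whether the cropped result is resized back to the input shape.

import numpy as np
import imgaug.augmenters as iaa

# Dummy batch of 4 RGB images, shape (N, H, W, C), dtype uint8.
images = np.random.randint(0, 255, (4, 128, 128, 3), dtype=np.uint8)

# Crop 0-16 pixels from every side; by default (keep_size=True) the result
# is resized back to the original 128x128 size.
crop_px = iaa.Crop(px=(0, 16))

# Crop 0-10% of height/width from every side and keep the smaller size.
crop_pct = iaa.Crop(percent=(0, 0.1), keep_size=False)

images_same_size = crop_px(images=images)   # array with the input shape
images_smaller = crop_pct(images=images)    # images of varying, smaller sizes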

Example 1: example_augment_images_and_heatmaps

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def example_augment_images_and_heatmaps():
    print("Example: Augment Images and Heatmaps")
    import numpy as np
    import imgaug.augmenters as iaa

    # Standard scenario: You have N RGB-images and additionally one heatmap per
    # image. You want to augment each image and its heatmap identically.
    images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
    heatmaps = np.random.random(size=(16, 64, 64, 1)).astype(np.float32)

    seq = iaa.Sequential([
        iaa.GaussianBlur((0, 3.0)),
        iaa.Affine(translate_px={"x": (-40, 40)}),
        iaa.Crop(px=(0, 10))
    ])

    images_aug, heatmaps_aug = seq(images=images, heatmaps=heatmaps) 
Author ID: aleju, Project: imgaug, Lines: 19, Source: check_readme_examples.py

Example 2: example_augment_images_and_segmentation_maps

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def example_augment_images_and_segmentation_maps():
    print("Example: Augment Images and Segmentation Maps")
    import numpy as np
    import imgaug.augmenters as iaa

    # Standard scenario: You have N=16 RGB-images and additionally one segmentation
    # map per image. You want to augment each image and its segmentation map identically.
    images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
    segmaps = np.random.randint(0, 10, size=(16, 64, 64, 1), dtype=np.int32)

    seq = iaa.Sequential([
        iaa.GaussianBlur((0, 3.0)),
        iaa.Affine(translate_px={"x": (-40, 40)}),
        iaa.Crop(px=(0, 10))
    ])

    images_aug, segmaps_aug = seq(images=images, segmentation_maps=segmaps) 
Author ID: aleju, Project: imgaug, Lines: 19, Source: check_readme_examples.py

Example 3: __init__

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def __init__(self, augmentation_rate):
        self.augs = iaa.Sometimes(
            augmentation_rate,
            iaa.SomeOf(
                (4, 7),
                [
                    iaa.Affine(rotate=(-10, 10)),
                    iaa.Fliplr(0.2),
                    iaa.AverageBlur(k=(2, 10)),
                    iaa.Add((-10, 10), per_channel=0.5),
                    iaa.Multiply((0.75, 1.25), per_channel=0.5),
                    iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),
                    iaa.Crop(px=(0, 20))
                ],
                random_order=True
            )
        ) 
Author ID: Giphy, Project: celeb-detection-oss, Lines: 19, Source: img_augmentor.py

Example 4: _rectify_augmenter

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def _rectify_augmenter(self, augment):
        import netharn as nh
        if augment is True:
            augment = 'simple'

        if not augment:
            augmenter = None
        elif augment == 'simple':
            augmenter = iaa.Sequential([
                iaa.Crop(percent=(0, .2)),
                iaa.Fliplr(p=.5)
            ])
        elif augment == 'complex':
            augmenter = iaa.Sequential([
                iaa.Sometimes(0.2, nh.data.transforms.HSVShift(hue=0.1, sat=1.5, val=1.5)),
                iaa.Crop(percent=(0, .2)),
                iaa.Fliplr(p=.5)
            ])
        else:
            raise KeyError('Unknown augmentation {!r}'.format(augment))
        return augmenter 
Author ID: Erotemic, Project: netharn, Lines: 23, Source: sseg_camvid.py

Example 5: _rectify_augmenter

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def _rectify_augmenter(self, augmenter):
        import netharn as nh
        if augmenter is True:
            augmenter = 'simple'

        if not augmenter:
            augmenter = None
        elif augmenter == 'simple':
            augmenter = iaa.Sequential([
                iaa.Crop(percent=(0, .2)),
                iaa.Fliplr(p=.5)
            ])
        elif augmenter == 'complex':
            augmenter = iaa.Sequential([
                iaa.Sometimes(0.2, nh.data.transforms.HSVShift(hue=0.1, sat=1.5, val=1.5)),
                iaa.Crop(percent=(0, .2)),
                iaa.Fliplr(p=.5)
            ])
        else:
            raise KeyError('Unknown augmentation {!r}'.format(augmenter))
        return augmenter 
Author ID: Erotemic, Project: netharn, Lines: 23, Source: segmentation.py

Example 6: _load_augmentation_aug_geometric

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def _load_augmentation_aug_geometric():
    return iaa.OneOf([
        iaa.Sequential([iaa.Fliplr(0.5), iaa.Flipud(0.2)]),
        iaa.CropAndPad(percent=(-0.05, 0.1),
                       pad_mode='constant',
                       pad_cval=(0, 255)),
        iaa.Crop(percent=(0.0, 0.1)),
        iaa.Crop(percent=(0.3, 0.5)),
        iaa.Crop(percent=(0.3, 0.5)),
        iaa.Crop(percent=(0.3, 0.5)),
        iaa.Sequential([
            iaa.Affine(
                    # scale images to 80-120% of their size,
                    # individually per axis
                    scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
                    # translate by -20 to +20 percent (per axis)
                    translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
                    rotate=(-45, 45),  # rotate by -45 to +45 degrees
                    shear=(-16, 16),  # shear by -16 to +16 degrees
                    # use nearest neighbour or bilinear interpolation (fast)
                    order=[0, 1],
                    # if mode is constant, use a cval between 0 and 255
                    mode='constant',
                    cval=(0, 255),
                    # use any of scikit-image's warping modes
                    # (see 2nd image from the top for examples)
            ),
            iaa.Sometimes(0.3, iaa.Crop(percent=(0.3, 0.5)))])
    ]) 
Author ID: divamgupta, Project: image-segmentation-keras, Lines: 31, Source: augmentation.py

Example 7: example_simple_training_setting

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def example_simple_training_setting():
    print("Example: Simple Training Setting")
    import numpy as np
    import imgaug.augmenters as iaa

    def load_batch(batch_idx):
        # dummy function, implement this
        # Return a numpy array of shape (N, height, width, #channels)
        # or a list of (height, width, #channels) arrays (may have different image
        # sizes).
        # Images should be in RGB for colorspace augmentations.
        # (cv2.imread() returns BGR!)
        # Images should usually be in uint8 with values from 0-255.
        return np.zeros((128, 32, 32, 3), dtype=np.uint8) + (batch_idx % 255)

    def train_on_images(images):
        # dummy function, implement this
        pass

    # Pipeline:
    # (1) Crop images from each side by 1-16px, do not resize the resulting
    #     images back to the input size. Keep them at the cropped size.
    # (2) Horizontally flip 50% of the images.
    # (3) Blur images using a gaussian kernel with sigma between 0.0 and 3.0.
    seq = iaa.Sequential([
        iaa.Crop(px=(1, 16), keep_size=False),
        iaa.Fliplr(0.5),
        iaa.GaussianBlur(sigma=(0, 3.0))
    ])

    for batch_idx in range(100):
        images = load_batch(batch_idx)
        images_aug = seq(images=images)  # done by the library
        train_on_images(images_aug)

        # -----
        # Make sure that the example really does something
        if batch_idx == 0:
            assert not np.array_equal(images, images_aug) 
Author ID: aleju, Project: imgaug, Lines: 41, Source: check_readme_examples.py

Example 8: augment_soft

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def augment_soft(img):
    # Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
    # e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)

    # Define our sequence of augmentation steps that will be applied to every image
    # All augmenters with per_channel=0.5 will sample one value _per image_
    # in 50% of all cases. In all other cases they will sample new values
    # _per channel_.
    seq = iaa.Sequential(
        [
            # apply the following augmenters to most images
            iaa.Fliplr(0.5), # horizontally flip 50% of all images
            # crop images by 0% to 20% of their height/width
            iaa.Crop(
                percent=(0, 0.2),
            ),
            iaa.Scale({"height": CROP_SIZE, "width": CROP_SIZE }),
        ],
        random_order=False
    )

    if img.ndim == 3:
        img = seq.augment_images(np.expand_dims(img, axis=0)).squeeze(axis=0)
    else:
        img = seq.augment_images(img)

    return img 
Author ID: antorsae, Project: landmark-recognition-challenge, Lines: 30, Source: train.py

Example 9: crop

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def crop(image, prob, keys):
    """ Cropping """
    x = random.randint(0, 5)
    y = random.randint(0, 5)
    r = random.uniform(-5, 5)
    aug = iaa.Sequential([iaa.Crop(px=((0, x), (0, y), (0, x), (0, y))), iaa.Affine(shear=r, cval=(0, 255))])
    aug.add(iaa.Multiply(random.uniform(0.25, 1.5)))
    seq_det = aug.to_deterministic()

    image_aug = seq_det.augment_images([image])[0]

    keys = ia.KeypointsOnImage([ia.Keypoint(x=keys[0], y=keys[1]),
                                ia.Keypoint(x=keys[2], y=keys[3]),
                                ia.Keypoint(x=keys[4], y=keys[5]),
                                ia.Keypoint(x=keys[6], y=keys[7]),
                                ia.Keypoint(x=keys[8], y=keys[9])], shape=image.shape)

    keys_aug = seq_det.augment_keypoints([keys])[0]
    k = keys_aug.keypoints
    output = [k[0].x, k[0].y, k[1].x, k[1].y, k[2].x, k[2].y, k[3].x, k[3].y, k[4].x, k[4].y]

    index = 0
    for i in range(0, len(prob)):
        output[index] = output[index] * prob[i]
        output[index + 1] = output[index + 1] * prob[i]
        index = index + 2
    output = np.array(output)
    return image_aug, output 
Author ID: MahmudulAlam, Project: Unified-Gesture-and-Fingertip-Detection, Lines: 30, Source: augmentation.py

Example 10: example_standard_situation

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def example_standard_situation():
    print("Example: Standard Situation")
    # -------
    # dummy functions to make the example runnable here
    def load_batch(batch_idx):
        return np.random.randint(0, 255, (1, 16, 16, 3), dtype=np.uint8)

    def train_on_images(images):
        pass

    # -------

    from imgaug import augmenters as iaa

    seq = iaa.Sequential([
        iaa.Crop(px=(0, 16)), # crop images from each side by 0 to 16px (randomly chosen)
        iaa.Fliplr(0.5), # horizontally flip 50% of the images
        iaa.GaussianBlur(sigma=(0, 3.0)) # blur images with a sigma of 0 to 3.0
    ])

    for batch_idx in range(1000):
        # 'images' should be either a 4D numpy array of shape (N, height, width, channels)
        # or a list of 3D numpy arrays, each having shape (height, width, channels).
        # Grayscale images must have shape (height, width, 1) each.
        # All images must have numpy's dtype uint8. Values are expected to be in
        # range 0-255.
        images = load_batch(batch_idx)
        images_aug = seq.augment_images(images)
        train_on_images(images_aug)


        # -----
        # Make sure that the example really does something
        if batch_idx == 0:
            assert not np.array_equal(images, images_aug) 
Author ID: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines: 37, Source: test_readme_examples.py

Example 11: __init__

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def __init__(self, devkit_dpath=None, split='train', years=[2007, 2012],
                 base_wh=[416, 416], scales=[-3, 6], factor=32):

        super(YoloVOCDataset, self).__init__(devkit_dpath, split=split,
                                             years=years)

        self.split = split

        self.factor = factor  # downsample factor of yolo grid

        self.base_wh = np.array(base_wh, dtype=np.int)

        assert np.all(self.base_wh % self.factor == 0)

        self.multi_scale_inp_size = np.array([
            self.base_wh + (self.factor * i) for i in range(*scales)])
        self.multi_scale_out_size = self.multi_scale_inp_size // self.factor

        self.augmenter = None

        if 'train' in split:
            augmentors = [
                # Order used in lightnet is hsv, rc, rf, lb
                # lb is applied externally to augmenters
                iaa.Sometimes(.9, HSVShift(hue=0.1, sat=1.5, val=1.5)),
                iaa.Crop(percent=(0, .2), keep_size=False),
                iaa.Fliplr(p=.5),
            ]
            self.augmenter = iaa.Sequential(augmentors)

        # Used to resize images to the appropriate inp_size without changing
        # the aspect ratio.
        self.letterbox = nh.data.transforms.Resize(None, mode='letterbox') 
Author ID: Erotemic, Project: netharn, Lines: 35, Source: yolo_voc.py

Example 12: __init__

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def __init__(self, sampler, augment='simple', input_dims=[416, 416],
                 scales=[-3, 6], factor=32):
        super(DetectDataset, self).__init__()

        self.sampler = sampler

        self.factor = factor  # downsample factor of yolo grid
        self.input_dims = np.array(input_dims, dtype=np.int)
        assert np.all(self.input_dims % self.factor == 0)

        self.multi_scale_inp_size = np.array([
            self.input_dims + (self.factor * i) for i in range(*scales)])
        self.multi_scale_out_size = self.multi_scale_inp_size // self.factor

        import imgaug.augmenters as iaa

        self.augmenter = None
        if not augment:
            self.augmenter = None
        elif augment == 'simple':
            augmentors = [
                # Order used in lightnet is hsv, rc, rf, lb
                # lb is applied externally to augmenters
                # iaa.Sometimes(.9, HSVShift(hue=0.1, sat=1.5, val=1.5)),
                iaa.Crop(percent=(0, .2), keep_size=False),
                iaa.Fliplr(p=.5),
            ]
            self.augmenter = iaa.Sequential(augmentors)
        else:
            raise KeyError(augment)

        # Used to resize images to the appropriate inp_size without changing
        # the aspect ratio.
        self.letterbox = nh.data.transforms.Resize(None, mode='letterbox')

        self.input_id = ub.hash_data([
            self.sampler._depends()
        ]) 
Author ID: Erotemic, Project: netharn, Lines: 40, Source: object_detection.py

Example 13: _create_augment_pipeline

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def _create_augment_pipeline():
    from imgaug import augmenters as iaa
    
    ### augmentors by https://github.com/aleju/imgaug
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)

    # Define our sequence of augmentation steps that will be applied to every image
    # All augmenters with per_channel=0.5 will sample one value _per image_
    # in 50% of all cases. In all other cases they will sample new values
    # _per channel_.
    aug_pipe = iaa.Sequential(
        [
            # apply the following augmenters to most images
            #iaa.Fliplr(0.5), # horizontally flip 50% of all images
            #iaa.Flipud(0.2), # vertically flip 20% of all images
            #sometimes(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
            sometimes(iaa.Affine(
                #scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
                #translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)
                #rotate=(-5, 5), # rotate by -5 to +5 degrees
                #shear=(-5, 5), # shear by -5 to +5 degrees
                #order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
                #cval=(0, 255), # if mode is constant, use a cval between 0 and 255
                #mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
            )),
            # execute 0 to 5 of the following (less important) augmenters per image
            # don't execute all of them, as that would often be way too strong
            iaa.SomeOf((0, 5),
                [
                    #sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation
                    iaa.OneOf([
                        iaa.GaussianBlur((0, 3.0)), # blur images with a sigma between 0 and 3.0
                        iaa.AverageBlur(k=(2, 7)), # blur image using local means with kernel sizes between 2 and 7
                        iaa.MedianBlur(k=(3, 11)), # blur image using local medians with kernel sizes between 3 and 11
                    ]),
                    iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # sharpen images
                    #iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
                    # search either for all edges or for directed edges
                    #sometimes(iaa.OneOf([
                    #    iaa.EdgeDetect(alpha=(0, 0.7)),
                    #    iaa.DirectedEdgeDetect(alpha=(0, 0.7), direction=(0.0, 1.0)),
                    #])),
                    iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images
                    iaa.OneOf([
                        iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels
                        #iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                    ]),
                    #iaa.Invert(0.05, per_channel=True), # invert color channels
                    iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value)
                    iaa.Multiply((0.5, 1.5), per_channel=0.5), # change brightness of images (50-150% of original value)
                    iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast
                    #iaa.Grayscale(alpha=(0.0, 1.0)),
                    #sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)
                    #sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))) # sometimes move parts of the image around
                ],
                random_order=True
            )
        ],
        random_order=True
    )
    return aug_pipe 
Author ID: penny4860, Project: tf2-eager-yolo3, Lines: 63, Source: augment.py

Example 14: chapter_examples_basics_simple

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def chapter_examples_basics_simple():
    import imgaug as ia
    from imgaug import augmenters as iaa

    # Example batch of images.
    # The array has shape (32, 64, 64, 3) and dtype uint8.
    images = np.array(
        [ia.quokka(size=(64, 64)) for _ in range(32)],
        dtype=np.uint8
    )

    seq = iaa.Sequential([
        iaa.Fliplr(0.5), # horizontal flips
        iaa.Crop(percent=(0, 0.1)), # random crops
        # Small gaussian blur with random sigma between 0 and 0.5.
        # But we only blur about 50% of all images.
        iaa.Sometimes(0.5,
            iaa.GaussianBlur(sigma=(0, 0.5))
        ),
        # Strengthen or weaken the contrast in each image.
        iaa.ContrastNormalization((0.75, 1.5)),
        # Add gaussian noise.
        # For 50% of all images, we sample the noise once per pixel.
        # For the other 50% of all images, we sample the noise per pixel AND
        # channel. This can change the color (not only brightness) of the
        # pixels.
        iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),
        # Make some images brighter and some darker.
        # In 20% of all cases, we sample the multiplier once per channel,
        # which can end up changing the color of the images.
        iaa.Multiply((0.8, 1.2), per_channel=0.2),
        # Apply affine transformations to each image.
        # Scale/zoom them, translate/move them, rotate them and shear them.
        iaa.Affine(
            scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
            translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
            rotate=(-25, 25),
            shear=(-8, 8)
        )
    ], random_order=True) # apply augmenters in random order

    ia.seed(1)
    images_aug = seq.augment_images(images)

    # ------------

    save(
        "examples_basics",
        "simple.jpg",
        grid(images_aug, cols=8, rows=4)
    ) 
Author ID: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines: 53, Source: generate_documentation_images.py

Example 15: example_heavy_augmentations

# Required import: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import Crop [as alias]
def example_heavy_augmentations():
    print("Example: Heavy Augmentations")
    import imgaug as ia
    from imgaug import augmenters as iaa

    # random example images
    images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)

    # Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
    # e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
    st = lambda aug: iaa.Sometimes(0.5, aug)

    # Define our sequence of augmentation steps that will be applied to every image
    # All augmenters with per_channel=0.5 will sample one value _per image_
    # in 50% of all cases. In all other cases they will sample new values
    # _per channel_.
    seq = iaa.Sequential([
            iaa.Fliplr(0.5), # horizontally flip 50% of all images
            iaa.Flipud(0.5), # vertically flip 50% of all images
            st(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
            st(iaa.GaussianBlur((0, 3.0))), # blur images with a sigma between 0 and 3.0
            st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5)), # add gaussian noise to images
            st(iaa.Dropout((0.0, 0.1), per_channel=0.5)), # randomly remove up to 10% of the pixels
            st(iaa.Add((-10, 10), per_channel=0.5)), # change brightness of images (by -10 to 10 of original value)
            st(iaa.Multiply((0.5, 1.5), per_channel=0.5)), # change brightness of images (50-150% of original value)
            st(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5)), # improve or worsen the contrast
            st(iaa.Grayscale((0.0, 1.0))), # blend with grayscale image
            st(iaa.Affine(
                scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
                translate_px={"x": (-16, 16), "y": (-16, 16)}, # translate by -16 to +16 pixels (per axis)
                rotate=(-45, 45), # rotate by -45 to +45 degrees
                shear=(-16, 16), # shear by -16 to +16 degrees
                order=[0, 1], # use scikit-image's interpolation orders 0 (nearest neighbour) and 1 (bilinear)
                cval=(0, 255), # if mode is constant, use a cval between 0 and 255
                mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
            )),
            st(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)) # apply elastic transformations with random strengths
        ],
        random_order=True # do all of the above in random order
    )

    images_aug = seq.augment_images(images)

    # -----
    # Make sure that the example really does something
    assert not np.array_equal(images, images_aug) 
Author ID: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines: 48, Source: test_readme_examples.py


Note: The imgaug.augmenters.Crop method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code; do not reproduce this article without permission.