

Python augmenters.OneOf Method Code Examples

This article collects and summarizes typical usage examples of the imgaug.augmenters.OneOf method in Python. If you are wondering what augmenters.OneOf does, how to call it, or what real-world usages look like, the curated code examples below should help. You can also explore further usage examples from the imgaug.augmenters module.


The following presents 10 code examples of the augmenters.OneOf method, sorted by popularity by default.
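
Before the examples, here is a minimal, self-contained sketch of what iaa.OneOf does (this snippet is not taken from any of the projects below, and it assumes a reasonably recent imgaug version that supports the aug(images=...) call syntax): OneOf takes a list of augmenters and, for each image, applies exactly one randomly chosen augmenter from that list.

import numpy as np
import imgaug.augmenters as iaa

# For every input image, exactly one of the three augmenters below is applied.
aug = iaa.OneOf([
    iaa.Fliplr(1.0),              # horizontal flip
    iaa.GaussianBlur(sigma=1.0),  # Gaussian blur
    iaa.Add(30),                  # increase brightness by 30
])

images = np.random.randint(0, 255, size=(4, 64, 64, 3), dtype=np.uint8)
images_aug = aug(images=images)  # each of the 4 images gets one randomly chosen augmenter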

Example 1: __init__

# Required module: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import OneOf [as alias]
def __init__(self):
        self.seq = iaa.Sequential([
            iaa.Sometimes(0.5, iaa.OneOf([
                iaa.GaussianBlur((0, 3.0)),  # blur images with a sigma between 0 and 3.0
                iaa.AverageBlur(k=(2, 7)),  # blur image using local means with kernel sizes between 2 and 7
                iaa.MedianBlur(k=(3, 11)),  # blur image using local medians with kernel sizes between 3 and 11
            ])),
            iaa.Sometimes(0.5, iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5)),
            iaa.Sometimes(0.5, iaa.Add((-10, 10), per_channel=0.5)),
            iaa.Sometimes(0.5, iaa.AddToHueAndSaturation((-20, 20))),
            iaa.Sometimes(0.5, iaa.FrequencyNoiseAlpha(
                exponent=(-4, 0),
                first=iaa.Multiply((0.5, 1.5), per_channel=True),
                second=iaa.LinearContrast((0.5, 2.0))
            )),
            iaa.Sometimes(0.5, iaa.PiecewiseAffine(scale=(0.01, 0.05))),
            iaa.Sometimes(0.5, iaa.PerspectiveTransform(scale=(0.01, 0.1)))
        ], random_order=True) 
Developer: WenmuZhou, Project: crnn.gluon, Lines of code: 20, Source: augment.py
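
A brief usage note, not part of the original augment.py (the reduced pipeline and image shape below are assumptions): a Sequential such as self.seq above can be applied to a single image with augment_image, or to a batch via the call syntax of recent imgaug versions.

import numpy as np
import imgaug.augmenters as iaa

# Reduced stand-in pipeline with the same structure as self.seq above.
seq = iaa.Sequential([
    iaa.Sometimes(0.5, iaa.GaussianBlur((0, 3.0))),
    iaa.Sometimes(0.5, iaa.Add((-10, 10), per_channel=0.5)),
], random_order=True)

image = np.random.randint(0, 255, size=(32, 280, 3), dtype=np.uint8)  # dummy text-line crop
image_aug = seq.augment_image(image)  # single image
batch_aug = seq(images=[image] * 8)   # batch of images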

Example 2: _load_augmentation_aug_geometric

# Required module: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import OneOf [as alias]
def _load_augmentation_aug_geometric():
    return iaa.OneOf([
        iaa.Sequential([iaa.Fliplr(0.5), iaa.Flipud(0.2)]),
        iaa.CropAndPad(percent=(-0.05, 0.1),
                       pad_mode='constant',
                       pad_cval=(0, 255)),
        iaa.Crop(percent=(0.0, 0.1)),
        iaa.Crop(percent=(0.3, 0.5)),
        iaa.Crop(percent=(0.3, 0.5)),
        iaa.Crop(percent=(0.3, 0.5)),
        iaa.Sequential([
            iaa.Affine(
                    # scale images to 80-120% of their size,
                    # individually per axis
                    scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
                    # translate by -20 to +20 percent (per axis)
                    translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
                    rotate=(-45, 45),  # rotate by -45 to +45 degrees
                    shear=(-16, 16),  # shear by -16 to +16 degrees
                    # use nearest neighbour or bilinear interpolation (fast)
                    order=[0, 1],
                    # if mode is constant, use a cval between 0 and 255
                    mode='constant',
                    cval=(0, 255),
            ),
            iaa.Sometimes(0.3, iaa.Crop(percent=(0.3, 0.5)))])
    ]) 
Developer: divamgupta, Project: image-segmentation-keras, Lines of code: 31, Source: augmentation.py

Example 3: main

# Required module: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import OneOf [as alias]
def main():
    aug = iaa.BlendAlphaMask(
        iaa.SomeColorsMaskGen(),
        iaa.OneOf([
            iaa.TotalDropout(1.0),
            iaa.AveragePooling(8)
        ])
    )

    aug2 = iaa.BlendAlphaSomeColors(iaa.OneOf([
            iaa.TotalDropout(1.0),
            iaa.AveragePooling(8)
    ]))

    urls = [
        ("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/"
         "Sarcophilus_harrisii_taranna.jpg/"
         "320px-Sarcophilus_harrisii_taranna.jpg"),
        ("https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/"
         "Vincent_van_Gogh_-_Wheatfield_with_crows_-_Google_Art_Project.jpg/"
         "320px-Vincent_van_Gogh_-_Wheatfield_with_crows_-_Google_Art_Project"
         ".jpg"),
        ("https://upload.wikimedia.org/wikipedia/commons/thumb/0/0c/"
         "Galerella_sanguinea_Zoo_Praha_2011-2.jpg/207px-Galerella_sanguinea_"
         "Zoo_Praha_2011-2.jpg"),
        ("https://upload.wikimedia.org/wikipedia/commons/thumb/9/96/"
         "Ambrosius_Bosschaert_the_Elder_%28Dutch_-_Flower_Still_Life_-_"
         "Google_Art_Project.jpg/307px-Ambrosius_Bosschaert_the_Elder_%28"
         "Dutch_-_Flower_Still_Life_-_Google_Art_Project.jpg")
    ]

    for url in urls:
        img = imageio.imread(url)
        ia.imshow(ia.draw_grid(aug(images=[img]*25), cols=5, rows=5))
        ia.imshow(ia.draw_grid(aug2(images=[img]*25), cols=5, rows=5)) 
Developer: aleju, Project: imgaug, Lines of code: 37, Source: check_blendalphasomecolors.py

Example 4: main

# Required module: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import OneOf [as alias]
def main():
    aug = iaa.BlendAlphaMask(
        iaa.SegMapClassIdsMaskGen(1),
        iaa.OneOf([
            iaa.TotalDropout(1.0),
            iaa.AveragePooling(8)
        ])
    )

    aug2 = iaa.BlendAlphaSegMapClassIds(
        1, iaa.OneOf([
            iaa.TotalDropout(1.0),
            iaa.AveragePooling(8)
        ])
    )

    image = ia.data.quokka(0.25)
    segmap = ia.data.quokka_segmentation_map(0.25)

    images_aug, segmaps_aug = aug(images=[image]*25,
                                  segmentation_maps=[segmap]*25)
    ia.imshow(ia.draw_grid(images_aug, cols=5, rows=5))

    images_aug, segmaps_aug = aug2(images=[image]*25,
                                  segmentation_maps=[segmap]*25)
    ia.imshow(ia.draw_grid(images_aug, cols=5, rows=5)) 
Developer: aleju, Project: imgaug, Lines of code: 28, Source: check_blendalphasegmapclassids.py

Example 5: chapter_augmenters_oneof

# Required module: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import OneOf [as alias]
def chapter_augmenters_oneof():
    aug = iaa.OneOf([
        iaa.Affine(rotate=45),
        iaa.AdditiveGaussianNoise(scale=0.2*255),
        iaa.Add(50, per_channel=True),
        iaa.Sharpen(alpha=0.5)
    ])
    run_and_save_augseq(
        "oneof.jpg", aug,
        [ia.quokka(size=(128, 128)) for _ in range(8)], cols=4, rows=2
    ) 
Developer: JoshuaPiinRueyPan, Project: ViolenceDetection, Lines of code: 13, Source: generate_documentation_images.py

Example 6: train

# Required module: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import OneOf [as alias]
def train(model, dataset_dir, subset):
    """Train the model."""
    # Training dataset.
    dataset_train = NucleusDataset()
    dataset_train.load_nucleus(dataset_dir, subset)
    dataset_train.prepare()

    # Validation dataset
    dataset_val = NucleusDataset()
    dataset_val.load_nucleus(dataset_dir, "val")
    dataset_val.prepare()

    # Image augmentation
    # http://imgaug.readthedocs.io/en/latest/source/augmenters.html
    augmentation = iaa.SomeOf((0, 2), [
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.OneOf([iaa.Affine(rotate=90),
                   iaa.Affine(rotate=180),
                   iaa.Affine(rotate=270)]),
        iaa.Multiply((0.8, 1.5)),
        iaa.GaussianBlur(sigma=(0.0, 5.0))
    ])

    # *** This training schedule is an example. Update to your needs ***

    # If starting from imagenet, train heads only for a bit
    # since they have random weights
    print("Train network heads")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=20,
                augmentation=augmentation,
                layers='heads')

    print("Train all layers")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=40,
                augmentation=augmentation,
                layers='all')


############################################################
#  RLE Encoding
############################################################ 
Developer: dmechea, Project: PanopticSegmentation, Lines of code: 48, Source: nucleus.py

Example 7: __getitem__

# Required module: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import OneOf [as alias]
def __getitem__(self, item):
        if not self.aug:
            uuid = self.list[item]
        else:
            uuid = self.list[item // test_aug_sz]

        colors = ['red', 'green', 'blue', 'yellow']
        flags = cv2.IMREAD_GRAYSCALE
        img = [cv2.imread(os.path.join(self.default_path, uuid + '_' + color + self.ext), flags) for color in colors]
        if self.resize:
            img = [cv2.resize(x, (1024, 1024)) for x in img]

        img = np.stack(img, axis=-1)

        # TODO : data augmentation zoom/shear/brightness
        if 'train' in self.setname:
            augment_img = iaa.Sequential([
                iaa.OneOf([
                    iaa.Affine(rotate=0),
                    iaa.Affine(rotate=90),
                    iaa.Affine(rotate=180),
                    iaa.Affine(rotate=270),
                    iaa.Fliplr(0.5),
                    iaa.Flipud(0.5),
                ])
            ], random_order=True)
            img = augment_img.augment_image(img)

            # cutout
            if C.get()['cutout_p'] > 0.0:
                img = cutout(C.get()['cutout_size'], C.get()['cutout_p'], False)(img)

            # TODO : channel drop(except green)?
            # d_ch = random.choice([0, 2, 3])
            # img[:, :, d_ch] = 0

        if self.aug:
            # test-time augmentation (TTA)
            tta_list = list(itertools.product(
                [iaa.Affine(rotate=0), iaa.Affine(rotate=90), iaa.Affine(rotate=180), iaa.Affine(rotate=270)],
                [iaa.Fliplr(0.0), iaa.Fliplr(1.0), iaa.Flipud(1.0), iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)])]
            ))
            tta_idx = item % len(tta_list)
            img = tta_list[tta_idx][0].augment_image(img)
            img = tta_list[tta_idx][1].augment_image(img)

        img = img.astype(np.float32)
        img /= 255.  # TODO : different normalization?
        img = np.transpose(img, (2, 0, 1))
        img = np.ascontiguousarray(img)

        if self.setname == 'tests':
            lb = np.zeros(len(name_label_dict), dtype=int)
        else:
            lb = [int(x) for x in self.labels.loc[uuid]['Target'].split()]
            lb = np.eye(len(name_label_dict), dtype=float)[lb].sum(axis=0)
        return img, lb 
Developer: ildoonet, Project: kaggle-human-protein-atlas-image-classification, Lines of code: 59, Source: data.py

Example 8: _create_augment_pipeline

# Required module: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import OneOf [as alias]
def _create_augment_pipeline():
    from imgaug import augmenters as iaa
    
    ### augmentors by https://github.com/aleju/imgaug
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)

    # Define our sequence of augmentation steps that will be applied to every image
    # All augmenters with per_channel=0.5 will sample one value _per image_
    # in 50% of all cases. In all other cases they will sample new values
    # _per channel_.
    aug_pipe = iaa.Sequential(
        [
            # apply the following augmenters to most images
            #iaa.Fliplr(0.5), # horizontally flip 50% of all images
            #iaa.Flipud(0.2), # vertically flip 20% of all images
            #sometimes(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
            sometimes(iaa.Affine(
                #scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
                #translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)
                #rotate=(-5, 5), # rotate by -5 to +5 degrees
                #shear=(-5, 5), # shear by -5 to +5 degrees
                #order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
                #cval=(0, 255), # if mode is constant, use a cval between 0 and 255
                #mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
            )),
            # execute 0 to 5 of the following (less important) augmenters per image
            # don't execute all of them, as that would often be way too strong
            iaa.SomeOf((0, 5),
                [
                    #sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation
                    iaa.OneOf([
                        iaa.GaussianBlur((0, 3.0)), # blur images with a sigma between 0 and 3.0
                        iaa.AverageBlur(k=(2, 7)), # blur image using local means with kernel sizes between 2 and 7
                        iaa.MedianBlur(k=(3, 11)), # blur image using local medians with kernel sizes between 3 and 11
                    ]),
                    iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # sharpen images
                    #iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
                    # search either for all edges or for directed edges
                    #sometimes(iaa.OneOf([
                    #    iaa.EdgeDetect(alpha=(0, 0.7)),
                    #    iaa.DirectedEdgeDetect(alpha=(0, 0.7), direction=(0.0, 1.0)),
                    #])),
                    iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images
                    iaa.OneOf([
                        iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels
                        #iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                    ]),
                    #iaa.Invert(0.05, per_channel=True), # invert color channels
                    iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value)
                    iaa.Multiply((0.5, 1.5), per_channel=0.5), # change brightness of images (50-150% of original value)
                    iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast
                    #iaa.Grayscale(alpha=(0.0, 1.0)),
                    #sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)
                    #sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))) # sometimes move parts of the image around
                ],
                random_order=True
            )
        ],
        random_order=True
    )
    return aug_pipe 
Developer: penny4860, Project: tf2-eager-yolo3, Lines of code: 63, Source: augment.py

Example 9: heavy_aug_on_fly

# Required module: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import OneOf [as alias]
def heavy_aug_on_fly(img, det_mask):
    """Do augmentation with different combination on each training batch
    """

    def image_heavy_augmentation(image, det_masks, ratio_operations=0.6):
        # according to the paper, operations such as shearing, flipping horizontally/vertically,
        # rotating, zooming and channel shifting will be applied
        sometimes = lambda aug: iaa.Sometimes(ratio_operations, aug)
        edge_detect_sometime = lambda aug: iaa.Sometimes(0.1, aug)
        elasitic_sometime = lambda aug:iaa.Sometimes(0.2, aug)
        add_gauss_noise = lambda aug: iaa.Sometimes(0.15, aug)
        hor_flip_angle = np.random.uniform(0, 1)
        ver_flip_angle = np.random.uniform(0, 1)
        seq = iaa.Sequential([
            iaa.SomeOf((0, 5), [
                iaa.Fliplr(hor_flip_angle),
                iaa.Flipud(ver_flip_angle),
                iaa.Affine(shear=(-16, 16)),
                iaa.Affine(scale={'x': (1, 1.6), 'y': (1, 1.6)}),
                iaa.PerspectiveTransform(scale=(0.01, 0.1)),

                # These are additional augmentation.
                #iaa.ContrastNormalization((0.75, 1.5))

            ]),

            edge_detect_sometime(iaa.OneOf([
                iaa.EdgeDetect(alpha=(0, 0.7)),
                iaa.DirectedEdgeDetect(alpha=(0,0.7), direction=(0.0, 1.0)
                                       )
            ])),
            add_gauss_noise(iaa.AdditiveGaussianNoise(loc=0,
                                                      scale=(0.0, 0.05*255),
                                                      per_channel=0.5)
                            ),
            iaa.Sometimes(0.3,
                          iaa.GaussianBlur(sigma=(0, 0.5))
                          ),
            elasitic_sometime(
                iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25))
        ])

        seq_to_deterministic = seq.to_deterministic()
        aug_img = seq_to_deterministic.augment_images(image)
        aug_det_mask = seq_to_deterministic.augment_images(det_masks)
        return aug_img, aug_det_mask

    aug_image, aug_det_mask = image_heavy_augmentation(image=img, det_masks=det_mask)
    return aug_image, aug_det_mask 
Developer: zhuyiche, Project: sfcn-opi, Lines of code: 51, Source: util.py

Example 10: get_augmentations

# Required module: from imgaug import augmenters [as alias]
# Or: from imgaug.augmenters import OneOf [as alias]
def get_augmentations():
    # applies the given augmenter in 50% of all cases,
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)

    # Define our sequence of augmentation steps that will be applied to every image
    seq = iaa.Sequential([
            # execute 0 to 5 of the following (less important) augmenters per image
            iaa.SomeOf((0, 5),
                [
                    iaa.OneOf([
                        iaa.GaussianBlur((0, 3.0)),
                        iaa.AverageBlur(k=(2, 7)), 
                        iaa.MedianBlur(k=(3, 11)),
                    ]),
                    iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
                    iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), 
                    # search either for all edges or for directed edges,
                    # blend the result with the original image using a blobby mask
                    iaa.SimplexNoiseAlpha(iaa.OneOf([
                        iaa.EdgeDetect(alpha=(0.5, 1.0)),
                        iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),
                    ])),
                    iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),
                    iaa.OneOf([
                        iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels
                        iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                    ]),
                    iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value)
                    iaa.AddToHueAndSaturation((-20, 20)), # change hue and saturation
                    # either change the brightness of the whole image (sometimes
                    # per channel) or change the brightness of subareas
                    iaa.OneOf([
                        iaa.Multiply((0.5, 1.5), per_channel=0.5),
                        iaa.FrequencyNoiseAlpha(
                            exponent=(-4, 0),
                            first=iaa.Multiply((0.5, 1.5), per_channel=True),
                            second=iaa.ContrastNormalization((0.5, 2.0))
                        )
                    ]),
                    iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast
                    sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)
                ],
                random_order=True
            )
        ],
        random_order=True
    )
    return seq

### data transforms 
Developer: xl-sr, Project: CAL, Lines of code: 52, Source: dataloader.py


Note: The imgaug.augmenters.OneOf examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Before distributing or using the code, please consult the License of the corresponding project; do not reproduce this article without permission.