

Python augmenters.Invert Method Code Examples

This article collects typical usage examples of the Python method imgaug.augmenters.Invert. If you are unsure what augmenters.Invert does or how to use it, the curated code examples below should help. You can also browse further usage examples from the imgaug.augmenters module.


Below are 4 code examples of the augmenters.Invert method, sorted by popularity by default.
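Before working through the examples, here is a minimal usage sketch (assuming imgaug and numpy are installed; the random test image is invented purely for illustration). iaa.Invert(0.5) inverts each image with probability 0.5, and per_channel lets that decision be made independently per channel.

import numpy as np
import imgaug.augmenters as iaa

# A made-up 64x64 RGB test image with random pixel values.
image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)

# Invert each image with probability 0.5 (for uint8 images: new value = 255 - old value).
aug = iaa.Invert(0.5)
image_aug = aug.augment_image(image)

# Invert about 25% of images; for half of those, make the decision per channel instead of per image.
aug_pc = iaa.Invert(0.25, per_channel=0.5)
images_aug = aug_pc.augment_images([image] * 8)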

Example 1: test_returns_correct_instance

# Required import: from imgaug import augmenters [as alias]
# Or alternatively: from imgaug.augmenters import Invert [as alias]
def test_returns_correct_instance(self):
        aug = iaa.pillike.Solarize()
        assert isinstance(aug, iaa.Invert)
        assert aug.per_channel.value == 0
        assert aug.min_value is None
        assert aug.max_value is None
        assert np.isclose(aug.threshold.value, 128)
        assert aug.invert_above_threshold.value == 1 
Developer: aleju | Project: imgaug | Lines: 10 | Source file: test_pillike.py
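As a follow-up, a small illustrative sketch of what those defaults mean in practice (the gradient image below is made up for demonstration): iaa.pillike.Solarize behaves like PIL's solarize, i.e. an Invert that flips only sufficiently bright pixels (threshold 128 by default).

import numpy as np
import imgaug.augmenters as iaa

# Illustrative gradient image: dark values stay as they are, bright values
# above the threshold get inverted (255 - value), mirroring PIL's solarize().
image = np.tile(np.arange(0, 256, dtype=np.uint8).reshape(16, 16, 1), (1, 1, 3))

aug = iaa.pillike.Solarize(p=1.0)  # p=1.0 so the effect is always applied
image_aug = aug.augment_image(image)
print(image[0, :4, 0], "->", image_aug[0, :4, 0])      # dark values unchanged
print(image[-1, -4:, 0], "->", image_aug[-1, -4:, 0])  # bright values inverted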

Example 2: chapter_augmenters_invert

# Required import: from imgaug import augmenters [as alias]
# Or alternatively: from imgaug.augmenters import Invert [as alias]
def chapter_augmenters_invert():
    aug = iaa.Invert(0.5)
    run_and_save_augseq(
        "invert.jpg", aug,
        [ia.quokka(size=(64, 64)) for _ in range(16)], cols=8, rows=2
    )

    aug = iaa.Invert(0.25, per_channel=0.5)
    run_and_save_augseq(
        "invert_per_channel.jpg", aug,
        [ia.quokka(size=(64, 64)) for _ in range(16)], cols=8, rows=2
    ) 
Developer: JoshuaPiinRueyPan | Project: ViolenceDetection | Lines: 14 | Source file: generate_documentation_images.py

Example 3: _create_augment_pipeline

# Required import: from imgaug import augmenters [as alias]
# Or alternatively: from imgaug.augmenters import Invert [as alias]
def _create_augment_pipeline():
    from imgaug import augmenters as iaa
    
    ### augmenters by https://github.com/aleju/imgaug
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)

    # Define our sequence of augmentation steps that will be applied to every image
    # All augmenters with per_channel=0.5 will sample one value _per image_
    # in 50% of all cases. In all other cases they will sample new values
    # _per channel_.
    aug_pipe = iaa.Sequential(
        [
            # apply the following augmenters to most images
            #iaa.Fliplr(0.5), # horizontally flip 50% of all images
            #iaa.Flipud(0.2), # vertically flip 20% of all images
            #sometimes(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
            sometimes(iaa.Affine(
                #scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
                #translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)
                #rotate=(-5, 5), # rotate by -5 to +5 degrees
                #shear=(-5, 5), # shear by -5 to +5 degrees
                #order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
                #cval=(0, 255), # if mode is constant, use a cval between 0 and 255
                #mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
            )),
            # execute 0 to 5 of the following (less important) augmenters per image
            # don't execute all of them, as that would often be way too strong
            iaa.SomeOf((0, 5),
                [
                    #sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation
                    iaa.OneOf([
                        iaa.GaussianBlur((0, 3.0)), # blur images with a sigma between 0 and 3.0
                        iaa.AverageBlur(k=(2, 7)), # blur image using local means with kernel sizes between 2 and 7
                        iaa.MedianBlur(k=(3, 11)), # blur image using local medians with kernel sizes between 3 and 11
                    ]),
                    iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # sharpen images
                    #iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
                    # search either for all edges or for directed edges
                    #sometimes(iaa.OneOf([
                    #    iaa.EdgeDetect(alpha=(0, 0.7)),
                    #    iaa.DirectedEdgeDetect(alpha=(0, 0.7), direction=(0.0, 1.0)),
                    #])),
                    iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images
                    iaa.OneOf([
                        iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels
                        #iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                    ]),
                    #iaa.Invert(0.05, per_channel=True), # invert color channels
                    iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value)
                    iaa.Multiply((0.5, 1.5), per_channel=0.5), # change brightness of images (50-150% of original value)
                    iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast
                    #iaa.Grayscale(alpha=(0.0, 1.0)),
                    #sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)
                    #sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))) # sometimes move parts of the image around
                ],
                random_order=True
            )
        ],
        random_order=True
    )
    return aug_pipe 
Developer: penny4860 | Project: tf2-eager-yolo3 | Lines: 63 | Source file: augment.py
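A short usage sketch for this pipeline (hedged: the batch below is invented for illustration, and it assumes _create_augment_pipeline from the snippet above is in scope):

import numpy as np

# Build the pipeline defined above, then apply it to a made-up batch of
# 416x416 RGB images; each image receives its own random subset of augmenters.
aug_pipe = _create_augment_pipeline()
images = [np.random.randint(0, 256, size=(416, 416, 3), dtype=np.uint8)
          for _ in range(8)]
images_aug = aug_pipe.augment_images(images)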

Example 4: test_Invert

# Required import: from imgaug import augmenters [as alias]
# Or alternatively: from imgaug.augmenters import Invert [as alias]
def test_Invert():
    reseed()

    zeros = np.zeros((4, 4, 3), dtype=np.uint8)
    keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
                                      ia.Keypoint(x=2, y=2)], shape=zeros.shape)]

    observed = iaa.Invert(p=1.0).augment_image(zeros + 255)
    expected = zeros
    assert np.array_equal(observed, expected)

    observed = iaa.Invert(p=0.0).augment_image(zeros + 255)
    expected = zeros + 255
    assert np.array_equal(observed, expected)

    observed = iaa.Invert(p=1.0, max_value=200).augment_image(zeros + 200)
    expected = zeros
    assert np.array_equal(observed, expected)

    observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros + 200)
    expected = zeros + 100
    assert np.array_equal(observed, expected)

    observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros + 100)
    expected = zeros + 200
    assert np.array_equal(observed, expected)

    nb_iterations = 1000
    nb_inverted = 0
    for i in sm.xrange(nb_iterations):
        observed = iaa.Invert(p=0.5).augment_image(zeros + 255)
        if np.array_equal(observed, zeros):
            nb_inverted += 1
    pinv = nb_inverted / nb_iterations
    assert 0.4 <= pinv <= 0.6

    # keypoints shouldn't be changed
    aug = iaa.Invert(p=1.0)
    aug_det = iaa.Invert(p=1.0).to_deterministic()
    observed = aug.augment_keypoints(keypoints)
    expected = keypoints
    assert keypoints_equal(observed, expected)

    observed = aug_det.augment_keypoints(keypoints)
    expected = keypoints
    assert keypoints_equal(observed, expected) 
Developer: JoshuaPiinRueyPan | Project: ViolenceDetection | Lines: 48 | Source file: test.py
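One way to read the min_value/max_value assertions above: when both bounds are set, Invert maps a value v to min_value + max_value - v. A tiny illustrative check (not part of the original test):

import numpy as np
import imgaug.augmenters as iaa

zeros = np.zeros((4, 4, 3), dtype=np.uint8)
aug = iaa.Invert(p=1.0, min_value=100, max_value=200)

# 200 -> 100 and 100 -> 200, i.e. v' = min_value + max_value - v.
assert np.array_equal(aug.augment_image(zeros + 200), zeros + 100)
assert np.array_equal(aug.augment_image(zeros + 100), zeros + 200)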


Note: The imgaug.augmenters.Invert method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. When distributing or using the code, please follow the corresponding project's license. Do not republish without permission.