當前位置: 首頁>>代碼示例>>Python>>正文


Python transforms.RandomLighting方法代碼示例

本文整理匯總了Python中mxnet.gluon.data.vision.transforms.RandomLighting方法的典型用法代碼示例。如果您正苦於以下問題:Python transforms.RandomLighting方法的具體用法?Python transforms.RandomLighting怎麽用?Python transforms.RandomLighting使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在mxnet.gluon.data.vision.transforms的用法示例。


在下文中一共展示了transforms.RandomLighting方法的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: test_transformer

# 需要導入模塊: from mxnet.gluon.data.vision import transforms [as 別名]
# 或者: from mxnet.gluon.data.vision.transforms import RandomLighting [as 別名]
def test_transformer():
    """Smoke-test a Compose pipeline containing the common vision transforms.

    Chains resizing, cropping, flipping, color jitter, lighting noise,
    tensor conversion and normalization, then pushes a dummy uint8 image
    through the pipeline and blocks until the result is computed.
    """
    from mxnet.gluon.data.vision import transforms

    pipeline = transforms.Compose([
        transforms.Resize(300),
        transforms.Resize(300, keep_ratio=True),
        transforms.CenterCrop(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(0.1, 0.1, 0.1, 0.1),
        transforms.RandomBrightness(0.1),
        transforms.RandomContrast(0.1),
        transforms.RandomSaturation(0.1),
        transforms.RandomHue(0.1),
        transforms.RandomLighting(0.1),
        transforms.ToTensor(),
        transforms.Normalize([0, 0, 0], [1, 1, 1]),
    ])

    # wait_to_read() forces lazy MXNet execution so any transform error
    # surfaces inside this test rather than later.
    dummy_image = mx.nd.ones((245, 480, 3), dtype='uint8')
    pipeline(dummy_image).wait_to_read()
開發者ID:awslabs,項目名稱:dynamic-training-with-apache-mxnet-on-aws,代碼行數:21,代碼來源:test_gluon_data_vision.py

示例2: cifar10_train_transform

# 需要導入模塊: from mxnet.gluon.data.vision import transforms [as 別名]
# 或者: from mxnet.gluon.data.vision.transforms import RandomLighting [as 別名]
def cifar10_train_transform(ds_metainfo,
                            mean_rgb=(0.4914, 0.4822, 0.4465),
                            std_rgb=(0.2023, 0.1994, 0.2010),
                            jitter_param=0.4,
                            lighting_param=0.1):
    """Build the CIFAR-10 training augmentation pipeline.

    Parameters
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo; its ``input_image_size[0]`` must equal 32.
    mean_rgb : tuple of float
        Per-channel means used by the final normalization step.
    std_rgb : tuple of float
        Per-channel standard deviations used by the final normalization step.
    jitter_param : float
        Strength of brightness/contrast/saturation jitter.
    lighting_param : float
        Strength of the PCA-based lighting noise.

    Returns
    -------
    Compose
        The composed image transform sequence.
    """
    assert (ds_metainfo is not None)
    assert (ds_metainfo.input_image_size[0] == 32)

    augmentations = [
        # Pad by 4 then random-crop back to 32x32 (standard CIFAR recipe).
        RandomCrop(size=32, pad=4),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(
            brightness=jitter_param,
            contrast=jitter_param,
            saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean_rgb, std=std_rgb),
    ]
    return transforms.Compose(augmentations)
開發者ID:osmr,項目名稱:imgclsmob,代碼行數:24,代碼來源:cifar10_cls_dataset.py

示例3: test_transformer

# 需要導入模塊: from mxnet.gluon.data.vision import transforms [as 別名]
# 或者: from mxnet.gluon.data.vision.transforms import RandomLighting [as 別名]
def test_transformer():
    """Smoke-test a Compose pipeline of standard vision transforms.

    Runs a dummy uint8 image through resize, crops, flips, color jitter,
    lighting noise, tensor conversion and normalization, and waits for the
    result so any failure is raised here.
    """
    from mxnet.gluon.data.vision import transforms

    steps = [
        transforms.Resize(300),
        transforms.CenterCrop(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(0.1, 0.1, 0.1, 0.1),
        transforms.RandomBrightness(0.1),
        transforms.RandomContrast(0.1),
        transforms.RandomSaturation(0.1),
        transforms.RandomHue(0.1),
        transforms.RandomLighting(0.1),
        transforms.ToTensor(),
        transforms.Normalize([0, 0, 0], [1, 1, 1]),
    ]
    transform = transforms.Compose(steps)

    # Force evaluation of the lazy MXNet graph.
    transform(mx.nd.ones((245, 480, 3), dtype='uint8')).wait_to_read()
開發者ID:mahyarnajibi,項目名稱:SNIPER-mxnet,代碼行數:20,代碼來源:test_gluon_data_vision.py

示例4: get_data_loader

# 需要導入模塊: from mxnet.gluon.data.vision import transforms [as 別名]
# 或者: from mxnet.gluon.data.vision.transforms import RandomLighting [as 別名]
def get_data_loader(data_dir, batch_size, num_workers):
    """Create ImageNet train/validation DataLoaders and a batch-split helper.

    Parameters
    ----------
    data_dir : str
        Root directory of the ImageNet dataset.
    batch_size : int
        Batch size for both loaders.
    num_workers : int
        Number of data-loading worker processes.

    Returns
    -------
    tuple
        ``(train_data, val_data, batch_fn)`` where ``batch_fn`` splits a
        batch across a list of contexts.

    Notes
    -----
    Reads ``opt.input_size`` and ``opt.crop_ratio`` from module scope.
    """
    jitter_param = 0.4
    lighting_param = 0.1
    input_size = opt.input_size
    # Fall back to the conventional 0.875 crop ratio when unset/invalid.
    crop_ratio = opt.crop_ratio if opt.crop_ratio > 0 else 0.875
    resize = int(math.ceil(input_size / crop_ratio))
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    def batch_fn(batch, ctx):
        # Scatter images and labels across the given contexts.
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        return data, label

    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param,
                                     contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        normalize,
    ])
    test_transform = transforms.Compose([
        transforms.Resize(resize, keep_ratio=True),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        normalize,
    ])

    train_set = imagenet.classification.ImageNet(
        data_dir, train=True).transform_first(train_transform)
    val_set = imagenet.classification.ImageNet(
        data_dir, train=False).transform_first(test_transform)

    train_data = gluon.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True,
        last_batch='discard', num_workers=num_workers)
    val_data = gluon.data.DataLoader(
        val_set, batch_size=batch_size, shuffle=False,
        num_workers=num_workers)

    return train_data, val_data, batch_fn
開發者ID:miraclewkf,項目名稱:MXNet-Deep-Learning-in-Action,代碼行數:39,代碼來源:train_imagenet.py

示例5: get_data_loader

# 需要導入模塊: from mxnet.gluon.data.vision import transforms [as 別名]
# 或者: from mxnet.gluon.data.vision.transforms import RandomLighting [as 別名]
def get_data_loader(data_dir, batch_size, num_workers):
    """Create ImageNet train/validation DataLoaders and a batch-split helper.

    Training images are augmented (random resized crop to 224, flip, color
    jitter, lighting noise); validation images are resized to 256 and
    center-cropped to 224. Both pipelines end with ToTensor + normalization.

    Parameters
    ----------
    data_dir : str
        Root directory of the ImageNet dataset.
    batch_size : int
        Batch size for both loaders.
    num_workers : int
        Number of data-loading worker processes.

    Returns
    -------
    tuple
        ``(train_data, val_data, batch_fn)``.
    """
    jitter_param = 0.4
    lighting_param = 0.1
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    def batch_fn(batch, ctx):
        # Scatter images and labels across the given contexts.
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        return data, label

    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param,
                                     contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        normalize,
    ])
    test_transform = transforms.Compose([
        transforms.Resize(256, keep_ratio=True),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])

    train_set = imagenet.classification.ImageNet(
        data_dir, train=True).transform_first(train_transform)
    val_set = imagenet.classification.ImageNet(
        data_dir, train=False).transform_first(test_transform)

    train_data = gluon.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True,
        last_batch='discard', num_workers=num_workers)
    val_data = gluon.data.DataLoader(
        val_set, batch_size=batch_size, shuffle=False,
        num_workers=num_workers)

    return train_data, val_data, batch_fn
開發者ID:zzdang,項目名稱:cascade_rcnn_gluon,代碼行數:36,代碼來源:train_imagenet.py

示例6: get_train_data

# 需要導入模塊: from mxnet.gluon.data.vision import transforms [as 別名]
# 或者: from mxnet.gluon.data.vision.transforms import RandomLighting [as 別名]
def get_train_data(rec_train, batch_size, data_nthreads, input_size, crop_ratio, args):
    """Build the distributed training DataLoader from an ImageRecord file.

    Parameters
    ----------
    rec_train : str
        Path to the training ``.rec`` file.
    batch_size : int
        Per-worker batch size.
    data_nthreads : int
        Number of data-loading worker processes.
    input_size : int
        Target square crop size for training images.
    crop_ratio : float
        Crop ratio used to derive the resize size (currently only used to
        compute ``resize``).
    args : Namespace
        Parsed CLI options; only ``args.auto_aug`` is consulted here.

    Returns
    -------
    tuple
        ``(train_data, train_batch_fn)``.

    Notes
    -----
    Relies on module-level ``num_workers`` and ``rank`` (Horovod) for the
    SplitSampler partitioning.
    """
    def train_batch_fn(batch, ctx):
        # Move one batch onto the single target context.
        data = batch[0].as_in_context(ctx)
        label = batch[1].as_in_context(ctx)
        return data, label

    jitter_param = 0.4
    lighting_param = 0.1
    resize = int(math.ceil(input_size / crop_ratio))

    train_transforms = []
    if args.auto_aug:
        print('Using AutoAugment')
        from autogluon.utils.augment import AugmentationBlock, autoaug_imagenet_policies
        train_transforms.append(AugmentationBlock(autoaug_imagenet_policies()))

    from gluoncv.utils.transforms import EfficientNetRandomCrop
    from autogluon.utils import pil_transforms

    if input_size >= 320:
        # Large-resolution (EfficientNet-style) path uses PIL-based transforms.
        train_transforms += [
            EfficientNetRandomCrop(input_size),
            pil_transforms.Resize((input_size, input_size), interpolation=Image.BICUBIC),
            pil_transforms.RandomHorizontalFlip(),
            pil_transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
            transforms.RandomLighting(lighting_param),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    else:
        # Standard ImageNet augmentation path.
        train_transforms += [
            transforms.RandomResizedCrop(input_size),
            transforms.RandomFlipLeftRight(),
            transforms.RandomColorJitter(brightness=jitter_param,
                                         contrast=jitter_param,
                                         saturation=jitter_param),
            transforms.RandomLighting(lighting_param),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]

    transform_train = transforms.Compose(train_transforms)

    train_set = mx.gluon.data.vision.ImageRecordDataset(rec_train).transform_first(transform_train)
    # Each Horovod rank reads its own disjoint shard of the dataset.
    train_sampler = SplitSampler(len(train_set), num_parts=num_workers, part_index=rank)

    train_data = gluon.data.DataLoader(train_set, batch_size=batch_size,# shuffle=True,
                                       last_batch='discard', num_workers=data_nthreads,
                                       sampler=train_sampler)
    return train_data, train_batch_fn
開發者ID:dmlc,項目名稱:gluon-cv,代碼行數:51,代碼來源:train_horovod.py

示例7: imagenet_train_transform

# 需要導入模塊: from mxnet.gluon.data.vision import transforms [as 別名]
# 或者: from mxnet.gluon.data.vision.transforms import RandomLighting [as 別名]
def imagenet_train_transform(ds_metainfo,
                             jitter_param=0.4,
                             lighting_param=0.1):
    """Create the image transform sequence for the ImageNet-1K training subset.

    Parameters
    ----------
    ds_metainfo : DatasetMetaInfo
        ImageNet-1K dataset metainfo; supplies input size, interpolation,
        augmentation type and normalization statistics.
    jitter_param : float
        Strength of brightness/contrast/saturation jitter.
    lighting_param : float
        Strength of the PCA-based lighting noise.

    Returns
    -------
    Sequential
        Image transform sequence.

    Raises
    ------
    RuntimeError
        If ``ds_metainfo.aug_type`` is not one of ``aug0``/``aug1``/``aug2``.
    """
    input_image_size = ds_metainfo.input_image_size

    aug_type = ds_metainfo.aug_type
    if aug_type == "aug0":
        interpolation = ds_metainfo.interpolation
        transform_list = []
    elif aug_type == "aug1":
        # NOTE(review): 10 appears to select MXNet's "random interpolation"
        # mode — confirm against mx.image interp semantics.
        interpolation = 10
        transform_list = []
    elif aug_type == "aug2":
        interpolation = 10
        transform_list = [ImgAugTransform()]
    else:
        raise RuntimeError("Unknown augmentation type: {}\n".format(aug_type))

    transform_list.extend([
        transforms.RandomResizedCrop(
            size=input_image_size,
            interpolation=interpolation),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(
            brightness=jitter_param,
            contrast=jitter_param,
            saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=ds_metainfo.mean_rgb,
            std=ds_metainfo.std_rgb),
    ])

    return transforms.Compose(transform_list)
開發者ID:osmr,項目名稱:imgclsmob,代碼行數:54,代碼來源:imagenet1k_cls_dataset.py

示例8: generate_transform

# 需要導入模塊: from mxnet.gluon.data.vision import transforms [as 別名]
# 或者: from mxnet.gluon.data.vision.transforms import RandomLighting [as 別名]
def generate_transform(train, resize, _is_osx, input_size, jitter_param):
    """Build a train or eval transform, choosing a PIL or MXNet backend.

    Parameters
    ----------
    train : bool
        True for the augmented training pipeline, False for eval.
    resize : int
        Shorter-side resize used by the eval pipeline.
    _is_osx : bool
        When True, use the (slower) PIL-based transforms.
    input_size : int
        Final square crop size.
    jitter_param : float
        Color-jitter strength for the MXNet training pipeline.

    Returns
    -------
    Compose
        The composed transform.
    """
    imagenet_norm = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

    if _is_osx:
        # PIL-based loading path (slow) — note the fixed 0.4 jitter here.
        if train:
            steps = [
                RandomResizedCrop(input_size),
                RandomHorizontalFlip(),
                ColorJitter(0.4, 0.4, 0.4),
                ToTensor(),
                imagenet_norm,
            ]
            transform = Compose(steps)
        else:
            steps = [
                Resize(resize),
                CenterCrop(input_size),
                ToTensor(),
                imagenet_norm,
            ]
            transform = Compose(steps)
    else:
        if train:
            steps = [
                transforms.RandomResizedCrop(input_size),
                transforms.RandomFlipLeftRight(),
                transforms.RandomColorJitter(
                    brightness=jitter_param,
                    contrast=jitter_param,
                    saturation=jitter_param),
                transforms.RandomLighting(0.1),
                transforms.ToTensor(),
                imagenet_norm,
            ]
            transform = transforms.Compose(steps)
        else:
            steps = [
                transforms.Resize(resize),
                transforms.CenterCrop(input_size),
                transforms.ToTensor(),
                imagenet_norm,
            ]
            transform = transforms.Compose(steps)
    return transform
開發者ID:awslabs,項目名稱:autogluon,代碼行數:50,代碼來源:dataset.py


注:本文中的mxnet.gluon.data.vision.transforms.RandomLighting方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。