This article collects typical usage examples of the Python method mxnet.gluon.data.vision.transforms.RandomLighting. If you are wondering what transforms.RandomLighting does, how to call it, or what real-world uses look like, the curated examples below may help. You can also explore further examples for the containing module, mxnet.gluon.data.vision.transforms.
The following lists 8 code examples of transforms.RandomLighting, ordered by popularity by default.
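Before the listings, here is a minimal standalone sketch (not taken from any of the examples below) of what RandomLighting does on its own: it adds AlexNet-style PCA-based lighting noise to an image, and its single argument scales the strength of that noise. The image shape and the 0.1 value are illustrative assumptions.
import mxnet as mx
from mxnet.gluon.data.vision import transforms

# A float32 HWC image; the underlying lighting operator expects float input (assumption).
img = mx.nd.random.uniform(shape=(224, 224, 3))
lighting = transforms.RandomLighting(0.1)  # 0.1 controls the spread of the PCA-noise weights
out = lighting(img)
print(out.shape)  # (224, 224, 3)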
Example 1: test_transformer
# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import RandomLighting [as alias]
def test_transformer():
    from mxnet.gluon.data.vision import transforms
    # Chain every image transform (including RandomLighting) and run it on a dummy uint8 image.
    transform = transforms.Compose([
        transforms.Resize(300),
        transforms.Resize(300, keep_ratio=True),
        transforms.CenterCrop(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(0.1, 0.1, 0.1, 0.1),
        transforms.RandomBrightness(0.1),
        transforms.RandomContrast(0.1),
        transforms.RandomSaturation(0.1),
        transforms.RandomHue(0.1),
        transforms.RandomLighting(0.1),
        transforms.ToTensor(),
        transforms.Normalize([0, 0, 0], [1, 1, 1])])
    transform(mx.nd.ones((245, 480, 3), dtype='uint8')).wait_to_read()
Example 2: cifar10_train_transform
# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import RandomLighting [as alias]
def cifar10_train_transform(ds_metainfo,
                            mean_rgb=(0.4914, 0.4822, 0.4465),
                            std_rgb=(0.2023, 0.1994, 0.2010),
                            jitter_param=0.4,
                            lighting_param=0.1):
    assert (ds_metainfo is not None)
    assert (ds_metainfo.input_image_size[0] == 32)
    return transforms.Compose([
        RandomCrop(
            size=32,
            pad=4),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(
            brightness=jitter_param,
            contrast=jitter_param,
            saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=mean_rgb,
            std=std_rgb)
    ])
Example 3: test_transformer
# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import RandomLighting [as alias]
def test_transformer():
    from mxnet.gluon.data.vision import transforms
    transform = transforms.Compose([
        transforms.Resize(300),
        transforms.CenterCrop(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(0.1, 0.1, 0.1, 0.1),
        transforms.RandomBrightness(0.1),
        transforms.RandomContrast(0.1),
        transforms.RandomSaturation(0.1),
        transforms.RandomHue(0.1),
        transforms.RandomLighting(0.1),
        transforms.ToTensor(),
        transforms.Normalize([0, 0, 0], [1, 1, 1])])
    transform(mx.nd.ones((245, 480, 3), dtype='uint8')).wait_to_read()
Example 4: get_data_loader
# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import RandomLighting [as alias]
def get_data_loader(data_dir, batch_size, num_workers):
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    jitter_param = 0.4
    lighting_param = 0.1
    input_size = opt.input_size
    crop_ratio = opt.crop_ratio if opt.crop_ratio > 0 else 0.875
    resize = int(math.ceil(input_size / crop_ratio))

    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        return data, label

    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param, contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        normalize
    ])
    transform_test = transforms.Compose([
        transforms.Resize(resize, keep_ratio=True),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        normalize
    ])

    train_data = gluon.data.DataLoader(
        imagenet.classification.ImageNet(data_dir, train=True).transform_first(transform_train),
        batch_size=batch_size, shuffle=True, last_batch='discard', num_workers=num_workers)
    val_data = gluon.data.DataLoader(
        imagenet.classification.ImageNet(data_dir, train=False).transform_first(transform_test),
        batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_data, val_data, batch_fn
Example 5: get_data_loader
# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import RandomLighting [as alias]
def get_data_loader(data_dir, batch_size, num_workers):
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    jitter_param = 0.4
    lighting_param = 0.1

    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        return data, label

    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param, contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        normalize
    ])
    transform_test = transforms.Compose([
        transforms.Resize(256, keep_ratio=True),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ])

    train_data = gluon.data.DataLoader(
        imagenet.classification.ImageNet(data_dir, train=True).transform_first(transform_train),
        batch_size=batch_size, shuffle=True, last_batch='discard', num_workers=num_workers)
    val_data = gluon.data.DataLoader(
        imagenet.classification.ImageNet(data_dir, train=False).transform_first(transform_test),
        batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_data, val_data, batch_fn
Example 6: get_train_data
# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import RandomLighting [as alias]
def get_train_data(rec_train, batch_size, data_nthreads, input_size, crop_ratio, args):
    def train_batch_fn(batch, ctx):
        data = batch[0].as_in_context(ctx)
        label = batch[1].as_in_context(ctx)
        return data, label

    jitter_param = 0.4
    lighting_param = 0.1
    resize = int(math.ceil(input_size / crop_ratio))

    train_transforms = []
    if args.auto_aug:
        print('Using AutoAugment')
        from autogluon.utils.augment import AugmentationBlock, autoaug_imagenet_policies
        train_transforms.append(AugmentationBlock(autoaug_imagenet_policies()))

    from gluoncv.utils.transforms import EfficientNetRandomCrop
    from autogluon.utils import pil_transforms

    if input_size >= 320:
        train_transforms.extend([
            EfficientNetRandomCrop(input_size),
            pil_transforms.Resize((input_size, input_size), interpolation=Image.BICUBIC),
            pil_transforms.RandomHorizontalFlip(),
            pil_transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
            transforms.RandomLighting(lighting_param),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    else:
        train_transforms.extend([
            transforms.RandomResizedCrop(input_size),
            transforms.RandomFlipLeftRight(),
            transforms.RandomColorJitter(brightness=jitter_param, contrast=jitter_param,
                                         saturation=jitter_param),
            transforms.RandomLighting(lighting_param),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

    transform_train = transforms.Compose(train_transforms)
    train_set = mx.gluon.data.vision.ImageRecordDataset(rec_train).transform_first(transform_train)
    train_sampler = SplitSampler(len(train_set), num_parts=num_workers, part_index=rank)
    train_data = gluon.data.DataLoader(train_set, batch_size=batch_size,  # shuffle=True,
                                       last_batch='discard', num_workers=data_nthreads,
                                       sampler=train_sampler)
    return train_data, train_batch_fn
Example 7: imagenet_train_transform
# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import RandomLighting [as alias]
def imagenet_train_transform(ds_metainfo,
                             jitter_param=0.4,
                             lighting_param=0.1):
    """
    Create image transform sequence for training subset.

    Parameters
    ----------
    ds_metainfo : DatasetMetaInfo
        ImageNet-1K dataset metainfo.
    jitter_param : float
        How much to jitter color values.
    lighting_param : float
        How much lighting noise to add to the image intensity.

    Returns
    -------
    Sequential
        Image transform sequence.
    """
    input_image_size = ds_metainfo.input_image_size
    if ds_metainfo.aug_type == "aug0":
        interpolation = ds_metainfo.interpolation
        transform_list = []
    elif ds_metainfo.aug_type == "aug1":
        interpolation = 10
        transform_list = []
    elif ds_metainfo.aug_type == "aug2":
        interpolation = 10
        transform_list = [
            ImgAugTransform()
        ]
    else:
        raise RuntimeError("Unknown augmentation type: {}\n".format(ds_metainfo.aug_type))
    transform_list += [
        transforms.RandomResizedCrop(
            size=input_image_size,
            interpolation=interpolation),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(
            brightness=jitter_param,
            contrast=jitter_param,
            saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=ds_metainfo.mean_rgb,
            std=ds_metainfo.std_rgb)
    ]
    return transforms.Compose(transform_list)
Example 8: generate_transform
# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import RandomLighting [as alias]
def generate_transform(train, resize, _is_osx, input_size, jitter_param):
    if _is_osx:
        # use PIL to load images (slower)
        if train:
            transform = Compose(
                [
                    RandomResizedCrop(input_size),
                    RandomHorizontalFlip(),
                    ColorJitter(0.4, 0.4, 0.4),
                    ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ]
            )
        else:
            transform = Compose(
                [
                    Resize(resize),
                    CenterCrop(input_size),
                    ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ]
            )
    else:
        if train:
            transform = transforms.Compose(
                [
                    transforms.RandomResizedCrop(input_size),
                    transforms.RandomFlipLeftRight(),
                    transforms.RandomColorJitter(
                        brightness=jitter_param,
                        contrast=jitter_param,
                        saturation=jitter_param
                    ),
                    transforms.RandomLighting(0.1),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ]
            )
        else:
            transform = transforms.Compose(
                [
                    transforms.Resize(resize),
                    transforms.CenterCrop(input_size),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ]
            )
    return transform