This article collects typical usage examples of the Python method mxnet.gluon.data.vision.transforms.Normalize. If you are unsure what transforms.Normalize does, how to call it, or what it looks like in real code, the curated samples below may help. You can also explore the module that defines it, mxnet.gluon.data.vision.transforms, for more details.
The following presents 15 code examples of transforms.Normalize, sorted by popularity by default. Upvoting the examples you like or find useful helps the site recommend better Python samples.
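Before diving into the collected examples, here is a minimal, self-contained sketch of the typical ToTensor + Normalize pipeline. The image below is random dummy data, and the mean/std values are the commonly used ImageNet statistics, not something required by the API:

import mxnet as mx
from mxnet.gluon.data.vision import transforms

# Dummy HWC uint8 image standing in for a real photo.
img = mx.nd.random.uniform(0, 255, shape=(300, 300, 3)).astype('uint8')

# ToTensor converts HWC uint8 in [0, 255] to CHW float32 in [0, 1];
# Normalize then subtracts the per-channel mean and divides by the per-channel std.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
])

out = transform(img)
print(out.shape)  # (3, 300, 300)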
Example 1: test_transformer

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def test_transformer():
    from mxnet.gluon.data.vision import transforms

    transform = transforms.Compose([
        transforms.Resize(300),
        transforms.Resize(300, keep_ratio=True),
        transforms.CenterCrop(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(0.1, 0.1, 0.1, 0.1),
        transforms.RandomBrightness(0.1),
        transforms.RandomContrast(0.1),
        transforms.RandomSaturation(0.1),
        transforms.RandomHue(0.1),
        transforms.RandomLighting(0.1),
        transforms.ToTensor(),
        transforms.Normalize([0, 0, 0], [1, 1, 1])])

    transform(mx.nd.ones((245, 480, 3), dtype='uint8')).wait_to_read()
Example 2: crop_resize_normalize

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def crop_resize_normalize(img, bbox_list, output_size,
                          mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    output_list = []
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    for bbox in bbox_list:
        x0 = max(int(bbox[0]), 0)
        y0 = max(int(bbox[1]), 0)
        x1 = min(int(bbox[2]), int(img.shape[1]))
        y1 = min(int(bbox[3]), int(img.shape[0]))
        w = x1 - x0
        h = y1 - y0
        res_img = image.fixed_crop(nd.array(img), x0, y0, w, h, (output_size[1], output_size[0]))
        res_img = transform_test(res_img)
        output_list.append(res_img)
    output_array = nd.stack(*output_list)
    return output_array
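As a hedged usage sketch of crop_resize_normalize above (the image, boxes, and output size are made-up values; output_size is assumed to be (height, width), matching how fixed_crop is called):

import mxnet as mx

# Hypothetical HWC uint8 image and two (x0, y0, x1, y1) boxes.
img = mx.nd.random.uniform(0, 255, shape=(480, 640, 3)).astype('uint8')
boxes = [(10, 20, 210, 420), (300, 40, 500, 460)]

# Crop each box, resize to 256 x 192 (H x W), then apply ToTensor + Normalize.
patches = crop_resize_normalize(img, boxes, output_size=(256, 192))
print(patches.shape)  # (2, 3, 256, 192)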
Example 3: create_transformer

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def create_transformer(self):
    train_tforms, eval_tforms = [transforms.Resize(self.args.resize)], [transforms.Resize(self.args.resize)]
    if self.args.random_crop:
        train_tforms.append(transforms.RandomResizedCrop(self.args.size, scale=(0.8, 1.2)))
    else:
        train_tforms.append(transforms.CenterCrop(self.args.size))
    eval_tforms.append(transforms.CenterCrop(self.args.size))
    if self.args.flip:
        train_tforms.append(transforms.RandomFlipLeftRight())
    if self.args.random_color:
        train_tforms.append(transforms.RandomColorJitter(self.args.color_jitter, self.args.color_jitter,
                                                         self.args.color_jitter, 0.1))
    train_tforms.extend([transforms.ToTensor(), transforms.Normalize(self.args.mean, self.args.std)])
    eval_tforms.extend([transforms.ToTensor(), transforms.Normalize(self.args.mean, self.args.std)])
    train_tforms = transforms.Compose(train_tforms)
    eval_tforms = transforms.Compose(eval_tforms)
    return train_tforms, eval_tforms
Example 4: cifar10_train_transform

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def cifar10_train_transform(ds_metainfo,
                            mean_rgb=(0.4914, 0.4822, 0.4465),
                            std_rgb=(0.2023, 0.1994, 0.2010),
                            jitter_param=0.4,
                            lighting_param=0.1):
    assert (ds_metainfo is not None)
    assert (ds_metainfo.input_image_size[0] == 32)
    return transforms.Compose([
        RandomCrop(
            size=32,
            pad=4),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(
            brightness=jitter_param,
            contrast=jitter_param,
            saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=mean_rgb,
            std=std_rgb)
    ])
Example 5: crop_resize_normalize

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def crop_resize_normalize(img, bbox_list, output_size):
    output_list = []
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    for bbox in bbox_list:
        x0 = max(int(bbox[0]), 0)
        y0 = max(int(bbox[1]), 0)
        x1 = min(int(bbox[2]), int(img.shape[1]))
        y1 = min(int(bbox[3]), int(img.shape[0]))
        w = x1 - x0
        h = y1 - y0
        res_img = image.fixed_crop(nd.array(img), x0, y0, w, h, (output_size[1], output_size[0]))
        res_img = transform_test(res_img)
        output_list.append(res_img)
    output_array = nd.stack(*output_list)
    return output_array
Example 6: get_data_loader

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def get_data_loader(data_dir, batch_size, num_workers):
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        return data, label

    if opt.mode == 'symbolic':
        # Symbolic mode benchmarks on synthetic data instead of reading images from disk.
        val_data = mx.io.NDArrayIter(
            mx.nd.random.normal(shape=(opt.dataset_size, 3, 224, 224)),
            label=mx.nd.array(range(opt.dataset_size)),
            batch_size=batch_size,
        )
    else:
        transform_test = transforms.Compose([
            transforms.Resize(256, keep_ratio=True),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize
        ])
        val_data = gluon.data.DataLoader(
            imagenet.classification.ImageNet(data_dir, train=False).transform_first(transform_test),
            batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return val_data, batch_fn
Example 7: cifar_evaluate

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def cifar_evaluate(net, args):
    batch_size = args.batch_size
    batch_size *= max(1, args.num_gpus)
    ctx = [mx.gpu(i) for i in range(args.num_gpus)] if args.num_gpus > 0 else [mx.cpu()]
    net.collect_params().reset_ctx(ctx)
    metric = mx.metric.Accuracy()
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
    ])
    val_data = gluon.data.DataLoader(
        gluon.data.vision.CIFAR10(train=False).transform_first(transform_test),
        batch_size=batch_size, shuffle=False, num_workers=args.num_workers)
    for i, batch in enumerate(val_data):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        outputs = [net(X) for X in data]
        metric.update(label, outputs)
    return metric.get()[1]
Example 8: test_transformer

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def test_transformer():
    from mxnet.gluon.data.vision import transforms

    transform = transforms.Compose([
        transforms.Resize(300),
        transforms.CenterCrop(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(0.1, 0.1, 0.1, 0.1),
        transforms.RandomBrightness(0.1),
        transforms.RandomContrast(0.1),
        transforms.RandomSaturation(0.1),
        transforms.RandomHue(0.1),
        transforms.RandomLighting(0.1),
        transforms.ToTensor(),
        transforms.Normalize([0, 0, 0], [1, 1, 1])])

    transform(mx.nd.ones((245, 480, 3), dtype='uint8')).wait_to_read()
Example 9: test_normalize

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def test_normalize():
    data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8)
    data_in = transforms.ToTensor()(nd.array(data_in, dtype='uint8'))
    out_nd = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))(data_in)
    # Expected result: per-channel (x - mean) / std on the CHW tensor.
    data_expected = data_in.asnumpy()
    data_expected[:][:][0] = data_expected[:][:][0] / 3.0
    data_expected[:][:][1] = (data_expected[:][:][1] - 1.0) / 2.0
    data_expected[:][:][2] = data_expected[:][:][2] - 2.0
    assert_almost_equal(data_expected, out_nd.asnumpy())
Example 10: get_data

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def get_data(batch_size, test_set, query_set):
    normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    transform_test = transforms.Compose([
        transforms.Resize(size=(128, 384), interpolation=1),
        transforms.ToTensor(),
        normalizer])

    test_imgs = ImageTxtDataset(test_set, transform=transform_test)
    query_imgs = ImageTxtDataset(query_set, transform=transform_test)

    test_data = gluon.data.DataLoader(test_imgs, batch_size, shuffle=False, last_batch='keep', num_workers=8)
    query_data = gluon.data.DataLoader(query_imgs, batch_size, shuffle=False, last_batch='keep', num_workers=8)
    return test_data, query_data
Example 11: get_data_iters

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def get_data_iters(batch_size):
    train_set, val_set = LabelList(ratio=opt.ratio, root=opt.dataset_root, name=opt.dataset)

    normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    transform_train = transforms.Compose([
        transforms.Resize(size=(opt.img_width, opt.img_height), interpolation=1),
        transforms.RandomFlipLeftRight(),
        RandomCrop(size=(opt.img_width, opt.img_height), pad=opt.pad),
        transforms.ToTensor(),
        normalizer])

    train_imgs = ImageTxtDataset(train_set, transform=transform_train)
    train_data = gluon.data.DataLoader(train_imgs, batch_size, shuffle=True, last_batch='discard', num_workers=opt.num_workers)

    if opt.ratio < 1:
        transform_test = transforms.Compose([
            transforms.Resize(size=(opt.img_width, opt.img_height), interpolation=1),
            transforms.ToTensor(),
            normalizer])

        val_imgs = ImageTxtDataset(val_set, transform=transform_test)
        val_data = gluon.data.DataLoader(val_imgs, batch_size, shuffle=True, last_batch='discard', num_workers=opt.num_workers)
    else:
        val_data = None

    return train_data, val_data
Example 12: get_val_data

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def get_val_data(rec_val, batch_size, data_nthreads, input_size, crop_ratio):
    def val_batch_fn(batch, ctx):
        data = batch[0].as_in_context(ctx)
        label = batch[1].as_in_context(ctx)
        return data, label

    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    crop_ratio = crop_ratio if crop_ratio > 0 else 0.875
    resize = int(math.ceil(input_size / crop_ratio))

    from gluoncv.utils.transforms import EfficientNetCenterCrop
    from autogluon.utils import pil_transforms

    if input_size >= 320:
        transform_test = transforms.Compose([
            pil_transforms.ToPIL(),
            EfficientNetCenterCrop(input_size),
            pil_transforms.Resize((input_size, input_size), interpolation=Image.BICUBIC),
            pil_transforms.ToNDArray(),
            transforms.ToTensor(),
            normalize
        ])
    else:
        transform_test = transforms.Compose([
            transforms.Resize(resize, keep_ratio=True),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            normalize
        ])

    val_set = mx.gluon.data.vision.ImageRecordDataset(rec_val).transform_first(transform_test)
    val_sampler = SplitSampler(len(val_set), num_parts=num_workers, part_index=rank)
    val_data = gluon.data.DataLoader(val_set, batch_size=batch_size,
                                     num_workers=data_nthreads,
                                     sampler=val_sampler)

    return val_data, val_batch_fn

# Horovod: pin GPU to local rank
Example 13: transform_eval

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def transform_eval(imgs, resize_short=256, crop_size=224,
                   mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    """A util function to transform all images to tensors as network input by applying
    normalizations. This function supports 1 NDArray or an iterable of NDArrays.

    Parameters
    ----------
    imgs : NDArray or iterable of NDArray
        Image(s) to be transformed.
    resize_short : int, default=256
        Resize image short side to this value and keep aspect ratio.
    crop_size : int, default=224
        After resize, crop the center square of size `crop_size`.
    mean : iterable of float
        Mean pixel values.
    std : iterable of float
        Standard deviations of pixel values.

    Returns
    -------
    mxnet.NDArray or list of such tuple
        A (1, 3, H, W) mxnet NDArray as input to network.
        If multiple image names are supplied, return a list.
    """
    if isinstance(imgs, mx.nd.NDArray):
        imgs = [imgs]
    for im in imgs:
        assert isinstance(im, mx.nd.NDArray), "Expect NDArray, got {}".format(type(im))

    transform_fn = transforms.Compose([
        transforms.Resize(resize_short, keep_ratio=True),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    res = [transform_fn(img).expand_dims(0) for img in imgs]
    if len(res) == 1:
        return res[0]
    return res
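A hedged usage sketch for transform_eval above; the file name is a placeholder, and mx.image.imread is used only to obtain an HWC uint8 NDArray:

import mxnet as mx

# Hypothetical input image path.
img = mx.image.imread('street_scene.jpg')

# Defaults: short side resized to 256, center crop of 224, ImageNet normalization.
x = transform_eval(img)
print(x.shape)  # (1, 3, 224, 224)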
Example 14: create_loader

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def create_loader(self):
    """
    Overwrite the data loader function
    :return: pairwise data loader, None, eval source loader, test target loader
    """
    cpus = cpu_count()
    train_tforms, eval_tforms = [transforms.Resize(self.args.resize)], [transforms.Resize(self.args.resize)]
    if self.args.random_crop:
        train_tforms.append(transforms.RandomResizedCrop(self.args.size, scale=(0.8, 1.2)))
    else:
        train_tforms.append(transforms.CenterCrop(self.args.size))
    eval_tforms.append(transforms.CenterCrop(self.args.size))
    if self.args.flip:
        train_tforms.append(transforms.RandomFlipLeftRight())
    if self.args.random_color:
        train_tforms.append(transforms.RandomColorJitter(self.args.color_jitter, self.args.color_jitter,
                                                         self.args.color_jitter, 0.1))
    train_tforms.extend([transforms.ToTensor(), transforms.Normalize(self.args.mean, self.args.std)])
    eval_tforms.extend([transforms.ToTensor(), transforms.Normalize(self.args.mean, self.args.std)])
    train_tforms = transforms.Compose(train_tforms)
    eval_tforms = transforms.Compose(eval_tforms)

    if 'digits' in self.args.cfg:
        trs_set, tes_set, tet_set = self.create_digits_datasets(train_tforms, eval_tforms)
    elif 'office' in self.args.cfg:
        trs_set, tes_set, tet_set = self.create_office_datasets(train_tforms, eval_tforms)
    elif 'visda' in self.args.cfg:
        trs_set, tes_set, tet_set = self.create_visda_datasets(train_tforms, eval_tforms)
    else:
        raise NotImplementedError

    self.train_src_loader = DataLoader(trs_set, self.args.bs, shuffle=True, num_workers=cpus)
    self.test_src_loader = DataLoader(tes_set, self.args.bs, shuffle=False, num_workers=cpus)
    self.test_tgt_loader = DataLoader(tet_set, self.args.bs, shuffle=False, num_workers=cpus)
Example 15: get_test_data_source

# Required import: from mxnet.gluon.data.vision import transforms [as alias]
# Or: from mxnet.gluon.data.vision.transforms import Normalize [as alias]
def get_test_data_source(dataset_name,
                         dataset_dir,
                         batch_size,
                         num_workers):
    mean_rgb = (0.485, 0.456, 0.406)
    std_rgb = (0.229, 0.224, 0.225)
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=mean_rgb,
            std=std_rgb)
    ])

    if dataset_name == "VOC":
        dataset_class = VOCSegDataset
    elif dataset_name == "ADE20K":
        dataset_class = ADE20KSegDataset
    elif dataset_name == "Cityscapes":
        dataset_class = CityscapesSegDataset
    elif dataset_name == "COCO":
        dataset_class = CocoSegDataset
    else:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))

    dataset = dataset_class(
        root=dataset_dir,
        mode="test",
        transform=transform_val)
    return gluon.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers)