This article collects typical usage examples of the Python attribute nvidia.dali.types.NCHW. If you are wondering what types.NCHW is for or how to use it, the selected attribute code examples below may help. You can also explore the module this attribute belongs to, nvidia.dali.types.
The following presents 11 code examples of the types.NCHW attribute, sorted by popularity by default.
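Before the examples, here is a minimal, hedged sketch (the pipeline class name, data directory, and crop size are made up for illustration) of the role types.NCHW plays: it is the layout constant from nvidia.dali.types passed as output_layout to ops.CropMirrorNormalize, so the pipeline emits channel-first (C, H, W) image tensors. It uses the same legacy ops-based API as the examples below.

from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types

class MinimalPipe(Pipeline):
    def __init__(self, batch_size, num_threads, device_id, data_dir):
        super(MinimalPipe, self).__init__(batch_size, num_threads, device_id, seed=12)
        self.input = ops.FileReader(file_root=data_dir, random_shuffle=True)
        self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
        # types.NCHW requests channel-first (C, H, W) output tensors
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            output_dtype=types.FLOAT,
                                            output_layout=types.NCHW,
                                            crop=(224, 224),
                                            image_type=types.RGB,
                                            mean=[0., 0., 0.],
                                            std=[255., 255., 255.])

    def define_graph(self):
        jpegs, labels = self.input()
        images = self.decode(jpegs)
        output = self.cmnp(images)
        return [output, labels]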
Example 1: __init__
# Required import: from nvidia.dali import types [as alias]
# Or: from nvidia.dali.types import NCHW [as alias]
def __init__(self, batch_size, num_threads, device_id, rec_path, idx_path,
shard_id, num_shards, crop_shape,
nvjpeg_padding, prefetch_queue=3,
output_layout=types.NCHW, pad_output=True, dtype='float16'):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id, prefetch_queue_depth = prefetch_queue)
self.input = ops.MXNetReader(path = [rec_path], index_path=[idx_path],
random_shuffle=True, shard_id=shard_id, num_shards=num_shards)
self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB,
device_memory_padding = nvjpeg_padding,
host_memory_padding = nvjpeg_padding)
self.rrc = ops.RandomResizedCrop(device = "gpu", size = crop_shape)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT16 if dtype == 'float16' else types.FLOAT,
output_layout = output_layout,
crop = crop_shape,
pad_output = pad_output,
image_type = types.RGB,
mean = _mean_pixel,
std = _std_pixel)
self.coin = ops.CoinFlip(probability = 0.5)
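The snippet above shows only __init__. In the legacy ops API a pipeline also needs a define_graph method; the following is a hedged sketch (not part of the original code) of how the ops declared above are typically chained, assuming the reader is given the name "Reader".

def define_graph(self):
    rng = self.coin()                          # per-sample horizontal-flip flag
    jpegs, labels = self.input(name="Reader")  # MXNetReader yields (encoded image, label)
    images = self.decode(jpegs)
    images = self.rrc(images)
    output = self.cmnp(images, mirror=rng)     # tensors come out in output_layout (e.g. NCHW)
    return [output, labels]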
Example 2: __init__
# Required import: from nvidia.dali import types [as alias]
# Or: from nvidia.dali.types import NCHW [as alias]
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size):
super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root = data_dir,
shard_id = local_rank,
num_shards = world_size,
random_shuffle = False)
self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB)
self.res = ops.Resize(device = "gpu", resize_shorter = size)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (crop, crop),
image_type = types.RGB,
mean = [0.485 * 255,0.456 * 255,0.406 * 255],
std = [0.229 * 255,0.224 * 255,0.225 * 255])
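A hedged usage sketch for this validation pipeline (paths, batch size, and the sample count are illustrative assumptions, and it presumes define_graph returns an (images, labels) pair), wrapping it in the PyTorch plugin iterator:

from nvidia.dali.plugin.pytorch import DALIClassificationIterator

pipe = HybridValPipe(batch_size=64, num_threads=4, device_id=0,
                     data_dir="/data/imagenet/val", crop=224, size=256)
pipe.build()
val_loader = DALIClassificationIterator(pipe, size=50000)  # size: number of validation samples
for data in val_loader:
    images = data[0]["data"]    # GPU tensors in NCHW layout
    labels = data[0]["label"]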
Example 3: __init__
# Required import: from nvidia.dali import types [as alias]
# Or: from nvidia.dali.types import NCHW [as alias]
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, seed=12, local_rank=0, world_size=1,
spos_pre=False):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=seed + device_id)
color_space_type = types.BGR if spos_pre else types.RGB
self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size, random_shuffle=True)
self.decode = ops.ImageDecoder(device="mixed", output_type=color_space_type)
self.res = ops.RandomResizedCrop(device="gpu", size=crop,
interp_type=types.INTERP_LINEAR if spos_pre else types.INTERP_TRIANGULAR)
self.twist = ops.ColorTwist(device="gpu")
self.jitter_rng = ops.Uniform(range=[0.6, 1.4])
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
image_type=color_space_type,
mean=0. if spos_pre else [0.485 * 255, 0.456 * 255, 0.406 * 255],
std=1. if spos_pre else [0.229 * 255, 0.224 * 255, 0.225 * 255])
self.coin = ops.CoinFlip(probability=0.5)
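Again only __init__ is shown; the hedged define_graph fragment below (not in the original snippet) illustrates how the Uniform op usually feeds ColorTwist's per-sample jitter arguments while CoinFlip drives mirroring:

def define_graph(self):
    rng = self.coin()
    jpegs, labels = self.input(name="Reader")
    images = self.decode(jpegs)
    images = self.res(images)
    images = self.twist(images,
                        brightness=self.jitter_rng(),
                        contrast=self.jitter_rng(),
                        saturation=self.jitter_rng())
    output = self.cmnp(images, mirror=rng)
    return [output, labels]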
Example 4: __init__
# Required import: from nvidia.dali import types [as alias]
# Or: from nvidia.dali.types import NCHW [as alias]
def __init__(self, name, batch_size, num_workers, device_id, num_gpu,
root=os.path.expanduser('~/.mxnet/datasets/face')):
super().__init__(batch_size, num_workers, device_id, seed=12 + device_id)
idx_files = [os.path.join(root, name, "train.idx")]
rec_files = [os.path.join(root, name, "train.rec")]
prop = open(os.path.join(root, name, "property"), "r").read().strip().split(',')
assert len(prop) == 3
self.num_classes = int(prop[0])
self.image_size = [int(prop[1]), int(prop[2])]
self._input = ops.MXNetReader(path=rec_files, index_path=idx_files, random_shuffle=True,
num_shards=num_gpu, tensor_init_bytes=self.image_size[0] * self.image_size[1] * 8)
self._decode = ops.nvJPEGDecoder(device="mixed", output_type=types.RGB)
self._cmnp = ops.CropMirrorNormalize(device="gpu", output_dtype=types.FLOAT, output_layout=types.NCHW,
crop=self.image_size, image_type=types.RGB,
mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5])
self._contrast = ops.Contrast(device="gpu", )
self._saturation = ops.Saturation(device="gpu", )
self._brightness = ops.Brightness(device="gpu", )
self._uniform = ops.Uniform(range=(0.7, 1.3))
self._coin = ops.CoinFlip(probability=0.5)
self.iter = 0
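A hedged companion define_graph for this face-recognition reader (not part of the original snippet), showing one plausible way the color ops and the Uniform/CoinFlip random variables are wired up:

def define_graph(self):
    images, labels = self._input(name="Reader")
    images = self._decode(images)
    images = self._contrast(images, contrast=self._uniform())
    images = self._saturation(images, saturation=self._uniform())
    images = self._brightness(images, brightness=self._uniform())
    output = self._cmnp(images, mirror=self._coin())
    return [output, labels]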
Example 5: __init__
# Required import: from nvidia.dali import types [as alias]
# Or: from nvidia.dali.types import NCHW [as alias]
def __init__(self, batch_size, num_threads, shard_id, image_dir, file_list, nvjpeg_padding,
prefetch_queue=3, seed=1, num_shards=1, channel_last=True,
spatial_size=(224, 224), dtype="half",
mean=_pixel_mean, std=_pixel_std, pad_output=True):
super(TrainPipeline, self).__init__(
batch_size, num_threads, shard_id, seed=seed, prefetch_queue_depth=prefetch_queue)
self.input = ops.FileReader(file_root=image_dir, file_list=file_list,
random_shuffle=True, num_shards=num_shards, shard_id=shard_id)
self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB,
device_memory_padding=nvjpeg_padding,
host_memory_padding=nvjpeg_padding)
self.rrc = ops.RandomResizedCrop(device="gpu", size=spatial_size)
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT16 if dtype == "half" else types.FLOAT,
output_layout=types.NHWC if channel_last else types.NCHW,
crop=spatial_size,
image_type=types.RGB,
mean=mean,
std=std,
pad_output=pad_output)
self.coin = ops.CoinFlip(probability=0.5)
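For reference, a hedged note on the shapes implied by the layout switch in this example: with spatial_size=(224, 224) and pad_output=True, CropMirrorNormalize pads RGB to four channels, so the two layouts differ only in where the channel axis sits.

# Illustrative shape summary (not from the original code):
#   output_layout=types.NCHW, pad_output=True  -> (4, 224, 224)
#   output_layout=types.NHWC, pad_output=True  -> (224, 224, 4)
#   with pad_output=False the 3 RGB channels are kept: (3, 224, 224) / (224, 224, 3)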
Example 6: __init__
# Required import: from nvidia.dali import types [as alias]
# Or: from nvidia.dali.types import NCHW [as alias]
def __init__(self, batch_size, num_threads, device_id, rec_path, idx_path,
shard_id, num_shards, crop_shape,
nvjpeg_padding, prefetch_queue=3,
seed=12, resize_shp=None,
output_layout=types.NCHW, pad_output=True, dtype='float16',
mlperf_print=True):
super(HybridValPipe, self).__init__(
batch_size, num_threads, device_id,
seed = seed + device_id,
prefetch_queue_depth = prefetch_queue)
self.input = ops.MXNetReader(path = [rec_path], index_path=[idx_path],
random_shuffle=False, shard_id=shard_id, num_shards=num_shards)
self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB,
device_memory_padding = nvjpeg_padding,
host_memory_padding = nvjpeg_padding)
self.resize = ops.Resize(device = "gpu", resize_shorter=resize_shp) if resize_shp else None
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT16 if dtype == 'float16' else types.FLOAT,
output_layout = output_layout,
crop = crop_shape,
pad_output = pad_output,
image_type = types.RGB,
mean = _mean_pixel,
std = _std_pixel)
if mlperf_print:
mx_resnet_print(
key=mlperf_log.INPUT_MEAN_SUBTRACTION,
val=_mean_pixel)
mx_resnet_print(
key=mlperf_log.INPUT_RESIZE_ASPECT_PRESERVING)
mx_resnet_print(
key=mlperf_log.INPUT_CENTRAL_CROP)
Example 7: __init__
# Required import: from nvidia.dali import types [as alias]
# Or: from nvidia.dali.types import NCHW [as alias]
def __init__(self, batch_size, device_id, file_root, annotations_file, num_gpus,
output_fp16=False, output_nhwc=False, pad_output=False, num_threads=1, seed=15):
super(COCOPipeline, self).__init__(batch_size=batch_size, device_id=device_id,
num_threads=num_threads, seed = seed)
try:
shard_id = torch.distributed.get_rank()
except RuntimeError:
shard_id = 0
self.input = ops.COCOReader(file_root = file_root, annotations_file = annotations_file,
shard_id = shard_id, num_shards = num_gpus, ratio=True, ltrb=True, random_shuffle=True)
self.decode = ops.HostDecoder(device = "cpu", output_type = types.RGB)
# Augmentation techniques
self.crop = ops.SSDRandomCrop(device="cpu", num_attempts=1)
self.twist = ops.ColorTwist(device="gpu")
self.resize = ops.Resize(device = "gpu", resize_x = 300, resize_y = 300)
output_dtype = types.FLOAT16 if output_fp16 else types.FLOAT
output_layout = types.NHWC if output_nhwc else types.NCHW
self.normalize = ops.CropMirrorNormalize(device="gpu", crop=(300, 300),
mean=[0.0, 0.0, 0.0],
std=[255.0, 255.0, 255.0],
mirror=0,
output_dtype=output_dtype,
output_layout=output_layout,
pad_output=pad_output)
# Random variables
self.rng1 = ops.Uniform(range=[0.5, 1.5])
self.rng2 = ops.Uniform(range=[0.875, 1.125])
self.rng3 = ops.Uniform(range=[-0.5, 0.5])
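Only __init__ of the COCO pipeline is shown; the following hedged sketch (patterned after NVIDIA's public SSD example, not taken from the original snippet) shows how these ops are commonly chained in define_graph, with the three Uniform ops feeding ColorTwist:

def define_graph(self):
    saturation = self.rng1()
    contrast = self.rng1()
    brightness = self.rng2()
    hue = self.rng3()
    inputs, bboxes, labels = self.input()
    images = self.decode(inputs)
    images, bboxes, labels = self.crop(images, bboxes, labels)   # SSDRandomCrop on CPU
    images = self.resize(images.gpu())
    images = self.twist(images, saturation=saturation, contrast=contrast,
                        brightness=brightness, hue=hue)
    images = self.normalize(images)
    return (images, bboxes.gpu(), labels.gpu())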
Example 8: __init__
# Required import: from nvidia.dali import types [as alias]
# Or: from nvidia.dali.types import NCHW [as alias]
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False, local_rank=0, world_size=1):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
dali_device = "gpu"
self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size, random_shuffle=True)
self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self.res = ops.RandomResizedCrop(device="gpu", size=crop, random_area=[0.08, 1.25])
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
image_type=types.RGB,
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
self.coin = ops.CoinFlip(probability=0.5)
print('DALI "{0}" variant'.format(dali_device))
Example 9: __init__
# Required import: from nvidia.dali import types [as alias]
# Or: from nvidia.dali.types import NCHW [as alias]
def __init__(self, name, batch_size, num_threads, device_id, num_shards, shard_id,
root=os.path.expanduser('~/.mxnet/datasets/face'), ):
super().__init__(batch_size, num_threads, device_id, seed=12)
idx_files = [os.path.join(root, name, "train.idx")]
rec_files = [os.path.join(root, name, "train.rec")]
prop = open(os.path.join(root, name, "property"), "r").read().strip().split(',')
assert len(prop) == 3
self.num_classes = int(prop[0])
self.image_size = [int(prop[1]), int(prop[2])]
self.size = 0
for idx_file in idx_files:
with open(idx_file, "r") as f:
self.size += len(list(f.readlines()))
self._input = ops.MXNetReader(path=rec_files, index_path=idx_files, random_shuffle=True,
num_shards=num_shards, shard_id=shard_id, seed=12,
tensor_init_bytes=self.image_size[0] * self.image_size[1] * 8)
self._decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self._cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=self.image_size,
image_type=types.RGB,
mean=[0., 0., 0.],
std=[255., 255., 255.])
self._contrast = ops.Contrast(device="gpu")
self._saturation = ops.Saturation(device="gpu")
self._brightness = ops.Brightness(device="gpu")
self._uniform = ops.Uniform(range=(0.7, 1.3))
self._coin = ops.CoinFlip(probability=0.5)
Example 10: __init__
# Required import: from nvidia.dali import types [as alias]
# Or: from nvidia.dali.types import NCHW [as alias]
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size,
mean, std, local_rank=0, world_size=1, dali_cpu=False, shuffle=False, fp16=False):
# As we're recreating the Pipeline at every epoch, the seed must be -1 (random seed)
super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed=-1)
# Enabling read_ahead slowed down processing ~40%
# Note: initial_fill is for the shuffle buffer. As we only want to see every example once, this is set to 1
self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size, random_shuffle=shuffle, initial_fill=1)
if dali_cpu:
decode_device = "cpu"
self.dali_device = "cpu"
self.crop = ops.Crop(device="cpu", crop=(crop, crop))
else:
decode_device = "mixed"
self.dali_device = "gpu"
output_dtype = types.FLOAT
if fp16:
output_dtype = types.FLOAT16
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=output_dtype,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
mean=mean,
std=std)
self.decode = ops.ImageDecoder(device=decode_device, output_type=types.RGB)
# Resize to desired size. To match torchvision dataloader, use triangular interpolation
self.res = ops.Resize(device=self.dali_device, resize_shorter=size, interp_type=types.INTERP_TRIANGULAR)
Example 11: get_rec_iter
# Required import: from nvidia.dali import types [as alias]
# Or: from nvidia.dali.types import NCHW [as alias]
def get_rec_iter(args, kv=None):
# resize is the target length of the shorter edge for the dataset;
# all images will be resized so that their shorter edge matches this length
resize = int(args.resize)
# target_shape is the final shape of the images fed to the network;
# all images will be cropped to this size
target_shape = tuple([int(l) for l in args.image_shape.split(',')])
pad_output = target_shape[0] == 4
gpus = list(map(int, filter(None, args.gpus.split(',')))) # drop any empty strings
batch_size = args.batch_size//len(gpus)
num_threads = args.dali_threads
#db_folder = "/data/imagenet/train-480-val-256-recordio/"
# the input_layout w.r.t. the model is the output_layout of the image pipeline
output_layout = types.NHWC if args.input_layout == 'NHWC' else types.NCHW
rank = kv.rank if kv else 0
nWrk = kv.num_workers if kv else 1
trainpipes = [HybridTrainPipe(batch_size = batch_size,
num_threads = num_threads,
device_id = gpu_id,
rec_path = args.data_train,
idx_path = args.data_train_idx,
shard_id = gpus.index(gpu_id) + len(gpus)*rank,
num_shards = len(gpus)*nWrk,
crop_shape = target_shape[1:],
output_layout = output_layout,
pad_output = pad_output,
dtype = args.dtype,
nvjpeg_padding = args.dali_nvjpeg_memory_padding * 1024 * 1024,
prefetch_queue = args.dali_prefetch_queue) for gpu_id in gpus]
valpipes = [HybridValPipe(batch_size = batch_size,
num_threads = num_threads,
device_id = gpu_id,
rec_path = args.data_val,
idx_path = args.data_val_idx,
shard_id = 0 if args.separ_val
else gpus.index(gpu_id) + len(gpus)*rank,
num_shards = 1 if args.separ_val else len(gpus)*nWrk,
crop_shape = target_shape[1:],
resize_shp = resize,
output_layout = output_layout,
pad_output = pad_output,
dtype = args.dtype,
nvjpeg_padding = args.dali_nvjpeg_memory_padding * 1024 * 1024,
prefetch_queue = args.dali_prefetch_queue) for gpu_id in gpus] if args.data_val else None
trainpipes[0].build()
if args.data_val:
valpipes[0].build()
if args.num_examples < trainpipes[0].epoch_size("Reader"):
warnings.warn("{} training examples will be used, although full training set contains {} examples".format(args.num_examples, trainpipes[0].epoch_size("Reader")))
dali_train_iter = DALIClassificationIterator(trainpipes, args.num_examples // nWrk)
dali_val_iter = DALIClassificationIterator(valpipes, valpipes[0].epoch_size("Reader") // (1 if args.separ_val else nWrk), fill_last_batch = False) if args.data_val else None
return dali_train_iter, dali_val_iter
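A hedged sketch of consuming the iterators returned by get_rec_iter (the args object and epoch count are assumptions); each step of the MXNet plugin iterator yields one DataBatch per GPU pipeline:

train_iter, val_iter = get_rec_iter(args, kv)
for epoch in range(args.num_epochs):
    for batches in train_iter:                   # one DataBatch per GPU pipeline
        data = [b.data[0] for b in batches]      # images, laid out per output_layout (NCHW/NHWC)
        label = [b.label[0] for b in batches]
        # forward/backward pass with the model would go here
    train_iter.reset()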