本文整理匯總了Python中nvidia.dali.ops.ImageDecoder方法的典型用法代碼示例。如果您正苦於以下問題:Python ops.ImageDecoder方法的具體用法?Python ops.ImageDecoder怎麽用?Python ops.ImageDecoder使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類nvidia.dali.ops
的用法示例。
在下文中一共展示了ops.ImageDecoder方法的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。
示例1: __init__
# 需要導入模塊: from nvidia.dali import ops [as 別名]
# 或者: from nvidia.dali.ops import ImageDecoder [as 別名]
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, seed=12, local_rank=0, world_size=1,
             spos_pre=False):
    """Build a DALI training pipeline: sharded file reading, mixed-device
    decode, random resized crop, color jitter, and crop/mirror/normalize.

    When ``spos_pre`` is True the SPOS preprocessing variant is selected:
    BGR color space, linear interpolation, and raw 0-255 pixel values
    (mean=0, std=1) instead of ImageNet normalization.
    """
    super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=seed + device_id)
    # Resolve the SPOS-dependent knobs once up front.
    cspace = types.BGR if spos_pre else types.RGB
    interp = types.INTERP_LINEAR if spos_pre else types.INTERP_TRIANGULAR
    self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank,
                                num_shards=world_size, random_shuffle=True)
    self.decode = ops.ImageDecoder(device="mixed", output_type=cspace)
    self.res = ops.RandomResizedCrop(device="gpu", size=crop, interp_type=interp)
    self.twist = ops.ColorTwist(device="gpu")
    # Multiplicative jitter factor fed to ColorTwist at run time.
    self.jitter_rng = ops.Uniform(range=[0.6, 1.4])
    # Coin flip drives random horizontal mirroring in CropMirrorNormalize.
    self.coin = ops.CoinFlip(probability=0.5)
    # ImageNet mean/std scaled to 0-255; SPOS keeps raw pixel values.
    self.cmnp = ops.CropMirrorNormalize(
        device="gpu",
        output_dtype=types.FLOAT,
        output_layout=types.NCHW,
        image_type=cspace,
        mean=0. if spos_pre else [0.485 * 255, 0.456 * 255, 0.406 * 255],
        std=1. if spos_pre else [0.229 * 255, 0.224 * 255, 0.225 * 255])
示例2: __init__
# 需要導入模塊: from nvidia.dali import ops [as 別名]
# 或者: from nvidia.dali.ops import ImageDecoder [as 別名]
def __init__(self, batch_size, num_threads, shard_id, image_dir, file_list, nvjpeg_padding,
             prefetch_queue=3, seed=1, num_shards=1, channel_last=True,
             spatial_size=(224, 224), dtype="half",
             mean=_pixel_mean, std=_pixel_std, pad_output=True):
    """Training pipeline: sharded file-list reading, mixed decode with
    nvJPEG buffer padding, random resized crop, and crop/mirror/normalize.
    """
    super(TrainPipeline, self).__init__(
        batch_size, num_threads, shard_id, seed=seed, prefetch_queue_depth=prefetch_queue)
    # Derive output precision and layout once.
    out_dtype = types.FLOAT16 if dtype == "half" else types.FLOAT
    out_layout = types.NHWC if channel_last else types.NCHW
    self.input = ops.FileReader(file_root=image_dir, file_list=file_list,
                                random_shuffle=True, num_shards=num_shards, shard_id=shard_id)
    # Padding pre-grows nvJPEG scratch buffers so they are not reallocated
    # when an unusually large image arrives.
    self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB,
                                   device_memory_padding=nvjpeg_padding,
                                   host_memory_padding=nvjpeg_padding)
    self.rrc = ops.RandomResizedCrop(device="gpu", size=spatial_size)
    # Coin flip drives random horizontal mirroring.
    self.coin = ops.CoinFlip(probability=0.5)
    self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                        output_dtype=out_dtype,
                                        output_layout=out_layout,
                                        crop=spatial_size,
                                        image_type=types.RGB,
                                        mean=mean,
                                        std=std,
                                        pad_output=pad_output)
示例3: __init__
# 需要導入模塊: from nvidia.dali import ops [as 別名]
# 或者: from nvidia.dali.ops import ImageDecoder [as 別名]
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False, local_rank=0, world_size=1):
    """ImageNet-style training pipeline: sharded reading, mixed decode,
    random resized crop, and GPU crop/mirror/normalize.

    NOTE(review): ``dali_cpu`` is accepted but never used here — the
    pipeline always runs the GPU variant; confirm against callers.
    """
    super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
    dali_device = "gpu"
    self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank,
                                num_shards=world_size, random_shuffle=True)
    self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
    self.res = ops.RandomResizedCrop(device="gpu", size=crop, random_area=[0.08, 1.25])
    # Coin flip drives random horizontal mirroring.
    self.coin = ops.CoinFlip(probability=0.5)
    # ImageNet mean/std scaled to the 0-255 pixel range.
    self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                        output_dtype=types.FLOAT,
                                        output_layout=types.NCHW,
                                        image_type=types.RGB,
                                        mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                        std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
    print('DALI "{0}" variant'.format(dali_device))
示例4: __init__
# 需要導入模塊: from nvidia.dali import ops [as 別名]
# 或者: from nvidia.dali.ops import ImageDecoder [as 別名]
def __init__(self, name, batch_size, num_threads, device_id, num_shards, shard_id,
             root=os.path.expanduser('~/.mxnet/datasets/face'), ):
    """Face-recognition training pipeline backed by an MXNet RecordIO dataset.

    Reads ``train.idx``/``train.rec`` under ``root/name``, parses the
    ``property`` file (``num_classes,height,width``), and builds the DALI
    decode / normalize / augmentation operators.
    """
    super().__init__(batch_size, num_threads, device_id, seed=12)
    idx_files = [os.path.join(root, name, "train.idx")]
    rec_files = [os.path.join(root, name, "train.rec")]
    # Fix: use a context manager so the property-file handle is closed
    # deterministically (the original `open(...).read()` leaked it).
    with open(os.path.join(root, name, "property"), "r") as prop_file:
        prop = prop_file.read().strip().split(',')
    assert len(prop) == 3
    self.num_classes = int(prop[0])
    self.image_size = [int(prop[1]), int(prop[2])]
    # Dataset size = total number of index lines (one line per record);
    # count lazily instead of materializing every line in memory.
    self.size = 0
    for idx_file in idx_files:
        with open(idx_file, "r") as f:
            self.size += sum(1 for _ in f)
    self._input = ops.MXNetReader(path=rec_files, index_path=idx_files, random_shuffle=True,
                                  num_shards=num_shards, shard_id=shard_id, seed=12,
                                  tensor_init_bytes=self.image_size[0] * self.image_size[1] * 8)
    self._decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
    # Normalize raw pixels to [0, 1] (mean 0, std 255) in NCHW layout.
    self._cmnp = ops.CropMirrorNormalize(device="gpu",
                                         output_dtype=types.FLOAT,
                                         output_layout=types.NCHW,
                                         crop=self.image_size,
                                         image_type=types.RGB,
                                         mean=[0., 0., 0.],
                                         std=[255., 255., 255.])
    # Photometric augmentations, each scaled by a shared uniform factor.
    self._contrast = ops.Contrast(device="gpu")
    self._saturation = ops.Saturation(device="gpu")
    self._brightness = ops.Brightness(device="gpu")
    self._uniform = ops.Uniform(range=(0.7, 1.3))
    self._coin = ops.CoinFlip(probability=0.5)
示例5: __init__
# 需要導入模塊: from nvidia.dali import ops [as 別名]
# 或者: from nvidia.dali.ops import ImageDecoder [as 別名]
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size,
             mean, std, local_rank=0, world_size=1, dali_cpu=False, shuffle=False, fp16=False):
    """Validation pipeline: read, decode, resize-shorter, then
    crop/mirror/normalize on GPU (optionally in FP16).
    """
    # seed=-1 means a random seed; the pipeline is recreated every epoch.
    super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed=-1)
    # initial_fill=1: shuffle buffer of one entry so every example is seen
    # exactly once. (Enabling read_ahead slowed processing ~40%.)
    self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank,
                                num_shards=world_size, random_shuffle=shuffle, initial_fill=1)
    if dali_cpu:
        decode_device = "cpu"
        self.dali_device = "cpu"
        self.crop = ops.Crop(device="cpu", crop=(crop, crop))
    else:
        decode_device = "mixed"
        self.dali_device = "gpu"
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            output_dtype=types.FLOAT16 if fp16 else types.FLOAT,
                                            output_layout=types.NCHW,
                                            crop=(crop, crop),
                                            image_type=types.RGB,
                                            mean=mean,
                                            std=std)
    self.decode = ops.ImageDecoder(device=decode_device, output_type=types.RGB)
    # Triangular interpolation matches the torchvision dataloader's resize.
    self.res = ops.Resize(device=self.dali_device, resize_shorter=size, interp_type=types.INTERP_TRIANGULAR)
示例6: __new__
# 需要導入模塊: from nvidia.dali import ops [as 別名]
# 或者: from nvidia.dali.ops import ImageDecoder [as 別名]
def __new__(
    cls,
    output_type='BGR',
    host_memory_padding=8388608,
    device_memory_padding=16777216,
):
    """Create a ``nvidia.dali.ops.ImageDecoder`` operator.

    Parameters
    ----------
    output_type : {'BGR', 'RGB'}, optional
        The output color space; a string is resolved against ``types``.
    host_memory_padding : int, optional, default=8388608
        Bytes to pre-allocate for the host buffer.
    device_memory_padding : int, optional, default=16777216
        Bytes to pre-allocate for the device buffer.

    Returns
    -------
    nvidia.dali.ops.ImageDecoder
        The operator.
    """
    # Resolve a color-space name like 'BGR' into the DALI types constant.
    color_space = output_type
    if isinstance(color_space, six.string_types):
        color_space = getattr(types, color_space)
    return ops.ImageDecoder(
        device=context.get_device_type(mixed=True),
        output_type=color_space,
        host_memory_padding=host_memory_padding,
        device_memory_padding=device_memory_padding,
    )
示例7: __init__
# 需要導入模塊: from nvidia.dali import ops [as 別名]
# 或者: from nvidia.dali.ops import ImageDecoder [as 別名]
def __init__(self, batch_size, num_threads, path, training, annotations, world, device_id, mean, std, resize,
             max_size, stride, rotate_augment=False,
             augment_brightness=0.0,
             augment_contrast=0.0, augment_hue=0.0,
             augment_saturation=0.0):
    """COCO detection pipeline: sharded COCO reading, mixed decode,
    random bbox crop + flip for training, cubic resize, pad to a
    stride-aligned canvas, and crop/mirror/normalize.
    """
    import warnings

    super().__init__(batch_size=batch_size, num_threads=num_threads, device_id=device_id,
                     prefetch_queue_depth=num_threads, seed=42)
    self.path = path
    self.training = training
    self.stride = stride
    self.iter = 0
    self.rotate_augment = rotate_augment
    self.augment_brightness = augment_brightness
    self.augment_contrast = augment_contrast
    self.augment_hue = augment_hue
    self.augment_saturation = augment_saturation
    # Fix: the original did `raise RuntimeWarning(...)`, which aborts
    # construction even though the message describes a harmless no-op.
    # Emit a real (non-fatal) warning instead.
    if rotate_augment:
        warnings.warn("--augment-rotate current has no effect when using the DALI data loader.",
                      RuntimeWarning)
    self.reader = ops.COCOReader(annotations_file=annotations, file_root=path, num_shards=world,
                                 shard_id=torch.cuda.current_device(),
                                 ltrb=True, ratio=True, shuffle_after_epoch=True, save_img_ids=True)
    # Training decodes only the cropped slice; inference decodes whole images.
    self.decode_train = ops.ImageDecoderSlice(device="mixed", output_type=types.RGB)
    self.decode_infer = ops.ImageDecoder(device="mixed", output_type=types.RGB)
    self.bbox_crop = ops.RandomBBoxCrop(device='cpu', bbox_layout="xyXY", scaling=[0.3, 1.0],
                                        thresholds=[0.1, 0.3, 0.5, 0.7, 0.9])
    self.bbox_flip = ops.BbFlip(device='cpu', ltrb=True)
    self.img_flip = ops.Flip(device='gpu')
    self.coin_flip = ops.CoinFlip(probability=0.5)
    self.bc = ops.BrightnessContrast(device='gpu')
    self.hsv = ops.Hsv(device='gpu')
    # Random number generation for augmentation.
    self.brightness_dist = ops.NormalDistribution(mean=1.0, stddev=augment_brightness)
    self.contrast_dist = ops.NormalDistribution(mean=1.0, stddev=augment_contrast)
    self.hue_dist = ops.NormalDistribution(mean=0.0, stddev=augment_hue)
    self.saturation_dist = ops.NormalDistribution(mean=1.0, stddev=augment_saturation)
    if isinstance(resize, list):
        resize = max(resize)
    self.rand_resize = ops.Uniform(range=[resize, float(max_size)])
    self.resize_train = ops.Resize(device='gpu', interp_type=types.DALIInterpType.INTERP_CUBIC, save_attrs=True)
    self.resize_infer = ops.Resize(device='gpu', interp_type=types.DALIInterpType.INTERP_CUBIC,
                                   resize_longer=max_size, save_attrs=True)
    # Round max_size up to the next multiple of stride so feature maps align.
    padded_size = max_size + ((self.stride - max_size % self.stride) % self.stride)
    self.pad = ops.Paste(device='gpu', fill_value=0, ratio=1.1, min_canvas_size=padded_size, paste_x=0, paste_y=0)
    self.normalize = ops.CropMirrorNormalize(device='gpu', mean=mean, std=std, crop=(padded_size, padded_size),
                                             crop_pos_x=0, crop_pos_y=0)