This article collects typical usage examples of the Python method nvidia.dali.ops.CoinFlip. If you are wondering what ops.CoinFlip does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from the nvidia.dali.ops module.
The following presents 12 code examples of ops.CoinFlip, sorted by popularity by default.
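Most of the examples below follow the same pattern: ops.CoinFlip is created in the pipeline's __init__ and then called inside define_graph to produce a per-sample 0/1 value, which is usually fed to the mirror argument of CropMirrorNormalize (or to an ops.Flip operator) to randomly mirror images. As a point of reference, here is a minimal, self-contained sketch of that pattern written against the same legacy ops API used throughout this page (newer DALI releases favor nvidia.dali.fn, but the idea is the same); the class name, data path, crop size and normalization constants are illustrative assumptions, not taken from any specific example below.

from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types


class MinimalFlipPipe(Pipeline):
    def __init__(self, batch_size, num_threads, device_id, data_dir):
        super(MinimalFlipPipe, self).__init__(batch_size, num_threads, device_id, seed=12)
        self.input = ops.FileReader(file_root=data_dir, random_shuffle=True)
        self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
        self.rrc = ops.RandomResizedCrop(device="gpu", size=(224, 224))
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            output_dtype=types.FLOAT,
                                            output_layout=types.NCHW,
                                            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                            std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
        self.coin = ops.CoinFlip(probability=0.5)  # emits 1 with probability 0.5, else 0

    def define_graph(self):
        jpegs, labels = self.input(name="Reader")
        images = self.decode(jpegs)
        images = self.rrc(images)
        # The per-sample coin flip drives the `mirror` argument, so roughly
        # half of the images in each batch are flipped horizontally.
        output = self.cmnp(images, mirror=self.coin())
        return [output, labels]


# Typical usage (paths and sizes are placeholders):
#   pipe = MinimalFlipPipe(batch_size=32, num_threads=2, device_id=0, data_dir="/data/train")
#   pipe.build()
#   images, labels = pipe.run()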
Example 1: __init__
# Required import: from nvidia.dali import ops [as alias]
# Or: from nvidia.dali.ops import CoinFlip [as alias]
def __init__(self, batch_size, num_threads, device_id, rec_path, idx_path,
shard_id, num_shards, crop_shape,
nvjpeg_padding, prefetch_queue=3,
output_layout=types.NCHW, pad_output=True, dtype='float16'):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id, prefetch_queue_depth = prefetch_queue)
self.input = ops.MXNetReader(path = [rec_path], index_path=[idx_path],
random_shuffle=True, shard_id=shard_id, num_shards=num_shards)
self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB,
device_memory_padding = nvjpeg_padding,
host_memory_padding = nvjpeg_padding)
self.rrc = ops.RandomResizedCrop(device = "gpu", size = crop_shape)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT16 if dtype == 'float16' else types.FLOAT,
output_layout = output_layout,
crop = crop_shape,
pad_output = pad_output,
image_type = types.RGB,
mean = _mean_pixel,
std = _std_pixel)
self.coin = ops.CoinFlip(probability = 0.5)
Example 2: __init__
# Required import: from nvidia.dali import ops [as alias]
# Or: from nvidia.dali.ops import CoinFlip [as alias]
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, seed=12, local_rank=0, world_size=1,
spos_pre=False):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=seed + device_id)
color_space_type = types.BGR if spos_pre else types.RGB
self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size, random_shuffle=True)
self.decode = ops.ImageDecoder(device="mixed", output_type=color_space_type)
self.res = ops.RandomResizedCrop(device="gpu", size=crop,
interp_type=types.INTERP_LINEAR if spos_pre else types.INTERP_TRIANGULAR)
self.twist = ops.ColorTwist(device="gpu")
self.jitter_rng = ops.Uniform(range=[0.6, 1.4])
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
image_type=color_space_type,
mean=0. if spos_pre else [0.485 * 255, 0.456 * 255, 0.406 * 255],
std=1. if spos_pre else [0.229 * 255, 0.224 * 255, 0.225 * 255])
self.coin = ops.CoinFlip(probability=0.5)
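Example 2 creates ColorTwist, a Uniform(0.6, 1.4) jitter generator and CoinFlip, but the define_graph that wires them together is not shown above. A plausible wiring, sketched under the assumption that the method is added to the same HybridTrainPipe class and that brightness, contrast and saturation all draw from the shared jitter range:

def define_graph(self):
    # Sketch; assumes this method belongs to the HybridTrainPipe of Example 2.
    jpegs, labels = self.input(name="Reader")
    images = self.decode(jpegs)
    images = self.res(images)
    # Per-sample brightness/contrast/saturation factors from Uniform(0.6, 1.4).
    images = self.twist(images,
                        brightness=self.jitter_rng(),
                        contrast=self.jitter_rng(),
                        saturation=self.jitter_rng())
    output = self.cmnp(images, mirror=self.coin())
    return [output, labels]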
Example 3: __init__
# Required import: from nvidia.dali import ops [as alias]
# Or: from nvidia.dali.ops import CoinFlip [as alias]
def __init__(self, name, batch_size, num_workers, device_id, num_gpu,
root=os.path.expanduser('~/.mxnet/datasets/face')):
super().__init__(batch_size, num_workers, device_id, seed=12 + device_id)
idx_files = [os.path.join(root, name, "train.idx")]
rec_files = [os.path.join(root, name, "train.rec")]
prop = open(os.path.join(root, name, "property"), "r").read().strip().split(',')
assert len(prop) == 3
self.num_classes = int(prop[0])
self.image_size = [int(prop[1]), int(prop[2])]
self._input = ops.MXNetReader(path=rec_files, index_path=idx_files, random_shuffle=True,
num_shards=num_gpu, tensor_init_bytes=self.image_size[0] * self.image_size[1] * 8)
self._decode = ops.nvJPEGDecoder(device="mixed", output_type=types.RGB)
self._cmnp = ops.CropMirrorNormalize(device="gpu", output_dtype=types.FLOAT, output_layout=types.NCHW,
crop=self.image_size, image_type=types.RGB,
mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5])
        self._contrast = ops.Contrast(device="gpu")
        self._saturation = ops.Saturation(device="gpu")
        self._brightness = ops.Brightness(device="gpu")
self._uniform = ops.Uniform(range=(0.7, 1.3))
self._coin = ops.CoinFlip(probability=0.5)
self.iter = 0
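Example 3 (like Example 8 further below) builds separate Contrast, Saturation and Brightness operators next to a shared Uniform(0.7, 1.3) generator and the coin flip, again without showing the graph. One possible define_graph, sketched under the assumption that it is added to the class above:

def define_graph(self):
    # Sketch; assumes this method belongs to the pipeline class of Example 3.
    jpegs, labels = self._input(name="Reader")
    images = self._decode(jpegs)
    # Each color operator receives its own per-sample factor from Uniform(0.7, 1.3).
    images = self._contrast(images, contrast=self._uniform())
    images = self._saturation(images, saturation=self._uniform())
    images = self._brightness(images, brightness=self._uniform())
    output = self._cmnp(images, mirror=self._coin())
    return [output, labels]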
Example 4: __init__
# Required import: from nvidia.dali import ops [as alias]
# Or: from nvidia.dali.ops import CoinFlip [as alias]
def __init__(self, batch_size, num_threads, shard_id, image_dir, file_list, nvjpeg_padding,
prefetch_queue=3, seed=1, num_shards=1, channel_last=True,
spatial_size=(224, 224), dtype="half",
mean=_pixel_mean, std=_pixel_std, pad_output=True):
super(TrainPipeline, self).__init__(
batch_size, num_threads, shard_id, seed=seed, prefetch_queue_depth=prefetch_queue)
self.input = ops.FileReader(file_root=image_dir, file_list=file_list,
random_shuffle=True, num_shards=num_shards, shard_id=shard_id)
self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB,
device_memory_padding=nvjpeg_padding,
host_memory_padding=nvjpeg_padding)
self.rrc = ops.RandomResizedCrop(device="gpu", size=spatial_size)
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT16 if dtype == "half" else types.FLOAT,
output_layout=types.NHWC if channel_last else types.NCHW,
crop=spatial_size,
image_type=types.RGB,
mean=mean,
std=std,
pad_output=pad_output)
self.coin = ops.CoinFlip(probability=0.5)
Example 5: __init__
# Required import: from nvidia.dali import ops [as alias]
# Or: from nvidia.dali.ops import CoinFlip [as alias]
def __init__(self, file_list, file_root, crop_size,
batch_size, n_threads, device_id,
random_shuffle=True, seed=-1, mean=None, std=None,
n_samples=None):
super(DaliPipelineTrain, self).__init__(batch_size, n_threads,
device_id, seed=seed)
crop_size = _pair(crop_size)
if mean is None:
mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
if std is None:
std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
if n_samples is None:
initial_fill = 4096
else:
initial_fill = min(4096, n_samples)
self.loader = ops.FileReader(file_root=file_root, file_list=file_list,
random_shuffle=random_shuffle,
initial_fill=initial_fill)
self.decode = ops.HostDecoder()
self.resize = ops.Resize(device='gpu', resize_x=256, resize_y=256)
# self.hue = ops.Hue(device="gpu")
# self.bright = ops.Brightness(device="gpu")
# self.cntrst = ops.Contrast(device="gpu")
# self.rotate = ops.Rotate(device="gpu")
# self.jitter = ops.Jitter(device="gpu")
random_area = (crop_size[0] / 256.0) * (crop_size[1] / 256.0)
random_area = _pair(random_area)
random_aspect_ratio = _pair(1.0)
self.rrcrop = ops.RandomResizedCrop(
device='gpu', size=crop_size, random_area=random_area,
random_aspect_ratio=random_aspect_ratio)
self.cmnorm = ops.CropMirrorNormalize(
device='gpu', crop=list(crop_size), mean=mean, std=std)
self.coin = ops.CoinFlip(probability=0.5)
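Example 5 decodes on the CPU with ops.HostDecoder while the resize, crop and normalization operators run on the GPU, so the decoded images have to be transferred with .gpu() before entering the GPU operators. A matching define_graph sketch, assuming it is added to DaliPipelineTrain:

def define_graph(self):
    # Sketch; assumes this method belongs to DaliPipelineTrain of Example 5.
    jpegs, labels = self.loader(name="Reader")
    images = self.decode(jpegs)          # decoded on the CPU by HostDecoder
    images = self.resize(images.gpu())   # transfer to the GPU for the GPU-side ops
    images = self.rrcrop(images)
    output = self.cmnorm(images, mirror=self.coin())
    return [output, labels]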
Example 6: __init__
# Required import: from nvidia.dali import ops [as alias]
# Or: from nvidia.dali.ops import CoinFlip [as alias]
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root = data_dir,
shard_id = local_rank,
num_shards = world_size,
random_shuffle = True)
if dali_cpu:
dali_device = "cpu"
self.decode = ops.HostDecoderRandomCrop(device=dali_device, output_type=types.RGB,
random_aspect_ratio=[0.75, 4./3.],
random_area=[0.08, 1.0],
num_attempts=100)
else:
dali_device = "gpu"
# This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet
# without additional reallocations
self.decode = ops.nvJPEGDecoderRandomCrop(device="mixed", output_type=types.RGB, device_memory_padding=211025920, host_memory_padding=140544512,
random_aspect_ratio=[0.75, 4./3.],
random_area=[0.08, 1.0],
num_attempts=100)
self.res = ops.Resize(device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (crop, crop),
image_type = types.RGB,
mean = [0.485 * 255,0.456 * 255,0.406 * 255],
std = [0.229 * 255,0.224 * 255,0.225 * 255])
self.coin = ops.CoinFlip(probability = 0.5)
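Because Example 6 fuses the random crop into the decoder (HostDecoderRandomCrop on the CPU path, nvJPEGDecoderRandomCrop on the mixed/GPU path), the rest of the graph only needs a resize before normalization, with the coin supplying the mirror flag. A sketch, assuming the method is added to the class above:

def define_graph(self):
    # Sketch; assumes this method belongs to the HybridTrainPipe of Example 6.
    rng = self.coin()
    jpegs, labels = self.input(name="Reader")
    images = self.decode(jpegs)   # decode + random crop, on the CPU or the GPU
    images = self.res(images)
    # cmnp always runs on the GPU, so the CPU path needs an explicit transfer.
    output = self.cmnp(images.gpu(), mirror=rng)
    return [output, labels]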
Example 7: __init__
# Required import: from nvidia.dali import ops [as alias]
# Or: from nvidia.dali.ops import CoinFlip [as alias]
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False, local_rank=0, world_size=1):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
dali_device = "gpu"
self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size, random_shuffle=True)
self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self.res = ops.RandomResizedCrop(device="gpu", size=crop, random_area=[0.08, 1.25])
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
image_type=types.RGB,
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
self.coin = ops.CoinFlip(probability=0.5)
print('DALI "{0}" variant'.format(dali_device))
Example 8: __init__
# Required import: from nvidia.dali import ops [as alias]
# Or: from nvidia.dali.ops import CoinFlip [as alias]
def __init__(self, name, batch_size, num_threads, device_id, num_shards, shard_id,
root=os.path.expanduser('~/.mxnet/datasets/face'), ):
super().__init__(batch_size, num_threads, device_id, seed=12)
idx_files = [os.path.join(root, name, "train.idx")]
rec_files = [os.path.join(root, name, "train.rec")]
prop = open(os.path.join(root, name, "property"), "r").read().strip().split(',')
assert len(prop) == 3
self.num_classes = int(prop[0])
self.image_size = [int(prop[1]), int(prop[2])]
self.size = 0
for idx_file in idx_files:
with open(idx_file, "r") as f:
self.size += len(list(f.readlines()))
self._input = ops.MXNetReader(path=rec_files, index_path=idx_files, random_shuffle=True,
num_shards=num_shards, shard_id=shard_id, seed=12,
tensor_init_bytes=self.image_size[0] * self.image_size[1] * 8)
self._decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self._cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=self.image_size,
image_type=types.RGB,
mean=[0., 0., 0.],
std=[255., 255., 255.])
self._contrast = ops.Contrast(device="gpu")
self._saturation = ops.Saturation(device="gpu")
self._brightness = ops.Brightness(device="gpu")
self._uniform = ops.Uniform(range=(0.7, 1.3))
self._coin = ops.CoinFlip(probability=0.5)
Example 9: __new__
# Required import: from nvidia.dali import ops [as alias]
# Or: from nvidia.dali.ops import CoinFlip [as alias]
def __new__(cls, probability=0.5):
    """Create a ``CoinFlip`` operator.

    Parameters
    ----------
    probability : float, optional, default=0.5
        The probability of returning 1.

    Returns
    -------
    nvidia.dali.ops.CoinFlip
        The operator.

    """
    return ops.CoinFlip(probability=probability)
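Example 9 comes from a thin wrapper whose __new__ simply returns a ready-made ops.CoinFlip, so calling the wrapper class behaves like a factory function. A short, self-contained usage sketch (the stand-in class below only mirrors the __new__ shown above; the surrounding pipeline is assumed, not shown):

import nvidia.dali.ops as ops


class CoinFlip(object):
    """Minimal stand-in for the wrapper of Example 9."""

    def __new__(cls, probability=0.5):
        return ops.CoinFlip(probability=probability)


# Calling the wrapper yields a plain nvidia.dali.ops.CoinFlip operator:
coin = CoinFlip(probability=0.3)
assert isinstance(coin, ops.CoinFlip)

# Inside a pipeline's define_graph it is used like any other RNG operator,
# e.g. output = cmnp(images, mirror=coin()).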
Example 10: __init__
# Required import: from nvidia.dali import ops [as alias]
# Or: from nvidia.dali.ops import CoinFlip [as alias]
def __init__(self, batch_size, num_threads, path, training, annotations, world, device_id, mean, std, resize,
max_size, stride, rotate_augment=False,
augment_brightness=0.0,
augment_contrast=0.0, augment_hue=0.0,
augment_saturation=0.0):
super().__init__(batch_size=batch_size, num_threads=num_threads, device_id=device_id,
prefetch_queue_depth=num_threads, seed=42)
self.path = path
self.training = training
self.stride = stride
self.iter = 0
self.rotate_augment = rotate_augment
self.augment_brightness = augment_brightness
self.augment_contrast = augment_contrast
self.augment_hue = augment_hue
self.augment_saturation = augment_saturation
self.reader = ops.COCOReader(annotations_file=annotations, file_root=path, num_shards=world,
shard_id=torch.cuda.current_device(),
ltrb=True, ratio=True, shuffle_after_epoch=True, save_img_ids=True)
self.decode_train = ops.ImageDecoderSlice(device="mixed", output_type=types.RGB)
self.decode_infer = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self.bbox_crop = ops.RandomBBoxCrop(device='cpu', bbox_layout="xyXY", scaling=[0.3, 1.0],
thresholds=[0.1, 0.3, 0.5, 0.7, 0.9])
self.bbox_flip = ops.BbFlip(device='cpu', ltrb=True)
self.img_flip = ops.Flip(device='gpu')
self.coin_flip = ops.CoinFlip(probability=0.5)
self.bc = ops.BrightnessContrast(device='gpu')
self.hsv = ops.Hsv(device='gpu')
# Random number generation for augmentation
self.brightness_dist = ops.NormalDistribution(mean=1.0, stddev=augment_brightness)
self.contrast_dist = ops.NormalDistribution(mean=1.0, stddev=augment_contrast)
self.hue_dist = ops.NormalDistribution(mean=0.0, stddev=augment_hue)
self.saturation_dist = ops.NormalDistribution(mean=1.0, stddev=augment_saturation)
if rotate_augment:
            raise RuntimeWarning("--augment-rotate currently has no effect when using the DALI data loader.")
if isinstance(resize, list): resize = max(resize)
self.rand_resize = ops.Uniform(range=[resize, float(max_size)])
self.resize_train = ops.Resize(device='gpu', interp_type=types.DALIInterpType.INTERP_CUBIC, save_attrs=True)
self.resize_infer = ops.Resize(device='gpu', interp_type=types.DALIInterpType.INTERP_CUBIC,
resize_longer=max_size, save_attrs=True)
padded_size = max_size + ((self.stride - max_size % self.stride) % self.stride)
self.pad = ops.Paste(device='gpu', fill_value=0, ratio=1.1, min_canvas_size=padded_size, paste_x=0, paste_y=0)
self.normalize = ops.CropMirrorNormalize(device='gpu', mean=mean, std=std, crop=(padded_size, padded_size),
crop_pos_x=0, crop_pos_y=0)
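Example 10 keeps image and bounding-box flips consistent by feeding the same coin flip output to both ops.Flip (pixels, on the GPU) and ops.BbFlip (boxes, on the CPU). A sketch of the training branch of a matching define_graph, assuming the attributes defined above and omitting the resize, padding and normalization steps:

def define_graph(self):
    # Sketch of the training branch only; assumes the attributes of Example 10.
    # The rand_resize / resize_train / pad / normalize steps are omitted here.
    images, bboxes, labels, img_ids = self.reader()
    flip = self.coin_flip()                           # one 0/1 value per sample
    crop_begin, crop_size, bboxes, labels = self.bbox_crop(bboxes, labels)
    images = self.decode_train(images, crop_begin, crop_size)
    bboxes = self.bbox_flip(bboxes, horizontal=flip)  # flip boxes on the CPU
    images = self.img_flip(images, horizontal=flip)   # flip pixels on the GPU
    ...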
Example 11: __init__
# Required import: from nvidia.dali import ops [as alias]
# Or: from nvidia.dali.ops import CoinFlip [as alias]
def __init__(self, batch_size, num_threads, device_id, rec_path, idx_path,
shard_id, num_shards, crop_shape,
min_random_area, max_random_area,
min_random_aspect_ratio, max_random_aspect_ratio,
nvjpeg_padding, prefetch_queue=3,
seed=12,
output_layout=types.NCHW, pad_output=True, dtype='float16',
mlperf_print=True):
super(HybridTrainPipe, self).__init__(
batch_size, num_threads, device_id,
seed = seed + device_id,
prefetch_queue_depth = prefetch_queue)
if mlperf_print:
            # Shuffling is done inside ops.MXNetReader
mx_resnet_print(key=mlperf_log.INPUT_ORDER)
self.input = ops.MXNetReader(path = [rec_path], index_path=[idx_path],
random_shuffle=True, shard_id=shard_id, num_shards=num_shards)
self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB,
device_memory_padding = nvjpeg_padding,
host_memory_padding = nvjpeg_padding)
self.rrc = ops.RandomResizedCrop(device = "gpu",
random_area = [
min_random_area,
max_random_area],
random_aspect_ratio = [
min_random_aspect_ratio,
max_random_aspect_ratio],
size = crop_shape)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT16 if dtype == 'float16' else types.FLOAT,
output_layout = output_layout,
crop = crop_shape,
pad_output = pad_output,
image_type = types.RGB,
mean = _mean_pixel,
std = _std_pixel)
self.coin = ops.CoinFlip(probability = 0.5)
if mlperf_print:
mx_resnet_print(
key=mlperf_log.INPUT_CROP_USES_BBOXES,
val=False)
mx_resnet_print(
key=mlperf_log.INPUT_DISTORTED_CROP_RATIO_RANGE,
val=(min_random_aspect_ratio,
max_random_aspect_ratio))
mx_resnet_print(
key=mlperf_log.INPUT_DISTORTED_CROP_AREA_RANGE,
val=(min_random_area,
max_random_area))
mx_resnet_print(
key=mlperf_log.INPUT_MEAN_SUBTRACTION,
val=_mean_pixel)
mx_resnet_print(
key=mlperf_log.INPUT_RANDOM_FLIP)
Example 12: __init__
# Required import: from nvidia.dali import ops [as alias]
# Or: from nvidia.dali.ops import CoinFlip [as alias]
def __init__(self, batch_size, num_threads, device_id, data_dir, crop,
mean, std, local_rank=0, world_size=1, dali_cpu=False, shuffle=True, fp16=False,
min_crop_size=0.08):
# As we're recreating the Pipeline at every epoch, the seed must be -1 (random seed)
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=-1)
# Enabling read_ahead slowed down processing ~40%
self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size,
random_shuffle=shuffle)
        # Let the user decide which pipeline works best with the chosen model
if dali_cpu:
decode_device = "cpu"
self.dali_device = "cpu"
self.flip = ops.Flip(device=self.dali_device)
else:
decode_device = "mixed"
self.dali_device = "gpu"
output_dtype = types.FLOAT
if self.dali_device == "gpu" and fp16:
output_dtype = types.FLOAT16
self.cmn = ops.CropMirrorNormalize(device="gpu",
output_dtype=output_dtype,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
mean=mean,
std=std,)
# To be able to handle all images from full-sized ImageNet, this padding sets the size of the internal
# nvJPEG buffers without additional reallocations
device_memory_padding = 211025920 if decode_device == 'mixed' else 0
host_memory_padding = 140544512 if decode_device == 'mixed' else 0
self.decode = ops.ImageDecoderRandomCrop(device=decode_device, output_type=types.RGB,
device_memory_padding=device_memory_padding,
host_memory_padding=host_memory_padding,
random_aspect_ratio=[0.8, 1.25],
random_area=[min_crop_size, 1.0],
num_attempts=100)
# Resize as desired. To match torchvision data loader, use triangular interpolation.
self.res = ops.Resize(device=self.dali_device, resize_x=crop, resize_y=crop,
interp_type=types.INTERP_TRIANGULAR)
self.coin = ops.CoinFlip(probability=0.5)
print('DALI "{0}" variant'.format(self.dali_device))
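Example 12 only builds an explicit ops.Flip on the CPU path; on the GPU path the flip is folded into CropMirrorNormalize via its mirror argument. A define_graph sketch that would match that split, assuming it is added to the class above:

def define_graph(self):
    # Sketch; assumes this method belongs to the HybridTrainPipe of Example 12.
    rng = self.coin()
    jpegs, labels = self.input(name="Reader")
    images = self.decode(jpegs)
    images = self.res(images)
    if self.dali_device == "cpu":
        # CPU variant: flip explicitly, then move to the GPU for normalization.
        images = self.flip(images, horizontal=rng)
        output = self.cmn(images.gpu())
    else:
        # GPU variant: CropMirrorNormalize handles the mirroring itself.
        output = self.cmn(images, mirror=rng)
    return [output, labels]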