本文整理匯總了Python中tensorpack.dataflow.imgaug.Flip方法的典型用法代碼示例。如果您正苦於以下問題:Python imgaug.Flip方法的具體用法?Python imgaug.Flip怎麽用?Python imgaug.Flip使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類tensorpack.dataflow.imgaug
的用法示例。
在下文中一共展示了imgaug.Flip方法的12個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: get_data
# 需要導入模塊: from tensorpack.dataflow import imgaug [as 別名]
# 或者: from tensorpack.dataflow.imgaug import Flip [as 別名]
def get_data(name, batch):
    """Build an ImageNet dataflow for the given subset.

    Args:
        name (str): subset name; 'train' enables training augmentation.
        batch (int): batch size forwarded to the dataflow builder.

    Returns:
        The dataflow produced by ``get_imagenet_dataflow``.
    """
    is_training = name == 'train'
    image_shape = 224
    if is_training:
        # use lighter augs (larger minimum crop area) if model is too small
        crop_fraction = 0.49 if args.width_ratio < 1 else 0.08
        augmentors = [
            GoogleNetResize(crop_area_fraction=crop_fraction,
                            target_shape=image_shape),
            imgaug.RandomOrderAug([
                imgaug.BrightnessScale((0.6, 1.4), clip=False),
                imgaug.Contrast((0.6, 1.4), clip=False),
                imgaug.Saturation(0.4, rgb=False),
            ]),
            imgaug.Flip(horiz=True),
        ]
    else:
        # eval: resize shortest edge (256/224 ratio), then center-crop
        augmentors = [
            imgaug.ResizeShortestEdge(int(image_shape * 256 / 224), cv2.INTER_CUBIC),
            imgaug.CenterCrop((image_shape, image_shape)),
        ]
    return get_imagenet_dataflow(args.data_dir, name, batch, augmentors,
                                 meta_dir=args.meta_dir)
示例2: fbresnet_augmentor
# 需要導入模塊: from tensorpack.dataflow import imgaug [as 別名]
# 或者: from tensorpack.dataflow.imgaug import Flip [as 別名]
def fbresnet_augmentor():
    """Return training augmentors in the style of fb.resnet.torch.

    Input is assumed to be BGR (original comment: "assme BGR input").
    """
    augs = []
    augs.append(imgaug.GoogleNetRandomCropAndResize())
    color_augs = [
        imgaug.BrightnessScale((0.6, 1.4), clip=False),
        imgaug.Contrast((0.6, 1.4), clip=False),
        imgaug.Saturation(0.4, rgb=False),
        # rgb->bgr conversion for the constants copied from fb.resnet.torch
        imgaug.Lighting(
            0.1,
            eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
            eigvec=np.array(
                [[-0.5675, 0.7192, 0.4009],
                 [-0.5808, -0.0045, -0.8140],
                 [-0.5836, -0.6948, 0.4203]],
                dtype='float32')[::-1, ::-1]),
    ]
    augs.append(imgaug.RandomOrderAug(color_augs))
    augs.append(imgaug.Flip(horiz=True))
    return augs
示例3: fbresnet_augmentor
# 需要導入模塊: from tensorpack.dataflow import imgaug [as 別名]
# 或者: from tensorpack.dataflow.imgaug import Flip [as 別名]
def fbresnet_augmentor():
    """Return training augmentors following fb.resnet.torch (input assumed BGR)."""
    # PCA-lighting constants copied from fb.resnet.torch; they are given in
    # RGB order there, hence the [::-1] reversals for BGR input.
    eigval = np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0
    eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                       [-0.5808, -0.0045, -0.8140],
                       [-0.5836, -0.6948, 0.4203]],
                      dtype='float32')[::-1, ::-1]
    return [
        GoogleNetResize(),
        imgaug.RandomOrderAug([
            imgaug.BrightnessScale((0.6, 1.4), clip=False),
            imgaug.Contrast((0.6, 1.4), clip=False),
            imgaug.Saturation(0.4, rgb=False),
            imgaug.Lighting(0.1, eigval=eigval, eigvec=eigvec),
        ]),
        imgaug.Flip(horiz=True),
    ]
示例4: get_data
# 需要導入模塊: from tensorpack.dataflow import imgaug [as 別名]
# 或者: from tensorpack.dataflow.imgaug import Flip [as 別名]
def get_data(name, batch):
    """Build an ImageNet dataflow (224x224 crops) for the given subset.

    Args:
        name (str): subset name; 'train' enables training augmentation.
        batch (int): batch size forwarded to the dataflow builder.
    """
    if name == 'train':
        # fb.resnet.torch PCA-lighting constants (RGB order), reversed for BGR input
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.RandomCrop(224),
            imgaug.Lighting(
                0.1,
                eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                                 [-0.5808, -0.0045, -0.8140],
                                 [-0.5836, -0.6948, 0.4203]],
                                dtype='float32')[::-1, ::-1]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    return get_imagenet_dataflow(args.data, name, batch, augmentors)
示例5: resizeAndLighting_augmentor
# 需要導入模塊: from tensorpack.dataflow import imgaug [as 別名]
# 或者: from tensorpack.dataflow.imgaug import Flip [as 別名]
def resizeAndLighting_augmentor():
    """GoogleNet-style crop/resize, PCA lighting, then horizontal flip.

    Input is assumed BGR; the fb.resnet.torch lighting constants (RGB order)
    are reversed accordingly.
    """
    pipeline = [imgaug.GoogleNetRandomCropAndResize()]
    pipeline.append(imgaug.Lighting(
        0.1,
        eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
        eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                         [-0.5808, -0.0045, -0.8140],
                         [-0.5836, -0.6948, 0.4203]],
                        dtype='float32')[::-1, ::-1]))
    pipeline.append(imgaug.Flip(horiz=True))
    return pipeline
示例6: resizeOnly_augmentor
# 需要導入模塊: from tensorpack.dataflow import imgaug [as 別名]
# 或者: from tensorpack.dataflow.imgaug import Flip [as 別名]
def resizeOnly_augmentor():
    """GoogleNet-style crop/resize, PCA lighting, then horizontal flip.

    NOTE(review): despite the name, lighting IS applied, making this
    identical to resizeAndLighting_augmentor — confirm the name is intended.
    Input assumed BGR; fb.resnet.torch constants reversed from RGB.
    """
    eigval_bgr = np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0
    eigvec_bgr = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]],
                          dtype='float32')[::-1, ::-1]
    return [
        imgaug.GoogleNetRandomCropAndResize(),
        imgaug.Lighting(0.1, eigval=eigval_bgr, eigvec=eigvec_bgr),
        imgaug.Flip(horiz=True),
    ]
示例7: get_data
# 需要導入模塊: from tensorpack.dataflow import imgaug [as 別名]
# 或者: from tensorpack.dataflow.imgaug import Flip [as 別名]
def get_data(is_train,
             batch_size,
             data_dir_path,
             input_image_size=224,
             resize_inv_factor=0.875):
    """Build an ImageNet dataflow.

    Args:
        is_train (bool): training subset (heavy augmentation) vs evaluation.
        batch_size (int): batch size forwarded to the dataflow builder.
        data_dir_path (str): dataset root directory.
        input_image_size (int): side length of the network input crop.
        resize_inv_factor (float): eval-time ratio; shortest edge is resized
            to ceil(input_image_size / resize_inv_factor) before center crop.

    Raises:
        AssertionError: if resize_inv_factor is not positive.
    """
    assert resize_inv_factor > 0.0
    resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor))
    if is_train:
        augmentors = [
            GoogleNetResize(
                crop_area_fraction=0.08,
                target_shape=input_image_size),
            imgaug.RandomOrderAug([
                imgaug.BrightnessScale((0.6, 1.4), clip=False),
                imgaug.Contrast((0.6, 1.4), clip=False),
                imgaug.Saturation(0.4, rgb=False),
                # rgb-bgr conversion for the constants copied from fb.resnet.torch
                imgaug.Lighting(
                    0.1,
                    eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                    eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                                     [-0.5808, -0.0045, -0.8140],
                                     [-0.5836, -0.6948, 0.4203]],
                                    dtype="float32")[::-1, ::-1]),
            ]),
            imgaug.Flip(horiz=True),
        ]
    else:
        # eval uses INTER_LINEAR here (a cubic variant existed but was disabled)
        augmentors = [
            imgaug.ResizeShortestEdge(resize_value, cv2.INTER_LINEAR),
            imgaug.CenterCrop((input_image_size, input_image_size)),
        ]
    return get_imagenet_dataflow(
        datadir=data_dir_path,
        is_train=is_train,
        batch_size=batch_size,
        augmentors=augmentors)
示例8: get_cifar_augmented_data
# 需要導入模塊: from tensorpack.dataflow import imgaug [as 別名]
# 或者: from tensorpack.dataflow.imgaug import Flip [as 別名]
def get_cifar_augmented_data(
        subset, options, do_multiprocess=True, do_validation=False, shuffle=None):
    """Build an augmented CIFAR-10/100 dataflow with cut-out on training.

    Args:
        subset (str): 'train' or an evaluation subset name.
        options: config with num_classes, ds_name, batch_size, nr_gpu.
        do_multiprocess (bool): enables prefetching; NOTE(review): it also
            gates training augmentation via the train flag — confirm intended.
        do_validation (bool): forwarded to the dataset constructor.
        shuffle (bool or None): explicit shuffle flag; defaults to train mode.

    Raises:
        ValueError: if (num_classes, ds_name) is not a supported CIFAR combo.
    """
    is_train = subset == 'train' and do_multiprocess
    if shuffle is None:
        shuffle = is_train
    if options.num_classes == 10 and options.ds_name == 'cifar10':
        ds = dataset.Cifar10(subset, shuffle=shuffle, do_validation=do_validation)
        cutout_length, n_holes = 16, 1
    elif options.num_classes == 100 and options.ds_name == 'cifar100':
        ds = dataset.Cifar100(subset, shuffle=shuffle, do_validation=do_validation)
        cutout_length, n_holes = 8, 1
    else:
        raise ValueError('Number of classes must be set to 10(default) or 100 for CIFAR')
    logger.info('{} set has n_samples: {}'.format(subset, len(ds.data)))
    pp_mean = ds.get_per_pixel_mean()
    if is_train:
        logger.info('Will do cut-out with length={} n_holes={}'.format(
            cutout_length, n_holes
        ))
        augmentors = [
            imgaug.CenterPaste((40, 40)),
            imgaug.RandomCrop((32, 32)),
            imgaug.Flip(horiz=True),
            # normalize to roughly [-1, 1] around the per-pixel mean
            imgaug.MapImage(lambda x: (x - pp_mean)/128.0),
            Cutout(length=cutout_length, n_holes=n_holes),
        ]
    else:
        augmentors = [
            imgaug.MapImage(lambda x: (x - pp_mean)/128.0)
        ]
    ds = AugmentImageComponent(ds, augmentors)
    ds = BatchData(ds, options.batch_size // options.nr_gpu, remainder=not is_train)
    if do_multiprocess:
        ds = PrefetchData(ds, 3, 2)
    return ds
示例9: get_downsampled_imagenet_augmented_data
# 需要導入模塊: from tensorpack.dataflow import imgaug [as 別名]
# 或者: from tensorpack.dataflow.imgaug import Flip [as 別名]
def get_downsampled_imagenet_augmented_data(subset, options,
                                            do_multiprocess=True, do_validation=False, shuffle=None):
    """Build an augmented dataflow for downsampled ImageNet.

    The input resolution is parsed from options.ds_name ('imagenet<N>').

    Args:
        subset (str): 'train' or an evaluation subset name.
        options: config with ds_name, data_dir, batch_size, nr_gpu.
        do_multiprocess (bool): enables prefetching; NOTE(review): it also
            gates training augmentation via the train flag — confirm intended.
        do_validation (bool): forwarded to the dataset constructor.
        shuffle (bool or None): explicit shuffle flag; defaults to train mode.
    """
    is_train = subset == 'train' and do_multiprocess
    if shuffle is None:
        shuffle = is_train
    name_match = re.search(r'^imagenet([0-9]*)$', options.ds_name)
    input_size = int(name_match.group(1))
    ds = DownsampledImageNet(_data_batch_dir(options.data_dir, input_size),
                             subset, shuffle, input_size, do_validation=do_validation)
    pp_mean = ds.mean_img
    # pad to 5/4 of the input size, then random-crop back to input size
    paste_size = ds.input_size * 5 // 4
    crop_size = ds.input_size
    if is_train:
        augmentors = [
            imgaug.CenterPaste((paste_size, paste_size)),
            imgaug.RandomCrop((crop_size, crop_size)),
            imgaug.Flip(horiz=True),
            imgaug.MapImage(lambda x: (x - pp_mean)/128.0),
        ]
    else:
        augmentors = [
            imgaug.MapImage(lambda x: (x - pp_mean)/128.0)
        ]
    ds = AugmentImageComponent(ds, augmentors)
    ds = BatchData(ds, options.batch_size // options.nr_gpu, remainder=not is_train)
    if do_multiprocess:
        ds = PrefetchData(ds, 4, 2)
    return ds
示例10: resizeAndLighting_augmentor
# 需要導入模塊: from tensorpack.dataflow import imgaug [as 別名]
# 或者: from tensorpack.dataflow.imgaug import Flip [as 別名]
def resizeAndLighting_augmentor():
    """GoogleNet resize, PCA lighting, then horizontal flip (BGR input)."""
    augs = [GoogleNetResize()]
    augs.append(imgaug.Lighting(
        0.1,
        # fb.resnet.torch constants (RGB order), reversed for BGR input
        eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
        eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                         [-0.5808, -0.0045, -0.8140],
                         [-0.5836, -0.6948, 0.4203]],
                        dtype='float32')[::-1, ::-1]))
    augs.append(imgaug.Flip(horiz=True))
    return augs
示例11: resizeOnly_augmentor
# 需要導入模塊: from tensorpack.dataflow import imgaug [as 別名]
# 或者: from tensorpack.dataflow.imgaug import Flip [as 別名]
def resizeOnly_augmentor():
    """GoogleNet resize + PCA lighting + horizontal flip (BGR input).

    NOTE(review): the name says "resize only" but lighting is applied too,
    making this identical to resizeAndLighting_augmentor — confirm intent.
    """
    eigval_bgr = np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0
    eigvec_bgr = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]],
                          dtype='float32')[::-1, ::-1]
    return [
        GoogleNetResize(),
        imgaug.Lighting(0.1, eigval=eigval_bgr, eigvec=eigvec_bgr),
        imgaug.Flip(horiz=True),
    ]
示例12: get_data
# 需要導入模塊: from tensorpack.dataflow import imgaug [as 別名]
# 或者: from tensorpack.dataflow.imgaug import Flip [as 別名]
def get_data(name, batch):
    """Build an ImageNet dataflow for the given subset.

    Args:
        name (str): subset name; 'train' enables training augmentation.
        batch (int): batch size forwarded to the dataflow builder.
    """
    if name == 'train':
        # use lighter augs (larger minimum crop area) if model is too small
        min_crop_area = 0.49 if args.ratio < 1 else 0.08
        augmentors = [
            imgaug.GoogleNetRandomCropAndResize(crop_area_fraction=(min_crop_area, 1.)),
            imgaug.RandomOrderAug([
                imgaug.BrightnessScale((0.6, 1.4), clip=False),
                imgaug.Contrast((0.6, 1.4), clip=False),
                imgaug.Saturation(0.4, rgb=False),
                # rgb-bgr conversion for the constants copied from fb.resnet.torch
                imgaug.Lighting(
                    0.1,
                    eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                    eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                                     [-0.5808, -0.0045, -0.8140],
                                     [-0.5836, -0.6948, 0.4203]],
                                    dtype='float32')[::-1, ::-1]),
            ]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    return get_imagenet_dataflow(
        args.data, name, batch, augmentors)