本文整理匯總了Python中tensorpack.imgaug.BrightnessScale方法的典型用法代碼示例。如果您正苦於以下問題:Python imgaug.BrightnessScale方法的具體用法?Python imgaug.BrightnessScale怎麽用?Python imgaug.BrightnessScale使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類tensorpack.imgaug
的用法示例。
在下文中一共展示了imgaug.BrightnessScale方法的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: sample_augmentations
# 需要導入模塊: from tensorpack import imgaug [as 別名]
# 或者: from tensorpack.imgaug import BrightnessScale [as 別名]
def sample_augmentations():
    """Visually inspect the CocoPose augmentation pipeline.

    Builds the full geometric + photometric augmentation chain on the
    validation LMDB (single index) and displays each augmented sample.
    """
    # Photometric jitter: applied with probability 0.7, picking one of the
    # listed augmentors at random.
    photometric = imgaug.RandomApplyAug(imgaug.RandomChooseAug([
        imgaug.GaussianBlur(3),
        imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01),
        imgaug.RandomOrderAug([
            imgaug.BrightnessScale((0.8, 1.2), clip=False),
            imgaug.Contrast((0.8, 1.2), clip=False),
            # imgaug.Saturation(0.4, rgb=True),
        ]),
    ]), 0.7)
    augs = [photometric]

    ds = CocoPose('/data/public/rw/coco-pose-estimation-lmdb/', is_train=False, only_idx=0)
    # Geometric, pose-aware transforms applied per-component, in order.
    for transform in (pose_random_scale, pose_rotation, pose_flip,
                      pose_resize_shortestedge_random, pose_crop_random):
        ds = MapDataComponent(ds, transform)
    ds = MapData(ds, pose_to_img)
    ds = AugmentImageComponent(ds, augs)

    ds.reset_state()
    for l1, l2, l3 in ds.get_data():
        CocoPose.display_image(l1, l2, l3)
示例2: fbresnet_augmentor
# 需要導入模塊: from tensorpack import imgaug [as 別名]
# 或者: from tensorpack.imgaug import BrightnessScale [as 別名]
def fbresnet_augmentor(isTrain):
    """
    Augmentor used in fb.resnet.torch, for BGR images in range [0,255].
    """
    if not isTrain:
        # Evaluation: deterministic shortest-edge resize + central crop.
        return [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]

    # PCA-lighting constants copied from fb.resnet.torch; the [::-1] flips
    # convert them from RGB to BGR channel ordering.
    lighting = imgaug.Lighting(
        0.1,
        eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
        eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                         [-0.5808, -0.0045, -0.8140],
                         [-0.5836, -0.6948, 0.4203]],
                        dtype='float32')[::-1, ::-1])
    # It's OK to remove the color augs below if your CPU is not fast enough:
    # brightness/contrast/saturation have no significant accuracy effect;
    # removing lighting costs a tiny bit of accuracy.
    return [
        GoogleNetResize(),
        imgaug.RandomOrderAug([
            imgaug.BrightnessScale((0.6, 1.4), clip=False),
            imgaug.Contrast((0.6, 1.4), clip=False),
            imgaug.Saturation(0.4, rgb=False),
            lighting,
        ]),
        imgaug.Flip(horiz=True),
    ]
示例3: fbresnet_augmentor
# 需要導入模塊: from tensorpack import imgaug [as 別名]
# 或者: from tensorpack.imgaug import BrightnessScale [as 別名]
def fbresnet_augmentor(isTrain):
    """
    Augmentor used in fb.resnet.torch, for BGR images in range [0,255].
    """
    interpolation = cv2.INTER_LINEAR
    if isTrain:
        # Sec 5.1: scale and aspect-ratio augmentation as in GoogLeNet; the
        # network input is a 224x224 random crop from the augmented image or
        # its horizontal flip.
        eigval_bgr = np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0
        eigvec_bgr = np.array([[-0.5675, 0.7192, 0.4009],
                               [-0.5808, -0.0045, -0.8140],
                               [-0.5836, -0.6948, 0.4203]],
                              dtype='float32')[::-1, ::-1]
        # Constants above are copied from fb.resnet.torch (RGB) and flipped
        # to BGR via the [::-1] reversals.
        color_jitter = imgaug.RandomOrderAug([
            imgaug.BrightnessScale((0.6, 1.4), clip=False),
            imgaug.Contrast((0.6, 1.4), rgb=False, clip=False),
            imgaug.Saturation(0.4, rgb=False),
            imgaug.Lighting(0.1, eigval=eigval_bgr, eigvec=eigvec_bgr),
        ])
        # The color jitter is optional: dropping brightness/contrast/saturation
        # barely changes accuracy; dropping lighting costs a tiny amount.
        return [
            imgaug.GoogleNetRandomCropAndResize(interp=interpolation),
            color_jitter,
            imgaug.Flip(horiz=True),
        ]
    return [
        imgaug.ResizeShortestEdge(256, interp=interpolation),
        imgaug.CenterCrop((224, 224)),
    ]
示例4: get_ilsvrc_data_alexnet
# 需要導入模塊: from tensorpack import imgaug [as 別名]
# 或者: from tensorpack.imgaug import BrightnessScale [as 別名]
def get_ilsvrc_data_alexnet(is_train, image_size, batchsize, directory):
    """Build an ILSVRC dataflow with AlexNet-style augmentation.

    A relative `directory` selects the tenth-of-ImageNet datasets; an
    absolute path selects the full ILSVRC12 dataset.
    NOTE(review): `image_size` is currently unused — confirm against callers.
    """
    use_tenth = not directory.startswith('/')
    if is_train:
        ds = ILSVRCTTenthTrain(directory) if use_tenth else ILSVRC12(directory, 'train')
        # One photometric augmentor chosen at random (applied w.p. 0.7).
        photometric = imgaug.RandomChooseAug([
            imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01),
            imgaug.RandomOrderAug([
                imgaug.BrightnessScale((0.8, 1.2), clip=False),
                imgaug.Contrast((0.8, 1.2), clip=False),
                # imgaug.Saturation(0.4, rgb=True),
            ]),
        ])
        augs = [
            imgaug.RandomApplyAug(imgaug.RandomResize((0.9, 1.2), (0.9, 1.2)), 0.7),
            imgaug.RandomApplyAug(imgaug.RotationAndCropValid(15), 0.7),
            imgaug.RandomApplyAug(photometric, 0.7),
            imgaug.Flip(horiz=True),
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.RandomCrop((224, 224)),
        ]
        ds = AugmentImageComponent(ds, augs)
        ds = PrefetchData(ds, 1000, multiprocessing.cpu_count())
        ds = BatchData(ds, batchsize)
        ds = PrefetchData(ds, 10, 4)
    else:
        ds = ILSVRCTenthValid(directory) if use_tenth else ILSVRC12(directory, 'val')
        # Deterministic eval preprocessing: resize + center crop.
        ds = AugmentImageComponent(ds, [
            imgaug.ResizeShortestEdge(224, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ])
        ds = PrefetchData(ds, 100, multiprocessing.cpu_count())
        ds = BatchData(ds, batchsize)
    return ds
示例5: fbresnet_augmentor
# 需要導入模塊: from tensorpack import imgaug [as 別名]
# 或者: from tensorpack.imgaug import BrightnessScale [as 別名]
def fbresnet_augmentor(isTrain):
    """
    Augmentor used in fb.resnet.torch, for BGR images in range [0,255].
    """
    if isTrain:
        # fb.resnet.torch PCA-lighting constants; [::-1] reverses RGB -> BGR.
        eigval = np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0
        eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]],
                          dtype='float32')[::-1, ::-1]
        augmentors = [
            GoogleNetResize(),
            imgaug.RandomOrderAug([
                imgaug.BrightnessScale((0.6, 1.4), clip=False),
                imgaug.Contrast((0.6, 1.4), clip=False),
                imgaug.Saturation(0.4, rgb=False),
                imgaug.Lighting(0.1, eigval=eigval, eigvec=eigvec),
            ]),
            imgaug.Flip(horiz=True),
        ]
    else:
        # Evaluation path: resize shortest edge, then central 224x224 crop.
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    return augmentors
示例6: fbresnet_augmentor
# 需要導入模塊: from tensorpack import imgaug [as 別名]
# 或者: from tensorpack.imgaug import BrightnessScale [as 別名]
def fbresnet_augmentor(isTrain):
    """
    Augmentor used in fb.resnet.torch, for BGR images in range [0,255].
    """
    if not isTrain:
        # Evaluation: resize then center-crop to the configured input shape.
        return [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((DEFAULT_IMAGE_SHAPE, DEFAULT_IMAGE_SHAPE)),
        ]

    # Lighting constants come from fb.resnet.torch (RGB); the slice reversals
    # adapt them to BGR channel order.
    pca_lighting = imgaug.Lighting(
        0.1,
        eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
        eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                         [-0.5808, -0.0045, -0.8140],
                         [-0.5836, -0.6948, 0.4203]],
                        dtype='float32')[::-1, ::-1])
    # It's OK to remove the color augs if your CPU is not fast enough:
    # brightness/contrast/saturation barely affect accuracy; removing
    # lighting causes a tiny drop.
    return [
        GoogleNetResize(),
        imgaug.RandomOrderAug([
            imgaug.BrightnessScale((0.6, 1.4), clip=False),
            imgaug.Contrast((0.6, 1.4), clip=False),
            imgaug.Saturation(0.4, rgb=False),
            pca_lighting,
        ]),
        imgaug.Flip(horiz=True),
    ]
示例7: fbresnet_augmentor
# 需要導入模塊: from tensorpack import imgaug [as 別名]
# 或者: from tensorpack.imgaug import BrightnessScale [as 別名]
def fbresnet_augmentor(isTrain):
    """
    Augmentor used in fb.resnet.torch, for BGR images in range [0,255].
    """
    if isTrain:
        # Color jitter applied in random order; the Lighting constants are
        # copied from fb.resnet.torch and reversed for BGR input.
        jitter = [
            imgaug.BrightnessScale((0.6, 1.4), clip=False),
            imgaug.Contrast((0.6, 1.4), clip=False),
            imgaug.Saturation(0.4, rgb=False),
            imgaug.Lighting(
                0.1,
                eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                                 [-0.5808, -0.0045, -0.8140],
                                 [-0.5836, -0.6948, 0.4203]],
                                dtype='float32')[::-1, ::-1]),
        ]
        return [GoogleNetResize(),
                imgaug.RandomOrderAug(jitter),
                imgaug.Flip(horiz=True)]
    # Evaluation: deterministic resize + center crop.
    return [imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224))]
#####################################################################################################
#####################################################################################################
示例8: get_augmentations
# 需要導入模塊: from tensorpack import imgaug [as 別名]
# 或者: from tensorpack.imgaug import BrightnessScale [as 別名]
def get_augmentations(is_train):
    """Return the train or eval augmentor list (fb.resnet.torch style, BGR)."""
    if not is_train:
        # Evaluation: resize shortest edge to 256, central 224x224 crop.
        return [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]

    # Lighting constants from fb.resnet.torch; reversed for BGR channels.
    lighting = imgaug.Lighting(
        0.1,
        eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
        eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                         [-0.5808, -0.0045, -0.8140],
                         [-0.5836, -0.6948, 0.4203]],
                        dtype='float32')[::-1, ::-1])
    return [
        GoogleNetResize(crop_area_fraction=0.76, target_shape=224),  # TODO : 76% or 49%?
        imgaug.RandomOrderAug([
            imgaug.BrightnessScale((0.6, 1.4), clip=True),
            imgaug.Contrast((0.6, 1.4), clip=True),
            imgaug.Saturation(0.4, rgb=False),
            lighting,
        ]),
        imgaug.Flip(horiz=True),
    ]