This article collects typical usage examples of the Python method tensorpack.imgaug.CenterCrop. If you are wondering what imgaug.CenterCrop does and how to use it in practice, the curated examples below should help. You can also explore the containing module, tensorpack.imgaug, for related functionality.
The 12 code examples of imgaug.CenterCrop shown below are ordered by popularity by default.
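As a warm-up, here is a minimal standalone sketch of CenterCrop (the file name and crop size are illustrative assumptions; augment is the single-image entry point that several examples below also use):

import cv2
from tensorpack import imgaug

img = cv2.imread('input.jpg')          # any BGR image larger than the crop
crop = imgaug.CenterCrop((224, 224))   # target (height, width); illustrative
out = crop.augment(img)                # returns the central 224x224 patch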
Example 1: _augment

# Required module import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import CenterCrop [as alias]
def _augment(self, img, _):
    h, w = img.shape[:2]
    area = h * w
    for _ in range(10):
        targetArea = self.rng.uniform(self.crop_area_fraction, 1.0) * area
        aspectR = self.rng.uniform(self.aspect_ratio_low, self.aspect_ratio_high)
        ww = int(np.sqrt(targetArea * aspectR) + 0.5)
        hh = int(np.sqrt(targetArea / aspectR) + 0.5)
        if self.rng.uniform() < 0.5:
            ww, hh = hh, ww
        if hh <= h and ww <= w:
            x1 = 0 if w == ww else self.rng.randint(0, w - ww)
            y1 = 0 if h == hh else self.rng.randint(0, h - hh)
            out = img[y1:y1 + hh, x1:x1 + ww]
            out = cv2.resize(out, (self.target_shape, self.target_shape), interpolation=cv2.INTER_CUBIC)
            return out
    # Fallback after 10 failed attempts: resize the shortest edge and center-crop.
    out = imgaug.ResizeShortestEdge(self.target_shape, interp=cv2.INTER_CUBIC).augment(img)
    out = imgaug.CenterCrop(self.target_shape).augment(out)
    return out
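The _augment method above reads several attributes from self; a hypothetical sketch of the enclosing augmentor class, using tensorpack's standard _init(locals()) pattern (the attribute names come from the method body, while the default values are common GoogleNet-style choices and purely illustrative):

import cv2
import numpy as np
from tensorpack import imgaug

class GoogleNetResize(imgaug.ImageAugmentor):
    def __init__(self, crop_area_fraction=0.08,
                 aspect_ratio_low=0.75, aspect_ratio_high=1.333,
                 target_shape=224):
        # Hypothetical defaults; only the attribute names are implied
        # by the _augment body above.
        super(GoogleNetResize, self).__init__()
        self._init(locals())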
Example 2: normal_augmentor

# Required module import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import CenterCrop [as alias]
def normal_augmentor(isTrain):
    """
    Normal augmentor with random crop and flip only, for BGR images in range [0,255].
    """
    if isTrain:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.RandomCrop((DEFAULT_IMAGE_SHAPE, DEFAULT_IMAGE_SHAPE)),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((DEFAULT_IMAGE_SHAPE, DEFAULT_IMAGE_SHAPE)),
        ]
    return augmentors
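Because normal_augmentor only returns a list, it still has to be attached to a dataflow; a minimal sketch following the pattern of Example 7 below (the dataset path and batch size are illustrative assumptions):

from tensorpack.dataflow import AugmentImageComponent, BatchData
from tensorpack.dataflow.dataset import ILSVRC12

ds = ILSVRC12('/path/to/ilsvrc12', 'val')   # assumed data source
ds = AugmentImageComponent(ds, normal_augmentor(isTrain=False))
ds = BatchData(ds, 64)                      # illustrative batch size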
Example 3: get_valid_augmentors

# Required module import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import CenterCrop [as alias]
def get_valid_augmentors(self, input_shape, output_shape, view=False):
    print(input_shape, output_shape)
    shape_augs = [
        imgaug.CenterCrop(input_shape),
    ]
    input_augs = None
    label_augs = []
    if self.model_type == 'unet' or self.model_type == 'micronet':
        label_augs = [GenInstanceUnetMap(crop_shape=output_shape)]
    if self.model_type == 'dcan':
        label_augs = [GenInstanceContourMap(crop_shape=output_shape)]
    if self.model_type == 'dist':
        label_augs = [GenInstanceDistance(crop_shape=output_shape, inst_norm=False)]
    if self.model_type == 'np_hv':
        label_augs = [GenInstanceHV(crop_shape=output_shape)]
    if self.model_type == 'np_dist':
        label_augs = [GenInstanceDistance(crop_shape=output_shape, inst_norm=True)]
    label_augs.append(BinarizeLabel())
    if not view:
        label_augs.append(imgaug.CenterCrop(output_shape))
    return shape_augs, input_augs, label_augs
Example 4: fbresnet_augmentor

# Required module import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import CenterCrop [as alias]
def fbresnet_augmentor(is_training, option):
    if is_training:
        augmentors = [
            imgaug.ToFloat32(),
            imgaug.Resize((option.final_size + 32,
                           option.final_size + 32)),
            imgaug.RandomCrop((option.final_size,
                               option.final_size))]
        flip = [imgaug.Flip(horiz=True), imgaug.ToUint8()]
        augmentors.extend(flip)
    else:
        augmentors = [
            imgaug.ToFloat32(),
            imgaug.Resize((option.final_size + 32, option.final_size + 32)),
            imgaug.CenterCrop((option.final_size, option.final_size)),
            imgaug.ToUint8()]
    return augmentors
Example 5: fbresnet_augmentor

# Required module import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import CenterCrop [as alias]
def fbresnet_augmentor(isTrain):
    """
    Augmentor used in fb.resnet.torch, for BGR images in range [0,255].
    """
    if isTrain:
        augmentors = [
            GoogleNetResize(),
            # It's OK to remove the following augs if your CPU is not fast enough.
            # Removing brightness/contrast/saturation does not have a significant effect on accuracy.
            # Removing lighting leads to a tiny drop in accuracy.
            imgaug.RandomOrderAug(
                [imgaug.BrightnessScale((0.6, 1.4), clip=False),
                 imgaug.Contrast((0.6, 1.4), clip=False),
                 imgaug.Saturation(0.4, rgb=False),
                 # rgb-bgr conversion for the constants copied from fb.resnet.torch
                 imgaug.Lighting(0.1,
                                 eigval=np.asarray(
                                     [0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                                 eigvec=np.array(
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]],
                                     dtype='float32')[::-1, ::-1]
                                 )]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    return augmentors
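The [::-1] reversals in the Lighting call above flip the fb.resnet.torch PCA constants from RGB to BGR channel order; a quick numpy-only sanity check (nothing here is tensorpack-specific):

import numpy as np

eigval_rgb = np.asarray([0.2175, 0.0188, 0.0045])
print(eigval_rgb[::-1])   # [0.0045 0.0188 0.2175], i.e. BGR order
# For eigvec, [::-1, ::-1] reverses both the channel rows (RGB -> BGR)
# and the eigenvector columns, keeping them aligned with the reversed
# eigenvalues.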
Example 6: fbresnet_augmentor

# Required module import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import CenterCrop [as alias]
def fbresnet_augmentor(isTrain):
    """
    Augmentor used in fb.resnet.torch, for BGR images in range [0,255].
    """
    interpolation = cv2.INTER_LINEAR
    if isTrain:
        """
        Sec 5.1:
        We use scale and aspect ratio data augmentation [35] as
        in [12]. The network input image is a 224×224 pixel random
        crop from an augmented image or its horizontal flip.
        """
        augmentors = [
            imgaug.GoogleNetRandomCropAndResize(interp=interpolation),
            # It's OK to remove the following augs if your CPU is not fast enough.
            # Removing brightness/contrast/saturation does not have a significant effect on accuracy.
            # Removing lighting leads to a tiny drop in accuracy.
            imgaug.RandomOrderAug(
                [imgaug.BrightnessScale((0.6, 1.4), clip=False),
                 imgaug.Contrast((0.6, 1.4), rgb=False, clip=False),
                 imgaug.Saturation(0.4, rgb=False),
                 # rgb-bgr conversion for the constants copied from fb.resnet.torch
                 imgaug.Lighting(0.1,
                                 eigval=np.asarray(
                                     [0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                                 eigvec=np.array(
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]],
                                     dtype='float32')[::-1, ::-1]
                                 )]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, interp=interpolation),
            imgaug.CenterCrop((224, 224)),
        ]
    return augmentors
Example 7: get_ilsvrc_data_alexnet

# Required module import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import CenterCrop [as alias]
def get_ilsvrc_data_alexnet(is_train, image_size, batchsize, directory):
    if is_train:
        if not directory.startswith('/'):
            ds = ILSVRCTenthTrain(directory)
        else:
            ds = ILSVRC12(directory, 'train')
        augs = [
            imgaug.RandomApplyAug(imgaug.RandomResize((0.9, 1.2), (0.9, 1.2)), 0.7),
            imgaug.RandomApplyAug(imgaug.RotationAndCropValid(15), 0.7),
            imgaug.RandomApplyAug(imgaug.RandomChooseAug([
                imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01),
                imgaug.RandomOrderAug([
                    imgaug.BrightnessScale((0.8, 1.2), clip=False),
                    imgaug.Contrast((0.8, 1.2), clip=False),
                    # imgaug.Saturation(0.4, rgb=True),
                ]),
            ]), 0.7),
            imgaug.Flip(horiz=True),
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.RandomCrop((224, 224)),
        ]
        ds = AugmentImageComponent(ds, augs)
        ds = PrefetchData(ds, 1000, multiprocessing.cpu_count())
        ds = BatchData(ds, batchsize)
        ds = PrefetchData(ds, 10, 4)
    else:
        if not directory.startswith('/'):
            ds = ILSVRCTenthValid(directory)
        else:
            ds = ILSVRC12(directory, 'val')
        ds = AugmentImageComponent(ds, [
            imgaug.ResizeShortestEdge(224, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ])
        ds = PrefetchData(ds, 100, multiprocessing.cpu_count())
        ds = BatchData(ds, batchsize)
    return ds
Example 8: fbresnet_augmentor

# Required module import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import CenterCrop [as alias]
def fbresnet_augmentor(isTrain):
    """
    Augmentor used in fb.resnet.torch, for BGR images in range [0,255].
    """
    if isTrain:
        augmentors = [
            GoogleNetResize(),
            imgaug.RandomOrderAug(
                [imgaug.BrightnessScale((0.6, 1.4), clip=False),
                 imgaug.Contrast((0.6, 1.4), clip=False),
                 imgaug.Saturation(0.4, rgb=False),
                 # rgb-bgr conversion for the constants copied from fb.resnet.torch
                 imgaug.Lighting(0.1,
                                 eigval=np.asarray(
                                     [0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                                 eigvec=np.array(
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]],
                                     dtype='float32')[::-1, ::-1]
                                 )]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    return augmentors
Example 9: fbresnet_augmentor

# Required module import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import CenterCrop [as alias]
def fbresnet_augmentor(isTrain):
    """
    Augmentor used in fb.resnet.torch, for BGR images in range [0,255].
    """
    if isTrain:
        augmentors = [
            GoogleNetResize(),
            # It's OK to remove the following augs if your CPU is not fast enough.
            # Removing brightness/contrast/saturation does not have a significant effect on accuracy.
            # Removing lighting leads to a tiny drop in accuracy.
            imgaug.RandomOrderAug(
                [imgaug.BrightnessScale((0.6, 1.4), clip=False),
                 imgaug.Contrast((0.6, 1.4), clip=False),
                 imgaug.Saturation(0.4, rgb=False),
                 # rgb-bgr conversion for the constants copied from fb.resnet.torch
                 imgaug.Lighting(0.1,
                                 eigval=np.asarray(
                                     [0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                                 eigvec=np.array(
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]],
                                     dtype='float32')[::-1, ::-1]
                                 )]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((DEFAULT_IMAGE_SHAPE, DEFAULT_IMAGE_SHAPE)),
        ]
    return augmentors
Example 10: fbresnet_augmentor

# Required module import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import CenterCrop [as alias]
def fbresnet_augmentor(isTrain):
    """
    Augmentor used in fb.resnet.torch, for BGR images in range [0,255].
    """
    if isTrain:
        augmentors = [
            GoogleNetResize(),
            imgaug.RandomOrderAug(
                [imgaug.BrightnessScale((0.6, 1.4), clip=False),
                 imgaug.Contrast((0.6, 1.4), clip=False),
                 imgaug.Saturation(0.4, rgb=False),
                 # rgb-bgr conversion for the constants copied from fb.resnet.torch
                 imgaug.Lighting(0.1,
                                 eigval=np.asarray(
                                     [0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                                 eigvec=np.array(
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]],
                                     dtype='float32')[::-1, ::-1]
                                 )]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    return augmentors
Example 11: get_augmentations

# Required module import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import CenterCrop [as alias]
def get_augmentations(is_train):
    if is_train:
        augmentors = [
            GoogleNetResize(crop_area_fraction=0.76, target_shape=224),  # TODO: 76% or 49%?
            imgaug.RandomOrderAug(
                [imgaug.BrightnessScale((0.6, 1.4), clip=True),
                 imgaug.Contrast((0.6, 1.4), clip=True),
                 imgaug.Saturation(0.4, rgb=False),
                 # rgb-bgr conversion for the constants copied from fb.resnet.torch
                 imgaug.Lighting(0.1,
                                 eigval=np.asarray(
                                     [0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                                 eigvec=np.array(
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]],
                                     dtype='float32')[::-1, ::-1]
                                 )]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    return augmentors
Example 12: get_train_augmentors

# Required module import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import CenterCrop [as alias]
def get_train_augmentors(self, input_shape, output_shape, view=False):
    print(input_shape, output_shape)
    shape_augs = [
        imgaug.Affine(
            shear=5,  # in degrees
            scale=(0.8, 1.2),
            rotate_max_deg=179,
            translate_frac=(0.01, 0.01),
            interp=cv2.INTER_NEAREST,
            border=cv2.BORDER_CONSTANT),
        imgaug.Flip(vert=True),
        imgaug.Flip(horiz=True),
        imgaug.CenterCrop(input_shape),
    ]
    input_augs = [
        imgaug.RandomApplyAug(
            imgaug.RandomChooseAug(
                [
                    GaussianBlur(),
                    MedianBlur(),
                    imgaug.GaussianNoise(),
                ]
            ), 0.5),
        # standard color augmentation
        imgaug.RandomOrderAug(
            [imgaug.Hue((-8, 8), rgb=True),
             imgaug.Saturation(0.2, rgb=True),
             imgaug.Brightness(26, clip=True),
             imgaug.Contrast((0.75, 1.25), clip=True),
             ]),
        imgaug.ToUint8(),
    ]
    label_augs = []
    if self.model_type == 'unet' or self.model_type == 'micronet':
        label_augs = [GenInstanceUnetMap(crop_shape=output_shape)]
    if self.model_type == 'dcan':
        label_augs = [GenInstanceContourMap(crop_shape=output_shape)]
    if self.model_type == 'dist':
        label_augs = [GenInstanceDistance(crop_shape=output_shape, inst_norm=False)]
    if self.model_type == 'np_hv':
        label_augs = [GenInstanceHV(crop_shape=output_shape)]
    if self.model_type == 'np_dist':
        label_augs = [GenInstanceDistance(crop_shape=output_shape, inst_norm=True)]
    if not self.type_classification:
        label_augs.append(BinarizeLabel())
    if not view:
        label_augs.append(imgaug.CenterCrop(output_shape))
    return shape_augs, input_augs, label_augs
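A hedged usage sketch for this method and its validation counterpart from Example 3; the trainer object and the 270/80 crop shapes are assumptions for illustration only:

# Hypothetical caller; shape values are illustrative.
shape_augs, input_augs, label_augs = trainer.get_train_augmentors(
    input_shape=(270, 270), output_shape=(80, 80))
valid_shape_augs, _, valid_label_augs = trainer.get_valid_augmentors(
    input_shape=(270, 270), output_shape=(80, 80))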