This article collects typical usage examples of the Python method tensorpack.dataflow.imgaug.ResizeShortestEdge. If you are wondering how imgaug.ResizeShortestEdge is used in practice, the curated examples below should help. You can also explore further usage examples from the enclosing module, tensorpack.dataflow.imgaug.
The following shows 6 code examples of imgaug.ResizeShortestEdge, sorted by popularity by default.
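Before the collected examples, here is a minimal standalone sketch of the augmentor applied to a single image. It assumes only OpenCV and tensorpack are installed; the file name 'input.jpg' is a placeholder, and the 256/224 sizes are the conventional ImageNet choices used by the examples below.

import cv2
from tensorpack.dataflow import imgaug

img = cv2.imread('input.jpg')          # any BGR image; the path is a placeholder

# Rescale so that the shortest edge becomes 256 pixels (aspect ratio preserved),
# then take a 224x224 center crop -- the usual ImageNet evaluation preprocessing.
resize = imgaug.ResizeShortestEdge(256, interp=cv2.INTER_CUBIC)
crop = imgaug.CenterCrop((224, 224))

out = crop.augment(resize.augment(img))
print(out.shape)    # (224, 224, 3)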
Example 1: get_data
# Required import: from tensorpack.dataflow import imgaug
# Or: from tensorpack.dataflow.imgaug import ResizeShortestEdge
def get_data(name, batch):
    isTrain = name == 'train'
    image_shape = 224

    if isTrain:
        augmentors = [
            # use lighter augs if model is too small
            GoogleNetResize(crop_area_fraction=0.49 if args.width_ratio < 1 else 0.08,
                            target_shape=image_shape),
            imgaug.RandomOrderAug(
                [imgaug.BrightnessScale((0.6, 1.4), clip=False),
                 imgaug.Contrast((0.6, 1.4), clip=False),
                 imgaug.Saturation(0.4, rgb=False),
                 ]),
            imgaug.Flip(horiz=True),
        ]
    else:
        # Evaluation: resize the shortest edge, then center-crop to image_shape.
        augmentors = [
            imgaug.ResizeShortestEdge(int(image_shape * 256 / 224), cv2.INTER_CUBIC),
            imgaug.CenterCrop((image_shape, image_shape)),
        ]
    return get_imagenet_dataflow(args.data_dir, name, batch, augmentors,
                                 meta_dir=args.meta_dir)
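Note on the eval branch above: int(image_shape * 256 / 224) simply preserves the standard 224:256 crop-to-resize ratio, so with the default image_shape = 224 it evaluates to exactly 256 and the pipeline reduces to the usual resize-shortest-edge-to-256 followed by a 224x224 center crop.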
Example 2: _augment
# Required import: from tensorpack.dataflow import imgaug
# Or: from tensorpack.dataflow.imgaug import ResizeShortestEdge
def _augment(self, img, _):
    h, w = img.shape[:2]
    area = h * w
    for _ in range(10):
        # Sample a crop covering crop_area_fraction..100% of the image area
        # at a random aspect ratio.
        targetArea = self.rng.uniform(self.crop_area_fraction, 1.0) * area
        aspectR = self.rng.uniform(self.aspect_ratio_low, self.aspect_ratio_high)
        ww = int(np.sqrt(targetArea * aspectR) + 0.5)
        hh = int(np.sqrt(targetArea / aspectR) + 0.5)
        if self.rng.uniform() < 0.5:
            ww, hh = hh, ww
        if hh <= h and ww <= w:
            x1 = 0 if w == ww else self.rng.randint(0, w - ww)
            y1 = 0 if h == hh else self.rng.randint(0, h - hh)
            out = img[y1:y1 + hh, x1:x1 + ww]
            out = cv2.resize(out, (self.target_shape, self.target_shape),
                             interpolation=cv2.INTER_CUBIC)
            return out
    # Fallback after 10 failed attempts: resize the shortest edge and center-crop.
    out = imgaug.ResizeShortestEdge(self.target_shape, interp=cv2.INTER_CUBIC).augment(img)
    out = imgaug.CenterCrop(self.target_shape).augment(out)
    return out
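For context, here is a minimal sketch of the augmentor class a _augment method like this would belong to -- the GoogleNetResize helper referenced in Example 1 and Example 5. The constructor defaults and the self._init(locals()) idiom follow the tensorpack ImageNet example utilities and the older ImageAugmentor interface; treat them as assumptions that may differ in your tensorpack version.

from tensorpack.dataflow import imgaug

class GoogleNetResize(imgaug.ImageAugmentor):
    # Inception-style crop-and-resize: sample a crop covering between
    # crop_area_fraction and 100% of the image at a random aspect ratio,
    # resize it to target_shape x target_shape, and fall back to
    # ResizeShortestEdge + CenterCrop when 10 attempts fail (see _augment above).
    def __init__(self, crop_area_fraction=0.08,
                 aspect_ratio_low=0.75, aspect_ratio_high=1.333,
                 target_shape=224):
        self._init(locals())  # assumed tensorpack helper: stores the args as attributes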
Example 3: get_data
# Required import: from tensorpack.dataflow import imgaug
# Or: from tensorpack.dataflow.imgaug import ResizeShortestEdge
def get_data(name, batch):
    isTrain = name == 'train'
    if isTrain:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.RandomCrop(224),
            # rgb-bgr conversion for the constants copied from fb.resnet.torch
            imgaug.Lighting(0.1,
                            eigval=np.asarray(
                                [0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                            eigvec=np.array(
                                [[-0.5675, 0.7192, 0.4009],
                                 [-0.5808, -0.0045, -0.8140],
                                 [-0.5836, -0.6948, 0.4203]],
                                dtype='float32')[::-1, ::-1]),
            imgaug.Flip(horiz=True)]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224))]
    return get_imagenet_dataflow(args.data, name, batch, augmentors)
Example 4: inference_augmentor
# Required import: from tensorpack.dataflow import imgaug
# Or: from tensorpack.dataflow.imgaug import ResizeShortestEdge
def inference_augmentor():
    # Standard ImageNet eval preprocessing: shortest edge to 256, center-crop to 224.
    return [
        imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
        imgaug.CenterCrop((224, 224))
    ]
Example 5: get_data
# Required import: from tensorpack.dataflow import imgaug
# Or: from tensorpack.dataflow.imgaug import ResizeShortestEdge
def get_data(is_train,
             batch_size,
             data_dir_path,
             input_image_size=224,
             resize_inv_factor=0.875):
    assert (resize_inv_factor > 0.0)
    resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor))

    if is_train:
        augmentors = [
            GoogleNetResize(
                crop_area_fraction=0.08,
                target_shape=input_image_size),
            imgaug.RandomOrderAug([
                imgaug.BrightnessScale((0.6, 1.4), clip=False),
                imgaug.Contrast((0.6, 1.4), clip=False),
                imgaug.Saturation(0.4, rgb=False),
                # rgb-bgr conversion for the constants copied from fb.resnet.torch
                imgaug.Lighting(
                    0.1,
                    eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                    eigvec=np.array([
                        [-0.5675, 0.7192, 0.4009],
                        [-0.5808, -0.0045, -0.8140],
                        [-0.5836, -0.6948, 0.4203]], dtype="float32")[::-1, ::-1])]),
            imgaug.Flip(horiz=True)]
    else:
        augmentors = [
            # imgaug.ResizeShortestEdge(resize_value, cv2.INTER_CUBIC),
            imgaug.ResizeShortestEdge(resize_value, cv2.INTER_LINEAR),
            imgaug.CenterCrop((input_image_size, input_image_size))
        ]
    return get_imagenet_dataflow(
        datadir=data_dir_path,
        is_train=is_train,
        batch_size=batch_size,
        augmentors=augmentors)
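With the defaults above (input_image_size=224, resize_inv_factor=0.875), resize_value = ceil(224 / 0.875) = 256, so the evaluation branch again resizes the shortest edge to 256 before the 224x224 center crop; lowering resize_inv_factor enlarges the resized image relative to the final crop.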
Example 6: get_data
# Required import: from tensorpack.dataflow import imgaug
# Or: from tensorpack.dataflow.imgaug import ResizeShortestEdge
def get_data(name, batch):
    isTrain = name == 'train'
    if isTrain:
        augmentors = [
            # use lighter augs if model is too small
            imgaug.GoogleNetRandomCropAndResize(
                crop_area_fraction=(0.49 if args.ratio < 1 else 0.08, 1.)),
            imgaug.RandomOrderAug(
                [imgaug.BrightnessScale((0.6, 1.4), clip=False),
                 imgaug.Contrast((0.6, 1.4), clip=False),
                 imgaug.Saturation(0.4, rgb=False),
                 # rgb-bgr conversion for the constants copied from fb.resnet.torch
                 imgaug.Lighting(0.1,
                                 eigval=np.asarray(
                                     [0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                                 eigvec=np.array(
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]],
                                     dtype='float32')[::-1, ::-1])]),
            imgaug.Flip(horiz=True),
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    return get_imagenet_dataflow(
        args.data, name, batch, augmentors)