This article collects typical usage examples of the Python method tensorpack.imgaug.AugmentorList. If you are wondering what imgaug.AugmentorList does, how to call it, or what real usage looks like, the curated code samples below may help. You can also read further about the module it belongs to, tensorpack.imgaug.
The following presents 5 code examples of the imgaug.AugmentorList method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
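As a quick orientation before the full examples: AugmentorList wraps a list of augmentors into a single augmentor whose augment() applies them in order. Below is a minimal sketch, assuming a local image file and two stock tensorpack augmentors chosen purely for illustration:

import cv2
from tensorpack import imgaug

# Compose augmentors; AugmentorList applies them sequentially.
aug = imgaug.AugmentorList([
    imgaug.ResizeShortestEdge(256),   # resize so the shorter edge is 256
    imgaug.CenterCrop((224, 224)),    # then take a central 224x224 crop
])
im = cv2.imread('example.jpg', cv2.IMREAD_COLOR)  # hypothetical input file
im = aug.augment(im)  # returns the augmented image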
Example 1: get_imagenet_dataflow
# Required import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import AugmentorList [as alias]
# The snippets below additionally assume cv2, multiprocessing, the tensorpack.dataflow
# names (dataset, AugmentImageComponent, PrefetchDataZMQ, PrefetchData, BatchData,
# MultiThreadMapData, DataFromList) and tensorpack.utils.logger are already imported.
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors, meta_dir=None, parallel=None):
    """
    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    assert datadir is not None
    assert isinstance(augmentors, list)
    isTrain = name == 'train'
    if parallel is None:
        # use half the cores, assuming hyperthreading, capped at 40
        parallel = min(40, multiprocessing.cpu_count() // 2)
    if isTrain:
        # training: decode + augment inside parallel processes
        ds = dataset.ILSVRC12(datadir, name, meta_dir=meta_dir, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = PrefetchDataZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        # validation/test: read file names, then decode + augment in threads
        ds = dataset.ILSVRC12Files(datadir, name, meta_dir=meta_dir, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchDataZMQ(ds, 1)
    return ds
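For context, a hypothetical call site might look like the sketch below; the dataset path and the fbresnet_augmentor helper (assumed to return a list of imgaug augmentors) are assumptions, not part of the snippet above:

train_df = get_imagenet_dataflow('/data/imagenet', 'train', 64,
                                 fbresnet_augmentor(True))
val_df = get_imagenet_dataflow('/data/imagenet', 'val', 64,
                               fbresnet_augmentor(False))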
Example 2: get_val_dataflow
# Required import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import AugmentorList [as alias]
def get_val_dataflow(
        datadir, batch_size,
        augmentors=None, parallel=None,
        num_splits=None, split_index=None):
    if augmentors is None:
        augmentors = fbresnet_augmentor(False)
    assert datadir is not None
    assert isinstance(augmentors, list)
    if parallel is None:
        parallel = min(40, mp.cpu_count())  # mp: the multiprocessing module
    if num_splits is None:
        ds = dataset.ILSVRC12Files(datadir, 'val', shuffle=False)
    else:
        # shard validation data
        assert split_index < num_splits
        files = dataset.ILSVRC12Files(datadir, 'val', shuffle=False)
        files.reset_state()
        files = list(files.get_data())
        logger.info("Number of validation data = {}".format(len(files)))
        split_size = len(files) // num_splits
        start, end = split_size * split_index, split_size * (split_index + 1)
        end = min(end, len(files))
        logger.info("Local validation split = {} - {}".format(start, end))
        files = files[start:end]
        ds = DataFromList(files, shuffle=False)
    aug = imgaug.AugmentorList(augmentors)

    def mapf(dp):
        fname, cls = dp
        im = cv2.imread(fname, cv2.IMREAD_COLOR)
        im = aug.augment(im)
        return im, cls
    ds = MultiThreadMapData(ds, parallel, mapf,
                            buffer_size=min(2000, ds.size()), strict=True)
    ds = BatchData(ds, batch_size, remainder=True)
    # do not fork() under MPI
    return ds
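The sharding arguments exist so that, when evaluating under MPI with one process per GPU, each rank can process its own slice of the validation set. A hypothetical call for rank r out of w processes (r and w are placeholders):

val_df = get_val_dataflow('/data/imagenet', 64, num_splits=w, split_index=r)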
Example 3: get_imagenet_dataflow
# Required import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import AugmentorList [as alias]
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors, parallel=None):
    """
    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    assert datadir is not None
    assert isinstance(augmentors, list)
    isTrain = name == 'train'
    meta_dir = os.path.join(datadir, "meta")
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count())
    if isTrain:
        # Imagenet5k / Imagenet5kFiles are presumably project-specific dataflows
        # with an ILSVRC12-like interface, defined elsewhere in the source repo.
        ds = Imagenet5k(datadir, name, meta_dir=meta_dir, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = PrefetchDataZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = Imagenet5kFiles(datadir, name, meta_dir=meta_dir, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchDataZMQ(ds, 1)
    return ds
Example 4: get_imagenet_dataflow
# Required import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import AugmentorList [as alias]
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors, parallel=None):
    """
    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    assert datadir is not None
    assert isinstance(augmentors, list)
    isTrain = name == 'train'
    if parallel is None:
        parallel = min(30, multiprocessing.cpu_count())
    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        ds = PrefetchDataZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchDataZMQ(ds, 1)
    return ds
Example 5: get_imagenet_dataflow
# Required import: from tensorpack import imgaug [as alias]
# Or: from tensorpack.imgaug import AugmentorList [as alias]
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors, parallel=None):
    """
    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    assert datadir is not None
    assert isinstance(augmentors, list)
    isTrain = name == 'train'
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 6)
    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warning("DataFlow may become the bottleneck when too few processes are used.")
        ds = PrefetchData(ds, 1000, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            # fall back to a black image if the file cannot be read
            im = np.zeros((256, 256, 3), dtype=np.uint8)
            for _ in range(30):
                try:
                    im = cv2.imread(fname, cv2.IMREAD_COLOR)
                    im = aug.augment(im)
                    break
                except Exception as e:
                    logger.warning("{} file={}".format(e, fname))
                    time.sleep(1)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchData(ds, 100, 1)
    return ds
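Note the defensive mapf in this last example: if cv2.imread or the augmentation raises, it retries up to 30 times with a one-second pause, and otherwise returns the pre-allocated black 256x256 image, so a single corrupt file cannot crash a long-running job.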