This article collects typical usage examples of tensorpack.dataflow.dataset.Cifar10 in Python. If you are wondering what dataset.Cifar10 does or how to use it, the curated code examples below may help; you can also explore its containing module, tensorpack.dataflow.dataset, for more.
Seven code examples of dataset.Cifar10 are shown below, ordered by popularity.
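Before the examples, here is a minimal sketch (not taken from the examples below) of what the raw dataflow yields: dataset.Cifar10 downloads CIFAR-10 on first use and produces [image, label] datapoints, where image is a 32x32x3 (HWC) array and label an integer class id. On older tensorpack versions, iterate via ds.get_data() instead of iterating the object directly.

from tensorpack.dataflow import dataset

ds = dataset.Cifar10('train')   # or 'test'; the data is downloaded on first use
ds.reset_state()                # initialize the dataflow before manual iteration
img, label = next(iter(ds))
print(img.shape, label)         # (32, 32, 3) and an integer in [0, 10)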
Example 1: get_data

# Required import: from tensorpack.dataflow import dataset
# Or: from tensorpack.dataflow.dataset import Cifar10
def get_data(train_or_test):
    isTrain = train_or_test == 'train'
    ds = dataset.Cifar10(train_or_test)
    pp_mean = ds.get_per_pixel_mean()   # per-pixel mean image, subtracted below
    if isTrain:
        augmentors = [
            imgaug.CenterPaste((40, 40)),   # pad to 40x40 ...
            imgaug.RandomCrop((32, 32)),    # ... then take a random 32x32 crop
            imgaug.Flip(horiz=True),
            imgaug.MapImage(lambda x: x - pp_mean),
        ]
    else:
        augmentors = [
            imgaug.MapImage(lambda x: x - pp_mean)
        ]
    ds = AugmentImageComponent(ds, augmentors)
    ds = BatchData(ds, BATCH_SIZE, remainder=not isTrain)
    if isTrain:
        ds = PrefetchData(ds, 3, 2)   # prefetch with 2 worker processes
    return ds
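A hypothetical usage of the function above, just to sanity-check shapes (BATCH_SIZE is a module-level constant the example assumes; 128 is an arbitrary choice here):

BATCH_SIZE = 128                      # assumed module-level constant
dataset_train = get_data('train')
dataset_train.reset_state()
images, labels = next(iter(dataset_train))
print(images.shape, labels.shape)     # e.g. (128, 32, 32, 3) (128,)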
Example 2: get_data

# Required import: from tensorpack.dataflow import dataset
# Or: from tensorpack.dataflow.dataset import Cifar10
def get_data(train_or_test):
    isTrain = train_or_test == 'train'
    ds = dataset.Cifar10(train_or_test)
    pp_mean = ds.get_per_pixel_mean()
    if isTrain:
        augmentors = [
            imgaug.CenterPaste((40, 40)),
            imgaug.RandomCrop((32, 32)),
            imgaug.Flip(horiz=True),
            imgaug.MapImage(lambda x: x - pp_mean),
        ]
    else:
        augmentors = [
            imgaug.MapImage(lambda x: x - pp_mean)
        ]
    ds = AugmentImageComponent(ds, augmentors)
    ds = BatchData(ds, BATCH_SIZE, remainder=not isTrain)
    if isTrain:
        ds = MultiProcessRunner(ds, 3, 2)   # newer name for PrefetchData; 2 worker processes
    return ds
Example 3: get_data

# Required import: from tensorpack.dataflow import dataset
# Or: from tensorpack.dataflow.dataset import Cifar10
def get_data(train_or_test):
    isTrain = train_or_test == 'train'
    ds = dataset.Cifar10(train_or_test)
    pp_mean = ds.get_per_pixel_mean(('train',))   # mean computed from the training split only
    if isTrain:
        augmentors = [
            imgaug.CenterPaste((40, 40)),
            imgaug.RandomCrop((32, 32)),
            imgaug.Flip(horiz=True),
            imgaug.MapImage(lambda x: x - pp_mean),
        ]
    else:
        augmentors = [
            imgaug.MapImage(lambda x: x - pp_mean)
        ]
    ds = AugmentImageComponent(ds, augmentors)
    ds = BatchData(ds, BATCH_SIZE, remainder=not isTrain)
    if isTrain:
        ds = MultiProcessRunner(ds, 3, 2)
    return ds
Example 4: get_data

# Required import: from tensorpack.dataflow import dataset
# Or: from tensorpack.dataflow.dataset import Cifar10
def get_data(train_or_test, cifar_classnum):
    isTrain = train_or_test == 'train'
    if cifar_classnum == 10:
        ds = dataset.Cifar10(train_or_test)
    else:
        ds = dataset.Cifar100(train_or_test)
    if isTrain:
        augmentors = [
            imgaug.RandomCrop((30, 30)),
            imgaug.Flip(horiz=True),
            imgaug.Brightness(63),
            imgaug.Contrast((0.2, 1.8)),
            imgaug.MeanVarianceNormalize(all_channel=True)
        ]
    else:
        augmentors = [
            imgaug.CenterCrop((30, 30)),
            imgaug.MeanVarianceNormalize(all_channel=True)
        ]
    ds = AugmentImageComponent(ds, augmentors)
    ds = BatchData(ds, 128, remainder=not isTrain)
    if isTrain:
        ds = MultiProcessRunnerZMQ(ds, 5)   # run the flow in 5 worker processes over ZMQ
    return ds
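Hypothetical calls for the function above; the second argument selects the dataset:

train_ds = get_data('train', 10)    # CIFAR-10, random 30x30 crops plus photometric augmentation
test_ds = get_data('test', 100)     # CIFAR-100, deterministic center crops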
Example 5: get_data

# Required import: from tensorpack.dataflow import dataset
# Or: from tensorpack.dataflow.dataset import Cifar10
def get_data(train_or_test):
    isTrain = train_or_test == 'train'
    ds = dataset.Cifar10(train_or_test)
    # per-channel CIFAR-10 mean and inverse std, scaled to the [0, 255] pixel range
    cifar10_mean = np.asarray([0.4914, 0.4822, 0.4465], dtype="float32") * 255.
    cifar10_invstd = 1.0 / (np.asarray([0.2471, 0.2435, 0.2616], dtype="float32") * 255)
    if isTrain:
        augmentors = imgaug.AugmentorList([
            imgaug.RandomCrop((32, 32)),
            imgaug.Flip(horiz=True),
            imgaug.RandomCutout(8, 8),
        ])

    def mapf(dp):
        img, label = dp
        img = (img.astype("float32") - cifar10_mean) * cifar10_invstd
        if isTrain:
            # reflect-pad by 4 pixels, then crop/flip/cutout
            img = np.pad(img, [(4, 4), (4, 4), (0, 0)], mode='reflect')
            img = augmentors.augment(img)
            # smoothed one-hot target: 0.8 on the true class, 0.2 shared by the others
            onehot = np.zeros((10,), dtype=np.float32) + 0.2 / 9
            onehot[label] = 0.8
        else:
            onehot = np.zeros((10,), dtype=np.float32)
            onehot[label] = 1.
        if DATA_FORMAT == "NCHW":
            img = img.transpose(2, 0, 1)
        return img, onehot

    if not isTrain:
        ds = MapData(ds, mapf)
        ds = BatchData(ds, BATCH, remainder=False)
        return ds
    ds = MultiProcessMapAndBatchDataZMQ(ds, 8, mapf, BATCH, buffer_size=20000)   # parallel map + batch in 8 processes
    ds = RepeatedData(ds, -1)   # repeat the training flow indefinitely
    return ds
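The training branch above builds a smoothed target: the true class gets 0.8 and the remaining 0.2 of probability mass is split evenly over the other 9 classes (label smoothing with eps = 0.2). A standalone numpy sketch of just that step, for illustration:

import numpy as np

def smoothed_onehot(label, num_classes=10, eps=0.2):
    # true class gets 1 - eps; the other classes share eps equally
    onehot = np.full((num_classes,), eps / (num_classes - 1), dtype=np.float32)
    onehot[label] = 1.0 - eps
    return onehot

print(smoothed_onehot(3))        # 0.8 at index 3, ~0.0222 everywhere else
print(smoothed_onehot(3).sum())  # ~1.0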
Example 6: get_cifar_augmented_data

# Required import: from tensorpack.dataflow import dataset
# Or: from tensorpack.dataflow.dataset import Cifar10
def get_cifar_augmented_data(
        subset, options, do_multiprocess=True, do_validation=False, shuffle=None):
    isTrain = subset == 'train' and do_multiprocess
    shuffle = shuffle if shuffle is not None else isTrain
    if options.num_classes == 10 and options.ds_name == 'cifar10':
        ds = dataset.Cifar10(subset, shuffle=shuffle, do_validation=do_validation)
        cutout_length = 16
        n_holes = 1
    elif options.num_classes == 100 and options.ds_name == 'cifar100':
        ds = dataset.Cifar100(subset, shuffle=shuffle, do_validation=do_validation)
        cutout_length = 8
        n_holes = 1
    else:
        raise ValueError('Number of classes must be set to 10 (default) or 100 for CIFAR')
    logger.info('{} set has n_samples: {}'.format(subset, len(ds.data)))
    pp_mean = ds.get_per_pixel_mean()
    if isTrain:
        logger.info('Will do cut-out with length={} n_holes={}'.format(
            cutout_length, n_holes))
        augmentors = [
            imgaug.CenterPaste((40, 40)),
            imgaug.RandomCrop((32, 32)),
            imgaug.Flip(horiz=True),
            imgaug.MapImage(lambda x: (x - pp_mean) / 128.0),
            Cutout(length=cutout_length, n_holes=n_holes),
        ]
    else:
        augmentors = [
            imgaug.MapImage(lambda x: (x - pp_mean) / 128.0)
        ]
    ds = AugmentImageComponent(ds, augmentors)
    ds = BatchData(ds, options.batch_size // options.nr_gpu, remainder=not isTrain)
    if do_multiprocess:
        ds = PrefetchData(ds, 3, 2)
    return ds
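Cutout here is a project-specific augmentor whose definition is not shown in this snippet. As a rough illustration of the operation it performs (zeroing out random square patches), a plain-numpy sketch, not the author's class, could look like this:

import numpy as np

def cutout(img, length, n_holes=1, rng=np.random):
    # zero out n_holes square patches of side `length`, clipped at the image border
    img = img.copy()
    h, w = img.shape[:2]
    for _ in range(n_holes):
        cy, cx = rng.randint(h), rng.randint(w)
        y0, y1 = max(0, cy - length // 2), min(h, cy + length // 2)
        x0, x1 = max(0, cx - length // 2), min(w, cx + length // 2)
        img[y0:y1, x0:x1] = 0
    return img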
Example 7: get_data

# Required import: from tensorpack.dataflow import dataset
# Or: from tensorpack.dataflow.dataset import Cifar10
def get_data(train_or_test, isMixup, alpha):
    isTrain = train_or_test == 'train'
    ds = dataset.Cifar10(train_or_test)
    if isTrain:
        augmentors = [
            imgaug.CenterPaste((40, 40)),
            imgaug.RandomCrop((32, 32)),
            imgaug.Flip(horiz=True),
        ]
        ds = AugmentImageComponent(ds, augmentors)   # augmentation is applied only for training
    batch = BATCH_SIZE
    ds = BatchData(ds, batch, remainder=not isTrain)

    def f(dp):
        images, labels = dp
        one_hot_labels = np.eye(CLASS_NUM)[labels]  # one-hot encoding
        if not isTrain or not isMixup:
            return [images, one_hot_labels]

        # mixup implementation:
        # Note that for larger images, it's more efficient to do mixup on GPUs (i.e. in the graph)
        weight = np.random.beta(alpha, alpha, BATCH_SIZE)
        x_weight = weight.reshape(BATCH_SIZE, 1, 1, 1)
        y_weight = weight.reshape(BATCH_SIZE, 1)
        index = np.random.permutation(BATCH_SIZE)

        x1, x2 = images, images[index]
        x = x1 * x_weight + x2 * (1 - x_weight)
        y1, y2 = one_hot_labels, one_hot_labels[index]
        y = y1 * y_weight + y2 * (1 - y_weight)
        return [x, y]

    ds = MapData(ds, f)
    return ds
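For a quick sanity check of the mixup arithmetic, here is a minimal numpy sketch on dummy data (a reversed batch stands in for the random permutation; the BATCH_SIZE, CLASS_NUM and alpha values are arbitrary):

import numpy as np

BATCH_SIZE, CLASS_NUM, alpha = 4, 10, 0.2
images = np.random.rand(BATCH_SIZE, 32, 32, 3).astype(np.float32)
labels = np.random.randint(CLASS_NUM, size=BATCH_SIZE)
one_hot = np.eye(CLASS_NUM)[labels]

weight = np.random.beta(alpha, alpha, BATCH_SIZE)
x = images * weight.reshape(-1, 1, 1, 1) + images[::-1] * (1 - weight.reshape(-1, 1, 1, 1))
y = one_hot * weight.reshape(-1, 1) + one_hot[::-1] * (1 - weight.reshape(-1, 1))
print(x.shape, y.shape, y.sum(axis=1))   # mixed targets still sum to 1 per sample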