This page collects typical usage examples of gluoncv.data.imagenet in Python. If you are wondering what data.imagenet does, how to call it, or what real code that uses it looks like, the curated examples here may help. You can also explore the other members of the gluoncv.data module.
Below, 10 code examples of data.imagenet are shown, sorted by popularity by default.
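Before walking through the examples, here is a minimal, self-contained sketch of the pattern they all build on: loading the ImageNet validation split through gluoncv.data and iterating over it with a Gluon DataLoader. The data directory and batch size are placeholders, and the dataset must already be prepared locally under that path.

from mxnet import gluon
from mxnet.gluon.data.vision import transforms
from gluoncv.data import imagenet

transform_test = transforms.Compose([
    transforms.Resize(256, keep_ratio=True),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# The dataset yields (image, label) pairs; the directory must contain the extracted 'val' split.
val_dataset = imagenet.classification.ImageNet('~/.mxnet/datasets/imagenet', train=False)
val_loader = gluon.data.DataLoader(val_dataset.transform_first(transform_test),
                                   batch_size=32, shuffle=False, num_workers=4)

for images, labels in val_loader:
    print(images.shape, labels.shape)  # e.g. (32, 3, 224, 224) and (32,)
    break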
Example 1: get_data_loader
# Required module: from gluoncv import data [as alias]
# Or: from gluoncv.data import imagenet [as alias]
def get_data_loader(data_dir, batch_size, num_workers):
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    def batch_fn(batch, ctx):
        # Split a batch evenly across the devices in ctx.
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        return data, label

    if opt.mode == 'symbolic':
        # Synthetic data for the symbolic benchmark path.
        val_data = mx.io.NDArrayIter(
            mx.nd.random.normal(shape=(opt.dataset_size, 3, 224, 224)),
            label=mx.nd.array(range(opt.dataset_size)),
            batch_size=batch_size,
        )
    else:
        transform_test = transforms.Compose([
            transforms.Resize(256, keep_ratio=True),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize
        ])
        val_data = gluon.data.DataLoader(
            imagenet.classification.ImageNet(data_dir, train=False).transform_first(transform_test),
            batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return val_data, batch_fn
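A hedged sketch of how this loader might be consumed for plain Gluon validation. It assumes the non-symbolic branch, a GPU count chosen for illustration, and a pretrained model from the GluonCV model zoo; none of this glue code appears in the original script.

import mxnet as mx
from gluoncv.model_zoo import get_model

num_gpus = 1  # illustration only
context = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()]
val_data, batch_fn = get_data_loader('~/.mxnet/datasets/imagenet', 32, 4)

net = get_model('resnet50_v1', pretrained=True, ctx=context)
acc = mx.metric.Accuracy()
for batch in val_data:
    data, label = batch_fn(batch, context)
    outputs = [net(x) for x in data]
    acc.update(label, outputs)
print(acc.get())

Note that get_data_loader reads a module-level opt, so parse_args (or an equivalent namespace) has to run before it is called.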
Example 2: main
# Required module: from gluoncv import data [as alias]
# Or: from gluoncv.data import imagenet [as alias]
def main():
    data = mx.sym.var('data')
    if opt.dtype == 'float16':
        data = mx.sym.Cast(data=data, dtype=np.float16)
        net.cast(np.float16)
    out = net(data)
    if opt.dtype == 'float16':
        out = mx.sym.Cast(data=out, dtype=np.float32)
    softmax = mx.sym.SoftmaxOutput(out, name='softmax')
    # We need to pass the data_iterator to Module so that when the number of workers
    # changes, the iterator is updated with new batch size and partition size.
    mod = mx.mod.Module(softmax, context=context, data_iterator=data_iterator_fn)
    if opt.use_pretrained:
        # Export the Gluon parameters so the Module starts from the pretrained weights.
        arg_params = {}
        for x in net.collect_params().values():
            x.reset_ctx(mx.cpu())
            arg_params[x.name] = x.data()
    else:
        arg_params = None
    mod.fit(train_data,
            arg_params=arg_params,
            eval_data=val_data,
            num_epoch=opt.num_epochs,
            kvstore=kv,
            batch_end_callback=ElasticSpeedometer(kv, batch_size, max(1, opt.log_interval)),
            epoch_end_callback=mx.callback.do_checkpoint('imagenet-%s' % opt.model, period=save_frequency),
            optimizer=optimizer,
            optimizer_params=optimizer_params,
            initializer=initializer)
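Two things are worth flagging here. First, this example targets MXNet's symbolic Module API, and the data_iterator argument to mx.mod.Module together with the ElasticSpeedometer callback are not part of stock MXNet; they appear to come from an elastic-training extension, so the snippet will not run against vanilla MXNet as-is. Second, the globals it relies on (net, context, kv, optimizer, and so on) are created elsewhere in the original script. A hedged sketch of the typical setup for the first few of them, under the assumption that opt exposes model, use_pretrained and num_gpus:

import mxnet as mx
from gluoncv.model_zoo import get_model

opt = parse_args()
kv = mx.kvstore.create('device')  # single machine; 'dist_sync' and friends for distributed runs
context = [mx.gpu(i) for i in range(opt.num_gpus)] if opt.num_gpus > 0 else [mx.cpu()]
net = get_model(opt.model, pretrained=opt.use_pretrained)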
Example 3: parse_args
# Required module: from gluoncv import data [as alias]
# Or: from gluoncv.data import imagenet [as alias]
def parse_args():
    parser = argparse.ArgumentParser(description='Train a model for image classification.')
    parser.add_argument('--data-dir', type=str, default='~/.mxnet/datasets/imagenet',
                        help='ImageNet directory for validation.')
    parser.add_argument('--rec-dir', type=str, default='',
                        help='RecordIO directory for validation.')
    parser.add_argument('--batch-size', type=int, default=32,
                        help='training batch size per device (CPU/GPU).')
    parser.add_argument('--num-gpus', type=int, default=0,
                        help='number of gpus to use.')
    parser.add_argument('-j', '--num-data-workers', dest='num_workers', default=4, type=int,
                        help='number of preprocessing workers')
    parser.add_argument('--model', type=str, required=True,
                        help='type of model to use. see vision_model for options.')
    parser.add_argument('--quantized', action='store_true',
                        help='use int8 pretrained model')
    parser.add_argument('--input-size', type=int, default=224,
                        help='input shape of the image, default is 224.')
    parser.add_argument('--num-batches', type=int, default=100,
                        help='run specified number of batches for inference')
    parser.add_argument('--benchmark', action='store_true',
                        help='use synthetic data to evaluate the benchmark')
    parser.add_argument('--crop-ratio', type=float, default=0.875,
                        help='The ratio for crop and input size, for validation dataset only')
    parser.add_argument('--params-file', type=str,
                        help='local parameter file to load, instead of pre-trained weights.')
    parser.add_argument('--dtype', type=str,
                        help='training data type')
    parser.add_argument('--use_se', action='store_true',
                        help='use SE layers or not in resnext. default is false.')
    opt = parser.parse_args()
    return opt
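A hedged sketch of how these options are typically consumed downstream. The '_int8' suffix for quantized GluonCV model names and the exact precedence between --params-file and pretrained weights are assumptions, not something shown on this page.

from gluoncv.model_zoo import get_model

opt = parse_args()
model_name = (opt.model + '_int8') if opt.quantized else opt.model
net = get_model(model_name, pretrained=(opt.params_file is None))
if opt.params_file:
    net.load_parameters(opt.params_file)  # local weights override the pretrained ones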
Example 4: get_data_loader
# Required module: from gluoncv import data [as alias]
# Or: from gluoncv.data import imagenet [as alias]
def get_data_loader(data_dir, batch_size, num_workers, opt):
    """
    Create and return an MXNet data loader and a function that splits each batch across devices.
    """
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        return data, label

    if opt.mode == 'symbolic':
        # Synthetic data, generated directly on the target context.
        val_data = mx.io.NDArrayIter(
            mx.nd.random.normal(shape=(opt.dataset_size, 3, 224, 224), ctx=context),
            label=mx.nd.array(range(opt.dataset_size)),
            batch_size=batch_size,
        )
    else:
        transform_test = transforms.Compose([
            transforms.Resize(256, keep_ratio=True),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize
        ])
        val_data = gluon.data.DataLoader(
            imagenet.classification.ImageNet(data_dir, train=False).transform_first(transform_test),
            batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return val_data, batch_fn
Example 5: get_data_loader
# Required module: from gluoncv import data [as alias]
# Or: from gluoncv.data import imagenet [as alias]
def get_data_loader(data_dir, batch_size, num_workers):
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    jitter_param = 0.4
    lighting_param = 0.1
    input_size = opt.input_size
    crop_ratio = opt.crop_ratio if opt.crop_ratio > 0 else 0.875
    resize = int(math.ceil(input_size / crop_ratio))

    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        return data, label

    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param, contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        normalize
    ])
    transform_test = transforms.Compose([
        transforms.Resize(resize, keep_ratio=True),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        normalize
    ])

    train_data = gluon.data.DataLoader(
        imagenet.classification.ImageNet(data_dir, train=True).transform_first(transform_train),
        batch_size=batch_size, shuffle=True, last_batch='discard', num_workers=num_workers)
    val_data = gluon.data.DataLoader(
        imagenet.classification.ImageNet(data_dir, train=False).transform_first(transform_test),
        batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_data, val_data, batch_fn
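The resize edge used for validation is derived from the crop ratio rather than hard-coded; with the defaults it reproduces the familiar 256-pixel shorter side:

import math
input_size, crop_ratio = 224, 0.875
resize = int(math.ceil(input_size / crop_ratio))  # 224 / 0.875 = 256.0 -> 256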
Example 6: parse_args
# Required module: from gluoncv import data [as alias]
# Or: from gluoncv.data import imagenet [as alias]
def parse_args():
    parser = argparse.ArgumentParser(description='Train a model for image classification.')
    parser.add_argument('--data-dir', type=str, default='~/.mxnet/datasets/imagenet',
                        help='training and validation pictures to use.')
    parser.add_argument('--rec-train', type=str, default='~/.mxnet/datasets/imagenet/rec/train.rec',
                        help='the training data')
    parser.add_argument('--rec-train-idx', type=str, default='~/.mxnet/datasets/imagenet/rec/train.idx',
                        help='the index of training data')
    parser.add_argument('--rec-val', type=str, default='~/.mxnet/datasets/imagenet/rec/val.rec',
                        help='the validation data')
    parser.add_argument('--rec-val-idx', type=str, default='~/.mxnet/datasets/imagenet/rec/val.idx',
                        help='the index of validation data')
    parser.add_argument('--use-rec', action='store_true',
                        help='use image record iter for data input. default is false.')
    parser.add_argument('--batch-size', type=int, default=32,
                        help='training batch size per device (CPU/GPU).')
    parser.add_argument('--dtype', type=str, default='float32',
                        help='data type for training. default is float32')
    parser.add_argument('--num-gpus', type=int, default=0,
                        help='number of gpus to use.')
    parser.add_argument('-j', '--num-data-workers', dest='num_workers', default=4, type=int,
                        help='number of preprocessing workers')
    parser.add_argument('--mode', type=str,
                        help='mode in which to train the model. options are symbolic, imperative, hybrid')
    parser.add_argument('--model', type=str, default='resnet101_v1d_hi',
                        help='type of model to use. see vision_model for options.')
    parser.add_argument('--ratio', type=float, default=0.,
                        help='percentage of the low frequency part')
    parser.add_argument('--input-size', type=int, default=224,
                        help='size of the input image. default is 224')
    parser.add_argument('--crop-ratio', type=float, default=0.875,
                        help='Crop ratio during validation. default is 0.875')
    parser.add_argument('--use-se', action='store_true',
                        help='use SE layers or not in resnext. default is false.')
    parser.add_argument('--batch-norm', action='store_true',
                        help='enable batch normalization or not in vgg. default is false.')
    parser.add_argument('--resume-params', type=str, default='',
                        help='path of parameters to load from.')
    opt = parser.parse_args()
    return opt
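When --use-rec is set, the rec paths above would normally feed mx.io.ImageRecordIter instead of the folder-based ImageNet dataset. The following is a minimal sketch only: normalization and the richer augmentation parameters are omitted, and the exact settings are assumptions rather than the original script's.

train_data = mx.io.ImageRecordIter(
    path_imgrec=opt.rec_train,
    path_imgidx=opt.rec_train_idx,
    preprocess_threads=opt.num_workers,
    shuffle=True,
    batch_size=batch_size,
    data_shape=(3, opt.input_size, opt.input_size),
    rand_mirror=True,
)
val_data = mx.io.ImageRecordIter(
    path_imgrec=opt.rec_val,
    path_imgidx=opt.rec_val_idx,
    preprocess_threads=opt.num_workers,
    shuffle=False,
    batch_size=batch_size,
    data_shape=(3, opt.input_size, opt.input_size),
)

With record iterators the batch function typically reads batch.data[0] and batch.label[0] rather than indexing the batch as a tuple, which is why training scripts often keep two batch_fn variants.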
Example 7: get_data_loader
# Required module: from gluoncv import data [as alias]
# Or: from gluoncv.data import imagenet [as alias]
def get_data_loader(data_dir, batch_size, num_workers):
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    jitter_param = 0.4
    lighting_param = 0.1

    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        return data, label

    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param, contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        normalize
    ])
    transform_test = transforms.Compose([
        transforms.Resize(256, keep_ratio=True),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ])

    train_data = gluon.data.DataLoader(
        imagenet.classification.ImageNet(data_dir, train=True).transform_first(transform_train),
        batch_size=batch_size, shuffle=True, last_batch='discard', num_workers=num_workers)
    val_data = gluon.data.DataLoader(
        imagenet.classification.ImageNet(data_dir, train=False).transform_first(transform_test),
        batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_data, val_data, batch_fn
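For both the training and validation loaders above, gluoncv.data.ImageNet expects the data directory to be laid out as an image-folder tree with one subdirectory per class under train/ and val/. A small orientation sketch follows; the synsets attribute comes from the underlying ImageFolderDataset, so treat that detail as an assumption if you are on a different GluonCV version.

# ~/.mxnet/datasets/imagenet/
#     train/n01440764/*.JPEG, train/n01443537/*.JPEG, ...
#     val/n01440764/*.JPEG,   val/n01443537/*.JPEG,   ...
train_dataset = imagenet.classification.ImageNet('~/.mxnet/datasets/imagenet', train=True)
print(len(train_dataset))          # number of training images found on disk
print(len(train_dataset.synsets))  # number of class folders, i.e. 1000 for full ImageNet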
Example 8: train
# Required module: from gluoncv import data [as alias]
# Or: from gluoncv.data import imagenet [as alias]
def train(ctx):
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    net.initialize(initializer, ctx=ctx)
    trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params, kvstore=kv)
    L = gluon.loss.SoftmaxCrossEntropyLoss()

    best_val_score = 1
    for epoch in range(opt.num_epochs):
        tic = time.time()
        if opt.use_rec:
            train_data.reset()
        acc_top1.reset()
        btic = time.time()

        for i, batch in enumerate(train_data):
            data, label = batch_fn(batch, ctx)
            with ag.record():
                outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
                loss = [L(yhat, y) for yhat, y in zip(outputs, label)]
            for l in loss:
                l.backward()
            trainer.step(batch_size)
            acc_top1.update(label, outputs)
            if opt.log_interval and not (i+1) % opt.log_interval:
                _, top1 = acc_top1.get()
                err_top1 = 1 - top1
                logging.info('Epoch[%d] Batch [%d]\tSpeed: %f samples/sec\tlr=%f\taccuracy=%f' % (
                    epoch, i, batch_size*opt.log_interval/(time.time()-btic), trainer.learning_rate, top1))
                btic = time.time()

        _, top1 = acc_top1.get()
        err_top1 = 1 - top1
        throughput = int(batch_size * i / (time.time() - tic))

        err_top1_val, err_top5_val = test(ctx, val_data)

        logging.info('[Epoch %d] Train-accuracy=%f' % (epoch, top1))
        logging.info('[Epoch %d] Speed: %d samples/sec\tTime cost=%f' % (epoch, throughput, time.time()-tic))
        logging.info('[Epoch %d] Validation-accuracy=%f' % (epoch, 1 - err_top1_val))
        logging.info('[Epoch %d] Validation-top_k_accuracy_5=%f' % (epoch, 1 - err_top5_val))

        if save_frequency and err_top1_val < best_val_score and epoch > 50:
            best_val_score = err_top1_val
            net.save_parameters('%s/%.4f-imagenet-%s-%d-best.params' % (save_dir, best_val_score, model_name, epoch))
        if save_frequency and save_dir and (epoch + 1) % save_frequency == 0:
            net.save_parameters('%s/imagenet-%s-%d.params' % (save_dir, model_name, epoch))

    if save_frequency and save_dir:
        net.save_parameters('%s/imagenet-%s-%d.params' % (save_dir, model_name, opt.num_epochs-1))
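The test helper called above is not shown on this page. A plausible sketch, consistent with the top-1/top-5 error rates it returns and with the globals the train function already uses (net, batch_fn, opt), would be:

def test(ctx, val_data):
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(top_k=5)
    for batch in val_data:
        data, label = batch_fn(batch, ctx)
        outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
        acc_top1.update(label, outputs)
        acc_top5.update(label, outputs)
    _, top1 = acc_top1.get()
    _, top5 = acc_top5.get()
    return 1 - top1, 1 - top5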
Example 9: parse_args
# Required module: from gluoncv import data [as alias]
# Or: from gluoncv.data import imagenet [as alias]
def parse_args():
    parser = argparse.ArgumentParser(description='Train a model for image classification.')
    parser.add_argument('--data-dir', type=str, default='~/.mxnet/datasets/imagenet',
                        help='ImageNet directory for validation.')
    parser.add_argument('--rec-dir', type=str, default='',
                        help='RecordIO directory for validation.')
    parser.add_argument('--batch-size', type=int, default=32,
                        help='training batch size per device (CPU/GPU).')
    parser.add_argument('--num-gpus', type=int, default=0,
                        help='number of gpus to use.')
    parser.add_argument('-j', '--num-data-workers', dest='num_workers', default=4, type=int,
                        help='number of preprocessing workers')
    parser.add_argument('--model', type=str, default='model', required=False,
                        help='type of model to use. see vision_model for options.')
    parser.add_argument('--deploy', action='store_true',
                        help='whether to load a static model for deployment')
    parser.add_argument('--model-prefix', type=str, required=False,
                        help='load static model as hybridblock.')
    parser.add_argument('--quantized', action='store_true',
                        help='use int8 pretrained model')
    parser.add_argument('--input-size', type=int, default=224,
                        help='input shape of the image, default is 224.')
    parser.add_argument('--num-batches', type=int, default=100,
                        help='run specified number of batches for inference')
    parser.add_argument('--benchmark', action='store_true',
                        help='use synthetic data to evaluate the benchmark')
    parser.add_argument('--crop-ratio', type=float, default=0.875,
                        help='The ratio for crop and input size, for validation dataset only')
    parser.add_argument('--params-file', type=str,
                        help='local parameter file to load, instead of pre-trained weights.')
    parser.add_argument('--dtype', type=str,
                        help='training data type')
    parser.add_argument('--use_se', action='store_true',
                        help='use SE layers or not in resnext. default is false.')
    parser.add_argument('--calibration', action='store_true',
                        help='quantize the model')
    parser.add_argument('--num-calib-batches', type=int, default=5,
                        help='number of batches for calibration')
    parser.add_argument('--quantized-dtype', type=str, default='auto',
                        choices=['auto', 'int8', 'uint8'],
                        help='quantization destination data type for input data')
    parser.add_argument('--calib-mode', type=str, default='naive',
                        help='calibration mode used for generating calibration table for the quantized symbol; supports'
                             ' 1. none: no calibration will be used. The thresholds for quantization will be calculated'
                             ' on the fly. This will result in inference speed slowdown and loss of accuracy'
                             ' in general.'
                             ' 2. naive: simply take min and max values of layer outputs as thresholds for'
                             ' quantization. In general, the inference accuracy worsens with more examples used in'
                             ' calibration. It is recommended to use `entropy` mode as it produces more accurate'
                             ' inference results.'
                             ' 3. entropy: calculate KL divergence of the fp32 output and quantized output for optimal'
                             ' thresholds. This mode is expected to produce the best inference accuracy of all three'
                             ' kinds of quantized models if the calibration dataset is representative enough of the'
                             ' inference dataset.')
    opt = parser.parse_args()
    return opt
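The calibration options above are usually handed to MXNet's symbolic quantization API. The sketch below shows one common wiring; it assumes sym/arg_params/aux_params and a calibration iterator already exist, and since the exact quantize_model signature varies across MXNet 1.x versions, treat the keyword list as an assumption.

from mxnet.contrib.quantization import quantize_model

num_calib_examples = opt.batch_size * opt.num_calib_batches
qsym, qarg_params, aux_params = quantize_model(
    sym=sym, arg_params=arg_params, aux_params=aux_params,
    ctx=mx.cpu(), calib_mode=opt.calib_mode, calib_data=calib_data,
    num_calib_examples=num_calib_examples,
    quantized_dtype=opt.quantized_dtype)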
Example 10: train
# Required module: from gluoncv import data [as alias]
# Or: from gluoncv.data import imagenet [as alias]
def train(ctx):
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    net.initialize(mx.init.MSRAPrelu(), ctx=ctx)
    trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
    L = gluon.loss.SoftmaxCrossEntropyLoss()

    best_val_score = 1
    for epoch in range(opt.num_epochs):
        tic = time.time()
        if opt.use_rec:
            train_data.reset()
        acc_top1.reset()
        btic = time.time()

        for i, batch in enumerate(train_data):
            data, label = batch_fn(batch, ctx)
            with ag.record():
                outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
                loss = [L(yhat, y) for yhat, y in zip(outputs, label)]
            for l in loss:
                l.backward()
            lr_scheduler.update(i, epoch)
            trainer.step(batch_size)
            acc_top1.update(label, outputs)
            if opt.log_interval and not (i+1) % opt.log_interval:
                _, top1 = acc_top1.get()
                err_top1 = 1 - top1
                logging.info('Epoch[%d] Batch [%d]\tSpeed: %f samples/sec\ttop1-err=%f\tlr=%f' % (
                    epoch, i, batch_size*opt.log_interval/(time.time()-btic), err_top1,
                    trainer.learning_rate))
                btic = time.time()

        _, top1 = acc_top1.get()
        err_top1 = 1 - top1
        throughput = int(batch_size * i / (time.time() - tic))

        err_top1_val, err_top5_val = test(ctx, val_data)

        logging.info('[Epoch %d] training: err-top1=%f' % (epoch, err_top1))
        logging.info('[Epoch %d] speed: %d samples/sec\ttime cost: %f' % (epoch, throughput, time.time()-tic))
        logging.info('[Epoch %d] validation: err-top1=%f err-top5=%f' % (epoch, err_top1_val, err_top5_val))

        if err_top1_val < best_val_score and epoch > 50:
            best_val_score = err_top1_val
            net.save_parameters('%s/%.4f-imagenet-%s-%d-best.params' % (save_dir, best_val_score, model_name, epoch))
        if save_frequency and save_dir and (epoch + 1) % save_frequency == 0:
            net.save_parameters('%s/imagenet-%s-%d.params' % (save_dir, model_name, epoch))

    if save_frequency and save_dir:
        net.save_parameters('%s/imagenet-%s-%d.params' % (save_dir, model_name, opt.num_epochs-1))
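Example 10 differs from Example 8 mainly in the MSRAPrelu initializer and the per-iteration lr_scheduler.update(i, epoch) call. The scheduler itself is constructed elsewhere in the original script (GluonCV ships one in gluoncv.utils); the class below is only a hypothetical stand-in with the same call site, shown to make the interface concrete.

class StepLRScheduler:
    """Hypothetical stand-in: decay the learning rate by 'factor' every 'step' epochs."""

    def __init__(self, trainer, base_lr, step=30, factor=0.1):
        self.trainer = trainer
        self.base_lr = base_lr
        self.step = step
        self.factor = factor

    def update(self, i, epoch):
        # 'i' (the batch index) is unused here but kept for interface compatibility.
        self.trainer.set_learning_rate(self.base_lr * (self.factor ** (epoch // self.step)))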