本文整理汇总了Python中gluoncv.model_zoo方法的典型用法代码示例。如果您正苦于以下问题:Python gluoncv.model_zoo方法的具体用法?Python gluoncv.model_zoo怎么用?Python gluoncv.model_zoo使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类gluoncv的用法示例。
在下文中一共展示了gluoncv.model_zoo方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: import gluoncv [as 别名]
# 或者: from gluoncv import model_zoo [as 别名]
def __init__(self, backbone, channels=1, ctx=mx.cpu(), pretrained=False, **kwargs):
    """Build an FPN-based segmentation/detection head on a ResNet backbone.

    Parameters
    ----------
    backbone : str
        Backbone name; must be a key of ``model_dict`` below
        ('resnet18_v1b' or 'resnet34_v1b') and an attribute of
        ``gcv_model_zoo``.
    channels : int
        Number of output channels of the final 1x1 prediction conv.
    ctx : mx.Context
        Device on which parameters are initialized.
    pretrained : bool
        Whether to load pretrained backbone (and FPN) weights.
    **kwargs
        Forwarded to the backbone constructor; ``k`` (default 1) is also
        read here and stored on the instance.

    NOTE(review): indentation of this example was lost in extraction and has
    been reconstructed; verify against the original project source.
    """
    super().__init__()
    self.k = kwargs.get('k', 1)
    self.channels = channels
    # Names of the intermediate feature maps (one per residual stage) that
    # the FPN expander taps from each supported backbone.
    model_dict = {'resnet18_v1b': ['layers1_relu3_fwd', 'layers2_relu3_fwd', 'layers3_relu3_fwd', 'layers4_relu3_fwd'],
                  'resnet34_v1b': ['layers1_relu5_fwd', 'layers2_relu7_fwd', 'layers3_relu11_fwd', 'layers4_relu3_fwd']}
    backbone_model = getattr(gcv_model_zoo, backbone)
    backbone_outputs = model_dict[backbone]  # raises KeyError for unsupported backbones
    base_network = backbone_model(pretrained=pretrained, norm_layer=nn.BatchNorm, ctx=ctx, **kwargs)
    # Top-down FPN over the four tapped stages, 256 channels per level.
    self.features = FPNFeatureExpander(
        network=base_network,
        outputs=backbone_outputs, num_filters=[256, 256, 256, 256], use_1x1=True,
        use_upsample=True, use_elewadd=True, use_p6=False, no_bias=True, pretrained=pretrained,
        ctx=ctx)
    # One extra conv-BN-ReLU refinement block per FPN level.
    self.extrac_convs = []
    for i in range(4):
        weight_init = mx.init.Normal(0.001)
        extra_conv = nn.HybridSequential(prefix='extra_conv_{}'.format(i))
        with extra_conv.name_scope():
            extra_conv.add(nn.Conv2D(256, 3, 1, 1))
            extra_conv.add(nn.BatchNorm())
            extra_conv.add(nn.Activation('relu'))
        extra_conv.initialize(weight_init, ctx=ctx)
        # register_child is required because the block is stored in a plain
        # Python list, which gluon does not traverse automatically.
        self.register_child(extra_conv)
        self.extrac_convs.append(extra_conv)
    # Output head: conv-BN-ReLU followed by a 1x1 conv to `channels` maps.
    self.decoder_out = nn.HybridSequential(prefix='decoder_out')
    with self.decoder_out.name_scope():
        weight_init = mx.init.Normal(0.001)
        self.decoder_out.add(nn.Conv2D(256, 3, 1, 1))
        self.decoder_out.add(nn.BatchNorm())
        self.decoder_out.add(nn.Activation('relu'))
        self.decoder_out.add(nn.Conv2D(self.channels, 1, 1))
    self.decoder_out.initialize(weight_init, ctx=ctx)
示例2: test
# 需要导入模块: import gluoncv [as 别名]
# 或者: from gluoncv import model_zoo [as 别名]
def test(args):
    """Run evaluation (``args.eval``) or mask prediction on a segmentation dataset.

    In eval mode, accumulates pixAcc/mIoU over the 'val' split; otherwise
    predicts on the 'test' split and saves color-palette PNG masks to
    ``outdir/``.

    NOTE(review): indentation of this example was lost in extraction and has
    been reconstructed; verify against the original project source.
    """
    # output folder
    outdir = 'outdir'
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    # image transform (ImageNet mean/std normalization)
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    # dataset and dataloader.  The dead accumulators
    # (total_inter/total_union/total_correct/total_label) from the original
    # were removed: the metric object below does all the accumulation.
    if args.eval:
        testset = get_segmentation_dataset(
            args.dataset, split='val', mode='testval', transform=input_transform)
    else:
        testset = get_segmentation_dataset(
            args.dataset, split='test', mode='test', transform=input_transform)
    test_data = gluon.data.DataLoader(
        testset, args.test_batch_size, shuffle=False, last_batch='keep',
        batchify_fn=ms_batchify_fn, num_workers=args.workers)
    # create network
    if args.model_zoo is not None:
        model = get_model(args.model_zoo, pretrained=True)
    else:
        model = get_segmentation_model(model=args.model, dataset=args.dataset, ctx=args.ctx,
                                       backbone=args.backbone, norm_layer=args.norm_layer,
                                       norm_kwargs=args.norm_kwargs, aux=args.aux,
                                       base_size=args.base_size, crop_size=args.crop_size)
        # load pretrained weight (only needed when not using a model-zoo model)
        assert args.resume is not None, '=> Please provide the checkpoint using --resume'
        if os.path.isfile(args.resume):
            model.load_parameters(args.resume, ctx=args.ctx)
        else:
            raise RuntimeError("=> no checkpoint found at '{}'" \
                .format(args.resume))
    print(model)
    evaluator = MultiEvalModel(model, testset.num_class, ctx_list=args.ctx)
    metric = gluoncv.utils.metrics.SegmentationMetric(testset.num_class)
    tbar = tqdm(test_data)
    for i, (data, dsts) in enumerate(tbar):
        if args.eval:
            # multi-scale forward returns per-context outputs; take logits [0]
            predicts = [pred[0] for pred in evaluator.parallel_forward(data)]
            targets = [target.as_in_context(predicts[0].context) \
                       for target in dsts]
            metric.update(targets, predicts)
            pixAcc, mIoU = metric.get()
            tbar.set_description('pixAcc: %.4f, mIoU: %.4f' % (pixAcc, mIoU))
        else:
            # test mode: dsts carries the image paths instead of labels
            im_paths = dsts
            predicts = evaluator.parallel_forward(data)
            for predict, impath in zip(predicts, im_paths):
                # argmax over class axis, shift by dataset label offset
                predict = mx.nd.squeeze(mx.nd.argmax(predict[0], 1)).asnumpy() + \
                    testset.pred_offset
                mask = get_color_pallete(predict, args.dataset)
                outname = os.path.splitext(impath)[0] + '.png'
                mask.save(os.path.join(outdir, outname))
示例3: __init__
# 需要导入模块: import gluoncv [as 别名]
# 或者: from gluoncv import model_zoo [as 别名]
def __init__(self, args):
    """Set up datasets, model, criterion, optimizer and metric for training.

    Builds train/val DataLoaders, creates the segmentation network (from the
    model zoo or from scratch), optionally resumes from a checkpoint, and
    wires up a poly LR schedule with an SGD trainer.

    NOTE(review): indentation of this example was lost in extraction and has
    been reconstructed; verify against the original project source.
    """
    self.args = args
    # image transform (ImageNet mean/std normalization)
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    # dataset and dataloader
    data_kwargs = {'transform': input_transform, 'base_size': args.base_size,
                   'crop_size': args.crop_size}
    trainset = get_segmentation_dataset(
        args.dataset, split=args.train_split, mode='train', **data_kwargs)
    valset = get_segmentation_dataset(
        args.dataset, split='val', mode='val', **data_kwargs)
    self.train_data = gluon.data.DataLoader(
        trainset, args.batch_size, shuffle=True, last_batch='rollover',
        num_workers=args.workers)
    self.eval_data = gluon.data.DataLoader(valset, args.test_batch_size,
                                           last_batch='rollover', num_workers=args.workers)
    # create network
    if args.model_zoo is not None:
        model = get_model(args.model_zoo, pretrained=True)
    else:
        model = get_segmentation_model(model=args.model, dataset=args.dataset,
                                       backbone=args.backbone, norm_layer=args.norm_layer,
                                       norm_kwargs=args.norm_kwargs, aux=args.aux,
                                       crop_size=args.crop_size)
    model.cast(args.dtype)
    print(model)
    # wrap for multi-device data parallelism (optionally with SyncBatchNorm)
    self.net = DataParallelModel(model, args.ctx, args.syncbn)
    self.evaluator = DataParallelModel(SegEvalModel(model), args.ctx)
    # resume checkpoint if needed
    if args.resume is not None:
        if os.path.isfile(args.resume):
            model.load_parameters(args.resume, ctx=args.ctx)
        else:
            raise RuntimeError("=> no checkpoint found at '{}'" \
                .format(args.resume))
    # create criterion (optionally including the auxiliary head loss)
    criterion = MixSoftmaxCrossEntropyLoss(args.aux, aux_weight=args.aux_weight)
    self.criterion = DataParallelCriterion(criterion, args.ctx, args.syncbn)
    # optimizer and lr scheduling: poly decay with power 0.9
    self.lr_scheduler = LRScheduler(mode='poly', base_lr=args.lr,
                                    nepochs=args.epochs,
                                    iters_per_epoch=len(self.train_data),
                                    power=0.9)
    kv = mx.kv.create(args.kvstore)
    optimizer_params = {'lr_scheduler': self.lr_scheduler,
                        'wd': args.weight_decay,
                        'momentum': args.momentum}
    if args.dtype == 'float16':
        # keep an fp32 master copy of weights when training in fp16
        optimizer_params['multi_precision'] = True
    if args.no_wd:
        # exempt BN scale/shift and biases from weight decay
        for k, v in self.net.module.collect_params('.*beta|.*gamma|.*bias').items():
            v.wd_mult = 0.0
    self.optimizer = gluon.Trainer(self.net.module.collect_params(), 'sgd',
                                   optimizer_params, kvstore=kv)
    # evaluation metrics
    self.metric = gluoncv.utils.metrics.SegmentationMetric(trainset.num_class)