This article collects and summarizes typical usage examples of the Python method gluoncv.utils.LRScheduler. If you are wondering what exactly utils.LRScheduler does, how to call utils.LRScheduler, or where to find examples of utils.LRScheduler in use, the hand-picked code examples below should help. You can also explore further usage examples from the containing module, gluoncv.utils.
Below are 6 code examples of the utils.LRScheduler method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
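For orientation, here is a minimal sketch of how an LRScheduler is typically constructed and handed to a gluon.Trainer. It only uses the API that appears in the examples below; the network and all numeric values are made up for illustration.

from mxnet import gluon
from gluoncv.utils import LRScheduler

# A toy network so the snippet is self-contained.
net = gluon.nn.Dense(10)
net.initialize()

# Decay the learning rate from 0.1 towards 0 along a cosine curve over 10,000
# iterations (hypothetical values; any of the modes exercised below can be used).
lr_scheduler = LRScheduler('cosine', base_lr=0.1, target_lr=0, niters=10000)

# The scheduler is passed to the optimizer, exactly as in Examples 5 and 6 below.
trainer = gluon.Trainer(net.collect_params(), 'sgd',
                        {'lr_scheduler': lr_scheduler, 'momentum': 0.9, 'wd': 1e-4})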
Example 1: test_sanity

# Required imports: from gluoncv import utils [as alias]
# or: from gluoncv.utils import LRScheduler [as alias]
def test_sanity():
    N = 1000
    constant = LRScheduler('constant', base_lr=0, target_lr=1, niters=N)
    linear = LRScheduler('linear', base_lr=1, target_lr=2, niters=N)
    cosine = LRScheduler('cosine', base_lr=3, target_lr=1, niters=N)
    poly = LRScheduler('poly', base_lr=1, target_lr=0, niters=N, power=2)
    step = LRScheduler('step', base_lr=1, target_lr=0, niters=N,
                       step_iter=[100, 500], step_factor=0.1)
    compare(constant, 0, 0)
    compare(constant, N-1, 0)
    compare(linear, 0, 1)
    compare(linear, N-1, 2)
    compare(cosine, 0, 3)
    compare(cosine, N-1, 1)
    compare(poly, 0, 1)
    compare(poly, N-1, 0)
    compare(step, 0, 1)
    compare(step, 100, 0.1)
    compare(step, 500, 0.01)
    compare(step, N-1, 0.01)
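The test snippets above and below rely on a compare helper that is not included in these excerpts. A plausible reconstruction, assuming LRScheduler exposes the update() method used in Example 4 and a learning_rate attribute, would look like this:

import numpy as np

def compare(scheduler, iteration, expected, rtol=1e-5):
    # Advance the scheduler to the given iteration, then check the resulting rate.
    scheduler.update(iteration)
    np.testing.assert_allclose(scheduler.learning_rate, expected, rtol=rtol)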
Example 2: test_params

# Required imports: from gluoncv import utils [as alias]
# or: from gluoncv.utils import LRScheduler [as alias]
def test_params():
    N = 1000
    linear = LRScheduler('linear', base_lr=1, target_lr=2, niters=N)
    linear2 = LRScheduler('linear', baselr=1, targetlr=2, niters=N)
    linear3 = LRScheduler('linear', base_lr=1, target_lr=2, niters=N,
                          baselr=0, targetlr=1)
    linear4 = LRScheduler('linear', base_lr=1, target_lr=2, niters=N/2)
    linear5 = LRScheduler('linear', base_lr=1, target_lr=2, niters=N/2,
                          nepochs=N/2, iters_per_epoch=2)
    compare(linear, 0, 1)
    compare(linear, N-1, 2)
    compare(linear2, 0, 1)
    compare(linear2, N-1, 2)
    compare(linear3, 0, 1)
    compare(linear3, N-1, 2)
    compare(linear4, 0, 1)
    compare(linear4, N/2-1, 2)
    compare(linear5, 0, 1)
    compare(linear5, N/2-1, 1.5, rtol=0.01)
    compare(linear5, N-1, 2)
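As linear5 above suggests, the schedule length can be given either directly as niters or as nepochs together with iters_per_epoch, and the epoch-based form appears to take precedence when both are supplied. A small sketch of the two presumably equivalent spellings (equivalence inferred from this test, not verified against the library source):

from gluoncv.utils import LRScheduler

# Both schedules presumably cover 1000 iterations from lr=1 to lr=2.
by_iters = LRScheduler('linear', base_lr=1, target_lr=2, niters=1000)
by_epochs = LRScheduler('linear', base_lr=1, target_lr=2,
                        nepochs=10, iters_per_epoch=100)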
Example 3: test_composed_method

# Required imports: from gluoncv import utils [as alias]
# or: from gluoncv.utils import LRScheduler [as alias]
def test_composed_method():
    N = 1000
    constant = LRScheduler('constant', base_lr=0, target_lr=1, niters=N)
    linear = LRScheduler('linear', base_lr=1, target_lr=2, niters=N)
    cosine = LRScheduler('cosine', base_lr=3, target_lr=1, niters=N)
    poly = LRScheduler('poly', base_lr=1, target_lr=0, niters=N, power=2)
    # components with niters=0 will be ignored
    null_cosine = LRScheduler('cosine', base_lr=3, target_lr=1, niters=0)
    null_poly = LRScheduler('cosine', base_lr=3, target_lr=1, niters=0)
    step = LRScheduler('step', base_lr=1, target_lr=0, niters=N,
                       step_iter=[100, 500], step_factor=0.1)
    arr = LRSequential([constant, null_cosine, linear, cosine, null_poly, poly, step])
    # constant
    for i in range(N):
        compare(arr, i, 0)
    # linear
    for i in range(N, 2*N):
        expect_linear = 2 + (1 - 2) * (1 - (i - N) / (N - 1))
        compare(arr, i, expect_linear)
    # cosine
    for i in range(2*N, 3*N):
        expect_cosine = 1 + (3 - 1) * ((1 + cos(pi * (i - 2*N) / (N - 1))) / 2)
        compare(arr, i, expect_cosine)
    # poly
    for i in range(3*N, 4*N):
        expect_poly = 0 + (1 - 0) * (pow(1 - (i - 3*N) / (N - 1), 2))
        compare(arr, i, expect_poly)
    # step
    for i in range(4*N, 5*N):
        if i - 4*N < 100:
            expect_step = 1
        elif i - 4*N < 500:
            expect_step = 0.1
        else:
            expect_step = 0.01
        compare(arr, i, expect_step)
    # out-of-bound index
    compare(arr, 10*N, 0.01)
    compare(arr, -1, 0)
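Beyond the exhaustive check above, LRSequential is the piece that makes warm-up schedules convenient: it simply chains schedulers back to back. A minimal sketch, using the same constructor arguments as this test with hypothetical values:

from gluoncv.utils import LRScheduler, LRSequential

# Hypothetical warm-up recipe: ramp linearly from 0 to 0.1 over 500 iterations,
# then decay along a cosine curve to 0 over the remaining 9500 iterations.
warmup = LRScheduler('linear', base_lr=0, target_lr=0.1, niters=500)
decay = LRScheduler('cosine', base_lr=0.1, target_lr=0, niters=9500)
schedule = LRSequential([warmup, decay])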
Example 4: test_single_method

# Required imports: from gluoncv import utils [as alias]
# or: from gluoncv.utils import LRScheduler [as alias]
def test_single_method():
    N = 1000
    constant = LRScheduler('constant', base_lr=0, target_lr=1, niters=N)
    linear = LRScheduler('linear', base_lr=1, target_lr=2, niters=N)
    cosine = LRScheduler('cosine', base_lr=3, target_lr=1, niters=N)
    poly = LRScheduler('poly', base_lr=1, target_lr=0, niters=N, power=2)
    step = LRScheduler('step', base_lr=1, target_lr=0, niters=N,
                       step_iter=[100, 500], step_factor=0.1)
    step2 = LRScheduler('step', base_lr=1, target_lr=0,
                        nepochs=2, iters_per_epoch=N/2,
                        step_iter=[100, 500], step_factor=0.1)
    step3 = LRScheduler('step', base_lr=1, target_lr=0,
                        nepochs=100, iters_per_epoch=N/100,
                        step_epoch=[10, 50], step_factor=0.1)
    # Test numerical value
    for i in range(N):
        compare(constant, i, 0)
        expect_linear = 2 + (1 - 2) * (1 - i / (N - 1))
        compare(linear, i, expect_linear)
        expect_cosine = 1 + (3 - 1) * ((1 + cos(pi * i / (N-1))) / 2)
        compare(cosine, i, expect_cosine)
        expect_poly = 0 + (1 - 0) * (pow(1 - i / (N-1), 2))
        compare(poly, i, expect_poly)
        if i < 100:
            expect_step = 1
        elif i < 500:
            expect_step = 0.1
        else:
            expect_step = 0.01
        compare(step, i, expect_step)
        compare(step2, i, expect_step)
        compare(step3, i, expect_step)
    # Test out-of-range updates
    for i in range(10):
        constant.update(i - 3)
        linear.update(i - 3)
        cosine.update(i - 3)
        poly.update(i - 3)
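The expected values computed in this test imply the following closed forms for a single scheduler of length N at iteration t. This is a reconstruction from the test arithmetic, not the library source:

from math import cos, pi

# lr(t) reconstructed from the expressions in test_single_method (0 <= t <= N-1).
def linear_lr(t, base_lr, target_lr, N):
    return target_lr + (base_lr - target_lr) * (1 - t / (N - 1))

def cosine_lr(t, base_lr, target_lr, N):
    return target_lr + (base_lr - target_lr) * (1 + cos(pi * t / (N - 1))) / 2

def poly_lr(t, base_lr, target_lr, N, power):
    return target_lr + (base_lr - target_lr) * (1 - t / (N - 1)) ** power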
Example 5: __init__

# Required imports: from gluoncv import utils [as alias]
# or: from gluoncv.utils import LRScheduler [as alias]
def __init__(self, args):
    self.args = args
    # image transform
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    # dataset and dataloader
    data_kwargs = {'transform': input_transform, 'base_size': args.base_size,
                   'crop_size': args.crop_size}
    trainset = get_segmentation_dataset(
        args.dataset, split=args.train_split, mode='train', **data_kwargs)
    valset = get_segmentation_dataset(
        args.dataset, split='val', mode='val', **data_kwargs)
    self.train_data = gluon.data.DataLoader(
        trainset, args.batch_size, shuffle=True, last_batch='rollover',
        num_workers=args.workers)
    self.eval_data = gluon.data.DataLoader(valset, args.test_batch_size,
                                           last_batch='rollover', num_workers=args.workers)
    # create network
    if args.model_zoo is not None:
        model = get_model(args.model_zoo, pretrained=True)
    else:
        model = get_segmentation_model(model=args.model, dataset=args.dataset,
                                       backbone=args.backbone, norm_layer=args.norm_layer,
                                       norm_kwargs=args.norm_kwargs, aux=args.aux,
                                       crop_size=args.crop_size)
    model.cast(args.dtype)
    print(model)
    self.net = DataParallelModel(model, args.ctx, args.syncbn)
    self.evaluator = DataParallelModel(SegEvalModel(model), args.ctx)
    # resume checkpoint if needed
    if args.resume is not None:
        if os.path.isfile(args.resume):
            model.load_parameters(args.resume, ctx=args.ctx)
        else:
            raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
    # create criterion
    criterion = MixSoftmaxCrossEntropyLoss(args.aux, aux_weight=args.aux_weight)
    self.criterion = DataParallelCriterion(criterion, args.ctx, args.syncbn)
    # optimizer and lr scheduling
    self.lr_scheduler = LRScheduler(mode='poly', base_lr=args.lr,
                                    nepochs=args.epochs,
                                    iters_per_epoch=len(self.train_data),
                                    power=0.9)
    kv = mx.kv.create(args.kvstore)
    optimizer_params = {'lr_scheduler': self.lr_scheduler,
                        'wd': args.weight_decay,
                        'momentum': args.momentum}
    if args.dtype == 'float16':
        optimizer_params['multi_precision'] = True
    if args.no_wd:
        for k, v in self.net.module.collect_params('.*beta|.*gamma|.*bias').items():
            v.wd_mult = 0.0
    self.optimizer = gluon.Trainer(self.net.module.collect_params(), 'sgd',
                                   optimizer_params, kvstore=kv)
    # evaluation metrics
    self.metric = gluoncv.utils.metrics.SegmentationMetric(trainset.num_class)
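The no_wd branch above excludes BatchNorm scale/shift parameters and all biases from weight decay by zeroing their wd_mult. A small self-contained illustration of that trick on a toy network (the network itself is hypothetical and only serves to show the regex selection):

from mxnet import gluon

# Build a toy network containing Dense and BatchNorm layers.
net = gluon.nn.HybridSequential()
net.add(gluon.nn.Dense(16), gluon.nn.BatchNorm(), gluon.nn.Dense(8))
net.initialize()

# The regex selects BatchNorm beta/gamma and every bias; wd_mult = 0 removes
# them from weight decay while leaving the remaining weights untouched.
for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():
    v.wd_mult = 0.0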
Example 6: __init__

# Required imports: from gluoncv import utils [as alias]
# or: from gluoncv.utils import LRScheduler [as alias]
def __init__(self, args):
    self.args = args
    # image transform
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    # dataset and dataloader
    trainset = get_segmentation_dataset(
        args.dataset, split='train', transform=input_transform)
    valset = get_segmentation_dataset(
        args.dataset, split='val', transform=input_transform)
    self.train_data = gluon.data.DataLoader(
        trainset, args.batch_size, shuffle=True, last_batch='rollover',
        num_workers=args.workers)
    self.eval_data = gluon.data.DataLoader(valset, args.test_batch_size,
                                           last_batch='keep', num_workers=args.workers)
    # create network
    model = get_segmentation_model(model=args.model, dataset=args.dataset,
                                   backbone=args.backbone, norm_layer=args.norm_layer,
                                   aux=args.aux)
    print(model)
    self.net = DataParallelModel(model, args.ctx, args.syncbn)
    self.evaluator = DataParallelModel(SegEvalModel(model), args.ctx)
    # resume checkpoint if needed
    if args.resume is not None:
        if os.path.isfile(args.resume):
            model.load_params(args.resume, ctx=args.ctx)
        else:
            raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
    # create criterion
    criterion = SoftmaxCrossEntropyLossWithAux(args.aux)
    self.criterion = DataParallelCriterion(criterion, args.ctx, args.syncbn)
    # optimizer and lr scheduling
    self.lr_scheduler = LRScheduler(mode='poly', baselr=args.lr,
                                    niters=len(self.train_data),
                                    nepochs=args.epochs)
    kv = mx.kv.create(args.kvstore)
    self.optimizer = gluon.Trainer(self.net.module.collect_params(), 'sgd',
                                   {'lr_scheduler': self.lr_scheduler,
                                    'wd': args.weight_decay,
                                    'momentum': args.momentum,
                                    'multi_precision': True},
                                   kvstore=kv)