本文整理汇总了Python中mxnet.optimizer.SGD属性的典型用法代码示例。如果您正苦于以下问题:Python optimizer.SGD属性的具体用法?Python optimizer.SGD怎么用?Python optimizer.SGD使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类mxnet.optimizer
的用法示例。
在下文中一共展示了optimizer.SGD属性的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: train
# 需要导入模块: from mxnet import optimizer [as 别名]
# 或者: from mxnet.optimizer import SGD [as 别名]
def train(params, loader, model=None):
    """Train a Yolo network with SGD (lr=0.005, momentum=0.7).

    Parameters
    ----------
    params : dict
        Training configuration. Recognized keys:
        - 'epoch' (int, default 10): number of passes over the data.
        - 'verbose' (bool, default True): print per-epoch progress.
        - 'batch_size' (int, default 32): gradient-normalization factor
          passed to ``trainer.step``.
        - 'class_name', 'layer_num' (default 5), 'class_num' (default 3),
          's' (default 4), 'b' (default 2): network-construction options,
          used only when ``model`` is None.
    loader :
        MXNet-style data iterator; must support ``reset()`` and yield
        batches exposing ``.data[0]`` and ``.label[0]``.
    model : Yolo, optional
        Pre-built network to continue training. When None, a fresh Yolo
        is built from ``params`` and Xavier-initialized.

    Returns
    -------
    Yolo
        The trained network.
    """
    epoch = params.get('epoch', 10)
    verbose = params.get("verbose", True)
    batch_size = params.get("batch_size", 32)
    if model is None:
        class_name = params["class_name"]
        layer_num = params.get("layer_num", 5)
        class_num = params.get("class_num", 3)
        s = params.get("s", 4)
        b = params.get("b", 2)
        yolo = Yolo(layer_num, class_num, s=s, b=b, class_name=class_name)
        yolo.initialize(init=Xavier(magnitude=0.02))
    else:
        print("model load finish")
        layer_num = model.layer_num
        class_num = model.class_num
        s = model.s
        b = model.b
        yolo = model
    if verbose:
        print("train params: \n\tepoch:%d \n\tlayer_num:%d \n\tclass_num:%d \n\ts:%d \n\tb:%d" %
              (epoch, layer_num, class_num, s, b))
    sgd = optimizer.SGD(momentum=0.7, learning_rate=0.005)
    trainer = gluon.Trainer(yolo.collect_params(), sgd)
    # Loop-invariant: build the loss module once instead of re-creating it
    # for every batch (the original constructed it inside the inner loop).
    loss_func = TotalLoss(s=s, c=class_num, b=b)
    for ep in range(epoch):
        loader.reset()
        mean_loss = 0.0
        batch_count = 0  # actual number of batches seen this epoch
        t1 = time()
        for _, batch in enumerate(loader):
            x = batch.data[0]
            # Labels arrive flat; reshape to (num_objects, 5) then encode
            # into the YOLO target grid.
            y = batch.label[0].reshape((-1, 5))
            y = translate_y(y, yolo.s, yolo.b, yolo.class_num)
            y = nd.array(y)
            with autograd.record():
                ypre = yolo(x)  # (batch_size, output_dim)
                loss = nd.mean(loss_func(ypre, y))
            mean_loss += loss.asscalar()
            loss.backward()
            trainer.step(batch_size)
            batch_count += 1
        t2 = time()
        if verbose:
            # BUG FIX: the original divided by a hard-coded 32, which is only
            # correct when the epoch happens to contain exactly 32 batches.
            # Divide by the real batch count (guarding an empty loader).
            print("epoch:%d/%d loss:%.5f time:%4f" % (
                ep + 1, epoch, mean_loss / max(batch_count, 1), t2 - t1),
                flush=True)
            print()
    return yolo
示例2: train2
# 需要导入模块: from mxnet import optimizer [as 别名]
# 或者: from mxnet.optimizer import SGD [as 别名]
def train2(params, loader: BaseDataLoader, model=None):
    """Train a Yolo network with SGD (lr=0.0025, momentum=0.7) using a
    ``BaseDataLoader`` that hands out batches on demand.

    Parameters
    ----------
    params : dict
        Training configuration. Recognized keys:
        - 'epoch' (int, default 10): number of passes over the data.
        - 'verbose' (bool, default True): print per-epoch progress.
        - 'batch_size' (int, default 32): batch size requested from the
          loader and the gradient-normalization factor.
        - 'layer_num' (default 5), 'class_num' (default 3), 's' (default 4),
          'b' (default 2): network-construction options, used only when
          ``model`` is None.
    loader : BaseDataLoader
        Must provide ``data_number()`` and ``next_batch(batch_size)``
        returning an ``(x, y)`` pair ready for the network/loss.
    model : Yolo, optional
        Pre-built network to continue training. When None, a fresh Yolo
        is built from ``params`` and Xavier-initialized.

    Returns
    -------
    Yolo
        The trained network.
    """
    epoch = params.get('epoch', 10)
    verbose = params.get("verbose", True)
    batch_size = params.get("batch_size", 32)
    if model is None:
        layer_num = params.get("layer_num", 5)
        class_num = params.get("class_num", 3)
        s = params.get("s", 4)
        b = params.get("b", 2)
        yolo = Yolo(layer_num, class_num, s=s, b=b)
        yolo.initialize(init=Xavier(magnitude=0.02))
    else:
        print("model load finish")
        layer_num = model.layer_num
        class_num = model.class_num
        s = model.s
        b = model.b
        yolo = model
    if verbose:
        print("train params: \n\tepoch:%d \n\tlayer_num:%d \n\tclass_num:%d \n\ts:%d \n\tb:%d" %
              (epoch, layer_num, class_num, s, b))
    sgd = optimizer.SGD(momentum=0.7, learning_rate=0.0025)
    trainer = gluon.Trainer(yolo.collect_params(), sgd)
    # Loop-invariant: build the loss module once instead of re-creating it
    # for every batch (the original constructed it inside the inner loop).
    loss_func = TotalLoss(s=s, c=class_num, b=b)
    for ep in range(epoch):
        mean_loss = 0.0
        all_batch = int(loader.data_number() / batch_size)
        t1 = time()
        for _ in range(all_batch):
            x, y = loader.next_batch(batch_size)
            with autograd.record():
                ypre = yolo(x)  # (batch_size, output_dim)
                loss = nd.mean(loss_func(ypre, y))
            mean_loss += loss.asscalar()
            loss.backward()
            trainer.step(batch_size)
        t2 = time()
        if verbose:
            # BUG FIX: the original printed only the LAST batch's loss and
            # crashed with AttributeError (`int` has no `asscalar`) when
            # data_number() < batch_size left the loop body unexecuted.
            # Report the epoch-mean loss, consistent with `train`, guarding
            # the zero-batch case.
            print("epoch:%d/%d loss:%.5f time:%4f" % (
                ep + 1, epoch, mean_loss / max(all_batch, 1), t2 - t1),
                flush=True)
    return yolo