This article collects typical usage examples of the Python method mxnet.initializer.Xavier. If you are unsure what initializer.Xavier does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage of the enclosing module, mxnet.initializer.
The following presents 10 code examples of initializer.Xavier, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
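Before the examples, a minimal self-contained sketch of the pattern they all share may help: build a Gluon block and initialize its weights with Xavier. The toy network below is ours, not taken from any example; the Xavier arguments match the configuration used throughout.

import mxnet as mx
from mxnet.gluon import nn
from mxnet.initializer import Xavier

# A toy network; any Gluon block is initialized the same way.
net = nn.HybridSequential()
net.add(nn.Conv2D(64, kernel_size=3, padding=1),
        nn.Activation('relu'),
        nn.Dense(10))

# MSRA-style Xavier: gaussian samples scaled by the fan-out, magnitude 2 --
# the same configuration most of the examples below apply per layer.
net.initialize(init=Xavier(rnd_type='gaussian', factor_type='out', magnitude=2),
               ctx=mx.cpu())

out = net(mx.nd.random.uniform(shape=(1, 3, 32, 32)))
print(out.shape)  # (1, 10)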
Example 1: _make_features
# Required import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def _make_features(self, layers, filters, batch_norm):
    featurizer = nn.HybridSequential(prefix='')
    for i, num in enumerate(layers):
        for _ in range(num):
            featurizer.add(nn.Conv2D(filters[i], kernel_size=3, padding=1,
                                     weight_initializer=Xavier(rnd_type='gaussian',
                                                               factor_type='out',
                                                               magnitude=2),
                                     bias_initializer='zeros',
                                     # name='conv%s_%s' % (str(i + 1), str(_ + 1))
                                     ))
            if batch_norm:
                featurizer.add(nn.BatchNorm())
            featurizer.add(nn.Activation('relu'))
        featurizer.add(nn.MaxPool2D(strides=2))
    return featurizer
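The method above is written as a class helper; to see what it produces, here is a standalone sketch that lifts it out of the class (the free-function name and the VGG-11 stage spec are our assumptions) and runs a dummy batch through the resulting feature stack.

import mxnet as mx
from mxnet.gluon import nn
from mxnet.initializer import Xavier

def make_features(layers, filters, batch_norm=False):
    featurizer = nn.HybridSequential(prefix='')
    for i, num in enumerate(layers):
        for _ in range(num):
            featurizer.add(nn.Conv2D(filters[i], kernel_size=3, padding=1,
                                     weight_initializer=Xavier(rnd_type='gaussian',
                                                               factor_type='out',
                                                               magnitude=2),
                                     bias_initializer='zeros'))
            if batch_norm:
                featurizer.add(nn.BatchNorm())
            featurizer.add(nn.Activation('relu'))
        featurizer.add(nn.MaxPool2D(strides=2))
    return featurizer

features = make_features([1, 1, 2, 2, 2], [64, 128, 256, 512, 512])  # VGG-11 spec
features.initialize()
print(features(mx.nd.zeros((1, 3, 224, 224))).shape)  # (1, 512, 7, 7)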
Example 2: _make_features
# Required import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def _make_features(self, layers, filters, batch_norm):
    featurizer = mx.gluon.nn.HybridSequential(prefix='')
    for i, num in enumerate(layers):
        for _ in range(num):
            featurizer.add(Conv2D(filters[i], kernel_size=3, padding=1,
                                  weight_initializer=Xavier(rnd_type='gaussian',
                                                            factor_type='out',
                                                            magnitude=2),
                                  bias_initializer='zeros'))
            if batch_norm:
                featurizer.add(BatchNorm())
            featurizer.add(Activation('relu'))
        featurizer.add(MaxPool2D(strides=2))
    return featurizer
Example 3: _init_weight
# Required import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def _init_weight(self, name, arr):
    if name in self._kwargs.keys():
        init_params = self._kwargs[name]
        for (k, v) in init_params.items():
            if k.lower() == "normal":
                random.normal(0, v, out=arr)
            elif k.lower() == "uniform":
                random.uniform(-v, v, out=arr)
            elif k.lower() == "orthogonal":
                raise NotImplementedError("Orthogonal initialization is not supported at the moment")
            elif k.lower() == "xavier":
                xa = Xavier(v[0], v[1], v[2])
                xa(name, arr)
            else:
                raise NotImplementedError("Unsupported initializer: %s" % k)
Example 4: __init__
# Required import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def __init__(self, freeze=False, batch_norm=False, **kwargs):
    super(E2FAR, self).__init__(**kwargs)
    with self.name_scope():
        self.layers = [2, 2, 3, 3]
        self.filters = [64, 128, 256, 512]
        self.hidden_units = [4096, 1024]
        self.backbone = self._make_features(self.layers, self.filters, batch_norm)
        self.extra_backbone = self._make_features([3], [512], batch_norm)
        self.conv6 = gluon.nn.Conv2D(512, kernel_size=5, strides=2, padding=1,
                                     weight_initializer=Xavier(rnd_type='gaussian',
                                                               factor_type='out',
                                                               magnitude=2),
                                     bias_initializer='zeros')
        self.conv7 = gluon.nn.Conv2D(512, kernel_size=1,
                                     weight_initializer=Xavier(rnd_type='gaussian',
                                                               factor_type='out',
                                                               magnitude=2),
                                     bias_initializer='zeros')
        self.conv8 = gluon.nn.Conv2D(512, kernel_size=1,
                                     weight_initializer=Xavier(rnd_type='gaussian',
                                                               factor_type='out',
                                                               magnitude=2),
                                     bias_initializer='zeros')
        self.shape_regressor = self._make_prediction(out_dim=199)
        self.exp_regressor = self._make_prediction(out_dim=29)
    if freeze:
        for _, w in self.backbone.collect_params().items():
            w.grad_req = 'null'
        for _, w in self.extra_backbone.collect_params().items():
            w.grad_req = 'null'
Example 5: _make_features
# Required import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def _make_features(self, layers, filters, batch_norm):
    featurizer = gluon.nn.HybridSequential(prefix='')
    for i, num in enumerate(layers):
        for _ in range(num):
            featurizer.add(gluon.nn.Conv2D(filters[i], kernel_size=3, padding=1,
                                           weight_initializer=Xavier(rnd_type='gaussian',
                                                                     factor_type='out',
                                                                     magnitude=2),
                                           bias_initializer='zeros'))
            if batch_norm:
                featurizer.add(gluon.nn.BatchNorm())
            featurizer.add(gluon.nn.Activation('relu'))
        featurizer.add(gluon.nn.MaxPool2D(strides=2))
    return featurizer
Example 6: __init__
# Required import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def __init__(self, layers, filters, batch_norm=False, **kwargs):
    super(VGGAtrousBase, self).__init__(**kwargs)
    assert len(layers) == len(filters)
    self.init = {
        'weight_initializer': Xavier(
            rnd_type='gaussian', factor_type='out', magnitude=2),
        'bias_initializer': 'zeros'
    }
    with self.name_scope():
        # we use pre-trained weights from caffe, initial scale must change
        init_scale = mx.nd.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1)) * 255
        self.init_scale = self.params.get_constant('init_scale', init_scale)
        self.stages = nn.HybridSequential()
        for l, f in zip(layers, filters):
            stage = nn.HybridSequential(prefix='')
            with stage.name_scope():
                for _ in range(l):
                    stage.add(nn.Conv2D(f, kernel_size=3, padding=1, **self.init))
                    if batch_norm:
                        stage.add(nn.BatchNorm())
                    stage.add(nn.Activation('relu'))
            self.stages.add(stage)
        # use dilated convolution instead of dense layers
        stage = nn.HybridSequential(prefix='dilated_')
        with stage.name_scope():
            stage.add(nn.Conv2D(1024, kernel_size=3, padding=6, dilation=6, **self.init))
            if batch_norm:
                stage.add(nn.BatchNorm())
            stage.add(nn.Activation('relu'))
            stage.add(nn.Conv2D(1024, kernel_size=1, **self.init))
            if batch_norm:
                stage.add(nn.BatchNorm())
            stage.add(nn.Activation('relu'))
        self.stages.add(stage)
        # normalize layer for the 4th stage
        self.norm4 = Normalize(filters[3], 20)
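For reference, this backbone is usually paired with the VGG-16 layout, i.e. five stages of 3x3 convolutions. A minimal construction sketch, assuming VGGAtrousBase and its Normalize helper are defined as above (the spec values follow the standard VGG-16 layout):

# VGG-16 spec: per-stage conv counts and channel widths.
layers, filters = [2, 2, 3, 3, 3], [64, 128, 256, 512, 512]
net = VGGAtrousBase(layers, filters, batch_norm=False)
net.initialize()  # every Conv2D picks up the Xavier config from self.init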
Example 7: _make_features
# Required import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def _make_features(self, layers, filters, batch_norm):
    featurizer = nn.HybridSequential(prefix='')
    for i, num in enumerate(layers):
        for _ in range(num):
            featurizer.add(nn.Conv2D(filters[i], kernel_size=3, padding=1,
                                     weight_initializer=Xavier(rnd_type='gaussian',
                                                               factor_type='out',
                                                               magnitude=2),
                                     bias_initializer='zeros'))
            if batch_norm:
                featurizer.add(nn.BatchNorm())
            featurizer.add(nn.Activation('relu'))
        featurizer.add(nn.MaxPool2D(strides=2))
    return featurizer
Example 8: load_params
# Required import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def load_params(self, inference, init=initializer.Uniform(), postfix='epoch'):
    """
    Load the network parameters.
    :param inference: network
    :param init: initializer function
    :param postfix: checkpoint filename postfix
    :return:
    """
    if self.args.training:
        if self.args.pretrained:
            print('load the weights for features from path: %s' % self.args.model_path)
            inference.features.load_parameters(self.args.model_path, self.args.ctx, ignore_extra=True)
            print('initialize the weights for embeds and output')
            inference.embeds.initialize(init=initializer.Xavier(magnitude=2.24), ctx=self.args.ctx)
            inference.output.initialize(init=initializer.Xavier(magnitude=2.24), ctx=self.args.ctx)
        elif self.args.model_path.endswith('.params'):
            print('load the weights from path: %s' % self.args.model_path)
            inference.load_parameters(self.args.model_path, self.args.ctx)
        elif self.args.start_epoch > 0:
            print('load the weights from path: %s' % os.path.join(
                self.args.ckpt, '%s-%s-%04d.params' % (self.args.bb, postfix, 0)))
            inference.load_parameters(os.path.join(
                self.args.ckpt, '%s-%s-%04d.params' % (self.args.bb, postfix, 0)), self.args.ctx)
        else:
            print('Initialize the weights')
            inference.initialize(init, ctx=self.args.ctx)
    else:
        print('load the weights from path: %s' % self.args.model_path)
        inference.load_parameters(self.args.model_path, self.args.ctx)
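The magnitude=2.24 Xavier used for the freshly created embeds and output heads is easy to reproduce in isolation. A minimal sketch with a stand-in Dense head (the layer and shapes are our placeholders):

import mxnet as mx
from mxnet import initializer
from mxnet.gluon import nn

head = nn.Dense(256)  # stand-in for inference.embeds / inference.output
head.initialize(init=initializer.Xavier(magnitude=2.24), ctx=mx.cpu())
head(mx.nd.zeros((1, 512)))         # deferred shape inference happens on first forward
print(head.weight.data().shape)     # (256, 512)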
Example 9: train
# Required import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def train(params, loader, model=None):
    epoch = params.get('epoch', 10)
    verbose = params.get("verbose", True)
    batch_size = params.get("batch_size", 32)
    if model is None:
        class_name = params["class_name"]
        layer_num = params.get("layer_num", 5)
        class_num = params.get("class_num", 3)
        s = params.get("s", 4)
        b = params.get("b", 2)
        yolo = Yolo(layer_num, class_num, s=s, b=b, class_name=class_name)
        yolo.initialize(init=Xavier(magnitude=0.02))
    else:
        print("model load finished")
        layer_num = model.layer_num
        class_num = model.class_num
        s = model.s
        b = model.b
        yolo = model
    if verbose:
        print("train params: \n\tepoch:%d \n\tlayer_num:%d \n\tclass_num:%d \n\ts:%d \n\tb:%d" %
              (epoch, layer_num, class_num, s, b))
    sgd = optimizer.SGD(momentum=0.7, learning_rate=0.005)
    trainer = gluon.Trainer(yolo.collect_params(), sgd)
    loss_func = TotalLoss(s=s, c=class_num, b=b)  # build the loss once, not per batch
    for ep in range(epoch):
        loader.reset()
        mean_loss = 0
        t1 = time()
        for i, batch in enumerate(loader):
            x = batch.data[0]
            y = batch.label[0].reshape((-1, 5))
            y = translate_y(y, yolo.s, yolo.b, yolo.class_num)
            y = nd.array(y)
            with autograd.record():
                ypre = yolo(x)  # (batch_size, output_dim)
                loss = nd.mean(loss_func(ypre, y))
            mean_loss += loss.asscalar()
            loss.backward()
            trainer.step(batch_size)
        t2 = time()
        if verbose:
            # average over the number of batches, not the hard-coded 32
            print("epoch:%d/%d loss:%.5f time:%.4f" % (
                ep + 1, epoch, mean_loss / (i + 1), t2 - t1),
                flush=True)
            print()
    return yolo
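train() reads its configuration from a plain dict; everything except "class_name" has a default. A hypothetical params dict covering the keys consumed above (the label names are placeholders, and s/b are read here in their usual YOLO meaning of grid size and boxes per cell):

# Hypothetical configuration for train(); only "class_name" is required.
params = {
    "class_name": ["cat", "dog", "bird"],  # placeholder label names
    "class_num": 3,
    "layer_num": 5,
    "s": 4,            # the image is split into an s x s grid
    "b": 2,            # boxes predicted per grid cell
    "epoch": 10,
    "batch_size": 32,
    "verbose": True,
}
# yolo = train(params, loader)  # loader: an mx.io-style iterator with .reset()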
Example 10: train2
# Required import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def train2(params, loader: BaseDataLoader, model=None):
    epoch = params.get('epoch', 10)
    verbose = params.get("verbose", True)
    batch_size = params.get("batch_size", 32)
    if model is None:
        layer_num = params.get("layer_num", 5)
        class_num = params.get("class_num", 3)
        s = params.get("s", 4)
        b = params.get("b", 2)
        yolo = Yolo(layer_num, class_num, s=s, b=b)
        yolo.initialize(init=Xavier(magnitude=0.02))
    else:
        print("model load finished")
        layer_num = model.layer_num
        class_num = model.class_num
        s = model.s
        b = model.b
        yolo = model
    if verbose:
        print("train params: \n\tepoch:%d \n\tlayer_num:%d \n\tclass_num:%d \n\ts:%d \n\tb:%d" %
              (epoch, layer_num, class_num, s, b))
    sgd = optimizer.SGD(momentum=0.7, learning_rate=0.0025)
    trainer = gluon.Trainer(yolo.collect_params(), sgd)
    loss_func = TotalLoss(s=s, c=class_num, b=b)  # build the loss once, not per batch
    for ep in range(epoch):
        loss = 0
        all_batch = int(loader.data_number() / batch_size)
        t1 = time()
        for _ in range(all_batch):
            x, y = loader.next_batch(batch_size)
            with autograd.record():
                ypre = yolo(x)  # (batch_size, output_dim)
                loss = nd.mean(loss_func(ypre, y))
            loss.backward()
            trainer.step(batch_size)
        t2 = time()
        if verbose:
            # note: this reports the loss of the last batch in the epoch
            print("epoch:%d/%d loss:%.5f time:%.4f" % (
                ep + 1, epoch, loss.asscalar(), t2 - t1),
                flush=True)
    return yolo