

Python initializer.Xavier Method Code Examples

This article collects and summarizes typical usage examples of the mxnet.initializer.Xavier method in Python. If you are wondering how exactly to use initializer.Xavier, how to call it, or what working examples look like, the curated code samples below may help. You can also explore further usage examples from the module it belongs to, mxnet.initializer.


The following presents 10 code examples of the initializer.Xavier method, sorted by popularity by default.
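
Before the project examples, here is a minimal, self-contained sketch written for this article (not taken from any of the projects below). It shows the two ways Xavier typically appears in the examples: as a per-layer weight_initializer and as the default initializer passed to initialize(); the layer sizes are illustrative.

import mxnet as mx
from mxnet.gluon import nn
from mxnet.initializer import Xavier

net = nn.HybridSequential()
net.add(nn.Conv2D(64, kernel_size=3, padding=1,
                  # MSRA-style setting used throughout the examples below:
                  # gaussian samples scaled by the layer's fan-out
                  weight_initializer=Xavier(rnd_type='gaussian',
                                            factor_type='out',
                                            magnitude=2),
                  bias_initializer='zeros'),
        nn.Activation('relu'),
        nn.Dense(10))
# Layers without an explicit weight_initializer fall back to this default.
net.initialize(init=Xavier(magnitude=2.24), ctx=mx.cpu())

x = mx.nd.random.uniform(shape=(1, 3, 32, 32))
print(net(x).shape)  # (1, 10)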

Example 1: _make_features

# Module to import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def _make_features(self, layers, filters, batch_norm):
        featurizer = nn.HybridSequential(prefix='')
        for i, num in enumerate(layers):
            for _ in range(num):
                featurizer.add(nn.Conv2D(filters[i], kernel_size=3, padding=1,
                                         weight_initializer=Xavier(rnd_type='gaussian',
                                                                   factor_type='out',
                                                                   magnitude=2),
                                         bias_initializer='zeros',
                                         #name = 'conv%s_%s'%(str(i+1),str(_+1))\
                                         ))
                if batch_norm:
                    featurizer.add(nn.BatchNorm())
                featurizer.add(nn.Activation('relu'))
            featurizer.add(nn.MaxPool2D(strides=2))
        return featurizer 
Author: zzdang, Project: cascade_rcnn_gluon, Lines: 18, Source: vgg16_pruned.py

Example 2: _make_features

# Module to import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def _make_features(self, layers, filters, batch_norm):
        featurizer = mx.gluon.nn.HybridSequential(prefix='')
        for i, num in enumerate(layers):
            for _ in range(num):
                featurizer.add(Conv2D(filters[i], kernel_size=3, padding=1,
                                         weight_initializer=Xavier(rnd_type='gaussian',
                                                                   factor_type='out',
                                                                   magnitude=2),
                                         bias_initializer='zeros'))
                if batch_norm:
                    featurizer.add(BatchNorm())
                featurizer.add(Activation('relu'))
            featurizer.add(MaxPool2D(strides=2))
        return featurizer 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 16, Source: vgg.py

Example 3: _init_weight

# Module to import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def _init_weight(self, name, arr):
        if name in self._kwargs.keys():
            init_params = self._kwargs[name]
            for (k, v) in init_params.items():
                if k.lower() == "normal":
                    random.normal(0, v, out=arr)
                elif k.lower() == "uniform":
                    random.uniform(-v, v, out=arr)
                elif k.lower() == "orthogonal":
                    raise NotImplementedError("Not support at the moment")
                elif k.lower() == "xavier":
                    xa = Xavier(v[0], v[1], v[2])
                    xa(name, arr)
        else:
            raise NotImplementedError("Not support") 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 17, Source: text_cnn.py
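
For context, _init_weight above relies on a configuration dict (self._kwargs) that maps parameter names to an initialization method; for the "xavier" branch the value is a (rnd_type, factor_type, magnitude) triple passed positionally to Xavier. The sketch below is illustrative only: the parameter names and values are assumptions, not taken from the text_cnn.py project.

from mxnet import nd
from mxnet.initializer import Xavier

# Hypothetical configuration in the shape _init_weight expects
kwargs = {
    'embed_weight': {'uniform': 0.1},                     # uniform(-0.1, 0.1)
    'conv_weight': {'xavier': ('gaussian', 'out', 2.0)},  # Xavier(rnd_type, factor_type, magnitude)
}

# Emulate the "xavier" branch for a single weight array
arr = nd.zeros((64, 128, 3, 3))
rnd_type, factor_type, magnitude = kwargs['conv_weight']['xavier']
Xavier(rnd_type, factor_type, magnitude)('conv_weight', arr)
print(nd.max(nd.abs(arr)))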

Example 4: __init__

# Module to import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def __init__(self, freeze=False, batch_norm=False, **kwargs):
        super(E2FAR, self).__init__(**kwargs)
        with self.name_scope():
            self.layers = [2, 2, 3, 3]
            self.filters = [64, 128, 256, 512]
            self.hidden_units = [4096, 1024]
            self.backbone = self._make_features([2, 2, 3, 3], [64, 128, 256, 512], batch_norm)
            self.extra_backbone = self._make_features([3], [512], batch_norm)
            self.conv6 = gluon.nn.Conv2D(512, kernel_size=5, strides=2, padding=1,
                                         weight_initializer=Xavier(rnd_type='gaussian',
                                                                   factor_type='out',
                                                                   magnitude=2),
                                         bias_initializer='zeros')
            self.conv7 = gluon.nn.Conv2D(512, kernel_size=1,
                                         weight_initializer=Xavier(rnd_type='gaussian',
                                                                   factor_type='out',
                                                                   magnitude=2),
                                         bias_initializer='zeros')
            self.conv8 = gluon.nn.Conv2D(512, kernel_size=1,
                                         weight_initializer=Xavier(rnd_type='gaussian',
                                                                   factor_type='out',
                                                                   magnitude=2),
                                         bias_initializer='zeros')
            self.shape_regressor = self._make_prediction(out_dim=199)
            self.exp_regressor = self._make_prediction(out_dim=29)

        if freeze:
            for _, w in self.backbone.collect_params().items():
                w.grad_req = 'null'

            for _, w in self.extra_backbone.collect_params().items():
                w.grad_req = 'null' 
Author: ShownX, Project: mxnet-E2FAR, Lines: 34, Source: model.py

Example 5: _make_features

# Module to import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def _make_features(self, layers, filters, batch_norm):
        featurizer = gluon.nn.HybridSequential(prefix='')
        for i, num in enumerate(layers):
            for _ in range(num):
                featurizer.add(gluon.nn.Conv2D(filters[i], kernel_size=3, padding=1,
                                               weight_initializer=Xavier(rnd_type='gaussian',
                                                                         factor_type='out',
                                                                         magnitude=2),
                                               bias_initializer='zeros'))
                if batch_norm:
                    featurizer.add(gluon.nn.BatchNorm())
                featurizer.add(gluon.nn.Activation('relu'))
            featurizer.add(gluon.nn.MaxPool2D(strides=2))
        return featurizer 
Author: ShownX, Project: mxnet-E2FAR, Lines: 16, Source: model.py

Example 6: __init__

# Module to import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def __init__(self, layers, filters, batch_norm=False, **kwargs):
        super(VGGAtrousBase, self).__init__(**kwargs)
        assert len(layers) == len(filters)
        self.init = {
            'weight_initializer': Xavier(
                rnd_type='gaussian', factor_type='out', magnitude=2),
            'bias_initializer': 'zeros'
        }
        with self.name_scope():
            # we use pre-trained weights from caffe, initial scale must change
            init_scale = mx.nd.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1)) * 255
            self.init_scale = self.params.get_constant('init_scale', init_scale)
            self.stages = nn.HybridSequential()
            for l, f in zip(layers, filters):
                stage = nn.HybridSequential(prefix='')
                with stage.name_scope():
                    for _ in range(l):
                        stage.add(nn.Conv2D(f, kernel_size=3, padding=1, **self.init))
                        if batch_norm:
                            stage.add(nn.BatchNorm())
                        stage.add(nn.Activation('relu'))
                self.stages.add(stage)

            # use dilated convolution instead of dense layers
            stage = nn.HybridSequential(prefix='dilated_')
            with stage.name_scope():
                stage.add(nn.Conv2D(1024, kernel_size=3, padding=6, dilation=6, **self.init))
                if batch_norm:
                    stage.add(nn.BatchNorm())
                stage.add(nn.Activation('relu'))
                stage.add(nn.Conv2D(1024, kernel_size=1, **self.init))
                if batch_norm:
                    stage.add(nn.BatchNorm())
                stage.add(nn.Activation('relu'))
            self.stages.add(stage)

            # normalization layer for the 4th stage
            self.norm4 = Normalize(filters[3], 20) 
Author: dmlc, Project: gluon-cv, Lines: 40, Source: vgg_atrous.py
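
The idiom worth noting in this example is the shared initializer dict: the Xavier settings are defined once in self.init and unpacked into every Conv2D with **self.init, so all stages are initialized identically. Below is a stripped-down sketch of the same pattern outside the class; the layer sizes are illustrative assumptions, not the gluon-cv configuration.

from mxnet.gluon import nn
from mxnet.initializer import Xavier

init = {
    'weight_initializer': Xavier(rnd_type='gaussian', factor_type='out', magnitude=2),
    'bias_initializer': 'zeros',
}

stage = nn.HybridSequential()
stage.add(nn.Conv2D(64, kernel_size=3, padding=1, **init),
          nn.Activation('relu'),
          # dilated convolution in place of a dense layer, as in VGGAtrousBase
          nn.Conv2D(1024, kernel_size=3, padding=6, dilation=6, **init))
stage.initialize()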

Example 7: _make_features

# Module to import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def _make_features(self, layers, filters, batch_norm):
        featurizer = nn.HybridSequential(prefix='')
        for i, num in enumerate(layers):
            for _ in range(num):
                featurizer.add(nn.Conv2D(filters[i], kernel_size=3, padding=1,
                                         weight_initializer=Xavier(rnd_type='gaussian',
                                                                   factor_type='out',
                                                                   magnitude=2),
                                         bias_initializer='zeros'))
                if batch_norm:
                    featurizer.add(nn.BatchNorm())
                featurizer.add(nn.Activation('relu'))
            featurizer.add(nn.MaxPool2D(strides=2))
        return featurizer 
Author: dmlc, Project: gluon-cv, Lines: 16, Source: vgg.py

Example 8: load_params

# Module to import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def load_params(self, inference, init=initializer.Uniform(), postfix='epoch'):
        """
        load the parameters
        :param inference: network
        :param init: initializer function
        :param postfix: postfix
        :return:
        """
        if self.args.training:
            if self.args.pretrained:
                # print('load the weights from path: %s' % self.args.model_path)
                print('load the weights for features from path: %s' % self.args.model_path)
                inference.features.load_parameters(self.args.model_path, self.args.ctx, ignore_extra=True)
                print('initialize the weights for embeds and output')
                inference.embeds.initialize(init=initializer.Xavier(magnitude=2.24), ctx=self.args.ctx)
                inference.output.initialize(init=initializer.Xavier(magnitude=2.24), ctx=self.args.ctx)
            elif self.args.model_path.endswith('.params'):
                print('load the weights from path: %s' % self.args.model_path)
                inference.load_parameters(self.args.model_path, self.args.ctx)
            elif self.args.start_epoch > 0:
                print('load the weights from path: %s' % os.path.join(self.args.ckpt, '%s-%s-%04d.params'
                                                                      % (self.args.bb, postfix, 0)))
                inference.load_parameters(os.path.join(self.args.ckpt, '%s-%s-%04d.params' %
                                          (self.args.bb, postfix, 0)), self.args.ctx)
            else:
                print('Initialize the weights')
                inference.initialize(init, ctx=self.args.ctx)
        else:
            print('load the weights from path: %s' % self.args.model_path)
            inference.load_parameters(self.args.model_path, self.args.ctx) 
Author: aws-samples, Project: d-SNE, Lines: 32, Source: training_sda.py
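
The pattern in this example is a common fine-tuning setup: pretrained weights are loaded for the feature extractor while the newly added embedding/output layers are initialized with Xavier(magnitude=2.24). A minimal, hedged sketch of the same idea follows; the network, layer sizes, and file name are assumptions, not part of the d-SNE code.

import mxnet as mx
from mxnet import gluon, initializer
from mxnet.gluon import nn

class SmallNet(gluon.HybridBlock):
    def __init__(self, classes=10, **kwargs):
        super(SmallNet, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential()
            self.features.add(nn.Conv2D(32, kernel_size=3, padding=1, activation='relu'),
                              nn.GlobalAvgPool2D())
            self.output = nn.Dense(classes)

    def hybrid_forward(self, F, x):
        return self.output(self.features(x))

ctx = mx.cpu()
net = SmallNet()
# With real pretrained weights, only the backbone would be loaded from disk:
# net.features.load_parameters('backbone.params', ctx=ctx, ignore_extra=True)
net.features.initialize(ctx=ctx)  # stand-in for the pretrained load above
net.output.initialize(init=initializer.Xavier(magnitude=2.24), ctx=ctx)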

Example 9: train

# Module to import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def train(params, loader, model=None):
    epoch = params.get('epoch', 10)
    verbose = params.get("verbose", True)
    batch_size = params.get("batch_size", 32)
    if model is None:
        class_name = params["class_name"]
        layer_num = params.get("layer_num", 5)
        class_num = params.get("class_num", 3)
        s = params.get("s", 4)
        b = params.get("b", 2)
        yolo = Yolo(layer_num, class_num, s=s, b=b,class_name=class_name)

        yolo.initialize(init=Xavier(magnitude=0.02))
    else:
        print("model load finish")
        layer_num = model.layer_num
        class_num = model.class_num
        s = model.s
        b = model.b
        yolo = model
    if verbose:
        print("train params: \n\tepoch:%d \n\tlayer_num:%d \n\tclass_num:%d  \n\ts:%d  \n\tb:%d" % \
              (epoch, layer_num, class_num, s, b))

    ngd = optimizer.SGD(momentum=0.7, learning_rate=0.005)
    trainer = gluon.Trainer(yolo.collect_params(), ngd)

    for ep in range(epoch):
        loader.reset()
        mean_loss = 0
        t1 = time()
        for i, batch in enumerate(loader):
            x = batch.data[0]
            y = batch.label[0].reshape((-1, 5))
            y = translate_y(y, yolo.s, yolo.b, yolo.class_num)
            y = nd.array(y)
            with autograd.record():
                loss_func = TotalLoss(s=s, c=class_num, b=b)
                ypre = yolo(x)  # (32,output_dim)
                loss = nd.mean(loss_func(ypre, y))
                mean_loss += loss.asscalar()
            loss.backward()
            trainer.step(batch_size)
        t2 = time()
        if verbose:
            print("epoch:%d/%d  loss:%.5f  time:%4f" % (
                ep + 1, epoch, mean_loss/32, t2 - t1),
                  flush=True)

        print()
    return yolo 
Author: MashiMaroLjc, Project: YOLO, Lines: 53, Source: model.py

Example 10: train2

# Module to import: from mxnet import initializer [as alias]
# Or: from mxnet.initializer import Xavier [as alias]
def train2(params, loader: BaseDataLoader, model=None):
    epoch = params.get('epoch', 10)
    verbose = params.get("verbose", True)
    batch_size = params.get("batch_size", 32)
    if model is None:
        layer_num = params.get("layer_num", 5)
        class_num = params.get("class_num", 3)
        s = params.get("s", 4)
        b = params.get("b", 2)
        yolo = Yolo(layer_num, class_num, s=s, b=b)

        yolo.initialize(init=Xavier(magnitude=0.02))
    else:
        print("model load finish")
        layer_num = model.layer_num
        class_num = model.class_num
        s = model.s
        b = model.b
        yolo = model
    if verbose:
        print("train params: \n\tepoch:%d \n\tlayer_num:%d \n\tclass_num:%d  \n\ts:%d  \n\tb:%d" % \
              (epoch, layer_num, class_num, s, b))

    ngd = optimizer.SGD(momentum=0.7, learning_rate=0.0025)
    trainer = gluon.Trainer(yolo.collect_params(), ngd)

    for ep in range(epoch):
        loss = 0
        all_batch = int(loader.data_number() / batch_size)
        t1 = time()
        for _ in range(all_batch):
            x, y = loader.next_batch(batch_size)
            with autograd.record():
                loss_func = TotalLoss(s=s, c=class_num, b=b)
                ypre = yolo(x)  # (32,output_dim)
                loss = nd.mean(loss_func(ypre, y))
            loss.backward()
            trainer.step(batch_size)

        t2 = time()
        if verbose:
            print("epoch:%d/%d  loss:%.5f  time:%4f" % (
                ep + 1, epoch, loss.asscalar(), t2 - t1),
                  flush=True)

    return yolo 
Author: MashiMaroLjc, Project: YOLO, Lines: 48, Source: model.py


Note: The mxnet.initializer.Xavier examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not repost without permission.