

Python model_zoo.get_model Method Code Examples

This article collects typical code examples of the Python method gluoncv.model_zoo.get_model. If you are wondering what model_zoo.get_model does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples from the gluoncv.model_zoo module, where this method lives.


The following presents 15 code examples of the model_zoo.get_model method, sorted by popularity by default.
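
Before diving into the examples, here is a minimal usage sketch of get_model itself (a sketch only, assuming GluonCV and MXNet are installed; the model name 'resnet18_v1b' and the dummy input shape are illustrative choices, not taken from the examples below):

import mxnet as mx
from gluoncv import model_zoo

# Fetch an ImageNet-pretrained classification network by name.
net = model_zoo.get_model('resnet18_v1b', pretrained=True, ctx=mx.cpu())
x = mx.nd.random.uniform(shape=(1, 3, 224, 224))  # dummy NCHW image batch
out = net(x)
print(out.shape)  # (1, 1000): ImageNet class scores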

Example 1: __init__

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def __init__(self,
                 base_network='resnet18_v1b',
                 deconv_filters=(256, 128, 64),
                 deconv_kernels=(4, 4, 4),
                 pretrained_backbone=True,
                 in_channels=3,
                 **kwargs):
        super(DeconvResnet, self).__init__(**kwargs)
        assert 'resnet' in base_network
        from gluoncv.model_zoo import get_model
        net = get_model(base_network, pretrained=pretrained_backbone)
        feat = nn.HybridSequential()
        feat.add(*[net.conv1,
                   net.bn1,
                   net.relu,
                   net.maxpool,
                   net.layer1,
                   net.layer2,
                   net.layer3,
                   net.layer4])
        self.base_network = feat
        with self.name_scope():
            self.deconv = self._make_deconv_layer(deconv_filters, deconv_kernels) 
Developer: osmr, Project: imgclsmob, Lines of code: 25, Source file: oth_centernet2.py

Example 2: reset_class

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def reset_class(self, classes, reuse_weights=None):
        """Reset class categories and class predictors.

        Parameters
        ----------
        classes : iterable of str
            The new categories. ['apple', 'orange'] for example.
        reuse_weights : dict
            A {new_integer : old_integer} mapping dict or a {new_name : old_name} mapping dict,
            or a list of [name0, name1, ...] if class names don't change.
            This allows the new predictor to reuse the
            previously trained weights specified.

        Example
        -------
        >>> net = gluoncv.model_zoo.get_model('faster_rcnn_resnet50_v1b_coco', pretrained=True)
        >>> # use direct name to name mapping to reuse weights
        >>> net.reset_class(classes=['person'], reuse_weights={'person':'person'})
        >>> # or use integer mapping; person is the 14th category in VOC
        >>> net.reset_class(classes=['person'], reuse_weights={0:14})
        >>> # you can even mix them
        >>> net.reset_class(classes=['person'], reuse_weights={'person':14})
        >>> # or use a list of strings if class names don't change
        >>> net.reset_class(classes=['person'], reuse_weights=['person'])

        """
        super(FasterRCNN, self).reset_class(classes, reuse_weights)
        self._target_generator = RCNNTargetGenerator(self.num_class, self.sampler._max_pos,
                                                     self._batch_size) 
Developer: dmlc, Project: dgl, Lines of code: 31, Source file: faster_rcnn.py

Example 3: main

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def main():
    """SiamRPN test.

    Function
    --------
        Record the output of the model. The output for each video is written to a txt file
    named after the video.
        To evaluate the results, run benchmark.py on the generated txt files.
        Currently only testing on the OTB2015 dataset is supported.

    Parameters
    ----------
    dataset_root : str, default '~/mxnet/datasets/OTB2015'
                   Path to the folder containing the test dataset.
    model_path :   str, path to the model to test.
    results_path:  str, path to store the txt files of test results.
    """
    opt = parse_args()
    if opt.use_gpu:
        ctx = mx.gpu()
    else:
        ctx = mx.cpu()
    # dataloader
    dataset = OTBDataset(name=opt.dataset, dataset_root=opt.dataset_root, load_img=False)
    net = get_model(opt.model_name, pretrained=True)
    net.collect_params().reset_ctx(ctx)
    if opt.mode == 'hybrid':
        net.hybridize(static_alloc=True, static_shape=True)
    if opt.model_path:
        net.load_parameters(opt.model_path, ctx=ctx)
        print('Pre-trained model %s is successfully loaded.' % (opt.model_path))
    else:
        print('Pre-trained model is successfully loaded from the model zoo.')
    # build the tracker
    tracker = build_tracker(net)
    # record the output of the model.
    test(dataset, tracker, opt, ctx) 
Developer: dmlc, Project: gluon-cv, Lines of code: 39, Source file: test.py

Example 4: oth_resnet50_v1d

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def oth_resnet50_v1d(pretrained=False, **kwargs):
    from gluoncv.model_zoo import get_model
    net = get_model(
        'resnet50_v1d',
        pretrained=pretrained,
        **kwargs)
    net.in_size = (224, 224)
    return net 
Developer: osmr, Project: imgclsmob, Lines of code: 10, Source file: oth_simple_pose_resnet.py

Example 5: oth_resnet101_v1d

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def oth_resnet101_v1d(pretrained=False, **kwargs):
    from gluoncv.model_zoo import get_model
    net = get_model(
        'resnet101_v1d',
        pretrained=pretrained,
        **kwargs)
    net.in_size = (224, 224)
    return net 
Developer: osmr, Project: imgclsmob, Lines of code: 10, Source file: oth_simple_pose_resnet.py

Example 6: oth_resnet152_v1d

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def oth_resnet152_v1d(pretrained=False, **kwargs):
    from gluoncv.model_zoo import get_model
    net = get_model(
        'resnet152_v1d',
        pretrained=pretrained,
        **kwargs)
    net.in_size = (224, 224)
    return net 
Developer: osmr, Project: imgclsmob, Lines of code: 10, Source file: oth_simple_pose_resnet.py

Example 7: oth_mobilenet_v2_1_0

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def oth_mobilenet_v2_1_0(pretrained=False, **kwargs):
    if "in_channels" in kwargs:
        del kwargs["in_channels"]
    from gluoncv.model_zoo import get_model
    net = get_model(
        'mobilenetv2_1.0',
        pretrained=pretrained,
        **kwargs)
    net.in_size = (224, 224)
    return net 
Developer: osmr, Project: imgclsmob, Lines of code: 12, Source file: oth_simple_pose_resnet.py

Example 8: oth_mobilenet_v2_0_5

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def oth_mobilenet_v2_0_5(pretrained=False, **kwargs):
    if "in_channels" in kwargs:
        del kwargs["in_channels"]
    from gluoncv.model_zoo import get_model
    net = get_model(
        'mobilenetv2_0.5',
        pretrained=pretrained,
        **kwargs)
    net.in_size = (224, 224)
    return net 
Developer: osmr, Project: imgclsmob, Lines of code: 12, Source file: oth_simple_pose_resnet.py

Example 9: oth_mobilenet_v2_0_25

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def oth_mobilenet_v2_0_25(pretrained=False, **kwargs):
    if "in_channels" in kwargs:
        del kwargs["in_channels"]
    from gluoncv.model_zoo import get_model
    net = get_model(
        'mobilenetv2_0.25',
        pretrained=pretrained,
        **kwargs)
    net.in_size = (224, 224)
    return net 
Developer: osmr, Project: imgclsmob, Lines of code: 12, Source file: oth_simple_pose_resnet.py

Example 10: __init__

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def __init__(self,
                 base_network='resnet18_v1b',
                 deconv_filters=(256, 128, 64),
                 deconv_kernels=(4, 4, 4),
                 pretrained_base=True,
                 norm_layer=nn.BatchNorm,
                 norm_kwargs=None,
                 use_dcnv2=False,
                 in_channels=3,
                 classes=1000,
                 **kwargs):
        super(DeconvResnet, self).__init__(**kwargs)
        assert 'resnet' in base_network
        from gluoncv.model_zoo import get_model
        net = get_model(base_network, pretrained=pretrained_base)
        self._norm_layer = norm_layer
        self._norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
        self._use_dcnv2 = use_dcnv2
        if 'v1b' in base_network:
            feat = nn.HybridSequential()
            feat.add(*[net.conv1,
                       net.bn1,
                       net.relu,
                       net.maxpool,
                       net.layer1,
                       net.layer2,
                       net.layer3,
                       net.layer4])
            self.base_network = feat
        else:
            raise NotImplementedError()
        with self.name_scope():
            self.deconv = self._make_deconv_layer(deconv_filters, deconv_kernels) 
Developer: osmr, Project: imgclsmob, Lines of code: 35, Source file: oth_centernet.py
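
A construction sketch for the DeconvResnet block above (a sketch only: the class lives in the third-party file oth_centernet.py, so the import path, and the assumption that _make_deconv_layer returns a callable block, are not taken from the snippet):

import mxnet as mx
# from oth_centernet import DeconvResnet  # assumed import path

net = DeconvResnet(base_network='resnet18_v1b', pretrained_base=False)
net.initialize()                     # with pretrained_base=True only the deconv head would need init
x = mx.nd.zeros((1, 3, 512, 512))    # CenterNet-style input resolution (illustrative)
feat = net.base_network(x)           # stride-32 backbone features
up = net.deconv(feat)                # upsampled features consumed by the detection heads
print(feat.shape, up.shape)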

Example 11: __init__

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def __init__(self,
                 base_name,
                 base_attrs=('features',),
                 num_joints=17,
                 fixed_size=True,
                 pretrained_base=False,
                 pretrained_ctx=cpu(),
                 in_channels=3,
                 in_size=(256, 192),
                 **kwargs):
        super(MobilePose, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size

        with self.name_scope():
            from gluoncv.model_zoo import get_model
            base_model = get_model(base_name, pretrained=pretrained_base,
                                   ctx=pretrained_ctx)
            self.features = nn.HybridSequential()
            if base_name.startswith('mobilenetv2'):
                self.features.add(base_model.features[:-1])
            elif base_name.startswith('mobilenetv3'):
                self.features.add(base_model.features[:-4])
            elif base_name.startswith('mobilenet'):
                self.features.add(base_model.features[:-2])
            else:
                for layer in base_attrs:
                    self.features.add(getattr(base_model, layer))

            self.upsampling = nn.HybridSequential()
            self.upsampling.add(
                nn.Conv2D(256, 1, 1, 0, use_bias=False),
                DUC(512, 2),
                DUC(256, 2),
                DUC(128, 2),
                nn.Conv2D(num_joints, 1, use_bias=False,
                          weight_initializer=initializer.Normal(0.001)),
            ) 
Developer: osmr, Project: imgclsmob, Lines of code: 40, Source file: oth_mobile_pose.py
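
A construction sketch for the MobilePose block above (a sketch only: the import path is assumed, and the printed heatmap shape is an estimate for a stride-32 MobileNetV2 backbone followed by three 2x DUC stages):

import mxnet as mx
# from oth_mobile_pose import MobilePose  # assumed import path

net = MobilePose('mobilenetv2_1.0', pretrained_base=False)
net.initialize()
x = mx.nd.zeros((1, 3, 256, 192))    # the default in_size of the block above
feat = net.features(x)               # truncated MobileNetV2 backbone features
heatmaps = net.upsampling(feat)      # DUC upsampling to one heatmap per joint
print(heatmaps.shape)                # roughly (1, 17, 64, 48); treat this shape as an estimate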

Example 12: convert_from_gluon

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def convert_from_gluon(model_name, image_shape, classes=1000, logger=None):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    model_path = os.path.join(dir_path, 'model')
    if logger is not None:
        logger.info('Converting model from Gluon-CV ModelZoo %s... into path %s' % (model_name, model_path))
    net = get_model(name=model_name, classes=classes, pretrained=True)
    net.hybridize()
    x = mx.sym.var('data')
    y = net(x)
    y = mx.sym.SoftmaxOutput(data=y, name='softmax')
    symnet = mx.symbol.load_json(y.tojson())
    params = net.collect_params()
    args = {}
    auxs = {}    
    for param in params.values():
        v = param._reduce()
        k = param.name
        if 'running' in k:
            auxs[k] = v
        else:
            args[k] = v            
    mod = mx.mod.Module(symbol=symnet, context=mx.cpu(),
                        label_names = ['softmax_label'])
    mod.bind(for_training=False, 
             data_shapes=[('data', (1,) + 
                          tuple([int(i) for i in image_shape.split(',')]))])
    mod.set_params(arg_params=args, aux_params=auxs)
    dst_dir = os.path.join(dir_path, 'model')
    prefix = os.path.join(dir_path, 'model', model_name)
    if not os.path.isdir(dst_dir):
        os.mkdir(dst_dir)       
    mod.save_checkpoint(prefix, 0)
    return prefix 
Developer: mlperf, Project: training_results_v0.6, Lines of code: 35, Source file: imagenet_gen_qsym_mkldnn.py
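
A short usage sketch for convert_from_gluon (the model name 'resnet50_v1b' and the '3,224,224' shape string are illustrative; as the code shows, the checkpoint is saved under a model/ directory next to the calling script):

import mxnet as mx

prefix = convert_from_gluon('resnet50_v1b', image_shape='3,224,224', classes=1000)
# The exported pair <prefix>-symbol.json / <prefix>-0000.params can be reloaded
# with the symbolic API, e.g. for the MKL-DNN quantization flow of the source script.
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, 0)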

Example 13: test

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def test(images, dims, threshold, plt_hw, seq_len, no_yolo, beam, beam_size, context):
    print("Loading model...")
    if not no_yolo:
        yolo = model_zoo.get_model('yolo3_darknet53_voc', pretrained=True, ctx=context)
    wpod = WpodNet()
    wpod.load_parameters("model/wpod_net.params", ctx=context)
    vocab = Vocabulary()
    vocab.load("model/vocabulary.json")
    ocr = OcrNet(plt_hw, vocab.size(), seq_len)
    ocr.load_parameters("model/ocr_net.params", ctx=context)
    for path in images:
        print(path)
        raw = load_image(path)
        if no_yolo:
            detect_plate(wpod, vocab, ocr, raw, dims, threshold, plt_hw, beam, beam_size, context)
        else:
            ts = time.time()
            x, _ = data.transforms.presets.yolo.transform_test(raw, short=512)
            classes, scores, bboxes = yolo(x.as_in_context(context))
            bboxes[0, :, 0::2] = bboxes[0, :, 0::2] / x.shape[3] * raw.shape[1]
            bboxes[0, :, 1::2] = bboxes[0, :, 1::2] / x.shape[2] * raw.shape[0]
            vehicles = [
                fixed_crop(raw, bboxes[0, i]) for i in range(classes.shape[1])
                    if (yolo.classes[int(classes[0, i].asscalar())] == 'car' or
                        yolo.classes[int(classes[0, i].asscalar())] == 'bus') and
                        scores[0, i].asscalar() > 0.5
            ]
            print("yolo profiling: %f" % (time.time() - ts))
            for i, raw in enumerate(vehicles):
                print("vehicle[%d]:" % i)
                detect_plate(wpod, vocab, ocr, raw, dims, threshold, plt_hw, beam, beam_size, context) 
Developer: ufownl, Project: alpr_utils, Lines of code: 33, Source file: test.py

Example 14: faster_rcnn_resnet50_v1b_custom

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def faster_rcnn_resnet50_v1b_custom(classes, transfer=None, pretrained_base=True,
                                    pretrained=False, **kwargs):
    r"""Faster RCNN model with resnet50_v1b base network on custom dataset.

    Parameters
    ----------
    classes : iterable of str
        Names of custom foreground classes. `len(classes)` is the number of foreground classes.
    transfer : str or None
        If not `None`, will try to reuse pre-trained weights from faster RCNN networks trained
        on other datasets.
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for the model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str
        Boolean value controls whether to load the default pretrained weights for the base network.
        String value represents the hashtag for a certain version of pretrained weights.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    mxnet.gluon.HybridBlock
        Hybrid faster RCNN network.
    """
    if pretrained:
        warnings.warn("Custom models don't provide `pretrained` weights, ignored.")
    if transfer is None:
        from gluoncv.model_zoo.resnetv1b import resnet50_v1b
        base_network = resnet50_v1b(pretrained=pretrained_base, dilated=False,
                                    use_global_stats=True, **kwargs)
        features = nn.HybridSequential()
        top_features = nn.HybridSequential()
        for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
            features.add(getattr(base_network, layer))
        for layer in ['layer4']:
            top_features.add(getattr(base_network, layer))
        train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv',
                                   '.*layers(2|3|4)_conv'])
        return get_faster_rcnn(
            name='resnet50_v1b', dataset='custom', pretrained=pretrained,
            features=features, top_features=top_features, classes=classes,
            short=600, max_size=1000, train_patterns=train_patterns,
            nms_thresh=0.7, nms_topk=400, post_nms=100,
            roi_mode='align', roi_size=(14, 14), strides=16, clip=4.14,
            rpn_channel=1024, base_size=16, scales=(2, 4, 8, 16, 32),
            ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
            rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
            rpn_test_pre_nms=6000, rpn_test_post_nms=300, rpn_min_size=16,
            num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=3000,
            **kwargs)
    else:
        from gluoncv.model_zoo import get_model
        net = get_model('faster_rcnn_resnet50_v1b_' + str(transfer), pretrained=True, **kwargs)
        reuse_classes = [x for x in classes if x in net.classes]
        net.reset_class(classes, reuse_weights=reuse_classes)
    return net 
Developer: dmlc, Project: dgl, Lines of code: 61, Source file: faster_rcnn.py
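
A brief usage sketch for the custom constructor above (a sketch only: the two-class list is illustrative, and transfer='coco' is assumed to resolve to the published faster_rcnn_resnet50_v1b_coco weights, as the code suggests):

# Transfer from the COCO-pretrained detector, reusing weights for overlapping classes.
net = faster_rcnn_resnet50_v1b_custom(classes=['person', 'car'], transfer='coco')

# Or build from an ImageNet-pretrained backbone only; the RPN and box heads start random.
net_scratch = faster_rcnn_resnet50_v1b_custom(classes=['person', 'car'],
                                              transfer=None, pretrained_base=True)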

Example 15: resnet50_v1b_custom

# Required import: from gluoncv import model_zoo [as alias]
# Or: from gluoncv.model_zoo import get_model [as alias]
def resnet50_v1b_custom(nclass=400, pretrained=False, pretrained_base=True,
                        use_tsn=False, partial_bn=False,
                        num_segments=1, num_crop=1, root='~/.mxnet/models',
                        ctx=mx.cpu(), use_kinetics_pretrain=True, **kwargs):
    r"""ResNet50 model customized for any dataset.

    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load the pretrained base network; the extra layers are randomly initialized. Note that
        if `pretrained` is `True`, this has no effect.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    partial_bn : bool, default False.
        Freeze all batch normalization layers during training except the first layer.
    use_kinetics_pretrain : bool, default True.
        Whether to load pretrained weights on Kinetics400 dataset as model initialization.
    """
    model = ActionRecResNetV1b(depth=50,
                               nclass=nclass,
                               partial_bn=partial_bn,
                               num_segments=num_segments,
                               num_crop=num_crop,
                               dropout_ratio=0.5,
                               init_std=0.01)

    if use_kinetics_pretrain and not pretrained:
        from gluoncv.model_zoo import get_model
        kinetics_model = get_model('resnet50_v1b_kinetics400', nclass=400, pretrained=True)
        source_params = kinetics_model.collect_params()
        target_params = model.collect_params()
        assert len(source_params.keys()) == len(target_params.keys())

        pretrained_weights = []
        for layer_name in source_params.keys():
            pretrained_weights.append(source_params[layer_name].data())

        for i, layer_name in enumerate(target_params.keys()):
            if i + 2 == len(source_params.keys()):
                # skip the last dense layer
                break
            target_params[layer_name].set_data(pretrained_weights[i])
    model.collect_params().reset_ctx(ctx)
    return model 
Developer: dmlc, Project: gluon-cv, Lines of code: 57, Source file: actionrec_resnetv1b.py
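
A usage sketch for the custom action-recognition constructor (a sketch only: nclass=101, e.g. a UCF101-sized label set, is illustrative, and the expected output shape is an assumption about ActionRecResNetV1b's default single-segment consensus):

import mxnet as mx

model = resnet50_v1b_custom(nclass=101, use_kinetics_pretrain=True, ctx=mx.cpu())
x = mx.nd.zeros((1, 3, 224, 224))   # one sampled frame; clip sampling is handled by the data pipeline
out = model(x)
print(out.shape)                    # expected (1, 101) with the default num_segments=1, num_crop=1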


Note: The gluoncv.model_zoo.get_model examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce this compilation without permission.