

Python models.vgg11 Method: Code Examples

This article collects typical usage examples of the Python method torchvision.models.vgg11. If you are unsure what models.vgg11 does or how to call it, the curated code examples below may help. You can also explore further usage examples from the containing module, torchvision.models.


The following presents 12 code examples of the models.vgg11 method, sorted by popularity by default.
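Before the examples, here is a minimal sketch of what models.vgg11 itself returns. Note that the examples below use the older torchvision API, in which pretrained=True downloads ImageNet weights (newer torchvision releases use the weights= argument instead):

import torch
from torchvision import models

# VGG11 is configuration "A" of the VGG family: 8 convolutional layers plus 3 fully connected layers.
model = models.vgg11(pretrained=False)

x = torch.randn(1, 3, 224, 224)
feat = model.features(x)   # convolutional encoder output, shape (1, 512, 7, 7)
logits = model(x)          # classifier output, shape (1, 1000)
print(feat.shape, logits.shape)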

Example 1: __init__

# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg11 [as alias]
def __init__(self, num_classes=1, num_filters=32):
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)
        self.encoder = models.vgg11(pretrained=True).features
        self.relu = self.encoder[1]
        self.conv1 = self.encoder[0]
        self.conv2 = self.encoder[3]
        self.conv3s = self.encoder[6]
        self.conv3 = self.encoder[8]
        self.conv4s = self.encoder[11]
        self.conv4 = self.encoder[13]
        self.conv5s = self.encoder[16]
        self.conv5 = self.encoder[18]

        self.center = DecoderBlock(num_filters * 8 * 2, num_filters * 8 * 2, num_filters * 8)
        self.dec5 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 8)
        self.dec4 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 4)
        self.dec3 = DecoderBlock(num_filters * (8 + 4), num_filters * 4 * 2, num_filters * 2)
        self.dec2 = DecoderBlock(num_filters * (4 + 2), num_filters * 2 * 2, num_filters)
        self.dec1 = ConvRelu(num_filters * (2 + 1), num_filters)

        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1) 
Author: asanakoy | Project: kaggle_carvana_segmentation | Lines of code: 24 | Source file: unet_models.py
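The indices picked from self.encoder above (0, 3, 6, 8, 11, 13, 16, 18) are exactly the convolution layers inside VGG11's features sequential; the ReLU and MaxPool layers sit in between. A quick standalone sketch to verify this mapping:

from torchvision import models

encoder = models.vgg11(pretrained=False).features
for idx in (0, 3, 6, 8, 11, 13, 16, 18):
    print(idx, encoder[idx])
# 0:  Conv2d(3, 64)     3:  Conv2d(64, 128)   6:  Conv2d(128, 256)   8:  Conv2d(256, 256)
# 11: Conv2d(256, 512)  13: Conv2d(512, 512)  16: Conv2d(512, 512)  18: Conv2d(512, 512)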

Example 2: vgg11

# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg11 [as alias]
def vgg11(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A")
    """
    model = models.vgg11(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg11'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model 
Author: alexandonian | Project: pretorched-x | Lines of code: 11 | Source file: torchvision_models.py
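A minimal usage sketch for the wrapper above, assuming pretrained_settings, load_pretrained and modify_vggs are defined as in pretorched-x's torchvision_models.py:

# Hypothetical call: loads ImageNet weights through load_pretrained, then adapts
# the classifier head via modify_vggs (both helpers live in the same source file).
model = vgg11(num_classes=1000, pretrained='imagenet')
model.eval()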

Example 3: __init__

# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg11 [as alias]
def __init__(self, num_filters=32, pretrained=False):
        """
        :param num_filters:
        :param pretrained:
            False - no pre-trained network is used
            True  - encoder is pre-trained with VGG11
        """
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)

        self.encoder = models.vgg11(pretrained=pretrained).features

        self.relu = self.encoder[1]
        self.conv1 = self.encoder[0]
        self.conv2 = self.encoder[3]
        self.conv3s = self.encoder[6]
        self.conv3 = self.encoder[8]
        self.conv4s = self.encoder[11]
        self.conv4 = self.encoder[13]
        self.conv5s = self.encoder[16]
        self.conv5 = self.encoder[18]

        self.center = DecoderBlock(num_filters * 8 * 2, num_filters * 8 * 2, num_filters * 8)
        self.dec5 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 8)
        self.dec4 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 4)
        self.dec3 = DecoderBlock(num_filters * (8 + 4), num_filters * 4 * 2, num_filters * 2)
        self.dec2 = DecoderBlock(num_filters * (4 + 2), num_filters * 2 * 2, num_filters)
        self.dec1 = ConvRelu(num_filters * (2 + 1), num_filters)

        self.final = nn.Conv2d(num_filters, 1, kernel_size=1) 
Author: sneddy | Project: pneumothorax-segmentation | Lines of code: 33 | Source file: ternausnets.py

Example 4: _load_pytorch_model

# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg11 [as alias]
def _load_pytorch_model(model_name, summary):
    import torchvision.models as models
    switcher = {
        'alexnet': lambda: models.alexnet(pretrained=True).eval(),
        "vgg11": lambda: models.vgg11(pretrained=True).eval(),
        "vgg11_bn": lambda: models.vgg11_bn(pretrained=True).eval(),
        "vgg13": lambda: models.vgg13(pretrained=True).eval(),
        "vgg13_bn": lambda: models.vgg13_bn(pretrained=True).eval(),
        "vgg16": lambda: models.vgg16(pretrained=True).eval(),
        "vgg16_bn": lambda: models.vgg16_bn(pretrained=True).eval(),
        "vgg19": lambda: models.vgg19(pretrained=True).eval(),
        "vgg19_bn": lambda: models.vgg19_bn(pretrained=True).eval(),
        "resnet18": lambda: models.resnet18(pretrained=True).eval(),
        "resnet34": lambda: models.resnet34(pretrained=True).eval(),
        "resnet50": lambda: models.resnet50(pretrained=True).eval(),
        "resnet101": lambda: models.resnet101(pretrained=True).eval(),
        "resnet152": lambda: models.resnet152(pretrained=True).eval(),
        "squeezenet1_0": lambda: models.squeezenet1_0(pretrained=True).eval(),
        "squeezenet1_1": lambda: models.squeezenet1_1(pretrained=True).eval(),
        "densenet121": lambda: models.densenet121(pretrained=True).eval(),
        "densenet161": lambda: models.densenet161(pretrained=True).eval(),
        "densenet201": lambda: models.densenet201(pretrained=True).eval(),
        "inception_v3": lambda: models.inception_v3(pretrained=True).eval(),
    }

    _load_model = switcher.get(model_name, None)
    _model = _load_model()
    import torch
    if torch.cuda.is_available():
        _model = _model.cuda()
    from perceptron.models.classification.pytorch import PyTorchModel as ClsPyTorchModel
    import numpy as np
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    pmodel = ClsPyTorchModel(
        _model, bounds=(
            0, 1), num_classes=1000, preprocessing=(
            mean, std))
    return pmodel 
Author: advboxes | Project: perceptron-benchmark | Lines of code: 41 | Source file: tools.py
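A usage sketch for the helper above, assuming the perceptron-benchmark package is installed; the returned wrapper expects inputs in [0, 1] and applies ImageNet mean/std normalization internally:

# Hypothetical call; 'summary' is accepted but not used inside the function body shown above.
pmodel = _load_pytorch_model('vgg11', summary=None)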

Example 5: load_pytorch_model

# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg11 [as alias]
def load_pytorch_model(model_name):
    import torchvision.models as models
    switcher = {
        'alexnet': lambda: models.alexnet(pretrained=True).eval(),
        "vgg11": lambda: models.vgg11(pretrained=True).eval(),
        "vgg11_bn": lambda: models.vgg11_bn(pretrained=True).eval(),
        "vgg13": lambda: models.vgg13(pretrained=True).eval(),
        "vgg13_bn": lambda: models.vgg13_bn(pretrained=True).eval(),
        "vgg16": lambda: models.vgg16(pretrained=True).eval(),
        "vgg16_bn": lambda: models.vgg16_bn(pretrained=True).eval(),
        "vgg19": lambda: models.vgg19(pretrained=True).eval(),
        "vgg19_bn": lambda: models.vgg19_bn(pretrained=True).eval(),
        "resnet18": lambda: models.resnet18(pretrained=True).eval(),
        "resnet34": lambda: models.resnet34(pretrained=True).eval(),
        "resnet50": lambda: models.resnet50(pretrained=True).eval(),
        "resnet101": lambda: models.resnet101(pretrained=True).eval(),
        "resnet152": lambda: models.resnet152(pretrained=True).eval(),
        "squeezenet1_0": lambda: models.squeezenet1_0(pretrained=True).eval(),
        "squeezenet1_1": lambda: models.squeezenet1_1(pretrained=True).eval(),
        "densenet121": lambda: models.densenet121(pretrained=True).eval(),
        "densenet161": lambda: models.densenet161(pretrained=True).eval(),
        "densenet201": lambda: models.densenet201(pretrained=True).eval(),
        "inception_v3": lambda: models.inception_v3(pretrained=True).eval(),
    }

    _load_model = switcher.get(model_name, None)
    _model = _load_model()
    return _model 
Author: advboxes | Project: perceptron-benchmark | Lines of code: 30 | Source file: tools.py

Example 6: _init_modules

# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg11 [as alias]
def _init_modules(self):
        vgg = models.vgg11()
        if self.pretrained:
            print("Loading pretrained weights from %s" % (self.model_path))
            state_dict = torch.load(self.model_path)
            vgg.load_state_dict({k: v for k, v in state_dict.items() if k in vgg.state_dict()})

        vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:-1])

        # not using the last maxpool layer
        self.RCNN_base = nn.Sequential(*list(vgg.features._modules.values())[:-1])

        # Fix the layers before conv3:
        for layer in range(7):
            for p in self.RCNN_base[layer].parameters(): p.requires_grad = False

        # self.RCNN_base = _RCNN_base(vgg.features, self.classes, self.dout_base_model)

        self.RCNN_top = vgg.classifier

        # not using the last maxpool layer
        self.RCNN_cls_score = nn.Linear(4096, self.n_classes)

        self.stu_feature_adap = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, padding=1),
                                              nn.ReLU())

        if self.class_agnostic:
            self.RCNN_bbox_pred = nn.Linear(4096, 4)
        else:
            self.RCNN_bbox_pred = nn.Linear(4096, 4 * self.n_classes) 
Author: twangnh | Project: Distilling-Object-Detectors | Lines of code: 32 | Source file: vgg11.py
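The freezing loop above touches features[0..6], but only the convolutions at indices 0, 3 and 6 actually carry parameters, so those are the layers that end up frozen. A small standalone sketch to confirm this (using a bare torchvision VGG11 for illustration):

import torch.nn as nn
from torchvision import models

base = nn.Sequential(*list(models.vgg11().features._modules.values())[:-1])
for layer in range(7):
    for p in base[layer].parameters():
        p.requires_grad = False

frozen = [i for i, m in enumerate(base) if any(not p.requires_grad for p in m.parameters())]
print(frozen)  # [0, 3, 6] -- ReLU and MaxPool layers have no parameters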

Example 7: getNetwork

# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg11 [as alias]
def getNetwork(args):
    if (args.net_type == 'alexnet'):
        net = models.alexnet(pretrained=args.finetune)
        file_name = 'alexnet'
    elif (args.net_type == 'vggnet'):
        if(args.depth == 11):
            net = models.vgg11(pretrained=args.finetune)
        elif(args.depth == 13):
            net = models.vgg13(pretrained=args.finetune)
        elif(args.depth == 16):
            net = models.vgg16(pretrained=args.finetune)
        elif(args.depth == 19):
            net = models.vgg19(pretrained=args.finetune)
        else:
            print('Error : VGGnet should have depth of either [11, 13, 16, 19]')
            sys.exit(1)
        file_name = 'vgg-%s' %(args.depth)
    elif (args.net_type == 'squeezenet'):
        net = models.squeezenet1_0(pretrained=args.finetune)
        file_name = 'squeeze'
    elif (args.net_type == 'resnet'):
        net = resnet(args.finetune, args.depth)
        file_name = 'resnet-%s' %(args.depth)
    elif (args.net_type == 'inception'):
        net = pretrainedmodels.inceptionv3(num_classes=1000, pretrained='imagenet')
        file_name = 'inception-v3'
    elif (args.net_type == 'xception'):
        net = pretrainedmodels.xception(num_classes=1000, pretrained='imagenet')
        file_name = 'xception'
    else:
        print('Error : Network should be either [alexnet / squeezenet / vggnet / resnet / inception / xception]')
        sys.exit(1)

    return net, file_name 
Author: meliketoy | Project: fine-tuning.pytorch | Lines of code: 36 | Source file: main.py
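A usage sketch for getNetwork, assuming an argparse-style args object with the attributes referenced in the snippet:

import argparse

# Hypothetical arguments; finetune=True makes torchvision download ImageNet weights.
args = argparse.Namespace(net_type='vggnet', depth=11, finetune=True)
net, file_name = getNetwork(args)
print(file_name)  # 'vgg-11'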

Example 8: __init__

# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg11 [as alias]
def __init__(self, num_classes=1, num_filters=32, pretrained=False):
        """
        :param num_classes:
        :param num_filters:
        :param pretrained:
            False - no pre-trained network is used
            True  - encoder is pre-trained with VGG11
        """
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)

        self.encoder = models.vgg11(pretrained=pretrained).features

        self.relu = self.encoder[1]
        self.conv1 = self.encoder[0]
        self.conv2 = self.encoder[3]
        self.conv3s = self.encoder[6]
        self.conv3 = self.encoder[8]
        self.conv4s = self.encoder[11]
        self.conv4 = self.encoder[13]
        self.conv5s = self.encoder[16]
        self.conv5 = self.encoder[18]

        self.center = DecoderBlock(num_filters * 8 * 2, num_filters * 8 * 2, num_filters * 8)
        self.dec5 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 8)
        self.dec4 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 4)
        self.dec3 = DecoderBlock(num_filters * (8 + 4), num_filters * 4 * 2, num_filters * 2)
        self.dec2 = DecoderBlock(num_filters * (4 + 2), num_filters * 2 * 2, num_filters)
        self.dec1 = ConvRelu(num_filters * (2 + 1), num_filters)

        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1) 
Author: minerva-ml | Project: open-solution-data-science-bowl-2018 | Lines of code: 33 | Source file: unet_models.py

Example 9: vgg11

# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg11 [as alias]
def vgg11(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A")
    """
    model = models.vgg11(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg11'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    return model 
Author: CeLuigi | Project: models-comparison.pytorch | Lines of code: 10 | Source file: torchvision_models.py

Example 10: getNetwork

# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg11 [as alias]
def getNetwork(args):
    if (args.net_type == 'alexnet'):
        net = models.alexnet(pretrained=args.finetune)
        file_name = 'alexnet'
    elif (args.net_type == 'vggnet'):
        if(args.depth == 11):
            net = models.vgg11(pretrained=args.finetune)
        elif(args.depth == 13):
            net = models.vgg13(pretrained=args.finetune)
        elif(args.depth == 16):
            net = models.vgg16(pretrained=args.finetune)
        elif(args.depth == 19):
            net = models.vgg19(pretrained=args.finetune)
        else:
            print('Error : VGGnet should have depth of either [11, 13, 16, 19]')
            sys.exit(1)
        file_name = 'vgg-%s' %(args.depth)
    elif (args.net_type == 'resnet'):
        net = resnet(args.finetune, args.depth)

        file_name = 'resnet-%s' %(args.depth)
    else:
        print('Error : Network should be either [alexnet / vggnet / resnet]')
        sys.exit(1)

    return net, file_name 
Author: meliketoy | Project: gradcam.pytorch | Lines of code: 28 | Source file: main.py

Example 11: __init__

# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg11 [as alias]
def __init__(self, num_filters: int = 32, pretrained: bool = False) -> None:
        """

        Args:
            num_filters:
            pretrained:
                False - no pre-trained network is used
                True  - encoder is pre-trained with VGG11
        """
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)

        self.encoder = models.vgg11(pretrained=pretrained).features

        self.relu = self.encoder[1]
        self.conv1 = self.encoder[0]
        self.conv2 = self.encoder[3]
        self.conv3s = self.encoder[6]
        self.conv3 = self.encoder[8]
        self.conv4s = self.encoder[11]
        self.conv4 = self.encoder[13]
        self.conv5s = self.encoder[16]
        self.conv5 = self.encoder[18]

        self.center = DecoderBlock(
            num_filters * 8 * 2, num_filters * 8 * 2, num_filters * 8
        )
        self.dec5 = DecoderBlock(
            num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 8
        )
        self.dec4 = DecoderBlock(
            num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 4
        )
        self.dec3 = DecoderBlock(
            num_filters * (8 + 4), num_filters * 4 * 2, num_filters * 2
        )
        self.dec2 = DecoderBlock(
            num_filters * (4 + 2), num_filters * 2 * 2, num_filters
        )
        self.dec1 = ConvRelu(num_filters * (2 + 1), num_filters)

        self.final = nn.Conv2d(num_filters, 1, kernel_size=1) 
Author: ternaus | Project: TernausNet | Lines of code: 44 | Source file: models.py

Example 12: __init__

# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg11 [as alias]
def __init__(self, num_classes=1, num_filters=32, pretrained=False):
        """
        :param num_classes:
        :param num_filters:
        :param pretrained:
            False - no pre-trained network used
            vgg - encoder pre-trained with VGG11
        """
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)

        self.num_classes = num_classes

        if pretrained == 'vgg':
            self.encoder = models.vgg11(pretrained=True).features
        else:
            self.encoder = models.vgg11(pretrained=False).features

        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Sequential(self.encoder[0],
                                   self.relu)

        self.conv2 = nn.Sequential(self.encoder[3],
                                   self.relu)

        self.conv3 = nn.Sequential(
            self.encoder[6],
            self.relu,
            self.encoder[8],
            self.relu,
        )
        self.conv4 = nn.Sequential(
            self.encoder[11],
            self.relu,
            self.encoder[13],
            self.relu,
        )

        self.conv5 = nn.Sequential(
            self.encoder[16],
            self.relu,
            self.encoder[18],
            self.relu,
        )

        self.center = DecoderBlock(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv=True)
        self.dec5 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv=True)
        self.dec4 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 4, is_deconv=True)
        self.dec3 = DecoderBlock(256 + num_filters * 4, num_filters * 4 * 2, num_filters * 2, is_deconv=True)
        self.dec2 = DecoderBlock(128 + num_filters * 2, num_filters * 2 * 2, num_filters, is_deconv=True)
        self.dec1 = ConvRelu(64 + num_filters, num_filters)

        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1) 
Author: BloodAxe | Project: segmentation-networks-benchmark | Lines of code: 55 | Source file: unet11.py
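A usage sketch for the variant above, assuming the constructor belongs to a class named UNet11 (the class name is not shown in the snippet; the source file is unet11.py) and that DecoderBlock and ConvRelu are defined in the same module:

# Hypothetical instantiation; pretrained='vgg' loads ImageNet weights into the VGG11 encoder.
model = UNet11(num_classes=1, num_filters=32, pretrained='vgg')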


Note: The torchvision.models.vgg11 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce without permission.