

Python vgg.vgg16 Method: Code Examples

This article collects typical usage examples of the Python method torchvision.models.vgg.vgg16. If you are wondering what vgg.vgg16 does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from the torchvision.models.vgg module.


The following presents 14 code examples of the vgg.vgg16 method, sorted by popularity by default.
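For orientation, here is a minimal sketch, not taken from any of the projects below, of the pattern most of these examples share: import vgg16 from torchvision.models.vgg, load ImageNet weights, and freeze the convolutional stack as a fixed feature extractor. The pretrained=True argument follows the older torchvision API used throughout these examples; recent torchvision releases replace it with a weights= argument.

import torch
from torchvision.models.vgg import vgg16

# Load VGG-16 with ImageNet weights (older torchvision API; newer releases use weights=...).
model = vgg16(pretrained=True)

# Freeze the convolutional part and switch it to inference mode.
features = model.features.eval()
for param in features.parameters():
    param.requires_grad = False

# Extract feature maps for a dummy batch; the output is [1, 512, 7, 7] for a 224x224 input.
with torch.no_grad():
    feats = features(torch.randn(1, 3, 224, 224))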

Example 1: build_model

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def build_model(self):
        self.netG = Generator(n_residual_blocks=self.num_residuals, upsample_factor=self.upscale_factor, base_filter=64, num_channel=1).to(self.device)
        self.netD = Discriminator(base_filter=64, num_channel=1).to(self.device)
        self.feature_extractor = vgg16(pretrained=True)
        self.netG.weight_init(mean=0.0, std=0.2)
        self.netD.weight_init(mean=0.0, std=0.2)
        self.criterionG = nn.MSELoss()
        self.criterionD = nn.BCELoss()
        torch.manual_seed(self.seed)

        if self.GPU_IN_USE:
            torch.cuda.manual_seed(self.seed)
            self.feature_extractor.cuda()
            cudnn.benchmark = True
            self.criterionG.cuda()
            self.criterionD.cuda()

        self.optimizerG = optim.Adam(self.netG.parameters(), lr=self.lr, betas=(0.9, 0.999))
        self.optimizerD = optim.SGD(self.netD.parameters(), lr=self.lr / 100, momentum=0.9, nesterov=True)
        self.schedulerG = optim.lr_scheduler.MultiStepLR(self.optimizerG, milestones=[50, 75, 100], gamma=0.5)  # lr decay for G
        self.schedulerD = optim.lr_scheduler.MultiStepLR(self.optimizerD, milestones=[50, 75, 100], gamma=0.5)  # lr decay for D
Author: icpm | Project: super-resolution | Lines: 23 | Source: solver.py

Example 2: __init__

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def __init__(self):
        super(GeneratorLoss, self).__init__()
        vgg = vgg16(pretrained=True)
        loss_network = nn.Sequential(*list(vgg.features)[:31]).eval()
        for param in loss_network.parameters():
            param.requires_grad = False
        self.loss_network = loss_network
        self.mse_loss = nn.MSELoss()
        self.tv_loss = TVLoss() 
Author: amanchadha | Project: iSeeBetter | Lines: 11 | Source: loss.py
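The snippet above only defines the constructor. As a purely illustrative sketch, not the repository's actual forward(), a loss network truncated like this is typically combined with the pixel-wise MSE along the following lines; the 0.006 and 2e-8 weights are the values used in common SRGAN-style implementations and are an assumption here.

# Hypothetical usage sketch; sr is a super-resolved batch, hr its ground truth.
def generator_content_loss(self, sr, hr):
    image_loss = self.mse_loss(sr, hr)                          # pixel-wise term
    perception_loss = self.mse_loss(self.loss_network(sr),
                                    self.loss_network(hr))      # VGG feature (perceptual) term
    return image_loss + 0.006 * perception_loss + 2e-8 * self.tv_loss(sr)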

Example 3: vgg16

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def vgg16(*args, **kwargs):
    pretrained = False
    if 'pretrained' in kwargs:
        pretrained = kwargs['pretrained']
        kwargs['pretrained'] = False
    base_vgg = vgg(*args, **kwargs)
    conv_fc6 = nn.Conv2d(in_channels=512,
                         out_channels=4096,
                         kernel_size=7,
                         padding=3)

    conv_fc7 = nn.Conv2d(in_channels=4096,
                         out_channels=4096,
                         kernel_size=1,
                         padding=0)

    conv_fc8 = nn.Conv2d(in_channels=4096,
                         out_channels=2688,
                         kernel_size=1,
                         padding=0)

    fconv_layers = []
    for layer in (conv_fc6, conv_fc7, conv_fc8):
        fconv_layers += [layer, nn.ReLU(), nn.Dropout(p=0.2)]
    base_vgg = list(base_vgg.children())[:-1]
    base_vgg += fconv_layers
    model = nn.Sequential(*base_vgg)
    if pretrained:
        state_dict = model.state_dict()
        pretrained_state = model_zoo.load_url(VGG16_URL)
        for layer_name in pretrained_state:
            if layer_name in state_dict:
                state_dict[layer_name] = pretrained_state[layer_name]
        model.load_state_dict(state_dict)
    return model 
Author: BCV-Uniandes | Project: DMS | Lines: 37 | Source: model_factory.py

Example 4: __init__

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def __init__(self):
        super(PerceptualLossVGG16, self).__init__()
        vgg = vgg16(pretrained=True)
        loss_network = nn.Sequential(*list(vgg.features)[:31]).eval()
        for param in loss_network.parameters():
            param.requires_grad = False
        self.loss_network = loss_network
        self.mse_loss = nn.MSELoss() 
Author: ManuelFritsche | Project: real-world-sr | Lines: 10 | Source: loss.py

Example 5: vgg

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def vgg(**config):
    dataset = config.pop('dataset', 'imagenet')
    depth = config.pop('depth', 16)
    bn = config.pop('bn', True)

    if dataset == 'imagenet':
        config.setdefault('num_classes', 1000)
        if depth == 11:
            if bn is False:
                return vgg11(pretrained=False, **config)
            else:
                return vgg11_bn(pretrained=False, **config)
        if depth == 13:
            if bn is False:
                return vgg13(pretrained=False, **config)
            else:
                return vgg13_bn(pretrained=False, **config)
        if depth == 16:
            if bn is False:
                return vgg16(pretrained=False, **config)
            else:
                return vgg16_bn(pretrained=False, **config)
        if depth == 19:
            if bn is False:
                return vgg19(pretrained=False, **config)
            else:
                return vgg19_bn(pretrained=False, **config)
    elif dataset == 'cifar10':
        config.setdefault('num_classes', 10)
    elif dataset == 'cifar100':
        config.setdefault('num_classes', 100)
    config.setdefault('batch_norm', bn)
    return VGG(model_name[depth], **config) 
Author: eladhoffer | Project: convNet.pytorch | Lines: 35 | Source: vgg.py
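A brief usage sketch for the factory above; the calls mirror the branches in the snippet, and the names VGG, model_name, vgg16_bn, etc. come from the surrounding vgg.py module.

# Illustrative calls only.
imagenet_model = vgg(dataset='imagenet', depth=16, bn=True)   # -> torchvision vgg16_bn with 1000 classes
cifar_model = vgg(dataset='cifar10', depth=16, bn=True)       # -> repository VGG with num_classes=10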

Example 6: vgg_fc

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def vgg_fc(relu_end=True, linear_end=True):
    model = vgg16(pretrained=True)
    vfc = model.classifier
    del vfc._modules['6'] # Get rid of the final class (linear) layer
    del vfc._modules['5'] # Get rid of the dropout
    if not relu_end:
        del vfc._modules['4'] # Get rid of the ReLU
        if not linear_end:
            del vfc._modules['3'] # Get rid of the fc7 linear layer
    return vfc 
Author: rowanz | Project: neural-motifs | Lines: 12 | Source: resnet.py

Example 7: load_vgg

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def load_vgg(use_dropout=True, use_relu=True, use_linear=True, pretrained=True):
    model = vgg16(pretrained=pretrained)
    del model.features._modules['30']  # Get rid of the maxpool
    del model.classifier._modules['6']  # Get rid of class layer
    if not use_dropout:
        del model.classifier._modules['5']  # Get rid of dropout
        if not use_relu:
            del model.classifier._modules['4']  # Get rid of relu activation
            if not use_linear:
                del model.classifier._modules['3']  # Get rid of linear layer
    return model 
Author: rowanz | Project: neural-motifs | Lines: 13 | Source: object_detector.py
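A short, hypothetical usage sketch for load_vgg. With the final maxpool removed, the conv stack outputs 14x14 maps at stride 16, while the remaining fc6/fc7 head still expects 7x7 inputs; the adaptive pooling below is an illustrative stand-in for the region-pooling step a detector would use.

import torch
import torch.nn.functional as F

# Illustrative only: build the truncated backbone and push a dummy batch through it.
backbone = load_vgg(use_dropout=True, pretrained=True)
x = torch.randn(2, 3, 224, 224)
conv_feats = backbone.features(x)                  # [2, 512, 14, 14], stride 16
pooled = F.adaptive_max_pool2d(conv_feats, 7)      # stand-in for ROI pooling
fc_feats = backbone.classifier(pooled.flatten(1))  # [2, 4096]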

Example 8: vggnet_pytorch

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def vggnet_pytorch():
    return vgg.vgg16() 
Author: gzuidhof | Project: nn-transfer | Lines: 4 | Source: vggnet.py

Example 9: load_vgg

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def load_vgg(use_dropout=False, use_relu=True, use_linear=True, pretrained=True):
    model = vgg16(pretrained=pretrained)
    del model.features._modules['30']  # Get rid of the maxpool
    del model.classifier._modules['6']  # Get rid of class layer
    if not use_dropout:
        del model.classifier._modules['5']  # Get rid of dropout
        if not use_relu:
            del model.classifier._modules['4']  # Get rid of relu activation
            if not use_linear:
                del model.classifier._modules['3']  # Get rid of linear layer
    return model 
Author: KaihuaTang | Project: VCTree-Scene-Graph-Generation | Lines: 13 | Source: object_detector.py

Example 10: test_speedup_vgg16

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def test_speedup_vgg16(self):
        prune_model_l1(vgg16())
        model = vgg16()
        model.train()
        ms = ModelSpeedup(model, torch.randn(2, 3, 32, 32), MASK_FILE)
        ms.speedup_model()

        orig_model = vgg16()
        assert model.training
        assert model.features[2].out_channels == int(orig_model.features[2].out_channels * SPARSITY)
        assert model.classifier[0].in_features == int(orig_model.classifier[0].in_features * SPARSITY) 
Author: microsoft | Project: nni | Lines: 13 | Source: test_model_speedup.py

Example 11: test_integration

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def test_integration():
    transform_pipeline = Compose([Resize((64, 64)), ToTensor()])
    cifar10_train = DummyDataset(transform_pipeline)
    cifar10_test = DummyDataset(transform_pipeline)

    al_dataset = ActiveLearningDataset(cifar10_train,
                                       pool_specifics={'transform': transform_pipeline})
    al_dataset.label_randomly(10)

    use_cuda = False
    model = vgg.vgg16(pretrained=False,
                      num_classes=10)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0005)

    # We can now use BaaL to create the active learning loop.

    model = ModelWrapper(model, criterion)
    # We create an ActiveLearningLoop that will automatically label the most uncertain samples.
    # In this case, we use the widely used BALD heuristic.

    active_loop = ActiveLearningLoop(al_dataset,
                                     model.predict_on_dataset,
                                     heuristic=heuristics.BALD(),
                                     ndata_to_label=10,
                                     batch_size=10,
                                     iterations=10,
                                     use_cuda=use_cuda,
                                     workers=4)

    # We're all set!
    num_steps = 10
    for step in range(num_steps):
        old_param = list(map(lambda x: x.clone(), model.model.parameters()))
        model.train_on_dataset(al_dataset, optimizer=optimizer, batch_size=10,
                               epoch=5, use_cuda=use_cuda, workers=2)
        model.test_on_dataset(cifar10_test, batch_size=10, use_cuda=use_cuda,
                              workers=2)

        if not active_loop.step():
            break
        new_param = list(map(lambda x: x.clone(), model.model.parameters()))
        assert any([not np.allclose(i.detach(), j.detach())
                    for i, j in zip(old_param, new_param)])
    assert step == 4  # 10 + (4 * 10) = 50 labelled samples, so the loop stops at step 4
Author: ElementAI | Project: baal | Lines: 48 | Source: integration_test.py

Example 12: test_calibration_integration

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def test_calibration_integration():
    transform_pipeline = Compose([Resize((64, 64)), ToTensor()])
    cifar10_train = DummyDataset(transform_pipeline)
    cifar10_test = DummyDataset(transform_pipeline)

    # we don't create a separate training set for calibration, since the goal here is not
    # to calibrate
    al_dataset = ActiveLearningDataset(cifar10_train,
                                       pool_specifics={'transform': transform_pipeline})
    al_dataset.label_randomly(10)
    use_cuda = False
    model = vgg.vgg16(pretrained=False,
                      num_classes=10)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0005)

    wrapper = ModelWrapper(model, criterion)
    calibrator = DirichletCalibrator(wrapper=wrapper, num_classes=10,
                                     lr=0.001, reg_factor=0.01)


    for step in range(2):
        wrapper.train_on_dataset(al_dataset, optimizer=optimizer,
                                 batch_size=10, epoch=1,
                                 use_cuda=use_cuda, workers=0)

        wrapper.test_on_dataset(cifar10_test, batch_size=10,
                                use_cuda=use_cuda, workers=0)


        before_calib_param = list(map(lambda x: x.clone(), wrapper.model.parameters()))

        calibrator.calibrate(al_dataset, cifar10_test,
                            batch_size=10, epoch=5,
                            use_cuda=use_cuda, double_fit=False, workers=0)

        after_calib_param = list(map(lambda x: x.clone(), model.parameters()))


        assert all([np.allclose(i.detach(), j.detach())
                    for i, j in zip(before_calib_param, after_calib_param)])

        assert len(list(wrapper.model.modules())) < len(list(calibrator.calibrated_model.modules())) 
Author: ElementAI | Project: baal | Lines: 46 | Source: integration_test.py

Example 13: vggnet_keras

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def vggnet_keras():

    # Block 1
    img_input = Input((3, 224, 224))
    x = Conv2D(64, (3, 3), activation='relu',
               padding='same', name='features.0')(img_input)
    x = Conv2D(64, (3, 3), activation='relu',
               padding='same', name='features.2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu',
               padding='same', name='features.5')(x)
    x = Conv2D(128, (3, 3), activation='relu',
               padding='same', name='features.7')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu',
               padding='same', name='features.10')(x)
    x = Conv2D(256, (3, 3), activation='relu',
               padding='same', name='features.12')(x)
    x = Conv2D(256, (3, 3), activation='relu',
               padding='same', name='features.14')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.17')(x)
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.19')(x)
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.21')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.24')(x)
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.26')(x)
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.28')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='classifier.0')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, activation='relu', name='classifier.3')(x)
    x = Dropout(0.5)(x)
    x = Dense(1000, activation=None, name='classifier.6')(x)

    # Create model.
    model = Model(img_input, x, name='vgg16')

    return model 
Author: gzuidhof | Project: nn-transfer | Lines: 57 | Source: vggnet.py

Example 14: __init__

# Required import: from torchvision.models import vgg [as alias]
# Or: from torchvision.models.vgg import vgg16 [as alias]
def __init__(
            self,
            layer_name_mapping=None,
            normalize=True,
            device='gpu',
            vgg_model=None,
            full=False,
            inplace=False,
            distance=2,
    ):
        super(VGGLoss, self).__init__()
        self.layer_name_mapping = layer_name_mapping
        if self.layer_name_mapping is None:
            self.layer_name_mapping = {
                '0': 'conv1_0',
                # '1': 'relu1_0',
                '2': "conv1_1",
                # '3': 'relu1_1',
                '7': "conv2_2",
                # '8': "relu2_2",
                '14': "conv3_3",
                # '15': "relu3_3",
                '21': "conv4_3",
                # '22': "relu4_3",  # <- gradient is strangely huge... turn off for now
            }

        self.normalize = normalize
        self.device = device
        self.full = full
        if distance == 1:
            self.distance = F.l1_loss
        else:
            self.distance = F.mse_loss

        if vgg_model is None:
            if inplace:
                vgg_model = vgg.vgg16(pretrained=True)
            else:
                vgg_model = modified_vgg.vgg16(pretrained=True)

        vgg_model.to(self.device)
        vgg_model.eval()

        self.vgg_layers = vgg_model.features
        del vgg_model

        # normalization with ImageNet statistics
        self.mean_t = torch.tensor([0.485, 0.456, 0.406], dtype=torch.float32)
        self.std_t = torch.tensor([0.229, 0.224, 0.225], dtype=torch.float32)
        self.mean_t = self.mean_t.view(1, 3, 1, 1).to(self.device)
        self.std_t = self.std_t.view(1, 3, 1, 1).to(self.device) 
Author: khammernik | Project: sigmanet | Lines: 53 | Source: vgg_loss.py
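The constructor above only registers the truncated VGG features, the layer mapping, and the normalization buffers; the forward pass is not shown. Below is a minimal sketch of how such a perceptual loss is commonly evaluated, assuming inputs scaled to [0, 1]; it is an illustration, not the repository's actual forward().

# Hypothetical forward sketch, illustrative only.
def forward(self, input, target):
    if self.normalize:
        input = (input - self.mean_t) / self.std_t
        target = (target - self.mean_t) / self.std_t
    loss = 0.0
    x, y = input, target
    for name, layer in self.vgg_layers._modules.items():
        x, y = layer(x), layer(y)
        if name in self.layer_name_mapping:
            loss = loss + self.distance(x, y)
    return loss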


Note: The torchvision.models.vgg.vgg16 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; the source code remains the property of the original copyright holders, and any use or redistribution should follow the license of the corresponding project. Do not repost without permission.