當前位置: 首頁>>代碼示例>>Python>>正文


Python vgg.vgg16方法代碼示例

本文整理匯總了Python中torchvision.models.vgg.vgg16方法的典型用法代碼示例。如果您正苦於以下問題:Python vgg.vgg16方法的具體用法?Python vgg.vgg16怎麽用?Python vgg.vgg16使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在torchvision.models.vgg的用法示例。


在下文中一共展示了vgg.vgg16方法的14個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: build_model

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def build_model(self):
    """Construct the SRGAN training components on ``self``.

    Side effects: creates generator/discriminator networks, a pretrained
    VGG16 feature extractor, MSE/BCE criteria, Adam/SGD optimizers and one
    LR scheduler per optimizer; seeds torch (and CUDA when enabled).
    """
    self.netG = Generator(n_residual_blocks=self.num_residuals, upsample_factor=self.upscale_factor, base_filter=64, num_channel=1).to(self.device)
    self.netD = Discriminator(base_filter=64, num_channel=1).to(self.device)
    self.feature_extractor = vgg16(pretrained=True)
    self.netG.weight_init(mean=0.0, std=0.2)
    self.netD.weight_init(mean=0.0, std=0.2)
    self.criterionG = nn.MSELoss()
    self.criterionD = nn.BCELoss()
    torch.manual_seed(self.seed)

    if self.GPU_IN_USE:
        torch.cuda.manual_seed(self.seed)
        self.feature_extractor.cuda()
        cudnn.benchmark = True  # autotune conv kernels; fine for fixed input sizes
        self.criterionG.cuda()
        self.criterionD.cuda()

    self.optimizerG = optim.Adam(self.netG.parameters(), lr=self.lr, betas=(0.9, 0.999))
    self.optimizerD = optim.SGD(self.netD.parameters(), lr=self.lr / 100, momentum=0.9, nesterov=True)
    # BUG FIX: the original assigned both schedulers to self.scheduler, so the
    # generator's scheduler was immediately overwritten and never stepped.
    # Keep one scheduler per optimizer; self.scheduler keeps pointing at the
    # discriminator's scheduler (the original's surviving value) for backward
    # compatibility with any existing callers.
    self.schedulerG = optim.lr_scheduler.MultiStepLR(self.optimizerG, milestones=[50, 75, 100], gamma=0.5)  # lr decay
    self.schedulerD = optim.lr_scheduler.MultiStepLR(self.optimizerD, milestones=[50, 75, 100], gamma=0.5)  # lr decay
    self.scheduler = self.schedulerD
開發者ID:icpm,項目名稱:super-resolution,代碼行數:23,代碼來源:solver.py

示例2: __init__

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def __init__(self):
    """Perceptual GAN loss: frozen VGG16 features + MSE + total variation."""
    super(GeneratorLoss, self).__init__()
    # Take the first 31 modules of VGG16's feature stack, frozen, eval mode.
    backbone = vgg16(pretrained=True)
    feature_layers = list(backbone.features)[:31]
    network = nn.Sequential(*feature_layers).eval()
    for weight in network.parameters():
        weight.requires_grad = False
    self.loss_network = network
    self.mse_loss = nn.MSELoss()
    self.tv_loss = TVLoss()
開發者ID:amanchadha,項目名稱:iSeeBetter,代碼行數:11,代碼來源:loss.py

示例3: vgg16

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def vgg16(*args, **kwargs):
    """Build a fully-convolutional VGG16 variant.

    The torchvision classifier is dropped and replaced by three convolution
    layers (fc6/fc7/fc8 re-expressed as convs); the final layer emits 2688
    channels.  When ``pretrained=True`` is passed, matching weights are
    copied from the checkpoint at ``VGG16_URL``.
    """
    # Intercept 'pretrained' so the base builder never loads weights itself;
    # pretrained weights are merged manually below.
    pretrained = False
    if 'pretrained' in kwargs:
        pretrained = kwargs['pretrained']
        kwargs['pretrained'] = False
    base_vgg = vgg(*args, **kwargs)
    # fc6 as a 7x7 conv over the 512-channel feature map ('same' padding).
    conv_fc6 = nn.Conv2d(in_channels=512,
                         out_channels=4096,
                         kernel_size=7,
                         padding=3)

    # fc7 as a 1x1 conv.
    conv_fc7 = nn.Conv2d(in_channels=4096,
                         out_channels=4096,
                         kernel_size=1,
                         padding=0)

    # fc8 as a 1x1 conv; 2688 output channels (task-specific — the meaning
    # of this width is not visible from this file).
    conv_fc8 = nn.Conv2d(in_channels=4096,
                         out_channels=2688,
                         kernel_size=1,
                         padding=0)

    # Each conv is followed by ReLU and Dropout(p=0.2).
    fconv_layers = []
    for layer in (conv_fc6, conv_fc7, conv_fc8):
        fconv_layers += [layer, nn.ReLU(), nn.Dropout(p=0.2)]
    # Drop the base model's last child (its classifier) and append the
    # fully-convolutional head.
    base_vgg = list(base_vgg.children())[:-1]
    base_vgg += fconv_layers
    model = nn.Sequential(*base_vgg)
    if pretrained:
        # Copy only parameters whose names match the checkpoint.
        # NOTE(review): wrapping in a new nn.Sequential renumbers module
        # names, so keys may not line up with the checkpoint's 'features.*'
        # naming — verify that this loop actually copies anything.
        state_dict = model.state_dict()
        pretrained_state = model_zoo.load_url(VGG16_URL)
        for layer_name in pretrained_state:
            if layer_name in state_dict:
                state_dict[layer_name] = pretrained_state[layer_name]
        model.load_state_dict(state_dict)
    return model
開發者ID:BCV-Uniandes,項目名稱:DMS,代碼行數:37,代碼來源:model_factory.py

示例4: __init__

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def __init__(self):
    """Perceptual loss over the first 31 VGG16 feature modules (frozen)."""
    super(PerceptualLossVGG16, self).__init__()
    extractor = nn.Sequential(*list(vgg16(pretrained=True).features)[:31]).eval()
    # Freeze the extractor; it is used purely as a fixed feature map.
    for p in extractor.parameters():
        p.requires_grad = False
    self.loss_network = extractor
    self.mse_loss = nn.MSELoss()
開發者ID:ManuelFritsche,項目名稱:real-world-sr,代碼行數:10,代碼來源:loss.py

示例5: vgg

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def vgg(**config):
    """Build a VGG model for imagenet/cifar10/cifar100.

    For imagenet with a known depth (11/13/16/19) this delegates to the
    matching torchvision builder (bn or plain); otherwise it falls back to
    the project's VGG class keyed by ``model_name[depth]``.
    """
    dataset = config.pop('dataset', 'imagenet')
    depth = config.pop('depth', 16)
    bn = config.pop('bn', True)

    if dataset == 'imagenet':
        config.setdefault('num_classes', 1000)
        # (depth, use_bn) -> torchvision builder; anything else falls through.
        torchvision_builders = {
            (11, False): vgg11, (11, True): vgg11_bn,
            (13, False): vgg13, (13, True): vgg13_bn,
            (16, False): vgg16, (16, True): vgg16_bn,
            (19, False): vgg19, (19, True): vgg19_bn,
        }
        builder = torchvision_builders.get((depth, bn is not False))
        if builder is not None:
            return builder(pretrained=False, **config)
    elif dataset == 'cifar10':
        config.setdefault('num_classes', 10)
    elif dataset == 'cifar100':
        config.setdefault('num_classes', 100)
    config.setdefault('batch_norm', bn)
    return VGG(model_name[depth], **config)
開發者ID:eladhoffer,項目名稱:convNet.pytorch,代碼行數:35,代碼來源:vgg.py

示例6: vgg_fc

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def vgg_fc(relu_end=True, linear_end=True):
    """Return VGG16's classifier truncated after fc7 (optionally earlier)."""
    classifier = vgg16(pretrained=True).classifier
    # Always drop the final Linear (index '6') and preceding Dropout ('5').
    removed = ['6', '5']
    if not relu_end:
        removed.append('4')  # also drop the ReLU after fc7
        if not linear_end:
            removed.append('3')  # also drop fc7 itself
    for key in removed:
        del classifier._modules[key]
    return classifier
開發者ID:rowanz,項目名稱:neural-motifs,代碼行數:12,代碼來源:resnet.py

示例7: load_vgg

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def load_vgg(use_dropout=True, use_relu=True, use_linear=True, pretrained=True):
    """Load VGG16, drop the final max-pool and trim the classifier tail.

    Classifier layers are removed from the end; each ``use_*`` flag keeps
    the corresponding layer and everything before it.
    """
    model = vgg16(pretrained=pretrained)
    del model.features._modules['30']   # trailing max-pool of the feature stack
    del model.classifier._modules['6']  # class-score layer is never kept
    # Peel layers off the classifier's tail until a kept layer is reached.
    for keep, key in ((use_dropout, '5'), (use_relu, '4'), (use_linear, '3')):
        if keep:
            break
        del model.classifier._modules[key]
    return model
開發者ID:rowanz,項目名稱:neural-motifs,代碼行數:13,代碼來源:object_detector.py

示例8: vggnet_pytorch

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def vggnet_pytorch():
    """Return a randomly initialised torchvision VGG16."""
    model = vgg.vgg16()
    return model
開發者ID:gzuidhof,項目名稱:nn-transfer,代碼行數:4,代碼來源:vggnet.py

示例9: load_vgg

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def load_vgg(use_dropout=False, use_relu=True, use_linear=True, pretrained=True):
    """Load VGG16 without its last max-pool and with a trimmed classifier.

    Each ``use_*`` flag stops further trimming of the classifier tail;
    dropout is removed by default.
    """
    model = vgg16(pretrained=pretrained)
    del model.features._modules['30']   # drop the feature stack's max-pool
    del model.classifier._modules['6']  # drop the class-score layer
    if use_dropout:
        return model
    del model.classifier._modules['5']  # drop dropout
    if use_relu:
        return model
    del model.classifier._modules['4']  # drop the ReLU activation
    if use_linear:
        return model
    del model.classifier._modules['3']  # drop the linear layer
    return model
開發者ID:KaihuaTang,項目名稱:VCTree-Scene-Graph-Generation,代碼行數:13,代碼來源:object_detector.py

示例10: test_speedup_vgg16

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def test_speedup_vgg16(self):
    """Speed up a pruned VGG16 and check that layer shapes shrink by SPARSITY."""
    prune_model_l1(vgg16())
    pruned = vgg16()
    pruned.train()
    speedup = ModelSpeedup(pruned, torch.randn(2, 3, 32, 32), MASK_FILE)
    speedup.speedup_model()

    reference = vgg16()
    # Speedup must not flip the train/eval mode.
    assert pruned.training
    assert pruned.features[2].out_channels == int(reference.features[2].out_channels * SPARSITY)
    assert pruned.classifier[0].in_features == int(reference.classifier[0].in_features * SPARSITY)
開發者ID:microsoft,項目名稱:nni,代碼行數:13,代碼來源:test_model_speedup.py

示例11: test_integration

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def test_integration():
    """End-to-end BaaL active-learning smoke test.

    Labels 10 samples, then runs up to 10 train/test/label steps; asserts
    that training changes the model weights and that the loop stops at
    step 4 once the pool is exhausted.
    """
    transform_pipeline = Compose([Resize((64, 64)), ToTensor()])
    cifar10_train = DummyDataset(transform_pipeline)
    cifar10_test = DummyDataset(transform_pipeline)

    # The unlabelled pool must use the same transform as the labelled set.
    al_dataset = ActiveLearningDataset(cifar10_train,
                                       pool_specifics={'transform': transform_pipeline})
    al_dataset.label_randomly(10)

    use_cuda = False
    model = vgg.vgg16(pretrained=False,
                      num_classes=10)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0005)

    # We can now use BaaL to create the active learning loop.

    model = ModelWrapper(model, criterion)
    # We create an ActiveLearningLoop that will automatically label the most uncertain samples.
    # In this case, we use the widely used BALD heuristic.

    active_loop = ActiveLearningLoop(al_dataset,
                                     model.predict_on_dataset,
                                     heuristic=heuristics.BALD(),
                                     ndata_to_label=10,
                                     batch_size=10,
                                     iterations=10,
                                     use_cuda=use_cuda,
                                     workers=4)

    # We're all set!
    num_steps = 10
    for step in range(num_steps):
        # Snapshot parameters so we can verify training actually updates them.
        old_param = list(map(lambda x: x.clone(), model.model.parameters()))
        model.train_on_dataset(al_dataset, optimizer=optimizer, batch_size=10,
                               epoch=5, use_cuda=use_cuda, workers=2)
        model.test_on_dataset(cifar10_test, batch_size=10, use_cuda=use_cuda,
                              workers=2)

        # step() returns False once there is nothing left to label.
        if not active_loop.step():
            break
        new_param = list(map(lambda x: x.clone(), model.model.parameters()))
        assert any([not np.allclose(i.detach(), j.detach())
                    for i, j in zip(old_param, new_param)])
    assert step == 4  # 10 + (4 * 10) = 50, so it stops at iterations 4
開發者ID:ElementAI,項目名稱:baal,代碼行數:48,代碼來源:integration_test.py

示例12: test_calibration_integration

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def test_calibration_integration():
    """Integration test for DirichletCalibrator on top of a wrapped VGG16.

    Verifies that calibration leaves the wrapped model's own weights
    untouched and that the calibrated model gains extra modules.
    """
    transform_pipeline = Compose([Resize((64, 64)), ToTensor()])
    cifar10_train = DummyDataset(transform_pipeline)
    cifar10_test = DummyDataset(transform_pipeline)

    # we don't create different trainset for calibration since the goal is not
    # to calibrate
    al_dataset = ActiveLearningDataset(cifar10_train,
                                       pool_specifics={'transform': transform_pipeline})
    al_dataset.label_randomly(10)
    use_cuda = False
    model = vgg.vgg16(pretrained=False,
                      num_classes=10)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0005)

    wrapper = ModelWrapper(model, criterion)
    calibrator = DirichletCalibrator(wrapper=wrapper, num_classes=10,
                                     lr=0.001, reg_factor=0.01)


    for step in range(2):
        wrapper.train_on_dataset(al_dataset, optimizer=optimizer,
                                 batch_size=10, epoch=1,
                                 use_cuda=use_cuda, workers=0)

        wrapper.test_on_dataset(cifar10_test, batch_size=10,
                                use_cuda=use_cuda, workers=0)


        # Snapshot the base model's parameters before calibrating.
        before_calib_param = list(map(lambda x: x.clone(), wrapper.model.parameters()))

        calibrator.calibrate(al_dataset, cifar10_test,
                            batch_size=10, epoch=5,
                            use_cuda=use_cuda, double_fit=False, workers=0)

        after_calib_param = list(map(lambda x: x.clone(), model.parameters()))


        # Calibration must only train the calibration head, not the base model.
        assert all([np.allclose(i.detach(), j.detach())
                    for i, j in zip(before_calib_param, after_calib_param)])

        # The calibrated model wraps the base model, so it has more modules.
        assert len(list(wrapper.model.modules())) < len(list(calibrator.calibrated_model.modules()))
開發者ID:ElementAI,項目名稱:baal,代碼行數:46,代碼來源:integration_test.py

示例13: vggnet_keras

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def vggnet_keras():
    """Build a Keras VGG16 whose layer names mirror torchvision's state_dict keys.

    The 'features.N' / 'classifier.N' names allow weight transfer from a
    PyTorch VGG16; input is channels-first (3, 224, 224).
    """
    # (filters, conv layer names, pool name) for the five conv blocks.
    conv_blocks = [
        (64, ('features.0', 'features.2'), 'block1_pool'),
        (128, ('features.5', 'features.7'), 'block2_pool'),
        (256, ('features.10', 'features.12', 'features.14'), 'block3_pool'),
        (512, ('features.17', 'features.19', 'features.21'), 'block4_pool'),
        (512, ('features.24', 'features.26', 'features.28'), 'block5_pool'),
    ]

    img_input = Input((3, 224, 224))
    x = img_input
    for filters, conv_names, pool_name in conv_blocks:
        for conv_name in conv_names:
            x = Conv2D(filters, (3, 3), activation='relu',
                       padding='same', name=conv_name)(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name=pool_name)(x)

    # Classifier head: two ReLU FC layers with dropout, then raw logits.
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='classifier.0')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, activation='relu', name='classifier.3')(x)
    x = Dropout(0.5)(x)
    x = Dense(1000, activation=None, name='classifier.6')(x)

    # Create model.
    model = Model(img_input, x, name='vgg16')

    return model
開發者ID:gzuidhof,項目名稱:nn-transfer,代碼行數:57,代碼來源:vggnet.py

示例14: __init__

# 需要導入模塊: from torchvision.models import vgg [as 別名]
# 或者: from torchvision.models.vgg import vgg16 [as 別名]
def __init__(
        self,
        layer_name_mapping=None,
        normalize=True,
        device='gpu',
        vgg_model=None,
        full=False,
        inplace=False,
        distance=2,
):
    """Perceptual loss over selected VGG16 feature maps.

    Args:
        layer_name_mapping: dict mapping VGG feature indices (as strings)
            to layer labels; defaults to a conv1..conv4_3 selection.
        normalize: stored flag controlling input normalization (used
            elsewhere in the class).
        device: device for the VGG weights and normalization tensors.
            NOTE(review): 'gpu' is not a valid torch device string
            ('cuda' is) — confirm callers always pass a real device.
        vgg_model: optional pre-built VGG; when None, built from
            torchvision's vgg16 (inplace=True) or modified_vgg.vgg16.
        full: stored flag; semantics defined by the caller.
        inplace: choose torchvision's VGG16 (in-place ReLUs) over the
            modified variant.
        distance: 1 for L1 distance, anything else for MSE.
    """
    super(VGGLoss, self).__init__()
    self.layer_name_mapping = layer_name_mapping
    if self.layer_name_mapping is None:
        self.layer_name_mapping = {
            '0': 'conv1_0',
            # '1': 'relu1_0',
            '2': "conv1_1",
            # '3': 'relu1_1',
            '7': "conv2_2",
            # '8': "relu2_2",
            '14': "conv3_3",
            # '15': "relu3_3",
            '21': "conv4_3",
            # '22': "relu4_3",  # <- gradient is strangely huge... turn off for now
        }

    self.normalize = normalize
    self.device = device
    self.full = full
    if distance == 1:
        self.distance = F.l1_loss
    else:
        self.distance = F.mse_loss

    if vgg_model is None:
        if inplace:
            vgg_model = vgg.vgg16(pretrained=True)
        else:
            vgg_model = modified_vgg.vgg16(pretrained=True)

    vgg_model.to(self.device)
    vgg_model.eval()

    # Keep only the feature stack; the classifier is never used.
    self.vgg_layers = vgg_model.features
    del vgg_model

    # ImageNet channel statistics, shaped (1, 3, 1, 1) for broadcasting.
    # BUG FIX: the original swapped mean and std — [0.485, 0.456, 0.406]
    # is the ImageNet mean and [0.229, 0.224, 0.225] is the std.
    self.mean_t = torch.tensor([0.485, 0.456, 0.406], dtype=torch.float32)
    self.std_t = torch.tensor([0.229, 0.224, 0.225], dtype=torch.float32)
    self.mean_t = self.mean_t.view(1, 3, 1, 1).to(self.device)
    self.std_t = self.std_t.view(1, 3, 1, 1).to(self.device)
開發者ID:khammernik,項目名稱:sigmanet,代碼行數:53,代碼來源:vgg_loss.py


注:本文中的torchvision.models.vgg.vgg16方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。