This article collects typical usage examples of the Python method torchvision.models.vgg16_bn. If you have been wondering what models.vgg16_bn does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples of the module it belongs to, torchvision.models.
Below are 15 code examples of models.vgg16_bn, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: __init__
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def __init__(self, pretrained=False):
    super().__init__()
    encoder = models.vgg16_bn(pretrained=pretrained).features
    self.conv1 = encoder[:6]
    self.conv2 = encoder[6:13]
    self.conv3 = encoder[13:23]
    self.conv4 = encoder[23:33]
    self.conv5 = encoder[33:43]
    self.center = nn.Sequential(
        encoder[43],  # MaxPool
        make_decoder_block(512, 512, 256))
    self.dec5 = make_decoder_block(256 + 512, 512, 256)
    self.dec4 = make_decoder_block(256 + 512, 512, 256)
    self.dec3 = make_decoder_block(256 + 256, 256, 64)
    self.dec2 = make_decoder_block(64 + 128, 128, 32)
    self.dec1 = nn.Sequential(
        nn.Conv2d(32 + 64, 32, 3, padding=1), nn.ReLU(inplace=True))
    self.final = nn.Conv2d(32, 1, kernel_size=1)
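The slice boundaries follow the layout of vgg16_bn's features stack: indices 0–5 form the first conv block, each subsequent slice starts with a MaxPool2d, and encoder[43] is the final pool. The helper make_decoder_block is not defined in this snippet; the following is only a minimal sketch of a plausible implementation (an assumption, not the original author's code) whose channel arithmetic is consistent with the calls above:

import torch.nn as nn

def make_decoder_block(in_channels, middle_channels, out_channels):
    # Hypothetical decoder block: a 3x3 conv to mix the concatenated skip
    # features, then a transposed conv that doubles the spatial resolution.
    return nn.Sequential(
        nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.ConvTranspose2d(middle_channels, out_channels,
                           kernel_size=4, stride=2, padding=1),
        nn.ReLU(inplace=True),
    )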
Example 2: main
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def main():
    # model = models.vgg19_bn(pretrained=True)
    # _, summary = weight_watcher.analyze(model, alphas=False)
    # for key, value in summary.items():
    #     print('{:10s} : {:}'.format(key, value))
    _, summary = weight_watcher.analyze(models.vgg13(pretrained=True), alphas=False)
    print('vgg-13 : {:}'.format(summary['lognorm']))
    _, summary = weight_watcher.analyze(models.vgg13_bn(pretrained=True), alphas=False)
    print('vgg-13-BN : {:}'.format(summary['lognorm']))
    _, summary = weight_watcher.analyze(models.vgg16(pretrained=True), alphas=False)
    print('vgg-16 : {:}'.format(summary['lognorm']))
    _, summary = weight_watcher.analyze(models.vgg16_bn(pretrained=True), alphas=False)
    print('vgg-16-BN : {:}'.format(summary['lognorm']))
    _, summary = weight_watcher.analyze(models.vgg19(pretrained=True), alphas=False)
    print('vgg-19 : {:}'.format(summary['lognorm']))
    _, summary = weight_watcher.analyze(models.vgg19_bn(pretrained=True), alphas=False)
    print('vgg-19-BN : {:}'.format(summary['lognorm']))
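The same weight_watcher.analyze call can be applied to a single model and its full summary inspected, mirroring the commented-out block at the top of main(); the sketch below assumes the weight_watcher module is importable under that name, as in the snippet:

import weight_watcher
from torchvision import models

# Analyze one pretrained network and print every metric in the summary dict.
_, summary = weight_watcher.analyze(models.vgg16_bn(pretrained=True), alphas=False)
for key, value in summary.items():
    print('{:10s} : {:}'.format(key, value))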
Example 3: __init__
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def __init__(self, model_type='vgg13', layer_type='fc6'):
    super().__init__()
    # get model
    if model_type == 'vgg13':
        self.original_model = models.vgg13_bn(pretrained=True)
    elif model_type == 'vgg16':
        self.original_model = models.vgg16_bn(pretrained=True)
    else:
        raise NameError('Unknown model_type passed')
    self.features = self.original_model.features
    if layer_type == 'fc6':
        self.classifier = nn.Sequential(*list(self.original_model.classifier.children())[:2])
    elif layer_type == 'fc7':
        self.classifier = nn.Sequential(*list(self.original_model.classifier.children())[:-2])
    else:
        raise NameError('Unknown layer_type passed')
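In torchvision's VGG models the classifier is a Sequential of Linear(25088, 4096), ReLU, Dropout, Linear(4096, 4096), ReLU, Dropout, Linear(4096, 1000), so [:2] keeps fc6 plus its ReLU and [:-2] keeps everything up to the ReLU after fc7. A standalone sketch of the same slicing outside the class, to make the shapes concrete (pretrained=False is used here only to avoid a weight download):

import torch
import torch.nn as nn
from torchvision import models

vgg = models.vgg16_bn(pretrained=False)
fc6 = nn.Sequential(*list(vgg.classifier.children())[:2])   # Linear(25088, 4096) + ReLU
fc7 = nn.Sequential(*list(vgg.classifier.children())[:-2])  # ... up to the ReLU after fc7

x = torch.randn(1, 3, 224, 224)
feat = torch.flatten(vgg.features(x), 1)  # (1, 512 * 7 * 7) = (1, 25088)
print(fc6(feat).shape)                    # torch.Size([1, 4096])
print(fc7(feat).shape)                    # torch.Size([1, 4096])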
Example 4: extract_layer
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def extract_layer(self, model, backbone_mode, ind):
    # pdb.set_trace()
    if backbone_mode == 'vgg16':
        index_dict = {
            1: (0, 4),
            2: (4, 9),
            3: (9, 16),
            4: (16, 23),
            5: (23, 30)}
    elif backbone_mode == 'vgg16_bn':
        index_dict = {
            1: (0, 6),
            2: (6, 13),
            3: (13, 23),
            4: (23, 33),
            5: (33, 43)}
    start, end = index_dict[ind]
    modified_model = nn.Sequential(*list(model.features.children())[start:end])
    return modified_model
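The two tables encode the same five convolutional stages; the vgg16_bn indices are shifted because the BN variant inserts a BatchNorm2d after every conv. A hedged standalone usage sketch of the vgg16_bn table (the original method presumably lives on a wrapper class; here the slicing is done directly for illustration):

import torch
import torch.nn as nn
from torchvision import models

backbone = models.vgg16_bn(pretrained=False)
start, end = (13, 23)                                    # stage 3 of the vgg16_bn table
stage3 = nn.Sequential(*list(backbone.features.children())[start:end])
x = torch.randn(1, 128, 56, 56)                          # stage 3 expects 128-channel input
print(stage3(x).shape)                                   # torch.Size([1, 256, 28, 28])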
Example 5: __init__
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def __init__(self):
    super(VGG16_bo, self).__init__()
    model = vgg16_bn(pretrained=True)
    # set the module name
    self.moduel_name = str("VGG16_bo")
    # use the pretrained network as a fixed feature extractor
    self.features = model.features
    # freeze its weights
    for param in self.features.parameters():
        param.requires_grad = False
    # classification head
    self.classifier = nn.Sequential(
        t.nn.Linear(86528, 4096),
        t.nn.ReLU(),
        t.nn.Dropout(p=0.5),
        t.nn.Linear(4096, 4096),
        t.nn.ReLU(),
        t.nn.Dropout(p=0.5),
        t.nn.Linear(4096, 2)
    )
    # initialize only the classification head
    self._initialize_weights()
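The 86528 input dimension equals 512 x 13 x 13, i.e. the flattened VGG16-BN feature map for roughly 416 x 416 inputs. _initialize_weights is not shown in the snippet; the following is only a guess at a typical implementation (an assumption, not the original code) that initializes the newly added classifier while leaving the frozen features untouched:

import torch.nn as nn

def _initialize_weights(self):
    # Hypothetical initializer for the classifier head only.
    for m in self.classifier.modules():
        if isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, 0, 0.01)
            nn.init.constant_(m.bias, 0)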
Example 6: __init__
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def __init__(self, pretrained=True, freeze=True):
    super(vgg16_bn, self).__init__()
    model_urls['vgg16_bn'] = model_urls['vgg16_bn'].replace('https://', 'http://')
    vgg_pretrained_features = models.vgg16_bn(pretrained=pretrained).features
    self.slice1 = torch.nn.Sequential()
    self.slice2 = torch.nn.Sequential()
    self.slice3 = torch.nn.Sequential()
    self.slice4 = torch.nn.Sequential()
    self.slice5 = torch.nn.Sequential()
    for x in range(12):  # conv2_2
        self.slice1.add_module(str(x), vgg_pretrained_features[x])
    for x in range(12, 19):  # conv3_3
        self.slice2.add_module(str(x), vgg_pretrained_features[x])
    for x in range(19, 29):  # conv4_3
        self.slice3.add_module(str(x), vgg_pretrained_features[x])
    for x in range(29, 39):  # conv5_3
        self.slice4.add_module(str(x), vgg_pretrained_features[x])
    # fc6, fc7 without atrous conv
    self.slice5 = torch.nn.Sequential(
        nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
        nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
        nn.Conv2d(1024, 1024, kernel_size=1)
    )
    if not pretrained:
        init_weights(self.slice1.modules())
        init_weights(self.slice2.modules())
        init_weights(self.slice3.modules())
        init_weights(self.slice4.modules())
    init_weights(self.slice5.modules())  # no pretrained model for fc6 and fc7
    if freeze:
        for param in self.slice1.parameters():  # only first conv
            param.requires_grad = False
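The class's forward pass is not included in the snippet; below is a minimal sketch of how such slices are usually chained, returning each intermediate feature map for later multi-scale fusion (an assumption about the omitted method, not the original code):

def forward(self, x):
    # Hypothetical forward: run the input through the five slices in turn
    # and return all intermediate feature maps.
    h1 = self.slice1(x)
    h2 = self.slice2(h1)
    h3 = self.slice3(h2)
    h4 = self.slice4(h3)
    h5 = self.slice5(h4)
    return h1, h2, h3, h4, h5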
Example 7: vgg16_bn
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def vgg16_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D") with batch normalization
    """
    model = models.vgg16_bn(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg16_bn'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model
Example 8: _load_pytorch_model
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def _load_pytorch_model(model_name, summary):
    import torchvision.models as models
    switcher = {
        'alexnet': lambda: models.alexnet(pretrained=True).eval(),
        "vgg11": lambda: models.vgg11(pretrained=True).eval(),
        "vgg11_bn": lambda: models.vgg11_bn(pretrained=True).eval(),
        "vgg13": lambda: models.vgg13(pretrained=True).eval(),
        "vgg13_bn": lambda: models.vgg13_bn(pretrained=True).eval(),
        "vgg16": lambda: models.vgg16(pretrained=True).eval(),
        "vgg16_bn": lambda: models.vgg16_bn(pretrained=True).eval(),
        "vgg19": lambda: models.vgg19(pretrained=True).eval(),
        "vgg19_bn": lambda: models.vgg19_bn(pretrained=True).eval(),
        "resnet18": lambda: models.resnet18(pretrained=True).eval(),
        "resnet34": lambda: models.resnet34(pretrained=True).eval(),
        "resnet50": lambda: models.resnet50(pretrained=True).eval(),
        "resnet101": lambda: models.resnet101(pretrained=True).eval(),
        "resnet152": lambda: models.resnet152(pretrained=True).eval(),
        "squeezenet1_0": lambda: models.squeezenet1_0(pretrained=True).eval(),
        "squeezenet1_1": lambda: models.squeezenet1_1(pretrained=True).eval(),
        "densenet121": lambda: models.densenet121(pretrained=True).eval(),
        "densenet161": lambda: models.densenet161(pretrained=True).eval(),
        "densenet201": lambda: models.densenet201(pretrained=True).eval(),
        "inception_v3": lambda: models.inception_v3(pretrained=True).eval(),
    }
    _load_model = switcher.get(model_name, None)
    _model = _load_model()
    import torch
    if torch.cuda.is_available():
        _model = _model.cuda()
    from perceptron.models.classification.pytorch import PyTorchModel as ClsPyTorchModel
    import numpy as np
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    pmodel = ClsPyTorchModel(
        _model, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    return pmodel
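The preprocessing tuple passed to the wrapper is the usual ImageNet normalization, with mean and std reshaped to (3, 1, 1) so they broadcast over a CHW image whose pixel values already lie in the bounds (0, 1). A small standalone illustration of just that convention (the random dummy image is an assumption for demonstration):

import numpy as np

mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
image = np.random.rand(3, 224, 224)       # dummy CHW image in [0, 1]
normalized = (image - mean) / std         # broadcasts per channel
print(normalized.shape)                   # (3, 224, 224)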
Example 9: load_pytorch_model
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def load_pytorch_model(model_name):
    import torchvision.models as models
    switcher = {
        'alexnet': lambda: models.alexnet(pretrained=True).eval(),
        "vgg11": lambda: models.vgg11(pretrained=True).eval(),
        "vgg11_bn": lambda: models.vgg11_bn(pretrained=True).eval(),
        "vgg13": lambda: models.vgg13(pretrained=True).eval(),
        "vgg13_bn": lambda: models.vgg13_bn(pretrained=True).eval(),
        "vgg16": lambda: models.vgg16(pretrained=True).eval(),
        "vgg16_bn": lambda: models.vgg16_bn(pretrained=True).eval(),
        "vgg19": lambda: models.vgg19(pretrained=True).eval(),
        "vgg19_bn": lambda: models.vgg19_bn(pretrained=True).eval(),
        "resnet18": lambda: models.resnet18(pretrained=True).eval(),
        "resnet34": lambda: models.resnet34(pretrained=True).eval(),
        "resnet50": lambda: models.resnet50(pretrained=True).eval(),
        "resnet101": lambda: models.resnet101(pretrained=True).eval(),
        "resnet152": lambda: models.resnet152(pretrained=True).eval(),
        "squeezenet1_0": lambda: models.squeezenet1_0(pretrained=True).eval(),
        "squeezenet1_1": lambda: models.squeezenet1_1(pretrained=True).eval(),
        "densenet121": lambda: models.densenet121(pretrained=True).eval(),
        "densenet161": lambda: models.densenet161(pretrained=True).eval(),
        "densenet201": lambda: models.densenet201(pretrained=True).eval(),
        "inception_v3": lambda: models.inception_v3(pretrained=True).eval(),
    }
    _load_model = switcher.get(model_name, None)
    _model = _load_model()
    return _model
Example 10: vgg16
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def vgg16(pre): return children(vgg16_bn(pre))[0]
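children here is fastai's helper, essentially list(model.children()), so indexing [0] returns the convolutional features module of VGG16-BN (the remaining children being the pooling and classifier heads). An equivalent sketch using torchvision only, under that assumption about the helper:

import torch.nn as nn
from torchvision.models import vgg16_bn

def vgg16_backbone(pretrained: bool = True) -> nn.Module:
    # Same result without fastai: keep only the convolutional feature extractor.
    return vgg16_bn(pretrained=pretrained).features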
Example 11: get_feat_loss
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def get_feat_loss():
    vgg_m = vgg16_bn(True).features.cuda().eval()
    requires_grad(vgg_m, False)
    blocks = [i - 1 for i, o in enumerate(children(vgg_m)) if isinstance(o, nn.MaxPool2d)]
    feat_loss = FeatureLoss(vgg_m, blocks[2:5], [5, 15, 2])
    return feat_loss
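The list comprehension collects the index of the layer immediately before each MaxPool2d, i.e. the last ReLU of every conv stage, and blocks[2:5] keeps the three deepest of them for the perceptual loss. A standalone check of those indices that avoids the fastai helpers and the GPU requirement:

import torch.nn as nn
from torchvision.models import vgg16_bn

vgg_m = vgg16_bn(pretrained=False).features.eval()
blocks = [i - 1 for i, o in enumerate(vgg_m.children()) if isinstance(o, nn.MaxPool2d)]
print(blocks)        # [5, 12, 22, 32, 42]
print(blocks[2:5])   # [22, 32, 42] -- the layers fed to FeatureLoss above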
Example 12: __init__
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def __init__(self, class_num, droprate=0.5, stride=2, init_model=None, pool='avg'):
    super(ft_net_VGG16, self).__init__()
    model_ft = models.vgg16_bn(pretrained=True)
    # avg pooling to global pooling
    # if stride == 1:
    #     model_ft.layer4[0].downsample[0].stride = (1,1)
    #     model_ft.layer4[0].conv2.stride = (1,1)
    self.pool = pool
    if pool == 'avg+max':
        model_ft.avgpool2 = nn.AdaptiveAvgPool2d((1, 1))
        model_ft.maxpool2 = nn.AdaptiveMaxPool2d((1, 1))
        self.model = model_ft
        # self.classifier = ClassBlock(4096, class_num, droprate)
    elif pool == 'avg':
        model_ft.avgpool2 = nn.AdaptiveAvgPool2d((1, 1))
        self.model = model_ft
        # self.classifier = ClassBlock(2048, class_num, droprate)
    elif pool == 'max':
        model_ft.maxpool2 = nn.AdaptiveMaxPool2d((1, 1))
        self.model = model_ft
    if init_model != None:
        self.model = init_model.model
        self.pool = init_model.pool
        # self.classifier.add_block = init_model.classifier.add_block
Example 13: vgg16_bn
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def vgg16_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D") with batch normalization
    """
    model = models.vgg16_bn(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg16_bn'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    return model
Example 14: __init__
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def __init__(self):
    super(VggEncoder, self).__init__()
    self.featChannel = 512
    # note: this attribute is immediately overwritten by the explicit stack below
    self.layer1 = tvmodel.vgg16_bn(pretrained=True).features
    self.layer1 = nn.Sequential(OrderedDict([
        ('conv1', nn.Conv2d(3, 64, (3, 3), (1, 1), (1, 1))),
        ('bn1', nn.BatchNorm2d(64)),
        ('relu1', nn.ReLU(True)),
        ('pool1', nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True)),
        ('conv2', nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1))),
        ('bn2', nn.BatchNorm2d(128)),
        ('relu2', nn.ReLU(True)),
        ('pool2', nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True)),
        ('conv3', nn.Conv2d(128, 256, (3, 3), (1, 1), (1, 1))),
        ('bn3', nn.BatchNorm2d(256)),
        ('relu3', nn.ReLU(True)),
        ('conv4', nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1))),
        ('bn4', nn.BatchNorm2d(256)),
        ('relu4', nn.ReLU(True)),
        ('pool3', nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True)),
        ('conv5', nn.Conv2d(256, 512, (3, 3), (1, 1), 1)),
        ('bn5', nn.BatchNorm2d(512)),
        ('relu5', nn.ReLU(True)),
        ('pool4', nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True)),
        ('conv6', nn.Conv2d(512, 512, (3, 3), stride=1, padding=1)),
        ('bn6', nn.BatchNorm2d(512)),
        ('relu6', nn.ReLU(True)),
        ('conv7', nn.Conv2d(512, 512, (3, 3), (1, 1), 1)),
        ('bn7', nn.BatchNorm2d(512)),
        ('relu7', nn.ReLU(True)),
        ('pool5', nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True)),
    ]))
    self.fc_3dmm = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(self.featChannel*3, 256*3)),
        ('relu1', nn.ReLU(True)),
        ('fc2', nn.Linear(256*3, 228))]))
    self.fc_pose = nn.Sequential(OrderedDict([
        ('fc3', nn.Linear(512, 256)),
        ('relu2', nn.ReLU(True)),
        ('fc4', nn.Linear(256, 7))]))
    reset_params(self.fc_3dmm)
    reset_params(self.fc_pose)
Example 15: net_init
# Required import: from torchvision import models [as alias]
# Or: from torchvision.models import vgg16_bn [as alias]
def net_init(self, input_size, ms_ks):
    input_w, input_h = input_size
    self.fc_input_feature = 5 * int(input_w/16) * int(input_h/16)
    self.backbone = models.vgg16_bn(pretrained=self.pretrained).features
    # ----------------- process backbone -----------------
    for i in [34, 37, 40]:
        conv = self.backbone._modules[str(i)]
        dilated_conv = nn.Conv2d(
            conv.in_channels, conv.out_channels, conv.kernel_size, stride=conv.stride,
            padding=tuple(p * 2 for p in conv.padding), dilation=2, bias=(conv.bias is not None)
        )
        dilated_conv.load_state_dict(conv.state_dict())
        self.backbone._modules[str(i)] = dilated_conv
    self.backbone._modules.pop('33')
    self.backbone._modules.pop('43')
    # ----------------- SCNN part -----------------
    self.layer1 = nn.Sequential(
        nn.Conv2d(512, 1024, 3, padding=4, dilation=4, bias=False),
        nn.BatchNorm2d(1024),
        nn.ReLU(),
        nn.Conv2d(1024, 128, 1, bias=False),
        nn.BatchNorm2d(128),
        nn.ReLU()  # (nB, 128, 36, 100)
    )
    # ----------------- add message passing -----------------
    self.message_passing = nn.ModuleList()
    self.message_passing.add_module('up_down', nn.Conv2d(128, 128, (1, ms_ks), padding=(0, ms_ks // 2), bias=False))
    self.message_passing.add_module('down_up', nn.Conv2d(128, 128, (1, ms_ks), padding=(0, ms_ks // 2), bias=False))
    self.message_passing.add_module('left_right',
                                    nn.Conv2d(128, 128, (ms_ks, 1), padding=(ms_ks // 2, 0), bias=False))
    self.message_passing.add_module('right_left',
                                    nn.Conv2d(128, 128, (ms_ks, 1), padding=(ms_ks // 2, 0), bias=False))
    # (nB, 128, 36, 100)
    # ----------------- SCNN part -----------------
    self.layer2 = nn.Sequential(
        nn.Dropout2d(0.1),
        nn.Conv2d(128, 5, 1)  # get (nB, 5, 36, 100)
    )
    self.layer3 = nn.Sequential(
        nn.Softmax(dim=1),  # (nB, 5, 36, 100)
        nn.AvgPool2d(2, 2),  # (nB, 5, 18, 50)
    )
    self.fc = nn.Sequential(
        nn.Linear(self.fc_input_feature, 128),
        nn.ReLU(),
        nn.Linear(128, 4),
        nn.Sigmoid()
    )
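The backbone surgery replaces conv5_1/5_2/5_3 (feature indices 34, 37, 40 of vgg16_bn) with dilation-2 convolutions while copying their pretrained weights, and removes the pools at indices 33 and 43, so the backbone output stays at 1/8 resolution; the later AvgPool2d(2, 2) brings it to 1/16, which matches fc_input_feature = 5 * (w/16) * (h/16). A standalone sketch of the same weight-preserving dilation trick on a single layer (pretrained=False here only to avoid a download; the mechanics are identical):

import torch
import torch.nn as nn
from torchvision import models

features = models.vgg16_bn(pretrained=False).features
conv = features[34]                                       # conv5_1: Conv2d(512, 512, 3, padding=1)
dilated = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                    stride=conv.stride,
                    padding=tuple(p * 2 for p in conv.padding),
                    dilation=2, bias=(conv.bias is not None))
dilated.load_state_dict(conv.state_dict())                # weights carry over unchanged
features[34] = dilated
x = torch.randn(1, 512, 14, 14)
print(features[34](x).shape)                              # torch.Size([1, 512, 14, 14]) -- resolution preserved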