This article collects typical usage examples of torchvision.models in Python. If you have been wondering what torchvision.models does, how it is used, or what real-world code built on it looks like, the curated examples below should help. You can also explore further usage examples of the enclosing torchvision package.
The following presents 15 code examples of torchvision.models, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: build_model
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def build_model(args):
    if not hasattr(torchvision.models, args.model):
        raise ValueError('Invalid model "%s"' % args.model)
    if 'resnet' not in args.model:
        raise ValueError('Feature extraction only supports ResNets')
    cnn = getattr(torchvision.models, args.model)(pretrained=True)
    # Keep the stem plus the first `args.model_stage` residual stages.
    layers = [
        cnn.conv1,
        cnn.bn1,
        cnn.relu,
        cnn.maxpool,
    ]
    for i in range(args.model_stage):
        name = 'layer%d' % (i + 1)
        layers.append(getattr(cnn, name))
    model = torch.nn.Sequential(*layers)
    model.cuda()
    model.eval()
    return model
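As a quick check of how build_model is typically invoked, the sketch below builds a minimal argument namespace and keeps the first two residual stages of a pretrained ResNet-101. The Namespace fields (model, model_stage) mirror the attributes the function reads; running it assumes a CUDA device is available, since the function calls model.cuda().

# Usage sketch (assumes a CUDA device and the build_model defined above).
import argparse
import torch

args = argparse.Namespace(model='resnet101', model_stage=2)
model = build_model(args)  # stem + layer1 + layer2 of a pretrained ResNet-101
with torch.no_grad():
    feats = model(torch.zeros(1, 3, 224, 224, device='cuda'))
print(feats.shape)  # torch.Size([1, 512, 28, 28]) for resnet101 at stage 2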
Example 2: main
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def main():
    test_args = parse_args()
    args = joblib.load('models/%s/args.pkl' % test_args.name)
    folds = []
    losses = []
    scores = []
    for fold in range(args.n_splits):
        log_path = 'models/%s/log_%d.csv' % (args.name, fold + 1)
        if not os.path.exists(log_path):
            continue
        log = pd.read_csv(log_path)
        loss, score = log.loc[log['val_loss'].values.argmin(), ['val_loss', 'val_score']].values
        print(loss, score)
        folds.append(str(fold + 1))
        losses.append(loss)
        scores.append(score)
    results = pd.DataFrame({
        'fold': folds + ['mean'],
        'loss': losses + [np.mean(losses)],
        'score': scores + [np.mean(scores)],
    })
    print(results)
    results.to_csv('models/%s/results.csv' % args.name, index=False)
Example 3: make_layers
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def make_layers(cfg, batch_norm=False):
    """This is almost verbatim from torchvision.models.vgg, except that the
    MaxPool2d modules are configured with ceil_mode=True.
    """
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            modules = [conv2d, nn.ReLU(inplace=True)]
            if batch_norm:
                modules.insert(1, nn.BatchNorm2d(v))
            layers.extend(modules)
            in_channels = v
    return nn.Sequential(*layers)
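The sketch below feeds make_layers the standard VGG-16 configuration (the "D" config from torchvision.models.vgg, where numbers are output channels and 'M' is a max-pool) to build a batch-normalized feature extractor.

# Sketch: build VGG-16-style convolutional layers with ceil_mode pooling.
import torch
import torch.nn as nn

vgg16_cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M']
features = make_layers(vgg16_cfg, batch_norm=True)
out = features(torch.zeros(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 512, 7, 7])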
Example 4: __init__
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def __init__(self):
    super(weightNet, self).__init__()
    # Load a pretrained classification network and reuse its ResNet submodules.
    self.resnet = ClassificationNetwork()
    self.resnet.load_state_dict(torch.load('models/' + str(args.network) + '.t7',
                                           map_location=lambda storage, loc: storage))
    print('loading ', str(args.network))
    self.conv1 = self.resnet.convnet.conv1
    self.conv1.load_state_dict(self.resnet.convnet.conv1.state_dict())
    self.bn1 = self.resnet.convnet.bn1
    self.bn1.load_state_dict(self.resnet.convnet.bn1.state_dict())
    self.relu = self.resnet.convnet.relu
    self.maxpool = self.resnet.convnet.maxpool
    self.layer1 = self.resnet.convnet.layer1
    self.layer1.load_state_dict(self.resnet.convnet.layer1.state_dict())
    self.layer2 = self.resnet.convnet.layer2
    self.layer2.load_state_dict(self.resnet.convnet.layer2.state_dict())
    self.layer3 = self.resnet.convnet.layer3
    self.layer3.load_state_dict(self.resnet.convnet.layer3.state_dict())
    self.layer4 = self.resnet.convnet.layer4
    self.layer4.load_state_dict(self.resnet.convnet.layer4.state_dict())
    self.avgpool = self.resnet.convnet.avgpool
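The class's forward pass is not part of this excerpt; a plausible forward would simply chain the copied submodules in the usual ResNet order, as sketched below. This is a hypothetical reconstruction, not code from the original project.

# Hypothetical forward for weightNet (not shown in the excerpt above):
# chain the copied ResNet stem, stages, and average pool, then flatten.
def forward(self, x):
    x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    x = self.avgpool(x)
    return torch.flatten(x, 1)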
Example 5: build_model
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def build_model(args):
    if not hasattr(torchvision.models, args.model):
        raise ValueError('Invalid model "%s"' % args.model)
    if 'resnet' not in args.model:
        raise ValueError('Feature extraction only supports ResNets')
    cnn = getattr(torchvision.models, args.model)(pretrained=True)
    layers = [cnn.conv1,
              cnn.bn1,
              cnn.relu,
              cnn.maxpool]
    for i in range(args.model_stage):
        name = 'layer%d' % (i + 1)
        layers.append(getattr(cnn, name))
    model = torch.nn.Sequential(*layers)
    model.cuda()
    model.eval()
    return model
Example 6: _test_model
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def _test_model(self, model_config):
    """Build ResNeXt-* models, run a forward pass and verify the output
    shape, then verify that get / set state works.

    This is done in one test so that the model is constructed a minimum
    number of times.
    """
    model = build_model(model_config)
    # Verify forward pass works
    input = torch.ones([1, 3, 32, 32])
    output = model.forward(input)
    self.assertEqual(output.size(), (1, 1000))
    # Verify get / set state round-trips
    new_model = build_model(model_config)
    state = model.get_classy_state()
    new_model.set_classy_state(state)
    new_state = new_model.get_classy_state()
    compare_model_state(self, state, new_state, check_heads=True)
Example 7: __init__
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def __init__(self, name: str, frozen_start: bool, fp16: bool):
    super().__init__()
    if name.endswith('_wsl'):
        self.base = torch.hub.load('facebookresearch/WSL-Images', name)
    else:
        self.base = getattr(models, name)(pretrained=True)
    self.frozen_start = frozen_start
    self.fp16 = fp16
    if name == 'resnet34':
        self.out_features_l1 = 256
        self.out_features_l2 = 512
    else:
        self.out_features_l1 = 512
        self.out_features_l2 = 1024
    self.frozen = []
    if self.frozen_start:
        self.frozen = [self.base.layer1, self.base.conv1, self.base.bn1]
        for m in self.frozen:
            self._freeze(m)
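The interesting part of this example is the loading branch: WSL-pretrained ResNeXt checkpoints come from the facebookresearch/WSL-Images hub repo, while everything else comes from torchvision.models. The standalone sketch below isolates just that branch; the helper name load_base is hypothetical and not part of the original class.

# Standalone sketch of the loading branch used above (load_base is hypothetical).
import torch
from torchvision import models

def load_base(name: str):
    if name.endswith('_wsl'):
        # e.g. 'resnext101_32x8d_wsl' from the WSL-Images hub repo
        return torch.hub.load('facebookresearch/WSL-Images', name)
    return getattr(models, name)(pretrained=True)

base = load_base('resnet34')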
Example 8: __init__
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def __init__(
    self,
    name: str = "resnet50",
    visual_feature_size: int = 2048,
    pretrained: bool = False,
    frozen: bool = False,
):
    super().__init__(visual_feature_size)
    self.cnn = getattr(torchvision.models, name)(
        pretrained, zero_init_residual=True
    )
    # Do nothing after the final residual stage.
    self.cnn.fc = nn.Identity()
    # Freeze all weights if specified.
    if frozen:
        for param in self.cnn.parameters():
            param.requires_grad = False
        self.cnn.eval()
    # Keep a list of intermediate layer names.
    self._stage_names = [f"layer{i}" for i in range(1, 5)]
Example 9: parseArgs
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def parseArgs():
    parser = argparse.ArgumentParser(prog=sys.argv[0],
                                     description="Run popular imagenet models.")
    parser.add_argument("-m",
                        type=str,
                        default="resnet50",
                        choices=["alexnet", "densenet121", "densenet161", "densenet169",
                                 "densenet201", "googlenet", "mnasnet0_5", "mnasnet0_75",
                                 "mnasnet1_0", "mnasnet1_3", "mobilenet_v2", "resnet18",
                                 "resnet34", "resnet50", "resnet101", "resnet152", "resnext50_32x4d",
                                 "resnext101_32x8d", "wide_resnet50_2", "wide_resnet101_2",
                                 "shufflenet_v2_x0_5", "shufflenet_v2_x1_0", "shufflenet_v2_x1_5",
                                 "shufflenet_v2_x2_0", "squeezenet1_0", "squeezenet1_1", "vgg11",
                                 "vgg11_bn", "vgg13", "vgg13_bn", "vgg16", "vgg16_bn", "vgg19",
                                 "vgg19_bn", "inception_v3"],
                        help="Model.")
    parser.add_argument("-b",
                        type=int,
                        default=32,
                        help="Batch size.")
    args = parser.parse_args()
    return args
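The parsed model name is normally turned into a constructor via getattr on torchvision.models. The sketch below shows that pattern with a dummy batch of the requested size; the 299x299 special case for inception_v3 reflects that model's expected input resolution.

# Sketch: turn the parsed arguments into a model and a dummy batch.
import torch
import torchvision

args = parseArgs()
model = getattr(torchvision.models, args.m)(pretrained=False).eval()
size = 299 if args.m == "inception_v3" else 224  # inception_v3 expects 299x299
batch = torch.zeros(args.b, 3, size, size)
with torch.no_grad():
    out = model(batch)
print(out.shape)  # (args.b, 1000)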
Example 10: __init__
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def __init__(self, in_channels, backbone, out_channels_gcn=(85, 128), kernel_sizes=(5, 7)):
    super(ResnetGCN, self).__init__()
    resnet = getattr(torchvision.models, backbone)(pretrained=False)
    if in_channels == 3:
        conv1 = resnet.conv1
    else:
        conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.initial = nn.Sequential(
        conv1,
        resnet.bn1,
        resnet.relu,
        resnet.maxpool)
    self.layer1 = resnet.layer1
    self.layer2 = resnet.layer2
    self.layer3 = nn.Sequential(
        BottleneckGCN(512, 1024, kernel_sizes[0], out_channels_gcn[0], stride=2),
        *[BottleneckGCN(1024, 1024, kernel_sizes[0], out_channels_gcn[0])] * 5)
    self.layer4 = nn.Sequential(
        BottleneckGCN(1024, 2048, kernel_sizes[1], out_channels_gcn[1], stride=2),
        *[BottleneckGCN(1024, 1024, kernel_sizes[1], out_channels_gcn[1])] * 5)
    initialize_weights(self)
Example 11: get_resnet_pretrained
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def get_resnet_pretrained(self, archi_type, feat_length, grayscale=True):
    from torchvision import models
    model = getattr(models, archi_type)(pretrained=True)
    in_features = model.fc.in_features
    if grayscale:
        # replace the first convolution layer so it accepts single-channel input
        stride = model.conv1.stride
        padding = model.conv1.padding
        kernel_size = model.conv1.kernel_size
        out_channels = model.conv1.out_channels
        del model.conv1
        model.conv1 = nn.Conv2d(1, out_channels, kernel_size, stride, padding)
    # replace the FC layer
    del model.fc
    model.fc = nn.Linear(in_features, feat_length, bias=True)
    return model
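Because the method body never touches self, the same adaptation works as a standalone snippet. The sketch below applies it to a ResNet-18 for grayscale input with a 128-dimensional output head; the sizes are illustrative, not from the original project.

# Sketch: the same adaptation as standalone code for a grayscale ResNet-18
# with a 128-d output (hypothetical sizes).
import torch
import torch.nn as nn
from torchvision import models

model = models.resnet18(pretrained=False)
in_features = model.fc.in_features          # 512 for resnet18
old = model.conv1
model.conv1 = nn.Conv2d(1, old.out_channels, old.kernel_size, old.stride, old.padding)
model.fc = nn.Linear(in_features, 128, bias=True)
out = model(torch.zeros(2, 1, 224, 224))
print(out.shape)  # torch.Size([2, 128])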
Example 12: make_layers
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def make_layers(cfg, batch_norm=False):
    """This is almost verbatim from torchvision.models.vgg, except that the
    MaxPool2d modules are configured with ceil_mode=True.
    """
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            modules = [conv2d, nn.ReLU(inplace=True)]
            if batch_norm:
                modules.insert(1, nn.BatchNorm2d(v))
            layers.extend(modules)
            in_channels = v
    return nn.Sequential(*layers)
Example 13: test_channel_dependency
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def test_channel_dependency(self):
    outdir = os.path.join(prefix, 'dependency')
    os.makedirs(outdir, exist_ok=True)
    for name in model_names:
        print('Analyze channel dependency for %s' % name)
        model = getattr(models, name)
        net = model().to(device)
        dummy_input = torch.ones(1, 3, 224, 224).to(device)
        channel_depen = ChannelDependency(net, dummy_input)
        depen_sets = channel_depen.dependency_sets
        d_set_count = 0
        for d_set in depen_sets:
            if len(d_set) > 1:
                d_set_count += 1
                assert d_set in channel_dependency_ground_truth[name]
        assert d_set_count == len(channel_dependency_ground_truth[name])
        fpath = os.path.join(outdir, name)
        channel_depen.export(fpath)
Example 14: __init__
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def __init__(self, args):
    super(AlexNetFc, self).__init__()
    self.base_model = torchvision.models.alexnet(pretrained=True)
    self.features = self.base_model.features
    # Reuse the first six classifier layers (everything before the final FC).
    self.classifier = nn.Sequential()
    for i in range(6):
        self.classifier.add_module("classifier" + str(i), self.base_model.classifier[i])
    self.feature_layers = nn.Sequential(self.features, self.classifier)
    self.hash_bit = args.hash_bit
    feature_dim = self.base_model.classifier[6].in_features
    self.fc1 = nn.Linear(feature_dim, feature_dim)
    self.activation1 = nn.ReLU()
    self.fc2 = nn.Linear(feature_dim, feature_dim)
    self.activation2 = nn.ReLU()
    self.fc3 = nn.Linear(feature_dim, self.hash_bit)
    self.last_layer = nn.Tanh()
    self.dropout = nn.Dropout(0.5)
    self.hash_layer = nn.Sequential(self.fc1, self.activation1, self.fc2, self.activation2,
                                    self.fc3, self.last_layer)
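The module's forward method is not included in this excerpt. A plausible forward, sketched below as an assumption rather than the original code, flattens AlexNet's convolutional features (which are (N, 256, 6, 6) for 224x224 inputs) before the truncated classifier, then applies the hash layer to obtain codes in (-1, 1).

# Hypothetical forward for the module above (not part of the original excerpt).
def forward(self, x):
    x = self.features(x)
    x = torch.flatten(x, 1)   # (N, 256*6*6) for 224x224 inputs
    x = self.classifier(x)    # (N, 4096) features
    y = self.hash_layer(x)    # (N, hash_bit) codes squashed by Tanh
    return x, y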
Example 15: __build_model
# Required import: import torchvision [as alias]
# Or: from torchvision import models [as alias]
def __build_model(self):
    """Define model layers & loss."""
    # 1. Load pre-trained network:
    model_func = getattr(models, self.backbone)
    backbone = model_func(pretrained=True)
    _layers = list(backbone.children())[:-1]
    self.feature_extractor = torch.nn.Sequential(*_layers)
    freeze(module=self.feature_extractor, train_bn=self.train_bn)
    # 2. Classifier:
    _fc_layers = [torch.nn.Linear(2048, 256),
                  torch.nn.Linear(256, 32),
                  torch.nn.Linear(32, 1)]
    self.fc = torch.nn.Sequential(*_fc_layers)
    # 3. Loss:
    self.loss_func = F.binary_cross_entropy_with_logits
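Note that children()[:-1] keeps the backbone's average pool, so for a ResNet-50-class backbone the feature extractor outputs tensors of shape (N, 2048, 1, 1); the Linear(2048, 256) head therefore needs the features flattened first. The sketch below shows a minimal forward and loss computation under that assumption; it is illustrative, not the original module's code.

# Hypothetical forward/loss sketch for the module above (assumes a ResNet-50
# backbone, so feature_extractor outputs (N, 2048, 1, 1)).
def forward(self, x):
    feats = self.feature_extractor(x).flatten(1)   # (N, 2048)
    return self.fc(feats).squeeze(-1)              # raw logits, shape (N,)

def compute_loss(self, logits, targets):
    # binary_cross_entropy_with_logits expects float targets in [0, 1]
    return self.loss_func(logits, targets.float())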