This article collects typical usage examples of models.__dict__ in Python. If you are wondering what models.__dict__ does, how to use it, or want concrete examples, the curated code samples below may help. You can also explore further usage examples of the models module in which it lives.
The following presents 15 code examples of models.__dict__, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
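All of the examples share one pattern: a model architecture is selected at run time by looking its constructor up by name in a module's __dict__. Below is a minimal, self-contained sketch of that pattern; it uses torchvision.models purely for illustration, while the project-specific models packages in the examples expose their architectures the same way.

import torchvision.models as models

arch = "resnet18"                              # architecture name, e.g. parsed from the command line
assert arch in models.__dict__                 # fail early if the name is unknown
model = models.__dict__[arch](num_classes=10)  # instantiate the selected architecture by name
print(type(model).__name__)                    # -> ResNet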
Example 1: main
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def main():
    exp_dir = os.path.dirname(args.config)
    with open(args.config) as f:
        # PyYAML >= 5.1 requires an explicit Loader
        if version.parse(yaml.__version__) >= version.parse("5.1"):
            config = yaml.load(f, Loader=yaml.FullLoader)
        else:
            config = yaml.load(f)
    for k, v in config.items():
        setattr(args, k, v)

    model = models.modules.__dict__[args.model['module']['arch']](args.model['module'])
    model = torch.nn.DataParallel(model)

    ckpt_path = exp_dir + '/checkpoints/ckpt_iter_{}.pth.tar'.format(args.iter)
    save_path = exp_dir + '/checkpoints/convert_iter_{}.pth.tar'.format(args.iter)
    ckpt = torch.load(ckpt_path)
    weight = ckpt['state_dict']
    model.load_state_dict(weight, strict=True)

    # keep only the image encoder's weights in the converted checkpoint
    model = model.module.image_encoder
    torch.save(model.state_dict(), save_path)
Example 2: create_model
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def create_model(model_type, model_name, out_dim):
    # Create Similarity Prediction Network (SPN) by model surgery
    model = models.__dict__[model_type].__dict__[model_name](out_dim=out_dim)
    n_feat = model.last.in_features

    # Replace task-dependent module
    model.last = nn.Sequential(
        nn.Linear(n_feat*2, n_feat*4),
        nn.BatchNorm1d(n_feat*4),
        nn.ReLU(inplace=True),
        nn.Linear(n_feat*4, 2)
    )

    # Replace task-dependent function
    def new_logits(self, x):
        feat1, feat2 = PairEnum(x)
        featcat = torch.cat([feat1, feat2], 1)
        out = self.last(featcat)
        return out

    model.logits = MethodType(new_logits, model)
    return model
Example 3: get_model
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def get_model(args):
    if args.first_layer_dense:
        args.first_layer_type = "DenseConv"

    print("=> Creating model '{}'".format(args.arch))
    model = models.__dict__[args.arch]()

    # applying sparsity to the network
    if (
        args.conv_type != "DenseConv"
        and args.conv_type != "SampleSubnetConv"
        and args.conv_type != "ContinuousSparseConv"
    ):
        if args.prune_rate < 0:
            raise ValueError("Need to set a positive prune rate")

        set_model_prune_rate(model, prune_rate=args.prune_rate)
        print(
            f"=> Rough estimate model params {sum(int(p.numel() * (1-args.prune_rate)) for n, p in model.named_parameters() if not n.endswith('scores'))}"
        )

    # freezing the weights if we are only doing subnet training
    if args.freeze_weights:
        freeze_model_weights(model)

    return model
Example 4: _get_optimizer
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def _get_optimizer(optimizer_name, model, **kwargs):
    """
    This function serves as an interface between the command line and the optimizer.
    Each optimizer has a different set of parameters, so one can swap the optimizer
    in an experiment just by changing the parameters passed to the entry point.

    Parameters
    ----------
    optimizer_name:
        Name of the optimizer. See torch.optim for a list of possible values
    model:
        The model with which the training will be done
    kwargs:
        List of all arguments to be used to init the optimizer

    Returns
    -------
    torch.optim
        The optimizer initialized with the provided parameters
    """
    # Verify the optimizer exists
    assert optimizer_name in torch.optim.__dict__

    params = {}
    # For all arguments declared in the constructor signature of the selected optimizer
    for p in inspect.getfullargspec(torch.optim.__dict__[optimizer_name].__init__).args:
        # Add it to the dictionary if a corresponding value exists in kwargs
        if p in kwargs:
            params.update({p: kwargs[p]})

    # Create and return the optimizer with the correct list of parameters
    return torch.optim.__dict__[optimizer_name](model.parameters(), **params)
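A brief usage sketch of the helper above (the call itself is hypothetical and assumes torch is already imported): keyword arguments that do not appear in the chosen optimizer's constructor are silently dropped.

model = torch.nn.Linear(10, 2)
# 'momentum' matches SGD's constructor and is kept; 'eps' belongs to Adam and is filtered out
optimizer = _get_optimizer('SGD', model, lr=0.01, momentum=0.9, eps=1e-8)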
Example 5: _get_criterion
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def _get_criterion(criterion_name, disable_databalancing, dataset_folder, inmem, workers, **kwargs):
    """
    This function serves as an interface between the command line and the criterion.

    Parameters
    ----------
    criterion_name : string
        Name of the criterion
    disable_databalancing : boolean
        If True the criterion will not be fed with the class frequencies. Use with care.
    dataset_folder : String
        Location of the dataset on the file system
    inmem : boolean
        Load the whole dataset in memory. If False, only file names are stored and images are loaded
        on demand. This is slower than storing everything in memory.
    workers : int
        Number of workers to use for the dataloaders

    Returns
    -------
    torch.nn
        The initialized criterion
    """
    # Verify that the criterion exists
    assert criterion_name in torch.nn.__dict__

    # Instantiate the criterion
    criterion = torch.nn.__dict__[criterion_name]()

    if not disable_databalancing:
        try:
            logging.info('Loading weights for data balancing')
            weights = _load_class_frequencies_weights_from_file(dataset_folder, inmem, workers, **kwargs)
            criterion.weight = torch.from_numpy(weights).type(torch.FloatTensor)
        except Exception:
            logging.warning('Unable to load information for data balancing. Using normal criterion')
    return criterion
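A hypothetical call of the function above with data balancing disabled, so the weight-loading helper is never reached; the folder path is a placeholder:

criterion = _get_criterion('CrossEntropyLoss',
                           disable_databalancing=True,
                           dataset_folder='/path/to/dataset',  # unused when balancing is disabled
                           inmem=False, workers=4)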
Example 6: _optimizer_options
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def _optimizer_options(parser):
    """
    Options specific for optimizers
    """
    # List of possible optimizers already implemented in PyTorch
    optimizer_options = [name for name in torch.optim.__dict__ if callable(torch.optim.__dict__[name])]

    parser_optimizer = parser.add_argument_group('OPTIMIZER', 'Optimizer Options')
    parser_optimizer.add_argument('--optimizer-name',
                                  choices=optimizer_options,
                                  default='SGD',
                                  help='optimizer to be used for training')
    parser_optimizer.add_argument('--lr',
                                  type=float,
                                  default=0.001,
                                  help='learning rate to be used for training')
    parser_optimizer.add_argument('--decay-lr',
                                  type=int,
                                  default=None,
                                  help='drop LR by 10 every N epochs')
    parser_optimizer.add_argument('--momentum',
                                  type=float,
                                  default=0,
                                  help='momentum (parameter for the optimizer)')
    parser_optimizer.add_argument('--dampening',
                                  type=float,
                                  default=0,
                                  help='dampening (parameter for the SGD)')
    parser_optimizer.add_argument('--weight-decay',
                                  type=float,
                                  default=0,
                                  help='weight_decay coefficient, also known as L2 regularization')
Example 7: load_snapshot
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def load_snapshot(snapshot_file):
    """Load a training snapshot"""
    print("--- Loading model from snapshot")

    # Create network
    norm_act = partial(InPlaceABN, activation="leaky_relu", activation_param=.01)
    body = models.__dict__["net_wider_resnet38_a2"](norm_act=norm_act, dilation=(1, 2, 4, 4))
    head = DeeplabV3(4096, 256, 256, norm_act=norm_act, pooling_size=(84, 84))

    # Load snapshot and recover network state
    data = torch.load(snapshot_file)
    body.load_state_dict(data["state_dict"]["body"])
    head.load_state_dict(data["state_dict"]["head"])

    return body, head, data["state_dict"]["cls"]
Example 8: __init__
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def __init__(self, model_path, device=None, benchmark: bool=True):
    '''
    Args:
        model_path: path to pre-trained model (available to download, see README)
        device: CUDA device to use. str or torch.device instance
            warning: this is restricted to 'cpu' or 'cuda' only
            ('cuda:1' won't work due to main package architecture)
            defaults to 'cuda' if available
        benchmark: whether to enable cudnn benchmark mode
    '''
    self.model_path = model_path
    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    self.device = device
    if benchmark:
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True

    snapshot = torch.load(self.model_path, map_location=self.device)
    self.param = snapshot['args']
    self.transform = transforms.Compose([
        transforms.PreCrop(self.param.pre_crop_expand),
        transforms.TrainScale2WH((self.param.crop_width, self.param.crop_height)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
    self.net = models.__dict__[self.param.arch](self.param.modelconfig, None)
    self.net.train(False).to(self.device)
    weights = models.remove_module_dict(snapshot['state_dict'])
    self.net.load_state_dict(weights)
Example 9: main
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def main():
    args.distributed = args.world_size > 1
    args.gpu = 0
    if args.distributed:
        args.gpu = args.rank % torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size)
    if args.fp16: assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."

    model = models.__dict__[args.arch]().cuda()
    if args.distributed: model = DDP(model)

    data, train_sampler = torch_loader(f'{args.data}-sz/160', 128, 256)
    learner = Learner.from_model_data(model, data)
    learner.crit = F.cross_entropy
    learner.metrics = [accuracy, top5]
    if args.fp16: learner.half()

    wd = 2e-5
    update_model_dir(learner, args.save_dir)
    fit(learner, '1', 0.03, 1, train_sampler, wd)

    data, train_sampler = torch_loader(f'{args.data}-sz/320', 128, 256)
    learner.set_data(data)
    fit(learner, '3', 1e-1, 1, train_sampler, wd)

    data, train_sampler = torch_loader(args.data, 128, 256)
    learner.set_data(data)
    fit(learner, '3', 1e-1, 1, train_sampler, wd)
    print('Finished!')
Example 10: create_model
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def create_model(model_type, model_name, out_dim):
    # This function creates the model for a specific learner
    # create_model(), forward_with_criterion(), and learn() are task-dependent
    # Do surgery on the generic model if necessary
    model = models.__dict__[model_type].__dict__[model_name](out_dim=out_dim)
    # n_feat = model.last.in_features  # This information is useful
    return model
Example 11: create_model
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def create_model(model_type, model_name, out_dim):
    # Prepare Constrained Clustering Network (CCN)
    model = models.__dict__[model_type].__dict__[model_name](out_dim=out_dim)
    return model
Example 12: build_model
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def build_model():
    model_name = args.modality + "_" + args.arch
    model = models.__dict__[model_name](pretrained=True, num_classes=101)
    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    return model
Example 13: build_model
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def build_model():
    model = models.__dict__[args.arch](pretrained=True, num_classes=101)
    model.cuda()
    return model
Example 14: main
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def main():
    parser = argparse.ArgumentParser(description='Compute Model FLOPs and PN')
    parser.add_argument('--arch', default='resnet50', type=str)
    parser.add_argument('--feature_dim', default=256, type=int)
    parser.add_argument('--input_size', default=112, type=int)
    parser.add_argument('--batch_size', default=1, type=int)
    args = parser.parse_args()

    model = models.__dict__[args.arch](feature_dim=args.feature_dim)
    flops, n_conv, n_linear = compute_flops(model, args.input_size,
                                            args.batch_size)
    bits = compute_param_number(model)
    print('[{} ({} conv, {} linear)] FLOPs: {:.2f} G, PN: {:.2f} M'.format(
        args.arch, n_conv, n_linear, flops, bits))
Example 15: init_optimizer
# Module to import: import models [as alias]
# Or: from models import __dict__ [as alias]
def init_optimizer(self):
    optimizer_arg = {'params': self.model.parameters(),
                     'lr': self.config['lr'],
                     'weight_decay': self.config['weight_decay']}
    if self.config['optimizer'] in ['SGD', 'RMSprop']:
        optimizer_arg['momentum'] = self.config['momentum']
    elif self.config['optimizer'] in ['Rprop']:
        optimizer_arg.pop('weight_decay')
    elif self.config['optimizer'] == 'amsgrad':
        # 'amsgrad' is not a separate optimizer in torch.optim; it is an option of Adam
        optimizer_arg['amsgrad'] = True
        self.config['optimizer'] = 'Adam'

    self.optimizer = torch.optim.__dict__[self.config['optimizer']](**optimizer_arg)
    self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=self.config['schedule'],
                                                          gamma=0.1)
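For illustration only, a hypothetical self.config that would drive init_optimizer above; the keys follow the lookups in the method and the values are arbitrary:

config = {'optimizer': 'SGD', 'lr': 0.1, 'momentum': 0.9,
          'weight_decay': 5e-4, 'schedule': [60, 120, 160]}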