This article collects typical usage examples of the Python method utils.get_model. If you have been wondering what exactly utils.get_model does and how to use it, the curated code examples below may help. You can also explore further how the containing module utils is used.
Shown below are 15 code examples of utils.get_model, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
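Note that utils.get_model is repo-specific rather than a standard API, and the examples below use (at least) two flavors: a factory get_model(name, **kwargs) that builds a network from a name string (Examples 1, 3, 5, and 15), and a snapshot helper paired with utils.set_model_ and utils.freeze_model in the continual-learning examples (4 and 6-14). Below is a minimal sketch of the snapshot flavor, reconstructed from how those examples use it; this is an assumption, not the verified source.

# Hypothetical utils helpers, inferred from the usage in Examples 4 and 6-14;
# the actual repositories may differ in detail.
from copy import deepcopy
import torch.nn as nn

def get_model(model: nn.Module):
    """Return a detached snapshot of the model's parameters (a state_dict copy)."""
    return deepcopy(model.state_dict())

def set_model_(model: nn.Module, state_dict):
    """Load a snapshot back into the model in place."""
    model.load_state_dict(deepcopy(state_dict))

def freeze_model(model: nn.Module):
    """Disable gradients for all parameters."""
    for p in model.parameters():
        p.requires_grad = False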
Example 1: initialize
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def initialize(config):
    model = utils.get_model(config["model"])
    # Adapt model for distributed settings if configured
    model = idist.auto_model(model)
    optimizer = optim.SGD(
        model.parameters(),
        lr=config["learning_rate"],
        momentum=config["momentum"],
        weight_decay=config["weight_decay"],
        nesterov=True,
    )
    optimizer = idist.auto_optim(optimizer)
    criterion = nn.CrossEntropyLoss().to(idist.device())
    le = config["num_iters_per_epoch"]
    milestones_values = [
        (0, 0.0),
        (le * config["num_warmup_epochs"], config["learning_rate"]),
        (le * config["num_epochs"], 0.0),
    ]
    lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
    return model, optimizer, criterion, lr_scheduler
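A minimal sketch of calling initialize(). The config keys mirror those read above; the "model" value and the concrete numbers are placeholders, assuming utils.get_model maps the name to an nn.Module.

config = {
    "model": "resnet18",         # hypothetical name understood by utils.get_model
    "learning_rate": 0.1,
    "momentum": 0.9,
    "weight_decay": 5e-4,
    "num_iters_per_epoch": 391,  # e.g. ceil(50000 / batch_size)
    "num_warmup_epochs": 5,
    "num_epochs": 100,
}
model, optimizer, criterion, lr_scheduler = initialize(config)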
Example 2: calc_loss
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def calc_loss(model, batch):
    previous, target, length, mask = batch
    output, _ = model(previous, length=length.unsqueeze(0))
    bsz = previous.size(1)
    raw_loss = F.cross_entropy(output.view(-1, get_model(model).ntoken), target.view(-1), reduction='none')
    raw_loss = raw_loss.view(-1, bsz)
    loss = (raw_loss * mask.float()).sum(0).mean()
    items = [loss.data.item(), bsz, mask.sum().item()]
    return loss, items
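Note that get_model serves a different purpose here than in Examples 4 and 6-14: it returns the underlying module (to reach the ntoken attribute) rather than a parameter snapshot. A plausible sketch of this flavor, assuming the model may be wrapped in a DataParallel-style container:

# Hypothetical unwrap helper, inferred from how the result is used above.
import torch.nn as nn

def get_model(model: nn.Module) -> nn.Module:
    # DataParallel/DistributedDataParallel expose the wrapped net as .module
    return model.module if hasattr(model, "module") else model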
Example 3: classify
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def classify(image):
    model = get_model("efficientnet-b0")
    img = preprocess_img(image)
    return predict(model, img)
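preprocess_img and predict are not defined in this snippet. A hypothetical sketch consistent with an EfficientNet-B0 ImageNet-style classifier follows; the helper bodies and transform values are assumptions, not the source's code.

# Hypothetical helpers assumed by classify() above.
import torch
from torchvision import transforms

_tfm = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

def preprocess_img(image):
    # PIL image -> normalized tensor with a batch dimension
    return _tfm(image).unsqueeze(0)

def predict(model, img):
    model.eval()
    with torch.no_grad():
        logits = model(img)
    return logits.argmax(dim=1).item()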
Example 4: save_all_models
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def save_all_models(self, task_id):
    print("Saving all models for task {} ...".format(task_id+1))
    dis=utils.get_model(self.discriminator)
    torch.save({'model_state_dict': dis,
                }, os.path.join(self.checkpoint, 'discriminator_{}.pth.tar'.format(task_id)))
    model=utils.get_model(self.model)
    torch.save({'model_state_dict': model,
                }, os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
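A matching load sketch, assuming utils.get_model returned a state_dict snapshot when the checkpoint was written (as in the continual-learning examples below); load_model and its arguments are illustrative names, not from the source.

import os
import torch

def load_model(model, checkpoint_dir, task_id):
    # Read back the checkpoint written by save_all_models()
    path = os.path.join(checkpoint_dir, 'model_{}.pth.tar'.format(task_id))
    state = torch.load(path, map_location='cpu')
    model.load_state_dict(state['model_state_dict'])
    return model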
Example 5: main
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def main():
    opt = parse_args()
    kwargs = {'ctx': [mx.cpu()], 'pretrained': False, 'classes': 1000, 'ratio': opt.ratio}
    if opt.use_se:
        kwargs['use_se'] = True
    logging.info("get symbol ...")
    net = get_model(opt.model, **kwargs)
    # Option 1
    logging.info("option 1: print network ...")
    logging.info(net)
    # Option 2 (net must be HybridSequential if you want to plot the whole graph)
    logging.info("option 2: draw network ...")
    net.hybridize()
    net.collect_params().initialize()
    x = mx.sym.var('data')
    sym = net(x)
    digraph = mx.viz.plot_network(sym, shape={'data': (1, 3, 224, 224)}, save_format='png')
    digraph.view()
    digraph.render()
    keys = sorted(dict(net.collect_params()).keys())
    logging.info(json.dumps(keys, indent=4))
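This snippet is MXNet/Gluon based: the get_model here is most likely a model-zoo factory (for example gluoncv.model_zoo.get_model) that builds a network from its name string, and the ratio and use_se keyword arguments suggest a channel-scaled or Squeeze-and-Excitation variant. This reading is inferred from the call signature, not confirmed by the source.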
Example 6: train
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def train(self,t,xtrain,ytrain,xvalid,yvalid):
    best_loss=np.inf
    best_model=utils.get_model(self.model)
    lr=self.lr
    patience=self.lr_patience
    self.optimizer=self._get_optimizer(lr)
    # Loop epochs
    for e in range(self.nepochs):
        # Train
        clock0=time.time()
        self.train_epoch(t,xtrain,ytrain)
        clock1=time.time()
        train_loss,train_acc=self.eval(t,xtrain,ytrain)
        clock2=time.time()
        print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(
            e+1,1000*self.sbatch*(clock1-clock0)/xtrain.size(0),1000*self.sbatch*(clock2-clock1)/xtrain.size(0),train_loss,100*train_acc),end='')
        # Valid
        valid_loss,valid_acc=self.eval(t,xvalid,yvalid)
        print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss,100*valid_acc),end='')
        # Adapt lr
        if valid_loss<best_loss:
            best_loss=valid_loss
            best_model=utils.get_model(self.model)
            patience=self.lr_patience
            print(' *',end='')
        else:
            patience-=1
            if patience<=0:
                lr/=self.lr_factor
                print(' lr={:.1e}'.format(lr),end='')
                if lr<self.lr_min:
                    print()
                    break
                patience=self.lr_patience
                self.optimizer=self._get_optimizer(lr)
        print()
    # Restore best, save model as old
    utils.set_model_(self.model,best_model)
    if t>0:
        model_state = utils.get_model(self.model)
        model_old_state = utils.get_model(self.model_old)
        for name, param in self.model.named_parameters():
            #model_state[name]=(1-self.alpha)*model_old_state[name]+self.alpha*model_state[name]
            model_state[name]=(model_state[name]+model_old_state[name]*t)/(t+1)
        utils.set_model_(self.model,model_state)
    self.model_old=deepcopy(self.model)
    utils.freeze_model(self.model_old)
    self.model_old.eval()
    return
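The merge at the end implements a running mean over task solutions: after task t, each parameter equals the average of the t+1 per-task best models. A tiny sketch with scalars, just to confirm the recurrence used above:

# new_mean = (current + old_mean * t) / (t + 1)
solutions = [1.0, 3.0, 5.0]   # per-task best parameters (scalars for illustration)
mean = solutions[0]           # after task 0
for t, current in enumerate(solutions[1:], start=1):
    mean = (current + mean * t) / (t + 1)
print(mean)                   # 3.0, the mean of 1.0, 3.0, 5.0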
Example 7: train
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def train(self,t,xtrain,ytrain,xvalid,yvalid):
    best_loss=np.inf
    best_model=utils.get_model(self.model)
    lr=self.lr
    patience=self.lr_patience
    self.optimizer=self._get_optimizer(lr)
    # Loop epochs
    for e in range(self.nepochs):
        # Train
        clock0=time.time()
        self.train_epoch(t,xtrain,ytrain)
        clock1=time.time()
        train_loss,train_acc=self.eval(t,xtrain,ytrain)
        clock2=time.time()
        print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(
            e+1,1000*self.sbatch*(clock1-clock0)/xtrain.size(0),1000*self.sbatch*(clock2-clock1)/xtrain.size(0),train_loss,100*train_acc),end='')
        # Valid
        valid_loss,valid_acc=self.eval(t,xvalid,yvalid)
        print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss,100*valid_acc),end='')
        # Adapt lr
        if valid_loss<best_loss:
            best_loss=valid_loss
            best_model=utils.get_model(self.model)
            patience=self.lr_patience
            print(' *',end='')
        else:
            patience-=1
            if patience<=0:
                lr/=self.lr_factor
                print(' lr={:.1e}'.format(lr),end='')
                if lr<self.lr_min:
                    print()
                    break
                patience=self.lr_patience
                self.optimizer=self._get_optimizer(lr)
        print()
    # Restore best and save model as old
    utils.set_model_(self.model,best_model)
    self.model_old=deepcopy(self.model)
    self.model_old.eval()
    utils.freeze_model(self.model_old)
    return
Example 8: train
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def train(self,t,xtrain,ytrain,xvalid,yvalid):
    best_loss=np.inf
    best_model=utils.get_model(self.model)
    lr=self.lr
    patience=self.lr_patience
    self.optimizer=self._get_optimizer(lr)
    # Loop epochs
    for e in range(self.nepochs):
        # Train
        clock0=time.time()
        self.train_epoch(t,xtrain,ytrain)
        clock1=time.time()
        train_loss,train_acc=self.eval(t,xtrain,ytrain)
        clock2=time.time()
        print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(
            e+1,1000*self.sbatch*(clock1-clock0)/xtrain.size(0),1000*self.sbatch*(clock2-clock1)/xtrain.size(0),train_loss,100*train_acc),end='')
        # Valid
        valid_loss,valid_acc=self.eval(t,xvalid,yvalid)
        print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss,100*valid_acc),end='')
        # Adapt lr
        if valid_loss<best_loss:
            best_loss=valid_loss
            best_model=utils.get_model(self.model)
            patience=self.lr_patience
            print(' *',end='')
        else:
            patience-=1
            if patience<=0:
                lr/=self.lr_factor
                print(' lr={:.1e}'.format(lr),end='')
                if lr<self.lr_min:
                    print()
                    break
                patience=self.lr_patience
                self.optimizer=self._get_optimizer(lr)
        print()
    # Restore best
    utils.set_model_(self.model,best_model)
    # Model update
    if t==0:
        self.fisher=utils.fisher_matrix_diag(t,xtrain,ytrain,self.model,self.criterion)
    else:
        fisher_new=utils.fisher_matrix_diag(t,xtrain,ytrain,self.model,self.criterion)
        for (n,p),(_,p_old) in zip(self.model.named_parameters(),self.model_old.named_parameters()):
            # Fisher-weighted average of current and old parameters. Writing
            # through p.data is required here: rebinding the loop variable p,
            # as the original snippet did, would leave the model's parameters
            # unchanged.
            p.data=fisher_new[n]*p.data+self.fisher[n]*p_old.data
            self.fisher[n]+=fisher_new[n]
            # Guard against division by zero where the accumulated fisher is zero
            p.data/=(self.fisher[n]==0).float()+self.fisher[n]
    # Old model save
    self.model_old=deepcopy(self.model)
    self.model_old.eval()
    utils.freeze_model(self.model_old)
    return
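Note the contrast with Example 9 below: here the Fisher information weights a merge of the parameters themselves, while Example 9 leaves the trained parameters untouched and instead merges the Fisher diagonals with a running mean (the same (x + old*t)/(t+1) recurrence as in Example 6), leaving the actual regularization to an EWC-style penalty term elsewhere in the class.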
Example 9: train
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def train(self,t,xtrain,ytrain,xvalid,yvalid):
    best_loss=np.inf
    best_model=utils.get_model(self.model)
    lr=self.lr
    patience=self.lr_patience
    self.optimizer=self._get_optimizer(lr)
    # Loop epochs
    for e in range(self.nepochs):
        # Train
        clock0=time.time()
        self.train_epoch(t,xtrain,ytrain)
        clock1=time.time()
        train_loss,train_acc=self.eval(t,xtrain,ytrain)
        clock2=time.time()
        print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(
            e+1,1000*self.sbatch*(clock1-clock0)/xtrain.size(0),1000*self.sbatch*(clock2-clock1)/xtrain.size(0),train_loss,100*train_acc),end='')
        # Valid
        valid_loss,valid_acc=self.eval(t,xvalid,yvalid)
        print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss,100*valid_acc),end='')
        # Adapt lr
        if valid_loss<best_loss:
            best_loss=valid_loss
            best_model=utils.get_model(self.model)
            patience=self.lr_patience
            print(' *',end='')
        else:
            patience-=1
            if patience<=0:
                lr/=self.lr_factor
                print(' lr={:.1e}'.format(lr),end='')
                if lr<self.lr_min:
                    print()
                    break
                patience=self.lr_patience
                self.optimizer=self._get_optimizer(lr)
        print()
    # Restore best
    utils.set_model_(self.model,best_model)
    # Update old
    self.model_old=deepcopy(self.model)
    self.model_old.eval()
    utils.freeze_model(self.model_old) # Freeze the weights
    # Fisher ops
    if t>0:
        fisher_old={}
        for n,_ in self.model.named_parameters():
            fisher_old[n]=self.fisher[n].clone()
    self.fisher=utils.fisher_matrix_diag(t,xtrain,ytrain,self.model,self.criterion)
    if t>0:
        # Watch out! We do not want to keep t models (or fisher diagonals) in memory,
        # therefore we have to merge fisher diagonals
        for n,_ in self.model.named_parameters():
            self.fisher[n]=(self.fisher[n]+fisher_old[n]*t)/(t+1) # Checked: it is better than the other option
            #self.fisher[n]=0.5*(self.fisher[n]+fisher_old[n])
    return
Example 10: train
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def train(self,tasks,xtrain,ytrain,xvalid,yvalid):
    self.model=deepcopy(self.initial_model) # Restart model
    task_t,task_v=tasks
    best_loss=np.inf
    best_model=utils.get_model(self.model)
    lr=self.lr
    patience=self.lr_patience
    self.optimizer=self._get_optimizer(lr)
    # Loop epochs
    try:
        for e in range(self.nepochs):
            # Train
            clock0=time.time()
            self.train_epoch(task_t,xtrain,ytrain)
            clock1=time.time()
            train_loss=self.eval_validation(task_t,xtrain,ytrain)
            clock2=time.time()
            print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f} |'.format(
                e+1,1000*self.sbatch*(clock1-clock0)/xtrain.size(0),1000*self.sbatch*(clock2-clock1)/xtrain.size(0),train_loss),end='')
            # Valid
            valid_loss=self.eval_validation(task_v,xvalid,yvalid)
            print(' Valid: loss={:.3f} |'.format(valid_loss),end='')
            # Adapt lr
            if valid_loss<best_loss:
                best_loss=valid_loss
                best_model=utils.get_model(self.model)
                patience=self.lr_patience
                print(' *',end='')
            else:
                patience-=1
                if patience<=0:
                    lr/=self.lr_factor
                    print(' lr={:.1e}'.format(lr),end='')
                    if lr<self.lr_min:
                        print()
                        break
                    patience=self.lr_patience
                    self.optimizer=self._get_optimizer(lr)
            print()
    except KeyboardInterrupt:
        print()
    # Restore best
    utils.set_model_(self.model,best_model)
    return
Example 11: train
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def train(self,t,xtrain,ytrain,xvalid,yvalid):
    self.model=deepcopy(self.initial_model) # Restart model
    best_loss=np.inf
    best_model=utils.get_model(self.model)
    lr=self.lr
    patience=self.lr_patience
    self.optimizer=self._get_optimizer(lr)
    # Loop epochs
    for e in range(self.nepochs):
        # Train
        clock0=time.time()
        self.train_epoch(t,xtrain,ytrain)
        clock1=time.time()
        train_loss,train_acc=self.eval(t,xtrain,ytrain)
        clock2=time.time()
        print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(e+1,
            1000*self.sbatch*(clock1-clock0)/xtrain.size(0),1000*self.sbatch*(clock2-clock1)/xtrain.size(0),train_loss,100*train_acc),end='')
        # Valid
        valid_loss,valid_acc=self.eval(t,xvalid,yvalid)
        print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss,100*valid_acc),end='')
        # Adapt lr
        if valid_loss<best_loss:
            best_loss=valid_loss
            best_model=utils.get_model(self.model)
            patience=self.lr_patience
            print(' *',end='')
        else:
            patience-=1
            if patience<=0:
                lr/=self.lr_factor
                print(' lr={:.1e}'.format(lr),end='')
                if lr<self.lr_min:
                    print()
                    break
                patience=self.lr_patience
                self.optimizer=self._get_optimizer(lr)
        print()
    # Restore best
    utils.set_model_(self.model,best_model)
    return
Example 12: train
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def train(self,t,xtrain,ytrain,xvalid,yvalid):
    best_loss=np.inf
    best_model=utils.get_model(self.model)
    lr=self.lr
    patience=self.lr_patience
    self.optimizer=self._get_optimizer(lr)
    # Loop epochs
    for e in range(self.nepochs):
        # Train
        clock0=time.time()
        self.train_epoch(t,xtrain,ytrain)
        clock1=time.time()
        train_loss,train_acc=self.eval(t,xtrain,ytrain)
        clock2=time.time()
        print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(e+1,
            1000*self.sbatch*(clock1-clock0)/xtrain.size(0),1000*self.sbatch*(clock2-clock1)/xtrain.size(0),train_loss,100*train_acc),end='')
        # Valid
        valid_loss,valid_acc=self.eval(t,xvalid,yvalid)
        print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss,100*valid_acc),end='')
        # Adapt lr
        if valid_loss<best_loss:
            best_loss=valid_loss
            best_model=utils.get_model(self.model)
            patience=self.lr_patience
            print(' *',end='')
        else:
            patience-=1
            if patience<=0:
                lr/=self.lr_factor
                print(' lr={:.1e}'.format(lr),end='')
                if lr<self.lr_min:
                    print()
                    break
                patience=self.lr_patience
                self.optimizer=self._get_optimizer(lr)
        print()
    # Restore best & freeze
    utils.set_model_(self.model,best_model)
    for n,p in self.model.named_parameters():
        if not n.startswith('last'):
            p.requires_grad=False
    return
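After restoring the best weights, everything except the 'last' task-specific heads is frozen, so subsequent tasks only train their own heads. A minimal sanity check, assuming a model variable with the same naming convention as above:

# Sketch: only parameters under the 'last' heads should remain trainable
trainable = [n for n, p in model.named_parameters() if p.requires_grad]
assert all(n.startswith('last') for n in trainable)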
Example 13: train
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def train(self,t,xtrain,ytrain,xvalid,yvalid):
    best_loss=np.inf
    best_model=utils.get_model(self.model)
    lr=self.lr
    patience=self.lr_patience
    # Train only the column for the current task
    self.model.unfreeze_column(t)
    # The optimizer updates only the parameters of the current task's column
    self.optimizer=self._get_optimizer(lr)
    # Loop epochs
    for e in range(self.nepochs):
        # Train
        clock0=time.time()
        self.train_epoch(t,xtrain,ytrain)
        clock1=time.time()
        train_loss,train_acc=self.eval(t,xtrain,ytrain)
        clock2=time.time()
        print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(
            e+1,1000*self.sbatch*(clock1-clock0)/xtrain.size(0),1000*self.sbatch*(clock2-clock1)/xtrain.size(0),train_loss,100*train_acc),end='')
        # Valid
        valid_loss,valid_acc=self.eval(t,xvalid,yvalid)
        print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss,100*valid_acc),end='')
        # Adapt lr
        if valid_loss<best_loss:
            best_loss=valid_loss
            best_model=utils.get_model(self.model)
            patience=self.lr_patience
            print(' *',end='')
        else:
            patience-=1
            if patience<=0:
                lr/=self.lr_factor
                print(' lr={:.1e}'.format(lr),end='')
                if lr<self.lr_min:
                    print()
                    break
                patience=self.lr_patience
                self.optimizer=self._get_optimizer(lr)
        print()
    # Restore best
    utils.set_model_(self.model,best_model)
    return
Example 14: train
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def train(self,t,xtrain,ytrain,xvalid,yvalid):
    best_loss=np.inf
    best_model=utils.get_model(self.model)
    lr=self.lr
    patience=self.lr_patience
    self.optimizer=self._get_optimizer(lr)
    # Loop epochs
    for e in range(self.nepochs):
        # Train
        clock0=time.time()
        self.train_epoch(t,xtrain,ytrain)
        clock1=time.time()
        train_loss,train_acc=self.eval(t,xtrain,ytrain)
        clock2=time.time()
        print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(
            e+1,1000*self.sbatch*(clock1-clock0)/xtrain.size(0),1000*self.sbatch*(clock2-clock1)/xtrain.size(0),train_loss,100*train_acc),end='')
        # Valid
        valid_loss,valid_acc=self.eval(t,xvalid,yvalid)
        print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss,100*valid_acc),end='')
        # Adapt lr
        if valid_loss<best_loss:
            best_loss=valid_loss
            best_model=utils.get_model(self.model)
            patience=self.lr_patience
            print(' *',end='')
        else:
            patience-=1
            if patience<=0:
                lr/=self.lr_factor
                print(' lr={:.1e}'.format(lr),end='')
                if lr<self.lr_min:
                    print()
                    break
                patience=self.lr_patience
                self.optimizer=self._get_optimizer(lr)
        print()
    # Restore best and save model as old
    utils.set_model_(self.model,best_model)
    self.model_old=deepcopy(self.model)
    self.model_old.eval()
    utils.freeze_model(self.model_old)
    return
Example 15: main
# Required import: import utils [as alias]
# Or: from utils import get_model [as alias]
def main():
    train_df = pd.DataFrame()
    eval_df = pd.DataFrame()
    num_classes = 10
    model = get_model(args.model, num_classes=num_classes,
                      normalize_input=args.normalize_input)
    if use_cuda:
        model = torch.nn.DataParallel(model).cuda()
    optimizer = optim.SGD(model.parameters(), lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay,
                          nesterov=args.nesterov)
    for epoch in range(1, args.epochs + 1):
        # Adjust learning rate for SGD
        lr = adjust_learning_rate(optimizer, epoch)
        logger.info('Setting learning rate to %g' % lr)
        # Adversarial training
        train_data = train(args, model, device, train_loader, optimizer, epoch)
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # forward-compatible replacement for the original append calls
        train_df = pd.concat([train_df, pd.DataFrame(train_data)], ignore_index=True)
        # Evaluation on natural examples
        logging.info(120 * '=')
        if epoch % args.eval_freq == 0 or epoch == args.epochs:
            eval_data = {'epoch': int(epoch)}
            eval_data.update(
                eval(args, model, device, 'train', eval_train_loader))
            eval_data.update(
                eval(args, model, device, 'test', eval_test_loader))
            eval_df = pd.concat([eval_df, pd.Series(eval_data).to_frame().T],
                                ignore_index=True)
            logging.info(120 * '=')
        # Save stats
        train_df.to_csv(os.path.join(model_dir, 'stats_train.csv'))
        eval_df.to_csv(os.path.join(model_dir, 'stats_eval.csv'))
        # Save checkpoint
        if epoch % args.save_freq == 0 or epoch == args.epochs:
            torch.save(dict(num_classes=num_classes,
                            state_dict=model.state_dict(),
                            normalize_input=args.normalize_input),
                       os.path.join(model_dir,
                                    'checkpoint-epoch{}.pt'.format(epoch)))
            torch.save(optimizer.state_dict(),
                       os.path.join(model_dir,
                                    'opt-checkpoint_epoch{}.tar'.format(epoch)))