This article collects typical usage examples of the Python train.Trainer method. If you have been wondering exactly how train.Trainer is used and what calling code looks like in practice, the hand-picked examples below should help. You can also explore further usage examples from the train module it belongs to.
Shown below are 14 code examples of train.Trainer, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
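Although the Trainer constructors below differ from repository to repository, most of the examples share one epoch-loop shape: build a model, criterion, and optimizer, construct the Trainer, then alternate trainer.train and trainer.validate while tracking the best validation score. The following is a minimal sketch of that shared pattern, assuming a Trainer that takes no constructor arguments (as in Examples 1, 4, and 14); the stand-in model and helper names are illustrative, not taken from any one repository:

import torch
import torch.nn as nn
import train  # the repository-local module that defines Trainer

def run(args, train_loader, val_loader):
    # Stand-in model; the real examples build task-specific networks.
    model = nn.Linear(100, 5).cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    trainer = train.Trainer()
    best_score = 0.0
    for epoch in range(args.epochs):
        trainer.train(train_loader, model, criterion, optimizer, epoch, args)
        score, _ = trainer.validate(val_loader, model, criterion, epoch, args)
        best_score = max(score, best_score)  # track the best validation score
    return best_score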
Example 1: simpletest1
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def simpletest1():
    # Test whether the code can learn a simple sequence.
    opt = parse()
    opts(opt)
    epochs = 40
    train_loader, val_loader, valvideo_loader = get_dataset(opt)
    trainer = train.Trainer()
    basemodel = nn.Linear(100, 5)
    model = AsyncTFBase(basemodel, 100, opt).cuda()
    criterion = AsyncTFCriterion(opt).cuda()
    optimizer = torch.optim.SGD(model.parameters(), opt.lr,
                                momentum=opt.momentum, weight_decay=opt.weight_decay)
    epoch = -1
    for i in range(epochs):
        top1, _ = trainer.train(train_loader, model, criterion, optimizer, i, opt)
        print('cls weights: {}, aa weights: {}'.format(
            next(model.mA.parameters()).norm().item(),
            next(model.mAAa.parameters()).norm().item()))
    top1, _ = trainer.validate(train_loader, model, criterion, epochs, opt)
    for i in range(5):
        top1val, _ = trainer.validate(val_loader, model, criterion, epochs + i, opt)
        print('top1val: {}'.format(top1val))
    ap = trainer.validate_video(valvideo_loader, model, criterion, epoch, opt)
    return top1, top1val, ap
Example 2: main
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def main():
    from train import Trainer
    net = Net(VinConfig)
    net = net.cuda()
    net = net.double()
    trainer = Trainer(VinConfig, net)
    trainer.train()
Example 3: __init__
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def __init__(self, sess, args):
    self.args = args
    self.model = Model(sess,
                       optimizer_params={'learning_rate': args.learning_rate,
                                         'alpha': 0.99, 'epsilon': 1e-5},
                       args=self.args)
    self.trainer = Trainer(sess, self.model, args=self.args)
    self.env_class = A2C.env_name_parser(self.args.env_class)
Example 4: main
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def main():
    global args, best_top1
    args = parse()
    if not args.no_logger:
        tee.Tee(args.cache + '/log.txt')
    print(vars(args))
    seed(args.manual_seed)
    model, criterion, optimizer = create_model(args)
    if args.resume:
        best_top1 = checkpoints.load(args, model, optimizer)
    print(model)
    trainer = train.Trainer()
    loaders = get_dataset(args)
    train_loader = loaders[0]
    if args.evaluate:
        scores = validate(trainer, loaders, model, criterion, args)
        checkpoints.score_file(scores, "{}/model_000.txt".format(args.cache))
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            trainer.train_sampler.set_epoch(epoch)
        scores = {}
        scores.update(trainer.train(train_loader, model, criterion, optimizer, epoch, args))
        scores.update(validate(trainer, loaders, model, criterion, args, epoch))
        is_best = scores[args.metric] > best_top1
        best_top1 = max(scores[args.metric], best_top1)
        checkpoints.save(epoch, args, model, optimizer, is_best, scores, args.metric)
    if not args.nopdb:
        pdb.set_trace()
Example 5: __init__
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def __init__(self, sess):
    """
    :param sess: the tensorflow session
    """
    self.sess = sess
    self.config = ResConfig()
    self.model = RESModel(self.config)
    self.model.build_model()
    self.data = GenerateData(self.config)
    self.trainer = Trainer(self.sess, self.model, self.data, self.config)
Example 6: main
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def main():
    data_transformer = DataTransformer(config.dataset_path, use_cuda=config.use_cuda)
    vanilla_encoder = VanillaEncoder(vocab_size=data_transformer.vocab_size,
                                     embedding_size=config.encoder_embedding_size,
                                     output_size=config.encoder_output_size)
    vanilla_decoder = VanillaDecoder(hidden_size=config.decoder_hidden_size,
                                     output_size=data_transformer.vocab_size,
                                     max_length=data_transformer.max_length,
                                     teacher_forcing_ratio=config.teacher_forcing_ratio,
                                     sos_id=data_transformer.SOS_ID,
                                     use_cuda=config.use_cuda)
    if config.use_cuda:
        vanilla_encoder = vanilla_encoder.cuda()
        vanilla_decoder = vanilla_decoder.cuda()
    seq2seq = Seq2Seq(encoder=vanilla_encoder,
                      decoder=vanilla_decoder)
    trainer = Trainer(seq2seq, data_transformer, config.learning_rate, config.use_cuda)
    trainer.load_model()
    while True:
        testing_word = input('You say: ')
        if testing_word == "exit":
            break
        results = trainer.evaluate(testing_word)
        print("Model says: %s" % results[0])
Example 7: main
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def main():
    global opt, best_err1
    opt = parser.parse_args()
    best_err1 = 1000000
    print(opt)
    model = init.load_model(opt)
    model, criterion, optimizer = init.setup(model, opt)
    print(model)
    trainer = train.Trainer(model, criterion, optimizer, opt, writer)
    validator = train.Validator(model, criterion, opt, writer)
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    cudnn.deterministic = True
    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_err1 = init.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))
    cudnn.benchmark = True
    dataloader = ld.GazeFollow(opt)
    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader
    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1, "Learning rate:", optimizer.param_groups[0]["lr"])
        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
        err = validator.validate(val_loader, epoch, opt)
        best_err1 = min(err, best_err1)
        if epoch % 10 == 0:
            init.save_checkpoint(opt, model, optimizer, best_err1, epoch)
        print('Best error: [{0:.3f}]\t'.format(best_err1))
Example 8: main
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def main(train_cfg='config/pretrain.json',
         model_cfg='config/bert_base.json',
         data_file='../tbc/books_large_all.txt',
         model_file=None,
         data_parallel=True,
         vocab='../uncased_L-12_H-768_A-12/vocab.txt',
         save_dir='../exp/bert/pretrain',
         log_dir='../exp/bert/pretrain/runs',
         max_len=512,
         max_pred=20,
         mask_prob=0.15):
    # Load the training and model configs from the JSON files passed in.
    cfg = train.Config(**json.load(open(train_cfg, "r")))
    model_cfg = models.Config(**json.load(open(model_cfg, "r")))
    set_seeds(cfg.seed)
    tokenizer = tokenization.FullTokenizer(vocab_file=vocab, do_lower_case=True)
    tokenize = lambda x: tokenizer.tokenize(tokenizer.convert_to_unicode(x))
    pipeline = [Preprocess4Pretrain(max_pred,
                                    mask_prob,
                                    list(tokenizer.vocab.keys()),
                                    tokenizer.convert_tokens_to_ids,
                                    max_len)]
    data_iter = SentPairDataLoader(data_file,
                                   cfg.batch_size,
                                   tokenize,
                                   max_len,
                                   pipeline=pipeline)
    model = BertModel4Pretrain(model_cfg)
    criterion1 = nn.CrossEntropyLoss(reduction='none')
    criterion2 = nn.CrossEntropyLoss()
    optimizer = optim.optim4GPU(cfg, model)
    trainer = train.Trainer(cfg, model, data_iter, optimizer, save_dir, get_device())
    writer = SummaryWriter(log_dir=log_dir)  # for tensorboardX

    def get_loss(model, batch, global_step):  # make sure loss is a tensor
        input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, is_next = batch
        logits_lm, logits_clsf = model(input_ids, segment_ids, input_mask, masked_pos)
        loss_lm = criterion1(logits_lm.transpose(1, 2), masked_ids)  # for masked LM
        loss_lm = (loss_lm * masked_weights.float()).mean()
        loss_clsf = criterion2(logits_clsf, is_next)  # for sentence classification
        writer.add_scalars('data/scalar_group',
                           {'loss_lm': loss_lm.item(),
                            'loss_clsf': loss_clsf.item(),
                            'loss_total': (loss_lm + loss_clsf).item(),
                            'lr': optimizer.get_lr()[0]},
                           global_step)
        return loss_lm + loss_clsf

    trainer.train(get_loss, model_file, None, data_parallel)
Example 9: main
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def main():
    global opt, best_prec1
    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = None  # Logger(opt.logdir)
    opt.lr = opt.maxlr
    print(opt)
    best_prec1 = 0
    cudnn.benchmark = True
    model = init_model.load_model(opt)
    if opt.model_def.startswith('alexnet') or opt.model_def.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    elif opt.ngpus > 1:
        model = torch.nn.DataParallel(model).cuda()
    print(model)
    model, criterion, optimizer = init_model.setup(model, opt)
    trainer = train.Trainer(model, criterion, optimizer, opt, logger)
    validator = train.Validator(model, criterion, opt, logger)
    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_acc = init_model.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))
    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    # print(utils.get_mean_and_std(train_loader))
    val_loader = dataloader.val_loader
    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch, "Learning rate:", opt.lr)
        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
        if opt.tensorboard:
            logger.scalar_summary('learning_rate', opt.lr, epoch)
        prec1 = validator.validate(val_loader, epoch, opt)
        best_prec1 = max(prec1, best_prec1)
        init_model.save_checkpoint(opt, model, optimizer, best_prec1, epoch)
        print('Best Prec@1: [{0:.3f}]\t'.format(best_prec1))
Example 10: main
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def main():
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False
    logging.basicConfig(level=logging.INFO)
    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")
    logging.info("Initializing model...")
    # model = BaseModel(args, use_gpu)
    model = BertForSequenceClassification.from_pretrained(
        args.bert_model,
        cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(-1),
        num_labels=2)
    if args.resume:
        model.load_state_dict(torch.load(args.load_model))
    if use_gpu:
        model = model.cuda()
    params = sum(np.prod(p.size()) for p in model.parameters())
    logging.info("Number of parameters: {}".format(params))
    if not os.path.isdir(args.save_dir):
        os.mkdir(args.save_dir)
    train_dataset = BertDataset(args.input_train, "train")
    dev_dataset = BertDataset(args.input_dev, "dev")
    test_dataset = BertDataset(args.input_test, "test")
    train_examples = len(train_dataset)
    train_dataloader = BertDataLoader(train_dataset, mode="train", max_len=args.max_len,
                                      batch_size=args.batch_size, num_workers=4, shuffle=True)
    dev_dataloader = BertDataLoader(dev_dataset, mode="dev", max_len=args.max_len,
                                    batch_size=args.batch_size, num_workers=4, shuffle=False)
    test_dataloader = BertDataLoader(test_dataset, mode="test", max_len=args.max_len,
                                     batch_size=int(args.batch_size / 2), num_workers=4, shuffle=False)
    trainer = Trainer(args, model, train_examples, use_gpu)
    if not args.resume:
        logging.info("Beginning training...")
        trainer.train(train_dataloader, dev_dataloader)
    prediction, ids = trainer.predict(test_dataloader)
    with open(os.path.join(args.save_dir, "MG1833039.txt"), "w", encoding="utf-8") as f:
        for index in range(len(prediction)):
            f.write("{}\t{}\n".format(ids[index], prediction[index]))
    logging.info("Done!")
Example 11: main
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def main(task='mrpc',
         train_cfg='config/train_mrpc.json',
         model_cfg='config/bert_base.json',
         data_file='../glue/MRPC/train.tsv',
         model_file=None,
         pretrain_file='../uncased_L-12_H-768_A-12/bert_model.ckpt',
         data_parallel=True,
         vocab='../uncased_L-12_H-768_A-12/vocab.txt',
         save_dir='../exp/bert/mrpc',
         max_len=128,
         mode='train'):
    cfg = train.Config.from_json(train_cfg)
    model_cfg = models.Config.from_json(model_cfg)
    set_seeds(cfg.seed)
    tokenizer = tokenization.FullTokenizer(vocab_file=vocab, do_lower_case=True)
    TaskDataset = dataset_class(task)  # task dataset class according to the task
    pipeline = [Tokenizing(tokenizer.convert_to_unicode, tokenizer.tokenize),
                AddSpecialTokensWithTruncation(max_len),
                TokenIndexing(tokenizer.convert_tokens_to_ids,
                              TaskDataset.labels, max_len)]
    dataset = TaskDataset(data_file, pipeline)
    data_iter = DataLoader(dataset, batch_size=cfg.batch_size, shuffle=True)
    model = Classifier(model_cfg, len(TaskDataset.labels))
    criterion = nn.CrossEntropyLoss()
    trainer = train.Trainer(cfg,
                            model,
                            data_iter,
                            optim.optim4GPU(cfg, model),
                            save_dir, get_device())
    if mode == 'train':
        def get_loss(model, batch, global_step):  # make sure loss is a scalar tensor
            input_ids, segment_ids, input_mask, label_id = batch
            logits = model(input_ids, segment_ids, input_mask)
            loss = criterion(logits, label_id)
            return loss
        trainer.train(get_loss, model_file, pretrain_file, data_parallel)
    elif mode == 'eval':
        def evaluate(model, batch):
            input_ids, segment_ids, input_mask, label_id = batch
            logits = model(input_ids, segment_ids, input_mask)
            _, label_pred = logits.max(1)
            result = (label_pred == label_id).float()  # .cpu().numpy()
            accuracy = result.mean()
            return accuracy, result
        results = trainer.eval(evaluate, model_file, data_parallel)
        total_accuracy = torch.cat(results).mean().item()
        print('Accuracy:', total_accuracy)
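Examples 8, 11, and 12 share a design worth calling out: instead of owning a fixed criterion, the Trainer accepts a closure. trainer.train calls get_loss(model, batch, global_step) once per batch and backpropagates whatever tensor it returns, while trainer.eval calls evaluate(model, batch) and collects the per-batch results. A minimal skeleton of a Trainer built around that contract; this is an illustration of the idea, assuming a cfg.n_epochs field, not the repository's actual implementation:

import torch

class Trainer:
    """Illustrative skeleton of the closure-driven Trainer in Examples 8, 11, and 12."""
    def __init__(self, cfg, model, data_iter, optimizer, save_dir, device):
        self.cfg, self.model, self.data_iter = cfg, model, data_iter
        self.optimizer, self.save_dir, self.device = optimizer, save_dir, device

    def train(self, get_loss, model_file=None, pretrain_file=None, data_parallel=True):
        model = self.model.to(self.device)
        if data_parallel:
            model = torch.nn.DataParallel(model)
        global_step = 0
        for epoch in range(self.cfg.n_epochs):  # n_epochs assumed to live in the config
            for batch in self.data_iter:
                batch = [t.to(self.device) for t in batch]
                self.optimizer.zero_grad()
                loss = get_loss(model, batch, global_step)  # closure computes the loss
                loss.backward()
                self.optimizer.step()
                global_step += 1

    def eval(self, evaluate, model_file=None, data_parallel=True):
        model = self.model.to(self.device).eval()
        results = []
        with torch.no_grad():
            for batch in self.data_iter:
                batch = [t.to(self.device) for t in batch]
                accuracy, result = evaluate(model, batch)  # closure computes the metric
                results.append(result)
        return results

The payoff is that pretraining (two losses plus tensorboard logging, Examples 8 and 12) and fine-tuning (a single cross-entropy, Example 11) reuse the same loop unchanged.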
Example 12: main
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def main(train_cfg='config/pretrain.json',
         model_cfg='config/bert_base.json',
         data_file='../tbc/books_large_all.txt',
         model_file=None,
         data_parallel=True,
         vocab='../uncased_L-12_H-768_A-12/vocab.txt',
         save_dir='../exp/bert/pretrain',
         log_dir='../exp/bert/pretrain/runs',
         max_len=512,
         max_pred=20,
         mask_prob=0.15):
    cfg = train.Config.from_json(train_cfg)
    model_cfg = models.Config.from_json(model_cfg)
    set_seeds(cfg.seed)
    tokenizer = tokenization.FullTokenizer(vocab_file=vocab, do_lower_case=True)
    tokenize = lambda x: tokenizer.tokenize(tokenizer.convert_to_unicode(x))
    pipeline = [Preprocess4Pretrain(max_pred,
                                    mask_prob,
                                    list(tokenizer.vocab.keys()),
                                    tokenizer.convert_tokens_to_ids,
                                    max_len)]
    data_iter = SentPairDataLoader(data_file,
                                   cfg.batch_size,
                                   tokenize,
                                   max_len,
                                   pipeline=pipeline)
    model = BertModel4Pretrain(model_cfg)
    criterion1 = nn.CrossEntropyLoss(reduction='none')
    criterion2 = nn.CrossEntropyLoss()
    optimizer = optim.optim4GPU(cfg, model)
    trainer = train.Trainer(cfg, model, data_iter, optimizer, save_dir, get_device())
    writer = SummaryWriter(log_dir=log_dir)  # for tensorboardX

    def get_loss(model, batch, global_step):  # make sure loss is a tensor
        input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, is_next = batch
        logits_lm, logits_clsf = model(input_ids, segment_ids, input_mask, masked_pos)
        loss_lm = criterion1(logits_lm.transpose(1, 2), masked_ids)  # for masked LM
        loss_lm = (loss_lm * masked_weights.float()).mean()
        loss_clsf = criterion2(logits_clsf, is_next)  # for sentence classification
        writer.add_scalars('data/scalar_group',
                           {'loss_lm': loss_lm.item(),
                            'loss_clsf': loss_clsf.item(),
                            'loss_total': (loss_lm + loss_clsf).item(),
                            'lr': optimizer.get_lr()[0]},
                           global_step)
        return loss_lm + loss_clsf

    trainer.train(get_loss, model_file, None, data_parallel)
Example 13: train_main
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def train_main(config, resume):
    train_logger = Logger()
    data_config = config['data']
    t_transforms = _get_transform(config, 'train')
    v_transforms = _get_transform(config, 'val')
    print(t_transforms)
    data_manager = getattr(data_module, config['data']['type'])(config['data'])
    classes = data_manager.classes
    t_loader = data_manager.get_loader('train', t_transforms)
    v_loader = data_manager.get_loader('val', v_transforms)
    m_name = config['model']['type']
    model = getattr(net_module, m_name)(classes, config=config)
    num_classes = len(classes)
    loss = getattr(net_module, config['train']['loss'])
    metrics = getattr(net_module, config['metrics'])(num_classes)
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    opt_name = config['optimizer']['type']
    opt_args = config['optimizer']['args']
    optimizer = getattr(torch.optim, opt_name)(trainable_params, **opt_args)
    lr_name = config['lr_scheduler']['type']
    lr_args = config['lr_scheduler']['args']
    if lr_name == 'None':
        lr_scheduler = None
    else:
        lr_scheduler = getattr(torch.optim.lr_scheduler, lr_name)(optimizer, **lr_args)
    trainer = Trainer(model, loss, metrics, optimizer,
                      resume=resume,
                      config=config,
                      data_loader=t_loader,
                      valid_data_loader=v_loader,
                      lr_scheduler=lr_scheduler,
                      train_logger=train_logger)
    trainer.train()
    return trainer

# duration = 1; freq = 440
# os.system('play --no-show-progress --null --channels 1 synth %s sine %f' % (duration, freq))
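Example 13 resolves every component (data manager, model, loss, metrics, optimizer, scheduler) by name with getattr, so an experiment is fully described by its config. A hypothetical config dict that would satisfy the lookups above; the key names mirror the example's accesses, while the type names and values are made up for illustration:

# Hypothetical config matching the getattr lookups in Example 13.
config = {
    "data": {"type": "ImageFolderManager"},    # resolved via getattr(data_module, ...)
    "model": {"type": "ResNetClassifier"},     # resolved via getattr(net_module, ...)
    "train": {"loss": "cross_entropy_loss"},
    "metrics": "accuracy_topk",
    "optimizer": {"type": "SGD",
                  "args": {"lr": 0.01, "momentum": 0.9, "weight_decay": 1e-4}},
    "lr_scheduler": {"type": "StepLR",
                     "args": {"step_size": 30, "gamma": 0.1}},
}

# The pattern then instantiates, e.g., the optimizer by name:
# optimizer = getattr(torch.optim, config["optimizer"]["type"])(params, **config["optimizer"]["args"])

With this layout, swapping SGD for Adam or changing the scheduler is a config edit rather than a code change.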
Example 14: main
# Required module: import train [as alias]
# Or: from train import Trainer [as alias]
def main():
    best_score = 0
    args = parse()
    if not args.no_logger:
        tee.Tee(args.cache + '/log.txt')
    print(vars(args))
    print('experiment folder: {}'.format(experiment_folder()))
    print('git hash: {}'.format(get_script_dir_commit_hash()))
    seed(args.manual_seed)
    cudnn.benchmark = not args.disable_cudnn_benchmark
    cudnn.enabled = not args.disable_cudnn
    metrics = get_metrics(args.metrics)
    tasks = get_tasks(args.tasks)
    model, criterion = get_model(args)
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                     weight_decay=args.weight_decay)
    else:
        assert False, "invalid optimizer"
    if args.resume:
        best_score = checkpoints.load(args, model, optimizer)
    print(model)
    trainer = train.Trainer()
    train_loader, val_loader = get_dataset(args)
    if args.evaluate:
        scores = validate(trainer, val_loader, model, criterion, args, metrics, tasks, -1)
        print(scores)
        score_file(scores, "{}/model_999.txt".format(args.cache))
        return
    if args.warmups > 0:
        for i in range(args.warmups):
            print('warmup {}'.format(i))
            trainer.validate(train_loader, model, criterion, -1, metrics, args)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            trainer.train_sampler.set_epoch(epoch)
        scores = {}
        scores.update(trainer.train(train_loader, model, criterion, optimizer, epoch, metrics, args))
        scores.update(validate(trainer, val_loader, model, criterion, args, metrics, tasks, epoch))
        is_best = scores[args.metric] > best_score
        best_score = max(scores[args.metric], best_score)
        checkpoints.save(epoch, args, model, optimizer, is_best, scores, args.metric)
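Finally, note the trainer.train_sampler.set_epoch(epoch) call that Examples 4 and 14 make under args.distributed. With torch.utils.data.distributed.DistributedSampler, the sampler shards the dataset across processes and derives its shuffle order from a seed plus the epoch number, so set_epoch must be called every epoch or each process replays the same ordering. A self-contained sketch of just that mechanism, assuming torch.distributed has already been initialized (e.g. via init_process_group):

import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler

dataset = TensorDataset(torch.arange(1000).float())
sampler = DistributedSampler(dataset)  # shards the dataset across processes
loader = DataLoader(dataset, batch_size=32, sampler=sampler)

for epoch in range(10):
    sampler.set_epoch(epoch)  # reseed the shuffle; without this, the order repeats every epoch
    for batch in loader:
        pass  # training step goes here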