This page collects typical usage examples of config.grad_clip (a module-level configuration value) in Python. If you are unsure what config.grad_clip is or how to use it, the curated code examples below may help; you can also explore further usage examples of the config module it belongs to.
The following shows 6 code examples of config.grad_clip, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
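None of the examples below include the config module itself. As a minimal sketch of what such a module might contain, assuming the names device, print_freq, and im_size that the examples reference (the concrete values are illustrative assumptions, not taken from the original repositories):

# config.py -- hypothetical minimal configuration module
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

grad_clip = 5.0    # gradient clipping threshold read by the train() functions below
print_freq = 100   # log every print_freq batches (assumed name)
im_size = 320      # input resolution used by the image-matting examples (assumed)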
Example 1: train
# Module required: import config [as alias]
# Or: from config import grad_clip [as alias]
def train(model, optimizer, scheduler, ema, dataset, start, length):
    model.train()
    losses = []
    for i in tqdm(range(start, length + start), total=length):
        optimizer.zero_grad()
        Cwid, Ccid, Qwid, Qcid, y1, y2, ids = dataset[i]
        Cwid, Ccid, Qwid, Qcid = Cwid.to(device), Ccid.to(device), Qwid.to(device), Qcid.to(device)
        p1, p2 = model(Cwid, Ccid, Qwid, Qcid)
        y1, y2 = y1.to(device), y2.to(device)
        loss1 = F.nll_loss(p1, y1, reduction='mean')
        loss2 = F.nll_loss(p2, y2, reduction='mean')
        loss = (loss1 + loss2) / 2
        losses.append(loss.item())
        loss.backward()
        # Clip gradients before the optimizer step; clipping after step()
        # would have no effect, since the gradients are already applied.
        torch.nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
        optimizer.step()
        scheduler.step()
        for name, p in model.named_parameters():
            if p.requires_grad:
                ema.update_parameter(name, p)
    loss_avg = np.mean(losses)
    print("STEP {:8d} loss {:8f}\n".format(i + 1, loss_avg))
Example 2: train
# Module required: import config [as alias]
# Or: from config import grad_clip [as alias]
def train(train_loader, model, optimizer, epoch, logger):
    model.train()  # train mode (enables dropout and batch-norm updates)
    losses = AverageMeter()
    # Batches
    for i, (img, alpha_label) in enumerate(train_loader):
        # Move to GPU, if available
        img = img.type(torch.FloatTensor).to(device)  # [N, 4, 320, 320]
        alpha_label = alpha_label.type(torch.FloatTensor).to(device)  # [N, 2, 320, 320]
        alpha_label = alpha_label.reshape((-1, 2, im_size * im_size))  # [N, 2, 320*320]
        # Forward prop.
        alpha_out = model(img)  # [N, 320, 320]
        alpha_out = alpha_out.reshape((-1, 1, im_size * im_size))  # [N, 1, 320*320]
        # Calculate loss
        loss = alpha_prediction_loss(alpha_out, alpha_label)
        # Back prop.
        optimizer.zero_grad()
        loss.backward()
        # Clip gradients
        clip_gradient(optimizer, grad_clip)
        # Update weights
        optimizer.step()
        # Keep track of metrics
        losses.update(loss.item())
        # Print status
        if i % print_freq == 0:
            status = 'Epoch: [{0}][{1}/{2}]\t' \
                     'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader), loss=losses)
            logger.info(status)
    return losses.avg
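Examples 2 through 4 call a clip_gradient(optimizer, grad_clip) helper rather than torch.nn.utils. Its definition is not reproduced on this page; a common implementation, which clamps every gradient element to [-grad_clip, grad_clip], looks like this:

def clip_gradient(optimizer, grad_clip):
    # Element-wise value clipping over all parameter groups.
    for group in optimizer.param_groups:
        for param in group['params']:
            if param.grad is not None:
                param.grad.data.clamp_(-grad_clip, grad_clip)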
Example 3: train
# Module required: import config [as alias]
# Or: from config import grad_clip [as alias]
def train(train_loader, model, optimizer, epoch, logger):
    model.train()  # train mode (enables dropout and batch-norm updates)
    losses = AverageMeter()
    # Batches
    for i, (img, alpha_label) in enumerate(train_loader):
        # Move to GPU, if available
        img = img.type(torch.FloatTensor).to(device)  # [N, 4, 320, 320]
        alpha_label = alpha_label.type(torch.FloatTensor).to(device)  # [N, 2, 320, 320]
        alpha_label = alpha_label.reshape((-1, 2, im_size * im_size))  # [N, 2, 320*320]
        # Forward prop.
        alpha_out = model(img)  # [N, 320, 320]
        alpha_out = alpha_out.reshape((-1, 1, im_size * im_size))  # [N, 1, 320*320]
        # Calculate loss
        loss = alpha_prediction_loss(alpha_out, alpha_label)
        # Back prop.
        optimizer.zero_grad()
        loss.backward()
        # Clip gradients
        clip_gradient(optimizer, grad_clip)
        # Update weights
        optimizer.step()
        # Keep track of metrics
        losses.update(loss.item())
        # Print status
        if i % print_freq == 0:
            status = 'Epoch: [{0}][{1}/{2}]\t' \
                     'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader), loss=losses)
            logger.info(status)
    return losses.avg
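The examples track metrics with an AverageMeter exposing .val, .avg, and update(); the class itself is omitted from this page. The widely used implementation is:

class AverageMeter:
    # Tracks the latest value and the running average of a scalar metric.
    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count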
Example 4: train
# Module required: import config [as alias]
# Or: from config import grad_clip [as alias]
def train(train_loader, model, metric_fc, criterion, optimizer, epoch):
    model.train()  # train mode (enables dropout and batch-norm updates)
    metric_fc.train()
    losses = AverageMeter()
    top1_accs = AverageMeter()
    # Batches
    for i, (img, label) in enumerate(train_loader):
        # Move to GPU, if available
        img = img.to(device)
        label = label.to(device)  # [N, 1]
        # Forward prop.
        feature = model(img)  # embedding => [N, 512]
        output = metric_fc(feature, label)  # class_id_out => [N, 10575]
        # Calculate loss
        loss = criterion(output, label)
        # Back prop.
        optimizer.zero_grad()
        loss.backward()
        # Clip gradients
        clip_gradient(optimizer, grad_clip)
        # Update weights
        optimizer.step()
        # Keep track of metrics
        losses.update(loss.item())
        top1_accuracy = accuracy(output, label, 1)
        top1_accs.update(top1_accuracy)
        # Print status
        if i % print_freq == 0:
            logger.info('Epoch: [{0}][{1}/{2}]\t'
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                        'Top1 Accuracy {top1_accs.val:.3f} ({top1_accs.avg:.3f})'.format(
                            epoch, i, len(train_loader), loss=losses, top1_accs=top1_accs))
    return losses.avg, top1_accs.avg
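Examples 4 through 6 compute top-k accuracy with an accuracy(output, label, k) helper that is not shown. Assuming output is an [N, num_classes] logit tensor and label holds the target class ids, a compatible sketch:

def accuracy(output, label, k=1):
    # Fraction of samples whose true class appears in the top-k scores.
    _, topk = output.topk(k, dim=1)           # [N, k] predicted class ids
    correct = topk.eq(label.view(-1, 1))      # broadcast against targets
    return correct.any(dim=1).float().mean().item()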
Example 5: train
# Module required: import config [as alias]
# Or: from config import grad_clip [as alias]
def train(train_loader, model, metric_fc, criterion, optimizer, epoch, logger):
    model.train()  # train mode (enables dropout and batch-norm updates)
    metric_fc.train()
    losses = AverageMeter()
    top1_accs = AverageMeter()
    # Batches
    for i, (img, label) in enumerate(train_loader):
        # Move to GPU, if available
        img = img.to(device)
        label = label.to(device)  # [N, 1]
        # Forward prop.
        feature = model(img)  # embedding => [N, 512]
        output = metric_fc(feature, label)  # class_id_out => [N, 10575]
        # Calculate loss
        loss = criterion(output, label)
        # Back prop.
        optimizer.zero_grad()
        loss.backward()
        # Clip gradients (PyTorch optimizers expose no clip_gradient method,
        # so the standalone helper is used)
        clip_gradient(optimizer, grad_clip)
        # Update weights
        optimizer.step()
        # Keep track of metrics
        losses.update(loss.item())
        top1_accuracy = accuracy(output, label, 1)
        top1_accs.update(top1_accuracy)
        # Print status
        if i % print_freq == 0:
            logger.info('Epoch: [{0}][{1}/{2}]\t'
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                        'Top1 Accuracy {top1_accs.val:.3f} ({top1_accs.avg:.3f})'.format(
                            epoch, i, len(train_loader), loss=losses, top1_accs=top1_accs))
    return losses.avg, top1_accs.avg
Author: LcenArthas | Project: CCF-BDCI2019-Multi-person-Face-Recognition-Competition-Baseline | Code lines: 46 | Source file: train.py
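Note the contrast: Example 1 clips the global gradient norm, while Examples 2 through 6 clip each gradient element by value. Both variants exist as PyTorch built-ins and must run between loss.backward() and optimizer.step(); a self-contained demonstration (the toy model and threshold are illustrative):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)                 # stand-in model for demonstration
loss = model(torch.randn(8, 4)).sum()
loss.backward()

grad_clip = 5.0                         # would normally come from config.grad_clip

# Option 1: rescale so the global L2 norm of all gradients is at most grad_clip
torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)

# Option 2: clamp each gradient element to [-grad_clip, grad_clip]
# (what the clip_gradient helper in Examples 2-4 does)
torch.nn.utils.clip_grad_value_(model.parameters(), grad_clip)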
Example 6: train
# Module required: import config [as alias]
# Or: from config import grad_clip [as alias]
def train(train_loader, model, metric_fc, criterion, optimizer, epoch, logger):
    model.train()  # train mode (enables dropout and batch-norm updates)
    metric_fc.train()
    losses = AverageMeter()
    top5_accs = AverageMeter()
    # Batches
    for i, (img, label) in enumerate(train_loader):
        # Move to GPU, if available
        img = img.to(device)
        label = label.to(device)  # [N, 1]
        # Forward prop.
        feature = model(img)  # embedding => [N, 512]
        output = metric_fc(feature, label)  # class_id_out => [N, 93431]
        # Calculate loss
        loss = criterion(output, label)
        # Back prop.
        optimizer.zero_grad()
        loss.backward()
        # Clip gradients (PyTorch optimizers expose no clip_gradient method,
        # so the standalone helper is used)
        clip_gradient(optimizer, grad_clip)
        # Update weights
        optimizer.step()
        # Keep track of metrics
        losses.update(loss.item())
        top5_accuracy = accuracy(output, label, 5)
        top5_accs.update(top5_accuracy)
        # Print status
        if i % print_freq == 0:
            logger.info('Epoch: [{0}][{1}/{2}]\t'
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                        'Top5 Accuracy {top5_accs.val:.3f} ({top5_accs.avg:.3f})'.format(
                            epoch, i, len(train_loader), loss=losses, top5_accs=top5_accs))
    return losses.avg, top5_accs.avg
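The metric_fc module in Examples 4 through 6 maps a 512-d embedding plus its label to class logits; in face-recognition baselines of this kind it is commonly an ArcFace-style additive-angular-margin head. A condensed sketch (the scale s and margin m are typical defaults, not values taken from this page):

import torch
import torch.nn as nn
import torch.nn.functional as F

class ArcMarginProduct(nn.Module):
    # Adds an angular margin m to the target-class angle, then scales by s.
    def __init__(self, in_features, out_features, s=30.0, m=0.50):
        super().__init__()
        self.s, self.m = s, m
        self.weight = nn.Parameter(torch.empty(out_features, in_features))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, feature, label):
        # Cosine similarity between normalized embeddings and class weights.
        cosine = F.linear(F.normalize(feature), F.normalize(self.weight))
        theta = torch.acos(cosine.clamp(-1 + 1e-7, 1 - 1e-7))
        target_logit = torch.cos(theta + self.m)
        one_hot = F.one_hot(label.view(-1), cosine.size(1)).float()
        return self.s * (one_hot * target_logit + (1 - one_hot) * cosine)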