本文整理匯總了Python中utils.tools.IteratorTimer方法的典型用法代碼示例。如果您正苦於以下問題:Python tools.IteratorTimer方法的具體用法?Python tools.IteratorTimer怎麽用?Python tools.IteratorTimer使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類utils.tools
的用法示例。
在下文中一共展示了tools.IteratorTimer方法的2個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: test
# 需要導入模塊: from utils import tools [as 別名]
# 或者: from utils.tools import IteratorTimer [as 別名]
def test(args, epoch, model, data_loader):
    """
    TESTING PROCEDURE

    Runs one full validation pass over ``data_loader`` with gradients
    disabled, accumulating per-batch losses and collecting predictions /
    ground truth for the PCK metric.

    Parameters:
    -----------
    - args: argument namespace; only ``args.device`` is read here
    - epoch: current epoch number (used for progress-bar titles only)
    - model: model to evaluate; switched to eval mode
    - data_loader: test data loader yielding ((im_1, im_2), target) pairs

    Returns:
    --------
    - average_loss: average loss per batch (0-safe if the loader is empty)
    - pck: Percentage of Correct Keypoints metric
    """
    statistics = []
    total_loss = 0
    num_batches = 0  # explicit counter: `batch_idx` is unbound if the loader is empty
    model.eval()
    title = 'Validating Epoch {}'.format(epoch)
    progress = tqdm(tools.IteratorTimer(data_loader), ncols=120, total=len(data_loader), smoothing=.9, miniters=1, leave=True, desc=title)
    predictions = []
    gt = []
    sys.stdout.flush()
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(progress):
            # data is a pair of image tensors; target stays on its original
            # device (presumably CPU) — _apply_loss is assumed to handle that.
            d = model(data[0].to(args.device), im_2=data[1].to(args.device))
            loss = _apply_loss(d, target).mean()
            total_loss += loss.item()
            num_batches += 1
            # FIX: .numpy() raises TypeError on CUDA tensors — move to CPU first.
            predictions.extend(d.detach().cpu().numpy())
            gt.extend(target.detach().cpu().numpy())
            # Print out statistics
            statistics.append(loss.item())
            title = '{} Epoch {}'.format('Validating', epoch)
            progress.set_description(title + '\tLoss:\t' + str(statistics[-1]))
    sys.stdout.flush()
    progress.close()
    pck = tools.calc_pck(np.asarray(predictions), np.asarray(gt))
    print('PCK for epoch %d is %f' % (epoch, pck))
    # FIX: original divided by (batch_idx + 1), a NameError on an empty loader.
    return total_loss / float(max(num_batches, 1)), pck
示例2: train
# 需要導入模塊: from utils import tools [as 別名]
# 或者: from utils.tools import IteratorTimer [as 別名]
def train(args, epoch, model, data_loader, optimizer):
    """
    TRAINING PROCEDURE

    Runs one training epoch over ``data_loader``: forward pass, loss,
    backward pass, and an optimizer step per batch.

    Parameters:
    -----------
    - args: argument namespace; only ``args.device`` is read here
    - epoch: current epoch number (used for progress-bar titles only)
    - model: model to train; switched to train mode
    - data_loader: train data loader yielding ((im_1, im_2), target) pairs
    - optimizer: optimizer stepped once per batch

    Returns:
    --------
    - average_loss: average loss per batch (0-safe if the loader is empty)

    Raises:
    -------
    - AssertionError: if the accumulated loss becomes NaN (divergence).
      NOTE(review): this assert is stripped under `python -O`; consider an
      explicit `raise` if that mode is ever used.
    """
    statistics = []
    total_loss = 0
    num_batches = 0  # explicit counter: `batch_idx` is unbound if the loader is empty
    model.train()
    title = 'Training Epoch {}'.format(epoch)
    progress = tqdm(tools.IteratorTimer(data_loader), ncols=120, total=len(data_loader), smoothing=.9, miniters=1, leave=True, desc=title)
    sys.stdout.flush()
    for batch_idx, (data, target) in enumerate(progress):
        #data, target = data.to(args.device), target.to(args.device)
        optimizer.zero_grad()
        d = model(data[0].to(args.device), im_2=data[1].to(args.device))
        loss = _apply_loss(d, target).mean()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        num_batches += 1
        # Fail fast if training diverges.
        assert not np.isnan(total_loss)
        # Print out statistics
        statistics.append(loss.item())
        title = '{} Epoch {}'.format('Training', epoch)
        progress.set_description(title + '\tLoss:\t' + str(statistics[-1]))
    sys.stdout.flush()
    progress.close()
    # FIX: original divided by (batch_idx + 1), a NameError on an empty loader.
    return total_loss / float(max(num_batches, 1))
# ====================================================================================================================================
# MAIN PROCEDURE
# =========================