本文整理汇总了Python中torch.no_grad方法的典型用法代码示例。如果您正苦于以下问题:Python torch.no_grad方法的具体用法?Python torch.no_grad怎么用?Python torch.no_grad使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch
的用法示例。
在下文中一共展示了torch.no_grad方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def run(self):
    """Drive the full training schedule.

    Alternates training cycles with gradient-free evaluation cycles,
    logging losses and checkpointing after each cycle.
    """
    self.set_logger()

    # One progress bar spanning every planned iteration.
    bar = utils.set_progress_bar(self.total_iters)

    num_cycles = int(self.total_iters / self.iters)
    for cycle_num in range(num_cycles):
        # Train for one cycle.
        self.model.train()
        self.cycle(bar, cycle_num)

        # Evaluate without tracking gradients.
        with torch.no_grad():
            self.run_evaluation_cycle()

        # Bookkeeping: losses, best-score tracking, checkpoint.
        self.log_losses(self.opt, self.losses)
        self.update_top_score(self.opt)
        self.save_model(self.get_tracked_score())

    self.stop_logger()
示例2: hard_mining
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def hard_mining(self, inds, num_expected, bboxes, labels, feats):
    """Online hard example mining: keep the `num_expected` proposals with
    the highest (un-reduced) classification loss.

    :param inds: candidate sample indices
    :param num_expected: number of hard samples to keep
    :param bboxes: candidate boxes corresponding to `inds`
    :param labels: assigned labels for the candidates
    :param feats: backbone feature maps fed to the RoI extractor
    :return: the subset of `inds` with the largest cls loss
    """
    # Scoring pass only -- no gradients needed.
    with torch.no_grad():
        rois = bbox2roi([bboxes])
        roi_feats = self.bbox_roi_extractor(
            feats[:self.bbox_roi_extractor.num_inputs], rois)
        cls_score, _ = self.bbox_head(roi_feats)
        # reduction_override='none' yields one loss value per sample.
        per_sample_loss = self.bbox_head.loss(
            cls_score=cls_score,
            bbox_pred=None,
            rois=rois,
            labels=labels,
            label_weights=cls_score.new_ones(cls_score.size(0)),
            bbox_targets=None,
            bbox_weights=None,
            reduction_override='none')['loss_cls']
        _, hardest = per_sample_loss.topk(num_expected)
    return inds[hardest]
示例3: single_gpu_test
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def single_gpu_test(model, data_loader, show=False):
    """Run inference over `data_loader` on a single GPU.

    :param model: wrapped detector (expects mmdet-style ``return_loss`` API)
    :param data_loader: test-set loader
    :param show: if True, visualize each result and skip rescaling
    :return: list of per-image results (mask results RLE-encoded)
    """
    model.eval()
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))

    results = []
    for data in data_loader:
        with torch.no_grad():
            result = model(return_loss=False, rescale=not show, **data)
        if show:
            model.module.show_result(data, result, dataset.img_norm_cfg)

        # encode mask results
        if isinstance(result, tuple):
            bbox_results, mask_results = result
            result = bbox_results, encode_mask_results(mask_results)
        results.append(result)

        # One bar tick per image in the batch.
        for _ in range(data['img'][0].size(0)):
            prog_bar.update()
    return results
示例4: evaluate_accuracy
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def evaluate_accuracy(data_iter, net,
                      device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')):
    """Compute classification accuracy of `net` over `data_iter`.

    Supports both ``torch.nn.Module`` models (moved to `device`, toggled to
    eval mode per batch) and plain callables (optionally accepting an
    ``is_training`` keyword).

    :param data_iter: iterable of ``(X, y)`` batches
    :param net: module or callable producing class scores
    :param device: device used only for ``nn.Module`` models
    :return: fraction of correctly classified samples
    """
    correct, total = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(net, torch.nn.Module):
                net.eval()  # eval mode disables dropout
                preds = net(X.to(device)).argmax(dim=1)
                correct += (preds == y.to(device)).float().sum().cpu().item()
                net.train()  # restore training mode
            elif 'is_training' in net.__code__.co_varnames:
                # Custom model exposing an explicit training flag.
                correct += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
            else:
                correct += (net(X).argmax(dim=1) == y).float().sum().item()
            total += y.shape[0]
    return correct / total
示例5: predict
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def predict(self, data, is_train=False) -> 'torch.Tensor | dict':
    """
    Make predict by data. If ``is_train`` is ``True`` - this operation will compute gradients. If
    ``is_train`` is ``False`` - this will work with ``model.eval()`` and ``torch.no_grad``

    :param data: data in dict
    :param is_train: is data processor need train on data or just predict
    :return: processed output
    :rtype: model return type
    """
    # NOTE: the annotation was previously `torch.Tensor or dict`, which is a
    # runtime expression that evaluates to just `torch.Tensor` (the union was
    # silently lost); a string annotation expresses the intended union.
    if is_train:
        # Training-mode forward pass: gradients are tracked.
        self.model().train()
        output = self._model(self._pick_model_input(data))
    else:
        # Inference path: parent implementation handles eval()/no_grad.
        output = super().predict(data)
    return output
示例6: test
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def test(self, dataset):
    """Evaluate the model on `dataset`.

    :param dataset: indexable dataset yielding
        ``(ltree, linput, rtree, rinput, label)`` tuples
    :return: tuple ``(mean_loss, predictions)`` where each prediction is the
        expected rating, i.e. the dot product of class ids ``1..num_classes``
        with the model's class distribution
    """
    self.model.eval()
    with torch.no_grad():
        loss_sum = 0.0
        preds = torch.zeros(len(dataset), dtype=torch.float, device='cpu')
        class_ids = torch.arange(1, dataset.num_classes + 1, dtype=torch.float, device='cpu')
        for idx in tqdm(range(len(dataset)), desc=f'Testing epoch {self.epoch}'):
            ltree, linput, rtree, rinput, label = dataset[idx]
            target = utils.map_label_to_target(label, dataset.num_classes)
            linput = linput.to(self.device)
            rinput = rinput.to(self.device)
            target = target.to(self.device)

            output = self.model(ltree, linput, rtree, rinput)
            loss_sum += self.criterion(output, target).item()

            # exp(log-probs) -> class distribution; expectation = rating.
            dist = torch.exp(output.squeeze().to('cpu'))
            preds[idx] = torch.dot(class_ids, dist)
    return loss_sum / len(dataset), preds
示例7: fuse_conv_and_bn
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def fuse_conv_and_bn(conv, bn):
    """Fuse a ``Conv2d`` followed by a ``BatchNorm2d`` into a single ``Conv2d``.

    Folds the (frozen) batch-norm statistics and affine parameters into the
    convolution weights/bias so that ``fused(x) == bn(conv(x))`` in eval mode.
    Reference: https://tehnokv.com/posts/fusing-batchnorm-and-conv/

    :param conv: the ``torch.nn.Conv2d`` layer
    :param bn: the ``torch.nn.BatchNorm2d`` layer that follows it
    :return: a new ``torch.nn.Conv2d`` (with bias) equivalent to ``bn(conv(.))``
    """
    with torch.no_grad():
        # init -- FIX: propagate `groups` and `dilation`; previously they were
        # dropped, producing a wrong (or shape-incompatible) fused conv for
        # grouped/dilated convolutions.
        fusedconv = torch.nn.Conv2d(conv.in_channels,
                                    conv.out_channels,
                                    kernel_size=conv.kernel_size,
                                    stride=conv.stride,
                                    padding=conv.padding,
                                    dilation=conv.dilation,
                                    groups=conv.groups,
                                    bias=True)
        # prepare filters: W' = diag(gamma / sqrt(var + eps)) @ W
        w_conv = conv.weight.clone().view(conv.out_channels, -1)
        w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
        fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
        # prepare spatial bias: b' = gamma * (b - mean) / sqrt(var + eps) + beta
        if conv.bias is not None:
            b_conv = conv.bias
        else:
            # FIX: allocate on the conv's device/dtype (was CPU float32 only).
            b_conv = conv.weight.new_zeros(conv.out_channels)
        b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
        fusedconv.bias.copy_(b_conv + b_bn)
    return fusedconv
示例8: test
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, size_average=False).item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
示例9: select_action
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def select_action(self, state):
    """
    Epsilon-greedy action selection: exploit the policy network with
    probability ``1 - eps`` else sample a random action.

    :param state: current state tensor (batched, shape (1, ...))
    :return: long tensor of shape (1, 1) holding the chosen action index
    """
    if self.cuda:
        state = state.cuda()
    sample = random.random()
    # FIX: the threshold must anneal from eps_start DOWN to eps_end:
    #   eps = eps_end + (eps_start - eps_end) * exp(-t / decay)
    # The original used eps_start as the base term, so the threshold never
    # dropped below eps_start and the agent kept exploring forever.
    eps_threshold = self.config.eps_end + (self.config.eps_start - self.config.eps_end) * math.exp(
        -1. * self.current_iteration / self.config.eps_decay)
    self.current_iteration += 1
    if sample > eps_threshold:
        # Exploit: greedy action from the policy network (no gradients).
        with torch.no_grad():
            return self.policy_model(state).max(1)[1].view(1, 1)
    else:
        # Explore: uniform random action (action space of size 2).
        return torch.tensor([[random.randrange(2)]], device=self.device, dtype=torch.long)
示例10: validate
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def validate(self):
    """
    One cycle of model validation.

    Evaluates on ``self.data_loader.test_loader`` and logs average NLL loss
    and accuracy.

    :return: number of correctly classified test samples (new,
        backward-compatible return value)
    """
    self.model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in self.data_loader.test_loader:
            data, target = data.to(self.device), target.to(self.device)
            output = self.model(data)
            # FIX: `size_average=False` is deprecated; reduction='sum' sums the batch loss
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(self.data_loader.test_loader.dataset)
    self.logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(self.data_loader.test_loader.dataset),
        100. * correct / len(self.data_loader.test_loader.dataset)))
    return correct
示例11: extract_feature
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def extract_feature(model, model_path, dataloader, source, data_name):
    """Load weights from `model_path`, extract per-sample features, and dump
    ``[feature..., label]`` rows to ``'{source}_{data_name}.csv'``.

    :param model: network exposing ``get_feature``
    :param model_path: checkpoint path for ``load_state_dict``
    :param dataloader: iterable of ``(inputs, labels)`` batches
    :param source: dataset/source tag used in the output filename
    :param data_name: split tag used in the output filename
    """
    model.load_state_dict(torch.load(model_path))
    model.to(DEVICE)
    model.eval()
    # Seed row of zeros (500 feature dims + 1 label column); it is sliced
    # off before saving (``[1:]`` below).
    feature_bank = torch.zeros(1, 501).to(DEVICE)
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs = inputs.to(DEVICE)
            labels = labels.to(DEVICE)
            feats = model.get_feature(inputs)
            feats = feats.view(feats.size(0), -1)
            label_col = labels.view(labels.size(0), 1).float()
            rows = torch.cat((feats, label_col), dim=1)
            feature_bank = torch.cat((feature_bank, rows), dim=0)
    bank_np = feature_bank.cpu().numpy()
    np.savetxt('{}_{}.csv'.format(source, data_name), bank_np[1:], fmt='%.6f', delimiter=',')
    print('{} - {} done!'.format(source, data_name))
# You may want to use this function to simply classify them after getting features
示例12: extract_feature
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def extract_feature(model, dataloader, save_path, load_from_disk=True, model_path=''):
    """Extract features (plus a trailing label column) for every sample,
    save them as CSV, and report the classifier's test accuracy.

    :param model: network exposing ``get_features`` and ``base_network``
    :param dataloader: iterable of ``(inputs, labels)`` batches, with ``.dataset``
    :param save_path: destination CSV path
    :param load_from_disk: if True, rebuild the network and load `model_path`
    :param model_path: checkpoint path used when `load_from_disk` is True
    """
    if load_from_disk:
        model = models.Network(base_net=args.model_name,
                               n_class=args.num_class)
        model.load_state_dict(torch.load(model_path))
    model = model.to(DEVICE)
    model.eval()
    correct = 0
    # Seed row of zeros; sliced off before saving (``[1:]`` below).
    fea_all = torch.zeros(1, 1 + model.base_network.output_num()).to(DEVICE)
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
            feas = model.get_features(inputs)
            label_col = labels.view(labels.size(0), 1).float()
            x = torch.cat((feas, label_col), dim=1)
            fea_all = torch.cat((fea_all, x), dim=0)
            outputs = model(inputs)
            preds = torch.max(outputs, 1)[1]
            # FIX: previously compared against the reshaped (N, 1) labels,
            # which broadcast to an (N, N) matrix and inflated `correct`.
            correct += torch.sum(preds == labels.long())
    test_acc = correct.double() / len(dataloader.dataset)
    fea_numpy = fea_all.cpu().numpy()
    np.savetxt(save_path, fea_numpy[1:], fmt='%.6f', delimiter=',')
    print('Test acc: %f' % test_acc)
# You may want to classify with 1nn after getting features
示例13: test
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def test(model, test_loader):
    """Evaluate `model` on `test_loader`, print a summary, return #correct.

    The model is called as ``model(data, data, target, DEVICE)`` (data fed
    as both source and target) and must return a tuple whose first element
    is the raw class scores.

    :param model: transfer-learning network (see call convention above)
    :param test_loader: loader with a ``.dataset`` attribute
    :return: number of correctly classified samples
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(DEVICE), target.to(DEVICE)
            out = model(data, data, target, DEVICE)
            s_output = out[0]
            # FIX: `size_average=False` is deprecated; reduction='sum' sums the batch loss
            test_loss += F.nll_loss(F.log_softmax(s_output, dim=1), target,
                                    reduction='sum').item()
            pred = s_output.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    test_loss /= len(test_loader.dataset)
    print(args.test_dir, '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    return correct
示例14: test
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def test(model):
    """Evaluate `model` on the target-domain test set.

    Prints average NLL loss and accuracy over ``tgt_test_loader`` and
    returns the number of correct predictions.
    """
    model.eval()
    loss_total = 0
    n_correct = 0
    with torch.no_grad():
        for batch_x, batch_y in tgt_test_loader:
            if cuda:
                batch_x, batch_y = batch_x.cuda(), batch_y.cuda()
            batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            # Model returns (predictions, mmd loss); only predictions matter here.
            raw_pred, mmd_loss = model(batch_x, batch_x)
            # sum up batch loss
            loss_total += F.nll_loss(F.log_softmax(raw_pred, dim=1), batch_y, reduction='sum').item()
            pred = raw_pred.data.max(1)[1]  # get the index of the max log-probability
            n_correct += pred.eq(batch_y.data.view_as(pred)).cpu().sum()
    loss_total /= tgt_dataset_len
    print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        tgt_name, loss_total, n_correct, tgt_dataset_len,
        100. * n_correct / tgt_dataset_len))
    return n_correct
示例15: test
# 需要导入模块: import torch [as 别名]
# 或者: from torch import no_grad [as 别名]
def test(model, target_test_loader):
    """Evaluate `model.predict` on the target-domain test set.

    Tracks cross-entropy loss with an ``AverageMeter`` and prints the final
    correct count and accuracy for the source -> target transfer task.
    """
    model.eval()
    test_loss = utils.AverageMeter()
    n_correct = 0
    criterion = torch.nn.CrossEntropyLoss()
    len_target_dataset = len(target_test_loader.dataset)
    with torch.no_grad():
        for batch, labels in target_test_loader:
            batch, labels = batch.to(DEVICE), labels.to(DEVICE)
            logits = model.predict(batch)
            test_loss.update(criterion(logits, labels).item())
            preds = torch.max(logits, 1)[1]
            n_correct += torch.sum(preds == labels)
    print('{} --> {}: max correct: {}, accuracy{: .2f}%\n'.format(
        source_name, target_name, n_correct, 100. * n_correct / len_target_dataset))