This article collects typical usage examples of the torch.save function in Python. If you have been wondering what torch.save does, how to call it, or what it looks like in real code, the hand-picked examples below may help.
15 torch.save code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
Example 1: load_word_vectors
def load_word_vectors(path):
    if os.path.isfile(path + '.pth') and os.path.isfile(path + '.vocab'):
        print('==> File found, loading to memory')
        vectors = torch.load(path + '.pth')
        vocab = Vocab(filename=path + '.vocab')
        return vocab, vectors
    # saved file not found, read from txt file
    # and create tensors for word vectors
    print('==> File not found, preparing, be patient')
    count = sum(1 for line in open(path + '.txt', encoding='latin-1'))
    with open(path + '.txt', 'r') as f:
        contents = f.readline().rstrip('\n').split(' ')
        dim = len(contents[1:])
    words = [None] * count
    vectors = torch.zeros(count, dim)
    with open(path + '.txt', 'r', encoding='latin-1') as f:
        idx = 0
        for line in f:
            contents = line.rstrip('\n').split(' ')
            words[idx] = contents[0]
            vectors[idx] = torch.Tensor(list(map(float, contents[1:])))
            idx += 1
    with open(path + '.vocab', 'w', encoding='latin-1') as f:
        for word in words:
            f.write(word + '\n')
    vocab = Vocab(filename=path + '.vocab')
    torch.save(vectors, path + '.pth')
    return vocab, vectors
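What gets cached above is just a plain tensor, so the save/load round trip is symmetric. A minimal, self-contained sketch of the same pattern (file name and sizes are made up for illustration):

import torch

vectors = torch.zeros(100, 300)            # stand-in embedding matrix
torch.save(vectors, 'glove.sample.pth')    # serialize the tensor to disk
restored = torch.load('glove.sample.pth')  # later runs skip the slow text parsing
assert torch.equal(vectors, restored)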
Example 2: download
def download(self):
    if self._check_datafile_exists():
        print('# Found cached data {}'.format(self.data_file))
        return

    if not self._check_downloaded():
        # download files
        url = self.urls[self.name][0]
        filename = self.urls[self.name][1]
        md5 = self.urls[self.name][2]
        fpath = os.path.join(self.root, filename)

        download_url(url, self.root, filename, md5)

        print('# Extracting data {}\n'.format(self.data_down))

        import zipfile
        with zipfile.ZipFile(fpath, 'r') as z:
            z.extractall(self.data_dir)

        os.unlink(fpath)

    # process and save as torch files
    print('# Caching data {}'.format(self.data_file))

    dataset = (
        read_image_file(self.data_dir, self.image_ext, self.lens[self.name]),
        read_info_file(self.data_dir, self.info_file),
        read_matches_files(self.data_dir, self.matches_files)
    )

    with open(self.data_file, 'wb') as f:
        torch.save(dataset, f)
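As this example shows, torch.save accepts an open binary file object as well as a path, and the saved object can be any picklable Python value, here a tuple. A small standalone sketch of the same idea (file name and contents are illustrative):

import torch

dataset = (torch.arange(6).view(2, 3), {'split': 'train'}, [0, 1, 2])
with open('cache.pt', 'wb') as f:        # a file object instead of a path
    torch.save(dataset, f)
images, info, matches = torch.load('cache.pt')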
Example 3: fit
def fit(self, train_loader, dev_loader, test_loader,
        epochs, interval, eta, file):
    # record total training time
    total_time = timedelta()
    # record the best dev accuracy and the epoch at which it was reached
    max_e, max_acc = 0, 0.0
    # use Adam as the optimizer
    self.optimizer = optim.Adam(params=self.parameters(), lr=eta)
    for epoch in range(1, epochs + 1):
        start = datetime.now()
        # update the parameters
        self.update(train_loader)
        print(f"Epoch: {epoch} / {epochs}:")
        loss, train_acc = self.evaluate(train_loader)
        print(f"{'train:':<6} Loss: {loss:.4f} Accuracy: {train_acc:.2%}")
        loss, dev_acc = self.evaluate(dev_loader)
        print(f"{'dev:':<6} Loss: {loss:.4f} Accuracy: {dev_acc:.2%}")
        loss, test_acc = self.evaluate(test_loader)
        print(f"{'test:':<6} Loss: {loss:.4f} Accuracy: {test_acc:.2%}")
        t = datetime.now() - start
        print(f"{t}s elapsed\n")
        total_time += t
        # save the model that performs best on the dev set
        if dev_acc > max_acc:
            torch.save(self, file)
            max_e, max_acc = epoch, dev_acc
        elif epoch - max_e >= interval:
            break
    print(f"max accuracy of dev is {max_acc:.2%} at epoch {max_e}")
    print(f"mean time of each epoch is {total_time / epoch}s\n")
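torch.save(self, file) pickles the entire module object, so loading it later requires the exact same class definition and module path. A commonly used alternative, sketched here with a hypothetical Tagger model, is to save only the parameters:

import torch
import torch.nn as nn

class Tagger(nn.Module):                 # hypothetical stand-in model
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(10, 2)

    def forward(self, x):
        return self.fc(x)

model = Tagger()
torch.save(model.state_dict(), 'best.params')      # parameters only
clone = Tagger()                                    # rebuild the architecture
clone.load_state_dict(torch.load('best.params'))   # then restore the weights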
Example 4: train
def train(train_iter, dev_iter, test_iter, model_lstm, text_field, label_field, args):
    loss_function = nn.NLLLoss()
    optimizer = optim.Adam(model_lstm.parameters(), lr=1e-3)
    best_test_acc = 0.0
    no_up = 0
    for i in range(1, args.epochs + 1):
        print('epoch: %d start!' % i)
        train_epoch(model_lstm, train_iter, dev_iter, test_iter, loss_function, optimizer, i, args)
        dev_acc = evaluate(model_lstm, dev_iter, loss_function, 'dev')
        test_acc = evaluate(model_lstm, test_iter, loss_function, 'test')
        if test_acc > best_test_acc:
            print('New Best Test!!!')
            best_test_acc = test_acc
            # os.system('rm best_models/mr_best_model_minibatch_acc_*.model')
            if not os.path.isdir(args.save_dir):
                os.makedirs(args.save_dir)
            save_prefix = os.path.join(args.save_dir, 'snapshot')
            save_path = '{}epoch{}.pt'.format(save_prefix, i)
            # torch.save(model_lstm.state_dict(), 'best_models/mr_best_model_minibatch_acc_' + str(int(test_acc * 10000)) + '.model')
            torch.save(model_lstm, save_path)
            no_up = 0
        else:
            no_up += 1
            if no_up >= 10:
                exit()
        print('now best test acc:', best_test_acc)
Example 5: save
def save(self, save_optimizer=False, save_path=None, **kwargs):
    """Serialize the model, and optionally the optimizer and other info.

    Args:
        save_optimizer (bool): whether to save optimizer.state_dict().
        save_path (str): where to save the model; if None, the path is
            generated from a time string and the info in kwargs.

    Returns:
        save_path (str): the path the model file was stored at.
    """
    save_dict = dict()
    save_dict['model'] = self.faster_rcnn.state_dict()
    save_dict['config'] = opt._state_dict()
    save_dict['other_info'] = kwargs
    save_dict['vis_info'] = self.vis.state_dict()
    if save_optimizer:
        save_dict['optimizer'] = self.optimizer.state_dict()

    if save_path is None:
        timestr = time.strftime('%m%d%H%M')
        save_path = 'checkpoints/fasterrcnn_%s' % timestr
        for k_, v_ in kwargs.items():
            save_path += '_%s' % v_

    t.save(save_dict, save_path)  # t is torch (imported as `import torch as t` in this project)
    self.vis.save([self.vis.env])
    return save_path
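Loading is symmetric: the checkpoint is a plain dictionary of state_dicts. A rough sketch of the matching load step, assuming a trainer object with the same attributes as above:

state = torch.load(save_path)                        # save_path as returned above
trainer.faster_rcnn.load_state_dict(state['model'])  # restore the network weights
if 'optimizer' in state:
    trainer.optimizer.load_state_dict(state['optimizer'])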
Example 6: extract_features_targets
def extract_features_targets(model, features_size, loader, path_data, cuda=False):
    if os.path.isfile(path_data):
        print('Load features from {}'.format(path_data))
        return torch.load(path_data)

    print('\nExtract features on {}set'.format(loader.dataset.set))
    features = torch.Tensor(len(loader.dataset), features_size)
    targets = torch.Tensor(len(loader.dataset), len(loader.dataset.classes))

    for batch_id, batch in enumerate(tqdm(loader)):
        img = batch[0]
        target = batch[2]
        current_bsize = img.size(0)
        from_ = int(batch_id * loader.batch_size)
        to_ = int(from_ + current_bsize)

        if cuda:
            img = img.cuda(non_blocking=True)  # was async=True; async is a keyword since Python 3.7

        input = Variable(img, requires_grad=False)
        output = model(input)

        features[from_:to_] = output.data.cpu()
        targets[from_:to_] = target

    os.system('mkdir -p {}'.format(os.path.dirname(path_data)))
    print('save ' + path_data)
    torch.save((features, targets), path_data)
    print('')
    return features, targets
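The os.system('mkdir -p ...') call only works where a POSIX shell is available. A portable way to make sure the target directory exists before calling torch.save, sketched with an illustrative path and dummy tensors:

import os
import torch

path_data = 'data/features/trainset.pth'                 # illustrative path
os.makedirs(os.path.dirname(path_data), exist_ok=True)   # no-op if it already exists
torch.save((torch.randn(2, 8), torch.zeros(2, 3)), path_data)
features, targets = torch.load(path_data)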
Example 7: save
def save(net, filename):
    # unwrap DataParallel so the saved file does not depend on it
    if isinstance(net, nn.DataParallel):
        net = net.module
    data = dict(args=net.args,
                state_dict=net.state_dict())
    torch.save(data, filename)
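Because the saved dictionary carries both the constructor arguments and the weights, a matching load helper can rebuild the network without extra configuration. A sketch, assuming a network class whose constructor accepts those args:

def load(net_cls, filename):
    # net_cls is the (hypothetical) network class the file was created from
    data = torch.load(filename)
    net = net_cls(**data['args'])
    net.load_state_dict(data['state_dict'])
    return net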
Example 8: _comput_mean
def _comput_mean(self):
    meanstd_file = './data/300W_LP/mean.pth.tar'
    if os.path.isfile(meanstd_file):
        ms = torch.load(meanstd_file)
    else:
        print("\tcomputing mean and std for the first time, it may take a while, drink a cup of coffee...")
        mean = torch.zeros(3)
        std = torch.zeros(3)
        if self.is_train:
            for i in range(self.total):
                a = self.anno[i]
                img_path = os.path.join(self.img_folder, self.anno[i].split('_')[0],
                                        self.anno[i][:-8] + '.jpg')
                img = load_image(img_path)
                mean += img.view(img.size(0), -1).mean(1)
                std += img.view(img.size(0), -1).std(1)
            mean /= self.total
            std /= self.total
        ms = {
            'mean': mean,
            'std': std,
        }
        torch.save(ms, meanstd_file)
    if self.is_train:
        print('\tMean: %.4f, %.4f, %.4f' % (ms['mean'][0], ms['mean'][1], ms['mean'][2]))
        print('\tStd:  %.4f, %.4f, %.4f' % (ms['std'][0], ms['std'][1], ms['std'][2]))
    return ms['mean'], ms['std']
Example 9: test_load_to_gpu_from_gpu
def test_load_to_gpu_from_gpu(self):
    # This test will make sure that the initializer works on the GPU
    self.net1.cuda(device=0)
    self.net2.cuda(device=0)

    # Verify the parameters are on the GPU
    assert self.net1.linear_1.weight.is_cuda is True
    assert self.net1.linear_1.bias.is_cuda is True
    assert self.net2.linear_1.weight.is_cuda is True
    assert self.net2.linear_1.bias.is_cuda is True

    # We need to manually save the parameters to a file because setUp()
    # only does it for the CPU
    temp_file = self.TEST_DIR / "gpu_weights.th"
    torch.save(self.net2.state_dict(), temp_file)

    applicator = self._get_applicator("linear_1.*", temp_file)
    applicator(self.net1)

    # Verify the parameters are still on the GPU
    assert self.net1.linear_1.weight.is_cuda is True
    assert self.net1.linear_1.bias.is_cuda is True
    assert self.net2.linear_1.weight.is_cuda is True
    assert self.net2.linear_1.bias.is_cuda is True

    # Make sure the weights are identical
    assert self._are_equal(self.net1.linear_1, self.net2.linear_1)
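Weights saved from GPU tensors like this are restored to their original device by default; torch.load's map_location argument lets you redirect them, which matters when the checkpoint is later opened on a CPU-only machine. A short sketch using the file name from the test above:

import torch

cpu_state = torch.load("gpu_weights.th", map_location="cpu")      # force CPU
gpu_state = torch.load("gpu_weights.th", map_location="cuda:0")   # or a specific GPU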
Example 10: save
def save():
    # train and save net1; x and y are training data defined at module level
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
    loss_func = torch.nn.MSELoss()

    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # plot result
    plt.figure(1, figsize=(10, 3))
    plt.subplot(131)
    plt.title('Net1')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

    # 2 ways to save the net
    torch.save(net1, 'net.pkl')                      # save the entire net
    torch.save(net1.state_dict(), 'net_params.pkl')  # save only the parameters
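The matching restore step for both formats, as a sketch: loading the whole net gives the architecture back for free, while loading only the parameters requires rebuilding the same Sequential first.

# restore the entire net (architecture + parameters);
# on recent PyTorch versions you may need torch.load('net.pkl', weights_only=False)
net2 = torch.load('net.pkl')

# restore parameters only: rebuild the same architecture, then load the state_dict
net3 = torch.nn.Sequential(
    torch.nn.Linear(1, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1)
)
net3.load_state_dict(torch.load('net_params.pkl'))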
Example 11: test
def test(epoch):
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(testloader):
        inputs, targets = Variable(inputs), Variable(targets)
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        test_loss += loss.item()  # loss.data[0] is deprecated in recent PyTorch
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (test_loss / (batch_idx + 1), 100. * correct / total, correct, total))

    # Save checkpoint.
    acc = 100. * correct / total
    if acc > best_acc:
        print('Saving..')
        state = {
            'net': net.module if use_cuda else net,
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(state, './checkpoint/ckpt.t7')
        best_acc = acc
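Resuming from this kind of checkpoint is a single torch.load plus dictionary lookups. A rough sketch, using the keys defined above:

checkpoint = torch.load('./checkpoint/ckpt.t7')
net = checkpoint['net']              # the whole module was stored, not a state_dict
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch'] + 1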
Example 12: save_checkpoint
def save_checkpoint(model, output_path):
    # if not os.path.exists(output_dir):
    #     os.makedirs("model/")
    torch.save(model, output_path)
    print("Checkpoint saved to {}".format(output_path))
Example 13: save_model
def save_model(self):
    path = self.config.data_path
    if os.path.isdir('data'):
        path = 'data/{0}'.format(self.config.data_path)
    print('save model parameters to {0}'.format(path))
    torch.save(self.model.state_dict(), path)
Example 14: decompose_model_seq
def decompose_model_seq(model, layer_name, model_file):
    print(model)
    model.cpu()
    for i, (name, conv_layer) in enumerate(model.named_modules()):
        # for sequential nets, matching with 'in' is sufficient
        # as long as no two layers share the same name
        if layer_name in name:
            print(name)
            if args.cp:
                rank = max(conv_layer.weight.data.shape) // 3
                rank, _ = choose_compression(
                    conv_layer, ranks=[rank, rank], compression_factor=5, flag='cpd')
                print('rank: ', rank)
                rank = cp_ranks(conv_layer)
                print('rank: ', rank)
                decomposed = cp_decomposition_conv_layer_BN(conv_layer, rank, matlab=False)
                # decomposed = cp_xavier_conv_layer(conv_layer, rank)
            else:
                decomposed = tucker_decomposition_conv_layer(conv_layer)

            # named_modules() yields the sequential container first,
            # so index into it to replace the target layer
            model._modules['sequential']._modules[layer_name] = decomposed
    torch.save(model, model_file)
    return model
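Since the decomposed model is pickled whole, loading it back requires the custom decomposition layer classes to be importable under the same module paths as when it was saved. A minimal sketch of the load side (the file name is illustrative):

import torch

model = torch.load('decomposed_model.pth')  # classes referenced in the pickle must be importable
model.eval()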
Example 15: test_serialization_built_vocab
def test_serialization_built_vocab(self):
    self.write_test_ppid_dataset(data_format="tsv")
    question_field = data.Field(sequential=True)
    tsv_fields = [("id", None), ("q1", question_field),
                  ("q2", question_field), ("label", None)]
    tsv_dataset = data.TabularDataset(
        path=self.test_ppid_dataset_path, format="tsv",
        fields=tsv_fields)

    question_field.build_vocab(tsv_dataset)

    question_pickle_filename = "question.pl"
    question_pickle_path = os.path.join(self.test_dir, question_pickle_filename)
    torch.save(question_field, question_pickle_path)

    loaded_question_field = torch.load(question_pickle_path)

    assert loaded_question_field == question_field

    test_example_data = [["When", "do", "you", "use", "シ",
                          "instead", "of", "し?"],
                         ["What", "is", "2+2", "<pad>", "<pad>",
                          "<pad>", "<pad>", "<pad>"],
                         ["Here", "is", "a", "sentence", "with",
                          "some", "oovs", "<pad>"]]

    # Test results of numericalization
    original_numericalization = question_field.numericalize(test_example_data)
    pickled_numericalization = loaded_question_field.numericalize(test_example_data)

    assert torch.all(torch.eq(original_numericalization, pickled_numericalization))