本文整理汇总了Python中utils.repackage_hidden方法的典型用法代码示例。如果您正苦于以下问题:Python utils.repackage_hidden方法的具体用法?Python utils.repackage_hidden怎么用?Python utils.repackage_hidden使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类utils
的用法示例。
在下文中一共展示了utils.repackage_hidden方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: evaluate
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate(data_source, source_sampler, target_sampler, batch_size=10):
    """Return the average per-element loss of the global `model` on `data_source`.

    Batches are assembled from index samples yielded by `source_sampler` /
    `target_sampler`. Relies on file-level globals: `model`, `args`,
    `criterion`, `torch`, `repackage_hidden`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    if args.model == 'QRNN':
        model.reset()
    total_loss = 0
    hidden = model.init_hidden(batch_size)
    for source_sample, target_sample in zip(source_sampler, target_sampler):
        # BUG FIX: the original called model.train() here, re-enabling dropout
        # in the middle of evaluation and contradicting model.eval() above.
        data = torch.stack([data_source[i] for i in source_sample])
        targets = torch.stack([data_source[i] for i in target_sample]).view(-1)
        with torch.no_grad():
            output, hidden = model(data, hidden)
            total_loss += len(data) * criterion(model.decoder.weight, model.decoder.bias, output,
                                                targets).item()
        # Detach hidden state so the graph from this batch is not retained.
        hidden = repackage_hidden(hidden)
    return total_loss / len(data_source)
示例2: evaluate
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate(data_source, batch_size=10):
    """Compute the mean negative log-likelihood of `parallel_model` on `data_source`.

    Iterates over the corpus in BPTT-sized chunks, carrying the hidden state
    across chunks (detached each step). Uses file-level globals `model`,
    `corpus`, `args`, `parallel_model`, `get_batch`, `repackage_hidden`.
    """
    # Evaluation mode: dropout layers are disabled.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    with torch.no_grad():
        for start in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, start, args)
            targets = targets.view(-1)
            log_prob, hidden = parallel_model(data, hidden)
            flat_log_prob = log_prob.view(-1, log_prob.size(2))
            chunk_loss = nn.functional.nll_loss(flat_log_prob, targets).data
            # Weight each chunk's mean loss by its length.
            total_loss += len(data) * chunk_loss
            hidden = repackage_hidden(hidden)
    return total_loss.item() / len(data_source)
示例3: evaluate
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate(data_source, batch_size=10):
    """Return the length-weighted average NLL of `parallel_model` over `data_source`.

    Hidden state is propagated (and detached) between BPTT chunks. Depends on
    globals: `model`, `corpus`, `args`, `parallel_model`, `get_batch`,
    `repackage_hidden`.
    """
    # Disable dropout for evaluation.
    model.eval()
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    total_loss = 0
    last = data_source.size(0) - 1
    with torch.no_grad():
        for offset in range(0, last, args.bptt):
            data, targets = get_batch(data_source, offset, args)
            targets = targets.view(-1)
            log_prob, hidden = parallel_model(data, hidden)
            nll = nn.functional.nll_loss(
                log_prob.view(-1, log_prob.size(2)), targets).data
            total_loss += nll * len(data)
            hidden = repackage_hidden(hidden)
    return total_loss.item() / len(data_source)
示例4: evaluate
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate(data_source, batch_size=10):
    """Return the length-weighted average NLL of `parallel_model` over `data_source`.

    Uses file-level globals: `model`, `corpus`, `args`, `parallel_model`,
    `get_batch`, `repackage_hidden`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)
        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
        total_loss += loss * len(data)
        # Detach hidden state between chunks so the graph is not retained.
        hidden = repackage_hidden(hidden)
    # BUG FIX: was `total_loss[0]`, the pre-0.4 PyTorch scalar idiom, which
    # raises on 0-dim tensors in modern PyTorch; `.item()` works in both.
    return total_loss.item() / len(data_source)
示例5: evaluate
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate(data_source, batch_size=10):
    """Return the length-weighted average NLL of `parallel_model` over `data_source`.

    Uses file-level globals: `model`, `corpus`, `args`, `parallel_model`,
    `get_batch`, `repackage_hidden`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        # BUG FIX: removed leftover debug `print(i, data_source.size(0)-1)`
        # that spammed stdout on every evaluation chunk.
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)
        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
        total_loss += loss * len(data)
        hidden = repackage_hidden(hidden)
    # BUG FIX: was `total_loss[0]` (pre-0.4 scalar idiom); `.item()` also
    # works on the 0-dim tensors returned by modern PyTorch.
    return total_loss.item() / len(data_source)
# Load the best saved model.
示例6: eval_pred
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def eval_pred(dr_model, ub):
    """Evaluate a DREAM model for next-basket prediction over all training users.

    Processes `ub` (user baskets) in batches and scores every item for each
    user's latest basket representation.

    Returns:
        (score_u, id_u): list of per-user score arrays (each shape
        (1, num_item)) and the matching list of user ids.
    """
    item_embedding = dr_model.encode.weight
    dr_model.eval()
    dr_hidden = dr_model.init_hidden(dr_model.config.batch_size)
    start_time = time()
    id_u, score_u = [], []  # user ids and their score matrices, aligned
    num_batchs = ceil(len(ub) / dr_model.config.batch_size)
    # BUG FIX: removed debug `print(i)`; renamed loop variables so the inner
    # loop no longer shadows the outer batch index.
    for batch_idx, x in enumerate(batchify(ub, dr_model.config.batch_size)):
        baskets, lens, uids = x
        # dynamic_user shape: (batch_size, max_len, embedding_size)
        _, dynamic_user, _ = dr_model(baskets, lens, dr_hidden)
        dr_hidden = repackage_hidden(dr_hidden)
        for uid, l, du in zip(uids, lens, dynamic_user):
            du_latest = du[l - 1].unsqueeze(0)  # shape: (1, embedding_size)
            # Score every item against the user's latest representation.
            score_up = torch.mm(du_latest, item_embedding.t())  # (1, num_item)
            score_u.append(score_up.cpu().data.numpy())
            id_u.append(uid)
    elapsed = time() - start_time
    # BUG FIX: format spec was '{02.2f}' (missing ':'), which raises at runtime.
    print('[Predicting] Elapsed: {:02.2f}'.format(elapsed))
    # BUG FIX: returned undefined name `score_ub` (NameError); list is `score_u`.
    return score_u, id_u
示例7: evaluate_dream
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate_dream():
    """Evaluate the DREAM model on `test_ub` with BPR loss; return the mean loss.

    Uses file-level globals: `dr_model`, `dr_config`, `test_ub`, `bpr_loss`,
    `batchify`, `repackage_hidden`, `writer`, `epoch`.
    """
    dr_model.eval()
    dr_hidden = dr_model.init_hidden(dr_config.batch_size)
    total_loss = 0
    start_time = time()
    num_batchs = ceil(len(test_ub) / dr_config.batch_size)
    for i, x in enumerate(batchify(test_ub, dr_config.batch_size)):
        baskets, lens, _ = x
        dynamic_user, _ = dr_model(baskets, lens, dr_hidden)
        loss = bpr_loss(baskets, dynamic_user, dr_model.encode.weight, dr_config)
        # Detach the hidden state so the graph is not retained across batches.
        dr_hidden = repackage_hidden(dr_hidden)
        total_loss += loss.data
    # Logging
    elapsed = (time() - start_time) * 1000 / num_batchs
    # BUG FIX: was `total_loss[0]` (pre-0.4 scalar idiom); `.item()` also
    # handles the 0-dim tensors of modern PyTorch.
    total_loss = total_loss.item() / num_batchs / dr_config.batch_size
    # BUG FIX: the original logged this scalar twice (duplicated add_scalar line).
    writer.add_scalar('model/eval_loss', total_loss, (epoch + 1) * num_batchs)
    print('[Evaluation]| Epochs {:3d} | Elapsed {:02.2f} | Loss {:05.2f} |'.format(epoch, elapsed, total_loss))
    return total_loss
示例8: evaluate_reorder_dream
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate_reorder_dream():
    """Evaluate the DREAM model on `test_ub` with reorder BPR loss; return the mean loss.

    Uses file-level globals: `dr_model`, `dr_config`, `test_ub`,
    `reorder_bpr_loss`, `batchify`, `repackage_hidden`, `epoch`.
    """
    dr_model.eval()
    dr_hidden = dr_model.init_hidden(dr_config.batch_size)
    total_loss = 0
    start_time = time()
    num_batchs = ceil(len(test_ub) / dr_config.batch_size)
    for i, x in enumerate(batchify(test_ub, dr_config.batch_size, is_reordered=True)):
        baskets, lens, _, r_baskets, h_baskets = x
        dynamic_user, _ = dr_model(baskets, lens, dr_hidden)
        loss = reorder_bpr_loss(r_baskets, h_baskets, dynamic_user, dr_model.encode.weight, dr_config)
        # Detach the hidden state so the graph is not retained across batches.
        dr_hidden = repackage_hidden(dr_hidden)
        total_loss += loss.data
    # Logging
    elapsed = (time() - start_time) * 1000 / num_batchs
    # BUG FIX: was `total_loss[0]` (pre-0.4 scalar idiom); `.item()` also
    # handles the 0-dim tensors of modern PyTorch.
    total_loss = total_loss.item() / num_batchs / dr_config.batch_size
    print('[Evaluation]| Epochs {:3d} | Elapsed {:02.2f} | Loss {:05.2f} |'.format(epoch, elapsed, total_loss))
    return total_loss
示例9: evaluate
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate(data_source, batch_size=10):
    """Return the length-weighted average NLL of `parallel_model` over `data_source`.

    Note: this variant passes `args.arc` (an architecture spec) to the model.
    Uses file-level globals: `model`, `corpus`, `args`, `parallel_model`,
    `get_batch`, `repackage_hidden`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        # BUG FIX: removed leftover debug `print(i, data_source.size(0)-1)`.
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)
        log_prob, hidden = parallel_model(data, hidden, args.arc)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
        total_loss += loss * len(data)
        hidden = repackage_hidden(hidden)
    # BUG FIX: was `total_loss[0]` (pre-0.4 scalar idiom); `.item()` also
    # works on the 0-dim tensors of modern PyTorch.
    return total_loss.item() / len(data_source)
# Load the best saved model.
示例10: evaluate
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate(data_source, batch_size=10):
    """Return the length-weighted average NLL of `parallel_model` over `data_source`.

    Note: this variant passes `args.bptt` (not `args`) to `get_batch`,
    matching that helper's signature in this file's context.
    Uses file-level globals: `model`, `corpus`, `args`, `parallel_model`,
    `get_batch`, `repackage_hidden`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args.bptt, evaluation=True)
        targets = targets.view(-1)
        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
        total_loss += loss * len(data)
        hidden = repackage_hidden(hidden)
    # BUG FIX: was `total_loss[0]` (pre-0.4 scalar idiom); `.item()` also
    # works on the 0-dim tensors of modern PyTorch.
    return total_loss.item() / len(data_source)
示例11: evaluate
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate(data_source, batch_size=10):
    """Return the length-weighted average loss of `model` over `data_source`.

    Uses file-level globals: `model`, `corpus`, `args`, `criterion`,
    `get_batch`, `repackage_hidden`.
    """
    # Turn on evaluation mode which disables dropout.
    if args.model == 'QRNN': model.reset()
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        total_loss += len(data) * criterion(output_flat, targets).data
        hidden = repackage_hidden(hidden)
    # BUG FIX: was `total_loss[0]` (pre-0.4 scalar idiom); `.item()` also
    # works on the 0-dim tensors of modern PyTorch.
    return total_loss.item() / len(data_source)
示例12: evaluate
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate(data_source, batch_size=10):
    """Return the length-weighted average loss of `model` over `data_source`.

    Uses file-level globals: `model`, `corpus`, `args`, `criterion`,
    `get_batch`, `repackage_hidden`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    if args.model == 'QRNN': model.reset()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        total_loss += len(data) * criterion(output_flat, targets).data
        hidden = repackage_hidden(hidden)
    # BUG FIX: was `total_loss[0]` (pre-0.4 scalar idiom); `.item()` also
    # works on the 0-dim tensors of modern PyTorch.
    return total_loss.item() / len(data_source)
示例13: evaluate
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate(data_source, batch_size, seq_len):
    """Evaluate `model` on `data_source`, skipping the first `args.start_token` tokens.

    Returns a tuple of (mean per-position cross-entropy, list of all
    per-position loss values). Uses file-level globals: `model`, `corpus`,
    `args`, `get_batch`, `repackage_hidden`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    tokens = 0            # tokens consumed so far across chunks
    n = 0                 # number of positions actually scored (after the skip)
    save_all_losses = []  # per-position cross-entropy values, in order
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, seq_len):
        tokens += seq_len
        data, targets = get_batch(data_source, i, args, evaluation=True, seq_len=seq_len)
        output, hidden = model(data, hidden)
        # Normalize over the first axis after permuting, then restore the
        # layout. NOTE(review): this assumes output is (seq, batch, vocab) so
        # the softmax runs over the vocabulary — confirm against the model.
        output = nn.functional.log_softmax(output.permute(2,1,0)).permute(2,1,0)
        targets = targets.view(data.data.shape[0], batch_size, -1)
        # Pick out the log-probability of each gold token; negate for NLL.
        CELoss = torch.gather(output.data, dim=2, index=targets.data).squeeze()
        CELoss = -1*CELoss
        if tokens < args.start_token: continue # We are not ready to accumulate error yet
        elif tokens >= args.start_token and tokens-seq_len < args.start_token:
            # This chunk straddles start_token: keep only the tail that lies
            # past the skip boundary.
            data.data = data.data[-(tokens-args.start_token+1):]
            CELoss = CELoss[-(tokens-args.start_token+1):]
            print('First word: %s' % (corpus.dictionary.idx2word[data.data[-(tokens-args.start_token+1),0]]))
        total_loss += torch.sum(CELoss)
        n += data.size(0)
        save_all_losses += CELoss.tolist()
        # Detach hidden state so the graph is not retained across chunks.
        hidden = repackage_hidden(hidden)
    print('total: %d' % n)
    print('Last word: %s' % (corpus.dictionary.idx2word[data.data[-1,0]]))
    return total_loss / float(n), save_all_losses
示例14: evaluate
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate(data_source, batch_size=10):
    """Return the length-weighted average loss of `model` over `data_source`.

    Uses file-level globals: `model`, `corpus`, `args`, `criterion`,
    `get_batch`, `repackage_hidden`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        total_loss += len(data) * criterion(output_flat, targets).data
        hidden = repackage_hidden(hidden)
    # BUG FIX: was `total_loss[0]` (pre-0.4 scalar idiom); `.item()` also
    # works on the 0-dim tensors of modern PyTorch.
    return total_loss.item() / len(data_source)
示例15: evaluate
# 需要导入模块: import utils [as 别名]
# 或者: from utils import repackage_hidden [as 别名]
def evaluate(data_source, batch_size=10):
    """Return the length-weighted average loss of `model` over `data_source`.

    Uses file-level globals: `model`, `corpus`, `args`, `criterion`,
    `get_batch`, `repackage_hidden`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        total_loss += len(data) * criterion(output_flat, targets).data
        hidden = repackage_hidden(hidden)
    # BUG FIX: was `total_loss[0]` (pre-0.4 scalar idiom); `.item()` also
    # works on the 0-dim tensors of modern PyTorch.
    return total_loss.item() / len(data_source)
# Load the best saved model.