本文整理汇总了Python中torch.autograd.Variable.view_as方法的典型用法代码示例。如果您正苦于以下问题:Python Variable.view_as方法的具体用法?Python Variable.view_as怎么用?Python Variable.view_as使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.autograd.Variable
的用法示例。
在下文中一共展示了Variable.view_as方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: eval
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import view_as [as 别名]
def eval():
    """Evaluate the generator on the validation set by ranking candidates.

    For every dialog (10 rounds each), scores all 100 candidate answers
    with the decoder `netG` conditioned on the encoder state, and records
    the rank of the ground-truth answer among the candidates.

    Relies on module-level state defined elsewhere in the file: models
    (netE, netW, netG), data (dataloader_val), config (opt), pre-allocated
    input Variables (img_input, his_input, ques_input, ans_input,
    ans_target, gt_index), constants (ans_length, his_length) and the
    repackage_hidden helper.

    Returns:
        list: rank (1-based, lower is better) of the ground-truth answer
        for every evaluated question.
    """
    netE.eval()
    netW.eval()
    netG.eval()

    data_iter_val = iter(dataloader_val)
    ques_hidden = netE.init_hidden(opt.batchSize)
    hist_hidden = netE.init_hidden(opt.batchSize)

    i = 0
    rank_all_tmp = []
    while i < len(dataloader_val):
        # FIX: `iterator.next()` is Python-2-only; use the builtin next().
        data = next(data_iter_val)
        image, history, question, answer, answerT, questionL, opt_answer, \
            opt_answerT, answer_ids, answerLen, opt_answerLen, img_id = data

        batch_size = question.size(0)
        image = image.view(-1, 512)
        img_input.data.resize_(image.size()).copy_(image)

        for rnd in range(10):
            # Get the corresponding round's QA pair and dialog history.
            ques = question[:, rnd, :].t()
            tans = opt_answerT[:, rnd, :].clone().view(-1, ans_length).t()
            his = history[:, :rnd + 1, :].clone().view(-1, his_length).t()
            ans = opt_answer[:, rnd, :, :].clone().view(-1, ans_length).t()
            gt_id = answer_ids[:, rnd]

            his_input.data.resize_(his.size()).copy_(his)
            ques_input.data.resize_(ques.size()).copy_(ques)
            ans_input.data.resize_(ans.size()).copy_(ans)
            ans_target.data.resize_(tans.size()).copy_(tans)
            gt_index.data.resize_(gt_id.size()).copy_(gt_id)

            ques_emb = netW(ques_input, format='index')
            his_emb = netW(his_input, format='index')

            ques_hidden = repackage_hidden(ques_hidden, batch_size)
            hist_hidden = repackage_hidden(hist_hidden, his_input.size(1))

            encoder_feat, ques_hidden = netE(ques_emb, his_emb, img_input,
                                             ques_hidden, hist_hidden, rnd + 1)

            _, ques_hidden = netG(encoder_feat.view(1, -1, opt.ninp), ques_hidden)

            # Replicate the decoder hidden state 100x so each of the 100
            # candidate answers is scored from the same encoder context.
            hidden_replicated = []
            for hid in ques_hidden:
                hidden_replicated.append(
                    hid.view(opt.nlayers, batch_size, 1, opt.nhid)
                       .expand(opt.nlayers, batch_size, 100, opt.nhid)
                       .clone().view(opt.nlayers, -1, opt.nhid))
            hidden_replicated = tuple(hidden_replicated)

            ans_emb = netW(ans_input, format='index')
            output, _ = netG(ans_emb, hidden_replicated)

            logprob = - output
            logprob_select = torch.gather(logprob, 1, ans_target.view(-1, 1))

            # Zero out positions where the target token is padding (id 0)
            # so padding does not contribute to the candidate score.
            mask = ans_target.data.eq(0)
            if isinstance(logprob, Variable):
                mask = Variable(mask, volatile=logprob.volatile)
            logprob_select.masked_fill_(mask.view_as(logprob_select), 0)

            prob = logprob_select.view(ans_length, -1, 100).sum(0).view(-1, 100)

            # Offset each sample's ground-truth index into the flattened
            # (batch * 100) score vector.
            for b in range(batch_size):
                gt_index.data[b] = gt_index.data[b] + b * 100

            gt_score = prob.view(-1).index_select(0, gt_index)
            sort_score, sort_idx = torch.sort(prob, 1)

            # Rank = 1 + number of candidates scoring strictly below the
            # ground truth (scores are negative log-probs, lower = better).
            count = sort_score.lt(gt_score.view(-1, 1).expand_as(sort_score))
            rank = count.sum(1) + 1
            rank_all_tmp += list(rank.view(-1).data.cpu().numpy())

        i += 1
        sys.stdout.write('Evaluating: {:d}/{:d} \r'
                         .format(i, len(dataloader_val)))
        if i % 50 == 0:
            ranks = np.array(rank_all_tmp)
            total = float(len(rank_all_tmp))
            R1 = np.sum(ranks == 1) / total
            R5 = np.sum(ranks <= 5) / total
            R10 = np.sum(ranks <= 10) / total
            ave = np.sum(ranks) / total
            mrr = np.sum(1 / ranks.astype('float')) / total
            # FIX: report the current batch index i, not the constant 1.
            print('%d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f'
                  % (i, len(dataloader_val), mrr, R1, R5, R10, ave))

    return rank_all_tmp
示例2: val
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import view_as [as 别名]
def val():
    """Rank the 100 candidate answers per round on the validation set.

    Same ranking procedure as `eval` but with the image feature size taken
    from `img_feat_size` and no progress printing.

    Relies on module-level state defined elsewhere in the file: models
    (netE, netW, netG), data (dataloader_val), config (opt), pre-allocated
    input Variables (img_input, his_input, ques_input, ans_input,
    ans_target, gt_index), constants (ans_length, his_length,
    img_feat_size) and the repackage_hidden helper.

    Returns:
        tuple: (rank_all_tmp, average_loss) where rank_all_tmp is the list
        of 1-based ground-truth ranks and average_loss is always 0 here
        (kept for interface compatibility — no loss is accumulated).
    """
    netE.eval()
    netW.eval()
    netG.eval()

    data_iter_val = iter(dataloader_val)
    ques_hidden = netE.init_hidden(opt.batchSize)
    hist_hidden = netE.init_hidden(opt.batchSize)

    i = 0
    average_loss = 0
    rank_all_tmp = []
    while i < len(dataloader_val):
        # FIX: `iterator.next()` is Python-2-only; use the builtin next().
        data = next(data_iter_val)
        image, history, question, answer, answerT, questionL, opt_answer, \
            opt_answerT, answer_ids, answerLen, opt_answerLen, img_id = data

        batch_size = question.size(0)
        image = image.view(-1, img_feat_size)
        img_input.data.resize_(image.size()).copy_(image)

        for rnd in range(10):
            # Get the corresponding round's QA pair and dialog history.
            ques = question[:, rnd, :].t()
            tans = opt_answerT[:, rnd, :].clone().view(-1, ans_length).t()
            his = history[:, :rnd + 1, :].clone().view(-1, his_length).t()
            ans = opt_answer[:, rnd, :, :].clone().view(-1, ans_length).t()
            gt_id = answer_ids[:, rnd]

            his_input.data.resize_(his.size()).copy_(his)
            ques_input.data.resize_(ques.size()).copy_(ques)
            ans_input.data.resize_(ans.size()).copy_(ans)
            ans_target.data.resize_(tans.size()).copy_(tans)
            gt_index.data.resize_(gt_id.size()).copy_(gt_id)

            ques_emb = netW(ques_input, format='index')
            his_emb = netW(his_input, format='index')

            ques_hidden = repackage_hidden(ques_hidden, batch_size)
            hist_hidden = repackage_hidden(hist_hidden, his_input.size(1))

            encoder_feat, ques_hidden = netE(ques_emb, his_emb, img_input,
                                             ques_hidden, hist_hidden, rnd + 1)

            _, ques_hidden = netG(encoder_feat.view(1, -1, opt.ninp), ques_hidden)

            # Replicate the decoder hidden state 100x so each of the 100
            # candidate answers is scored from the same encoder context.
            hidden_replicated = []
            for hid in ques_hidden:
                hidden_replicated.append(
                    hid.view(opt.nlayers, batch_size, 1, opt.nhid)
                       .expand(opt.nlayers, batch_size, 100, opt.nhid)
                       .clone().view(opt.nlayers, -1, opt.nhid))
            hidden_replicated = tuple(hidden_replicated)

            ans_emb = netW(ans_input, format='index')
            output, _ = netG(ans_emb, hidden_replicated)

            logprob = - output
            logprob_select = torch.gather(logprob, 1, ans_target.view(-1, 1))

            # Zero out positions where the target token is padding (id 0)
            # so padding does not contribute to the candidate score.
            mask = ans_target.data.eq(0)
            if isinstance(logprob, Variable):
                mask = Variable(mask, volatile=logprob.volatile)
            logprob_select.masked_fill_(mask.view_as(logprob_select), 0)

            prob = logprob_select.view(ans_length, -1, 100).sum(0).view(-1, 100)

            # Offset each sample's ground-truth index into the flattened
            # (batch * 100) score vector.
            for b in range(batch_size):
                gt_index.data[b] = gt_index.data[b] + b * 100

            gt_score = prob.view(-1).index_select(0, gt_index)
            sort_score, sort_idx = torch.sort(prob, 1)

            # Rank = 1 + number of candidates scoring strictly below the
            # ground truth (scores are negative log-probs, lower = better).
            count = sort_score.lt(gt_score.view(-1, 1).expand_as(sort_score))
            rank = count.sum(1) + 1
            rank_all_tmp += list(rank.view(-1).data.cpu().numpy())

        i += 1

    return rank_all_tmp, average_loss
示例3: val
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import view_as [as 别名]
def val():
netE_g.eval()
netE_d.eval()
netW_g.eval()
netW_d.eval()
netG.eval()
netD.eval()
n_neg = 100
ques_hidden1 = netE_g.init_hidden(opt.batchSize)
ques_hidden2 = netE_d.init_hidden(opt.batchSize)
hist_hidden1 = netE_d.init_hidden(opt.batchSize)
hist_hidden2 = netE_g.init_hidden(opt.batchSize)
opt_hidden = netD.init_hidden(opt.batchSize)
data_iter_val = iter(dataloader_val)
count = 0
i = 0
rank_G = []
rank_D = []
while i < len(dataloader_val):
data = data_iter_val.next()
image, history, question, answer, answerT, questionL, opt_answer, \
opt_answerT, answer_ids, answerLen, opt_answerLen, img_id = data
batch_size = question.size(0)
image = image.view(-1, 512)
img_input.data.resize_(image.size()).copy_(image)
for rnd in range(10):
# get the corresponding round QA and history.
ques = question[:,rnd,:].t()
his = history[:,:rnd+1,:].clone().view(-1, his_length).t()
opt_ans = opt_answer[:,rnd,:,:].clone().view(-1, ans_length).t()
opt_tans = opt_answerT[:,rnd,:].clone().view(-1, ans_length).t()
gt_id = answer_ids[:,rnd]
opt_len = opt_answerLen[:,rnd,:].clone().view(-1)
ques_input.data.resize_(ques.size()).copy_(ques)
his_input.data.resize_(his.size()).copy_(his)
opt_ans_input.data.resize_(opt_ans.size()).copy_(opt_ans)
opt_ans_target.data.resize_(opt_tans.size()).copy_(opt_tans)
gt_index.data.resize_(gt_id.size()).copy_(gt_id)
ques_emb_g = netW_g(ques_input, format = 'index')
his_emb_g = netW_g(his_input, format = 'index')
ques_emb_d = netW_d(ques_input, format = 'index')
his_emb_d = netW_d(his_input, format = 'index')
ques_hidden1 = repackage_hidden(ques_hidden1, batch_size)
ques_hidden2 = repackage_hidden(ques_hidden2, batch_size)
hist_hidden1 = repackage_hidden(hist_hidden1, his_emb_g.size(1))
hist_hidden2 = repackage_hidden(hist_hidden2, his_emb_d.size(1))
featG, ques_hidden1 = netE_g(ques_emb_g, his_emb_g, img_input, \
ques_hidden1, hist_hidden1, rnd+1)
featD, _ = netE_d(ques_emb_d, his_emb_d, img_input, \
ques_hidden2, hist_hidden2, rnd+1)
#featD = l2_norm(featD)
# Evaluate the Generator:
_, ques_hidden1 = netG(featG.view(1,-1,opt.ninp), ques_hidden1)
#_, ques_hidden = netG(encoder_feat.view(1,-1,opt.ninp), ques_hidden)
# extend the hidden
hidden_replicated = []
for hid in ques_hidden1:
hidden_replicated.append(hid.view(opt.nlayers, batch_size, 1, \
opt.nhid).expand(opt.nlayers, batch_size, 100, opt.nhid).clone().view(opt.nlayers, -1, opt.nhid))
hidden_replicated = tuple(hidden_replicated)
ans_emb = netW_g(opt_ans_input, format = 'index')
output, _ = netG(ans_emb, hidden_replicated)
logprob = - output
logprob_select = torch.gather(logprob, 1, opt_ans_target.view(-1,1))
mask = opt_ans_target.data.eq(0) # generate the mask
if isinstance(logprob, Variable):
mask = Variable(mask, volatile=logprob.volatile)
logprob_select.masked_fill_(mask.view_as(logprob_select), 0)
prob = logprob_select.view(ans_length, -1, 100).sum(0).view(-1,100)
for b in range(batch_size):
gt_index.data[b] = gt_index.data[b] + b*100
gt_score = prob.view(-1).index_select(0, gt_index)
sort_score, sort_idx = torch.sort(prob, 1)
count = sort_score.lt(gt_score.view(-1,1).expand_as(sort_score))
rank = count.sum(1) + 1
rank_G += list(rank.view(-1).data.cpu().numpy())
#.........这里部分代码省略.........