This page collects typical usage examples of the Python method torch.autograd.Variable.transpose. If you have been wondering what Variable.transpose does, when to use it, or how to call it, the curated examples below should help. You can also explore further usage of its containing class, torch.autograd.Variable.
The following 15 code examples of Variable.transpose are sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
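Before the examples, a minimal sketch of the method itself (a hedged illustration, not taken from any example below): Variable.transpose(dim0, dim1) swaps two dimensions and returns a view, exactly like Tensor.transpose, and it participates in autograd. The snippet assumes the pre-0.4 PyTorch Variable API that all examples on this page use.

import torch
from torch.autograd import Variable

x = Variable(torch.randn(2, 3, 4), requires_grad=True)
y = x.transpose(0, 1)   # shape (3, 2, 4); a view, no data copy
z = x.transpose(2, 1)   # shape (2, 4, 3); the "points" layout swap used in several examples below
print(y.size(), z.size())
y.sum().backward()      # gradients flow back through the transpose
print(x.grad.size())    # (2, 3, 4)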
Example 1: _pad_packed_sequence
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def _pad_packed_sequence(sequence, batch_first=False, padding_value=0):
    var_data, batch_sizes = sequence
    max_batch_size = int(batch_sizes[0])
    output = var_data.data.new(len(batch_sizes), max_batch_size, *var_data.size()[1:]).fill_(padding_value)
    output = Variable(output)

    lengths = []
    data_offset = 0
    prev_batch_size = int(batch_sizes[0])
    prev_i = 0
    for i, batch_size in enumerate(batch_sizes.tolist() + [0]):
        if batch_size != prev_batch_size:
            l = prev_batch_size * (i - prev_i)
            tmp = var_data[data_offset:data_offset + l]
            output[prev_i:i, :prev_batch_size] = tmp.view(i - prev_i, prev_batch_size, *tmp.size()[1:])
            data_offset += l
            prev_i = i
        dec = prev_batch_size - batch_size
        if dec > 0:
            lengths.extend((i,) * dec)
        prev_batch_size = batch_size

    lengths.reverse()

    if batch_first:
        output = output.transpose(0, 1)
    # This Variable doesn't actually have any history (well,
    # technically it does; it's just untracked), it is purely here to
    # make ONNX export easier. That is to say, from an autodiff
    # standpoint this doesn't make any sense.
    return output, Variable(torch.LongTensor(lengths))
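A hedged usage sketch for the helper above; the tensor shapes and the companion pack_padded_sequence import are assumptions, not part of the original example, and it assumes a PyTorch version whose PackedSequence stores batch_sizes as a tensor (as the helper's .tolist() call implies).

from torch.nn.utils.rnn import pack_padded_sequence

# three sequences of lengths 3, 2, 1, already padded into a T x B x * Variable
# and sorted by decreasing length, as pack_padded_sequence requires
padded = Variable(torch.randn(3, 3, 5))
packed = pack_padded_sequence(padded, [3, 2, 1])
unpacked, lengths = _pad_packed_sequence(packed, batch_first=True)
print(unpacked.size())  # transposed to B x T x *: (3, 3, 5)
print(lengths)          # Variable containing [3, 2, 1]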
Example 2: predict
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def predict(model, test_loader):
    # switch to evaluate mode
    model.eval()

    ################ Note ############################
    # each sample may have a different number of
    # points, so just use a batch size of 1
    ##################################################
    debug_here()
    for i, (points_data, _seg_data, labels) in enumerate(test_loader, 0):
        if i % 10 == 0:
            print('{0}/{1}'.format(i, len(test_loader)))

        # print(points_data.size())
        points_data = Variable(points_data, volatile=True)
        points_data = points_data.transpose(2, 1)
        _seg_data = Variable(_seg_data, volatile=True)

        if opt.cuda:
            points_data = points_data.cuda()
            _seg_data = _seg_data.long().cuda()  # must be a long CUDA tensor

        # forward pass
        pred, _ = model(points_data)
        pred = pred.view(-1, opt.num_seg_classes)
        _seg_data = _seg_data.view(-1, 1)[:, 0]  # min is already 0

        pred_choice = pred.data.max(1)[1]

    print('finished loading')
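The loop above stops at pred_choice without scoring it; a hedged continuation (the accuracy bookkeeping is an assumption about intent, suggested by the flattening of both tensors):

# inside the loop, after pred_choice is computed:
correct = pred_choice.eq(_seg_data.data).cpu().sum()
acc = correct / float(_seg_data.size(0))
print('sample {0} accuracy: {1:.4f}'.format(i, acc))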
Example 3: CrossEntropyLoss
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def CrossEntropyLoss(self, logits, labels):
    cost_func = nn.CrossEntropyLoss()
    labels = Variable(labels).long().cuda()
    labels = labels.transpose(0, 1)
    loss = (cost_func(logits[0], labels[0]) + cost_func(logits[1], labels[1])) / 2
    return loss
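A hedged call sketch for the two-headed loss above; the head count, class count, and batch size are illustrative assumptions, `model` stands for the owning module, and a CUDA device is required because the method hard-codes .cuda():

logits = [Variable(torch.randn(4, 10)).cuda(),   # head 0: (batch, num_classes)
          Variable(torch.randn(4, 10)).cuda()]   # head 1: same shape
labels = torch.LongTensor(4, 2).random_(0, 10)   # (batch, 2); transposed to (2, batch) inside
loss = model.CrossEntropyLoss(logits, labels)    # average of the two per-head losses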
Example 4: train
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def train(train_loader, model, criterion, optimizer, epoch, opt):
    """
    train for one epoch on the training set
    """
    batch_time = utils.AverageMeter()
    losses = utils.AverageMeter()
    top1 = utils.AverageMeter()

    # training mode
    model.train()

    end = time.time()
    for i, (input_points, labels) in enumerate(train_loader):
        # bz x 2048 x 3
        input_points = Variable(input_points)
        input_points = input_points.transpose(2, 1)
        labels = Variable(labels[:, 0])
        # print(points.size())
        # print(labels.size())

        # shift data to GPU
        if opt.cuda:
            input_points = input_points.cuda()
            labels = labels.long().cuda()  # must be a long CUDA tensor

        # forward, backward, optimize
        output, _ = model(input_points)
        # debug_here()
        loss = criterion(output, labels)

        ##############################
        # measure accuracy
        ##############################
        prec1 = utils.accuracy(output.data, labels.data, topk=(1,))[0]
        losses.update(loss.data[0], input_points.size(0))
        top1.update(prec1[0], input_points.size(0))

        ##############################
        # compute gradient and do SGD
        ##############################
        optimizer.zero_grad()
        loss.backward()

        ##############################
        # gradient clipping
        ##############################
        utils.clip_gradient(optimizer, opt.gradient_clip)
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % opt.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      loss=losses, top1=top1))
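A hedged sketch of how this train function might be driven; the model class, criterion, and epoch count are assumptions chosen to match the signatures used inside the loop:

model = PointNetCls(num_classes=opt.num_classes)  # hypothetical classifier with an (output, aux) return
criterion = nn.CrossEntropyLoss()
if opt.cuda:
    model.cuda()
    criterion = criterion.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for epoch in range(num_epochs):                   # num_epochs is an assumption
    train(train_loader, model, criterion, optimizer, epoch, opt)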
Example 5: dec
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def dec(self, encoders, decoder_inputs, is_teacher_forcing, max_question_len):
    '''
    encoders (batch, hidden_size*2)
    if is_teacher_forcing: decoder_inputs (batch, max_question_len)
    if not is_teacher_forcing: decoder_inputs (batch, 1)
    '''
    decoder_inputs = Variable(decoder_inputs).long().cuda()
    decoder_inputs = self.embedding(decoder_inputs)
    decoder_inputs = decoder_inputs.transpose(0, 1)
    encoders = encoders.expand(decoder_inputs.size(0), encoders.size(0), self.hidden_size * 2)
    inputs = torch.cat([decoder_inputs, encoders], -1)

    if is_teacher_forcing:
        outputs, hidden = self.dec_net(inputs)
        outputs = self.dropout(outputs)
        logits = self.fc_net(outputs)  # qn_steps, batch, voc_size
        _, predictions = torch.max(logits.transpose(0, 1), -1)  # batch, qn_steps
        predictions = predictions.cpu().data.numpy()
    else:
        logits = [0 for i in range(max_question_len)]
        predictions = [0 for i in range(max_question_len)]

        output, hidden = self.dec_net(inputs)
        output = self.dropout(output)
        logits[0] = self.fc_net(output)
        _, index = torch.max(logits[0], -1)  # torch.max needs an explicit dim here
        logits[0] = logits[0].view(1, decoder_inputs.size(1), self.voc_size)  # 1, batch_size, voc_size
        predictions[0] = index.view(-1).cpu().data.numpy()  # batch_size

        for i in range(1, max_question_len):
            # wrap the previous step's numpy predictions back into a Variable
            prev_output = Variable(torch.from_numpy(predictions[i - 1])).long().cuda()
            prev_output = self.embedding(prev_output)
            inputs = torch.cat([prev_output, encoders[0]], -1).unsqueeze(0)  # 1, batch, feat
            output, hidden = self.dec_net(inputs, hidden)
            output = self.dropout(output)
            logits[i] = self.fc_net(output)
            _, index = torch.max(logits[i], -1)
            logits[i] = logits[i].view(1, decoder_inputs.size(1), self.voc_size)  # 1, batch_size, voc_size
            predictions[i] = index.view(-1).cpu().data.numpy()  # batch_size

        logits = torch.cat(logits)  # qn_steps, batch, voc_size
        predictions = np.array(predictions).transpose(1, 0)  # batch, qn_steps
    return logits, predictions
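A hedged call sketch for the decoder above; every shape here is an assumption reverse-engineered from the expand and cat calls (encoders is time-expanded to (T, batch, hidden*2) and concatenated with the embedded inputs):

batch, max_len = 4, 12                                      # illustrative sizes
encoders = Variable(torch.randn(batch, model.hidden_size * 2)).cuda()
decoder_inputs = torch.LongTensor(batch, max_len).random_(0, model.voc_size)
logits, predictions = model.dec(encoders, decoder_inputs, True, max_len)
print(logits.size())      # (max_len, batch, voc_size) per the in-code comment
print(predictions.shape)  # (batch, max_len)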
Example 6: pad_packed_sequence
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def pad_packed_sequence(sequence, batch_first=False, padding_value=0):
    r"""Pads a packed batch of variable length sequences.

    It is an inverse operation to :func:`pack_padded_sequence`.

    The returned Variable's data will be of size ``T x B x *``, where `T` is the length
    of the longest sequence and `B` is the batch size. If ``batch_first`` is True,
    the data will be transposed into ``B x T x *`` format.

    Batch elements will be ordered decreasingly by their length.

    Arguments:
        sequence (PackedSequence): batch to pad
        batch_first (bool, optional): if ``True``, the output will be in ``B x T x *``
            format.
        padding_value (float, optional): values for padded elements.

    Returns:
        Tuple of Variable containing the padded sequence, and Variable
        containing the list of lengths of each sequence in the batch.
    """
    var_data, batch_sizes = sequence
    max_batch_size = int(batch_sizes[0])
    output = var_data.data.new(len(batch_sizes), max_batch_size, *var_data.size()[1:]).fill_(padding_value)
    output = Variable(output)

    lengths = []
    data_offset = 0
    prev_batch_size = int(batch_sizes[0])
    prev_i = 0
    for i, batch_size in enumerate(batch_sizes.tolist() + [0]):
        if batch_size != prev_batch_size:
            l = prev_batch_size * (i - prev_i)
            tmp = var_data[data_offset:data_offset + l]
            output[prev_i:i, :prev_batch_size] = tmp.view(i - prev_i, prev_batch_size, *tmp.size()[1:])
            data_offset += l
            prev_i = i
        dec = prev_batch_size - batch_size
        if dec > 0:
            lengths.extend((i,) * dec)
        prev_batch_size = batch_size

    lengths.reverse()

    if batch_first:
        output = output.transpose(0, 1)
    # This Variable doesn't actually have any history (well,
    # technically it does; it's just untracked), it is purely here to
    # make ONNX export easier. That is to say, from an autodiff
    # standpoint this doesn't make any sense.
    return output, Variable(torch.LongTensor(lengths))
Example 7: pad_packed_sequence
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def pad_packed_sequence(sequence, batch_first=False, padding_value=0.0):
    r"""Pads a packed batch of variable length sequences.

    It is an inverse operation to :func:`pack_padded_sequence`.

    The returned Variable's data will be of size TxBx*, where T is the length
    of the longest sequence and B is the batch size. If ``batch_first`` is True,
    the data will be transposed into BxTx* format.

    Batch elements will be ordered decreasingly by their length.

    Arguments:
        sequence (PackedSequence): batch to pad
        batch_first (bool, optional): if ``True``, the output will be in BxTx*
            format.
        padding_value (float, optional): values for padded elements

    Returns:
        Tuple of Variable containing the padded sequence, and a list of lengths
        of each sequence in the batch.
    """
    var_data, batch_sizes = sequence
    max_batch_size = batch_sizes[0]
    output = var_data.data.new(len(batch_sizes), max_batch_size, *var_data.size()[1:]).fill_(padding_value)
    output = Variable(output)

    lengths = []
    data_offset = 0
    prev_batch_size = batch_sizes[0]
    prev_i = 0
    for i, batch_size in enumerate(batch_sizes):
        if batch_size != prev_batch_size:
            l = prev_batch_size * (i - prev_i)
            output[prev_i:i, :prev_batch_size] = var_data[data_offset:data_offset + l]
            data_offset += l
            prev_i = i
        dec = prev_batch_size - batch_size
        if dec > 0:
            lengths.extend((i,) * dec)
        prev_batch_size = batch_size

    l = prev_batch_size * (len(batch_sizes) - prev_i)
    output[prev_i:, :prev_batch_size] = var_data[data_offset:data_offset + l]
    lengths.extend((i + 1,) * batch_size)
    lengths.reverse()

    if batch_first:
        output = output.transpose(0, 1)
    return output, lengths
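A hedged round-trip sketch for this variant; note that, unlike Examples 1 and 6, it returns lengths as a plain Python list rather than a Variable (the input shapes and the pack_padded_sequence import are illustrative assumptions):

from torch.nn.utils.rnn import pack_padded_sequence

# two sequences of lengths 4 and 2, padded into a T x B x * Variable
packed = pack_padded_sequence(Variable(torch.randn(4, 2, 3)), [4, 2])
out, lengths = pad_packed_sequence(packed)
print(out.size())  # back to T x B x *: (4, 2, 3)
print(lengths)     # [4, 2] -- a plain list, not a Variable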
Example 8: get_loss
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def get_loss(self, logits, labels):
    labels = Variable(labels).long().cuda()
    labels = labels.transpose(0, 1)
    for i in range(len(logits)):
        logits[i] = logits[i].contiguous().view(1, logits[i].size(0), logits[i].size(1))
    logits = torch.cat(logits)
    logits = logits.contiguous().view(-1, logits.size(-1))
    labels = labels.contiguous().view(-1)
    loss = torch.mean(self.cost_func(logits, labels))
    return loss
Example 9: get_loss
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def get_loss(self, logits, labels):
    labels = Variable(labels).long().cuda()
    labels = labels.transpose(0, 1)
    logits = logits.contiguous().view(-1, logits.size(-1))
    labels = labels.contiguous().view(-1)
    loss = torch.mean(self.cost_func(logits, labels))
    return loss
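A hedged call sketch for the loss above; it assumes logits is already a time-major (T, batch, voc_size) Variable on the GPU and that self.cost_func is a CrossEntropyLoss-style criterion, since the method only reshapes and averages:

T, batch, voc = 12, 4, 1000                          # illustrative sizes
logits = Variable(torch.randn(T, batch, voc)).cuda()
labels = torch.LongTensor(batch, T).random_(0, voc)  # batch-major; transposed to time-major inside
loss = model.get_loss(logits, labels)                # model is the owning module (assumption)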
Example 10: train
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def train(train_loader, model, criterion, optimizer, epoch, opt):
    """
    train for one epoch on the training set
    """
    # training mode
    model.train()

    for i, (input_points, _labels, segs) in enumerate(train_loader):
        # bz x 2048 x 3
        input_points = Variable(input_points)
        input_points = input_points.transpose(2, 1)

        _labels = _labels.long()
        segs = segs.long()
        labels_onehot = utils.labels_batch2one_hot_batch(_labels, opt.num_classes)
        labels_onehot = Variable(labels_onehot)  # we do not calculate the gradients here
        # labels_onehot.requires_grad = True
        segs = Variable(segs)

        if opt.cuda:
            input_points = input_points.cuda()
            segs = segs.cuda()  # must be a long CUDA tensor
            labels_onehot = labels_onehot.float().cuda()  # this will be fed into the network

        optimizer.zero_grad()
        # forward, backward, optimize
        # pred, _ = model(input_points, labels_onehot)
        pred, _, _ = model(input_points, labels_onehot)
        pred = pred.view(-1, opt.num_seg_classes)
        segs = segs.view(-1, 1)[:, 0]
        # debug_here()
        loss = criterion(pred, segs)
        loss.backward()

        ##############################
        # gradient clipping
        ##############################
        utils.clip_gradient(optimizer, opt.gradient_clip)
        optimizer.step()

        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(segs.data).cpu().sum()

        if i % opt.print_freq == 0:
            print('[%d: %d] train loss: %f accuracy: %f' % (
                i, len(train_loader), loss.data[0], correct / float(opt.batch_size * opt.num_points)))
Example 11: validate
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def validate(val_loader, model, criterion, epoch, opt):
    """Perform validation on the validation set"""
    # switch to evaluate mode
    model.eval()

    top1 = utils.AverageMeter()

    for i, (input_points, _labels, segs) in enumerate(val_loader):
        # bz x 2048 x 3
        input_points = Variable(input_points, volatile=True)
        input_points = input_points.transpose(2, 1)
        _labels = _labels.long()  # this will be fed to the network
        segs = segs.long()
        labels_onehot = utils.labels_batch2one_hot_batch(_labels, opt.num_classes)

        segs = Variable(segs, volatile=True)
        labels_onehot = Variable(labels_onehot, volatile=True)

        if opt.cuda:
            input_points = input_points.cuda()
            segs = segs.cuda()  # must be a long CUDA tensor
            labels_onehot = labels_onehot.float().cuda()  # this will be fed into the network

        # forward pass
        pred, _, _ = model(input_points, labels_onehot)
        pred = pred.view(-1, opt.num_seg_classes)
        segs = segs.view(-1, 1)[:, 0]  # min is already 0
        # debug_here()
        loss = criterion(pred, segs)

        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(segs.data).cpu().sum()
        acc = correct / float(opt.batch_size * opt.num_points)
        top1.update(acc, input_points.size(0))

        if i % opt.print_freq == 0:
            print('[%d: %d] val loss: %f accuracy: %f' % (i, len(val_loader), loss.data[0], acc))
            # print(tested_samples)
    return top1.avg
Example 12: Variable
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
seq_tensor = Variable(torch.zeros(
    (len(vectorized_seqs), seq_lengths.max()))).long()
for idx, (seq, seqlen) in enumerate(zip(vectorized_seqs, seq_lengths)):
    seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
print("seq_tensor", seq_tensor)

# SORT YOUR TENSORS BY LENGTH!
seq_lengths, perm_idx = seq_lengths.sort(0, descending=True)
seq_tensor = seq_tensor[perm_idx]
print("seq_tensor after sorting", seq_tensor)

# utils.rnn lets you give (B, L, D) tensors, where B is the batch size and L is the
# max length, if you use batch_first=True. Otherwise, give (L, B, D) tensors.
seq_tensor = seq_tensor.transpose(0, 1)  # (B, L, D) -> (L, B, D)
print("seq_tensor after transposing", seq_tensor.size(), seq_tensor.data)

# embed your sequences
embedded_seq_tensor = embed(seq_tensor)
print("seq_tensor after embedding", embedded_seq_tensor.size(), seq_tensor.data)

# pack them up nicely
packed_input = pack_padded_sequence(
    embedded_seq_tensor, seq_lengths.cpu().numpy())

# throw them through your LSTM (remember to give batch_first=True here if
# you packed with it)
packed_output, (ht, ct) = lstm(packed_input)

# unpack your output if required
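The walkthrough stops at "unpack your output if required"; a hedged completion using the matching utility (the import is an assumption, consistent with the pack_padded_sequence call above):

from torch.nn.utils.rnn import pad_packed_sequence

output, output_lengths = pad_packed_sequence(packed_output)  # back to (L, B, hidden), zero-padded
print("unpacked output", output.size(), output_lengths)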
Example 13: main
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def main(params):
    net = getattr(resnet, params['model'])()
    net.load_state_dict(torch.load(os.path.join(params['model_root'], params['model'] + '.pth')))
    my_resnet = myResnetV(net)
    my_resnet.cuda()
    my_resnet.eval()  # set the model to evaluation mode; affects Dropout and BatchNorm layers

    imgs = json.load(open(params['input_json'], 'r'))
    imgs = imgs['images']
    N = len(imgs)

    seed(123)  # make reproducible

    dir_fc = params['output_dir'] + '_fc'
    dir_att = params['output_dir'] + '_att'
    if not os.path.isdir(dir_fc):
        os.mkdir(dir_fc)
    if not os.path.isdir(dir_att):
        os.mkdir(dir_att)

    seen_fc_att_shape = False
    last_time = time.time()
    for i, img in enumerate(imgs):
        if i % (len(imgs) // 100) == 0:
            now_time = time.time()
            print('- processing %d/%d (%.2f%% done) time: %.2f' % (i, N, i * 100.0 / N, now_time - last_time))
            last_time = now_time

        # check if the destination file exists
        if os.path.isfile(os.path.join(dir_fc, str(img['cocoid']) + '.npy')) \
                and os.path.isfile(os.path.join(dir_att, str(img['cocoid']) + '.npz')):
            continue

        if 'coco' in params['input_json']:
            # load the image
            I = skimage.io.imread(os.path.join(params['images_root'], img['filepath'], img['filename']))
            # handle grayscale input images
            if len(I.shape) == 2:
                I = I[:, :, np.newaxis]
                I = np.concatenate((I, I, I), axis=2)
            I = I.astype('float32') / 255.0
            I = torch.from_numpy(I.transpose([2, 0, 1])).cuda()  # (3, w, h)
            I = Variable(preprocess(I), volatile=True)
            tmp_fc, tmp_att = my_resnet(I, params['att_size'])
            if not seen_fc_att_shape:
                print('> tmp_fc shape:', tmp_fc.shape)    # (2048,)
                print('> tmp_att shape:', tmp_att.shape)  # (14, 14, 2048)
                seen_fc_att_shape = True
        elif 'msvd' in params['input_json']:
            # load images
            frames = []
            for frame_idx in range(26):
                image_name = os.path.join(params['images_root'], '%d-%d.png' % (img['cocoid'], frame_idx))
                I = skimage.io.imread(image_name)
                if len(I.shape) == 2:
                    I = I[:, :, np.newaxis]
                    I = np.concatenate((I, I, I), axis=2)
                I = I.astype('float32') / 255.0
                I = I.transpose([2, 0, 1])
                I = np.expand_dims(I, axis=0)
                # I = torch.from_numpy(I.transpose([2, 0, 1])).cuda()  # (3, w, d)
                # I = Variable(preprocess(I), volatile=True)
                frames.append(I)
        elif 'kuaishou' in params['input_json']:
            # load images
            frames = []
            for frame_idx in range(26):
                try:
                    image_name = os.path.join(params['images_root'], '%d-%d.jpg' % (img['cocoid'], frame_idx + 1))
                    I = skimage.io.imread(image_name)
                    if len(I.shape) == 2:
                        I = I[:, :, np.newaxis]
                        I = np.concatenate((I, I, I), axis=2)
                    I = resize(I, (299, 299))
                    I = I.astype('float32') / 255.0
                    I = I.transpose([2, 0, 1])
                    I = np.expand_dims(I, axis=0)
                    # I = torch.from_numpy(I.transpose([2, 0, 1])).cuda()  # (3, w, d)
                    # print('> image shape:', I.shape)
                    # I = Variable(preprocess(I), volatile=True)
                    frames.append(I)
                except IOError:
                    # no such image file; reuse the previous frame if possible
                    if frame_idx > 0:
                        frames.append(frames[frame_idx - 1])
                    else:
                        raise ValueError('! image not found: %d-%d.jpg' % (img['cocoid'], frame_idx + 1))

        img_b = np.vstack(frames)
        img_b = torch.from_numpy(img_b).cuda()
        img_b = Variable(preprocess(img_b), volatile=True)
# ......... part of the code is omitted here .........
Example 14: Variable
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
dtype = torch.FloatTensor
# dtype = torch.cuda.FloatTensor  # Uncomment this to run on GPU

learning_rate = 0.1
x = torch.from_numpy(np.ones((2, 1)))
x = Variable(x.type(dtype), requires_grad=True)
A = torch.from_numpy(np.array([[1, 0], [0, 2]]))
A = Variable(A.type(dtype), requires_grad=False)
b = torch.from_numpy(np.array([[1], [2]]))
b = Variable(b.type(dtype), requires_grad=False)

for m in range(30):
    opt1 = torch.mm(x.transpose(0, 1), A)
    loss = torch.mm(opt1, x) + torch.mm(b.transpose(0, 1), x)
    loss.backward()
    minConf_PQN(funObj, x, funProj, options=None)
    x.data -= learning_rate * x.grad.data
    x.grad.data.zero_()

print(x.data.numpy())
import pdb; pdb.set_trace()
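Since A is symmetric, the loss f(x) = xᵀAx + bᵀx above has gradient 2Ax + b and the closed-form minimizer x* = -A⁻¹b/2. A hedged numpy check of what the gradient-descent loop should converge toward (ignoring the unrelated minConf_PQN call):

import numpy as np

A_np = np.array([[1.0, 0.0], [0.0, 2.0]])
b_np = np.array([[1.0], [2.0]])
x_star = -0.5 * np.linalg.solve(A_np, b_np)  # solves 2*A*x + b = 0
print(x_star)  # [[-0.5], [-0.5]]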
Example 15: test_transpose
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import transpose [as alias]
def test_transpose(self):
    x = Variable(torch.Tensor([[0, 1], [2, 3]]), requires_grad=True)
    self.assertONNX(lambda x: x.transpose(0, 1).transpose(1, 0), x)
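The lambda composes two transposes that cancel; a hedged plain-PyTorch check of that identity outside the ONNX test harness (assertONNX is assumed to come from the repository's test utilities):

x = Variable(torch.Tensor([[0, 1], [2, 3]]), requires_grad=True)
y = x.transpose(0, 1).transpose(1, 0)
print(torch.equal(x.data, y.data))  # True: the composition is the identity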