This article compiles typical usage examples of Python's torch.stack function. If you have been wondering what torch.stack does, how to use it, or what it looks like in real code, the curated examples below may help.
The following presents 15 code examples of the stack function, ordered by popularity by default.
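Before the examples, here is a minimal self-contained sketch (not drawn from any of the projects below) of what torch.stack does: it joins tensors of identical shape along a new dimension, in contrast to torch.cat, which joins along an existing dimension.

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([4.0, 5.0, 6.0])

stacked = torch.stack([a, b], dim=0)     # new leading dimension -> shape (2, 3)
concatenated = torch.cat([a, b], dim=0)  # existing dimension    -> shape (6,)

print(stacked.shape)       # torch.Size([2, 3])
print(concatenated.shape)  # torch.Size([6])

Because every tensor passed to torch.stack must have the same shape, several of the examples below pad or repeat elements before stacking.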
Example 1: _get_single_item
def _get_single_item(self, index):
    start_ind, end_ind, pid, label, camid = self.seqset[index]
    imgseq = []
    flowseq = []
    for ind in range(start_ind, end_ind):
        fname = self.identities[pid][camid][ind]
        fpath_img = osp.join(self.root[0], fname)
        imgrgb = Image.open(fpath_img).convert('RGB')
        fpath_flow = osp.join(self.root[1], fname)
        flowrgb = Image.open(fpath_flow).convert('RGB')
        imgseq.append(imgrgb)
        flowseq.append(flowrgb)
    # repeat the last frame until the sequence reaches seq_len
    while len(imgseq) < self.seq_len:
        imgseq.append(imgrgb)
        flowseq.append(flowrgb)
    seq = [imgseq, flowseq]
    if self.transform is not None:
        seq = self.transform(seq)
    # stack per-frame tensors along a new leading (time) dimension
    img_tensor = torch.stack(seq[0], 0)
    if len(self.root) == 2:
        flow_tensor = torch.stack(seq[1], 0)
    else:
        flow_tensor = None
    return img_tensor, flow_tensor, pid, camid
Example 2: lk_forward_backward_batch
def lk_forward_backward_batch(features, locations, window, steps):
    sequence, C, H, W = list(features.size())
    seq, num_pts, _ = list(locations.size())
    assert seq == sequence, '{:} vs {:}'.format(features.size(), locations.size())
    previous_pts = [ locations[0] ]
    for iseq in range(1, sequence):
        feature_old = features.narrow(0, iseq-1, 1)
        feature_new = features.narrow(0, iseq  , 1)
        nextPts = lk_tensor_track_batch(feature_old, feature_new, previous_pts[iseq-1], window, steps, None)
        previous_pts.append(nextPts)
    fback_pts = [None] * (sequence-1) + [ previous_pts[-1] ]
    for iseq in range(sequence-2, -1, -1):
        feature_old = features.narrow(0, iseq+1, 1)
        feature_new = features.narrow(0, iseq  , 1)
        backPts = lk_tensor_track_batch(feature_old, feature_new, fback_pts[iseq+1], window, steps, None)
        fback_pts[iseq] = backPts
    back_pts = [None] * (sequence-1) + [ locations[-1] ]
    for iseq in range(sequence-2, -1, -1):
        feature_old = features.narrow(0, iseq+1, 1)
        feature_new = features.narrow(0, iseq  , 1)
        backPts = lk_tensor_track_batch(feature_old, feature_new, back_pts[iseq+1], window, steps, None)
        back_pts[iseq] = backPts
    return torch.stack(previous_pts), torch.stack(fback_pts), torch.stack(back_pts)
Example 3: forward
def forward(self, hidden, encoder_outputs, attn_mask):
    # Create variable to store attention energies
    # hidden is 16 by 512
    # encoder_outputs is 16 by 72 by 512
    # this just uses the top layer of the 2-layer decoder.
    hidden = hidden.squeeze(0)
    batch_size = hidden.size()[0]
    attn_energies = []
    for i in range(batch_size):
        attn_energies.append(self.score(hidden[i], encoder_outputs[i]))
    attn_energies = torch.stack(attn_energies).squeeze(0)
    # attn_energies is 32 by 72
    # mask the attention energies
    if attn_mask is not None:
        attn_energies = attn_mask * attn_energies
        attn_energies[attn_energies == 0] = -1e10
    if attn_mask is None:
        attn_energies = attn_energies.view(1, -1)
    attn_energies = self.softmax(attn_energies)
    context_vectors = []
    for i in range(batch_size):
        context_vectors.append(torch.matmul(attn_energies[i], encoder_outputs[i]))
    context_vectors = torch.stack(context_vectors)
    return context_vectors
Example 4: process_batch_for_length
def process_batch_for_length(self, sequences, c_sequences):
    """
    Assemble and pad data.
    """
    assert len(sequences) == len(c_sequences)
    lengths = Variable(self.tensor_type([len(seq) for seq in sequences]))
    max_length = max(len(seq) for seq in sequences)
    max_c_length = max(max(len(chars) for chars in seq)
                       for seq in c_sequences)

    def _padded(seq, max_length):
        _padded_seq = self.tensor_type(max_length).zero_()
        _padded_seq[:len(seq)] = self.tensor_type(seq)
        return _padded_seq
    sequences = Variable(torch.stack(
        [_padded(seq, max_length) for seq in sequences]))

    def _padded_char(seq, max_length, max_c_length):
        _padded = self.tensor_type(max_length, max_c_length).zero_()
        for ind, tok in enumerate(seq):
            _padded[ind, :len(tok)] = self.tensor_type(tok)
        return _padded
    c_sequences = Variable(torch.stack([
        _padded_char(seq, max_length, max_c_length)
        for seq in c_sequences]))

    return (sequences, c_sequences, lengths)
Example 5: singleTagLoss
def singleTagLoss(pred_tag, keypoints):
    """
    associative embedding loss for one image
    """
    eps = 1e-6
    tags = []
    pull = 0
    for i in keypoints:
        tmp = []
        for j in i:
            if j[1] > 0:
                tmp.append(pred_tag[j[0]])
        if len(tmp) == 0:
            continue
        tmp = torch.stack(tmp)
        tags.append(torch.mean(tmp, dim=0))
        pull = pull + torch.mean((tmp - tags[-1].expand_as(tmp))**2)
    if len(tags) == 0:
        return make_input(torch.zeros([1]).float()), make_input(torch.zeros([1]).float())
    tags = torch.stack(tags)[:, 0]
    num = tags.size()[0]
    size = (num, num, tags.size()[1])
    A = tags.unsqueeze(dim=1).expand(*size)
    B = A.permute(1, 0, 2)
    diff = A - B
    diff = torch.pow(diff, 2).sum(dim=2)[:, :, 0]
    push = torch.exp(-diff)
    push = (torch.sum(push) - num)
    return push/((num - 1) * num + eps) * 0.5, pull/(num + eps)
Example 6: adpW
def adpW(self, x):
    '''
    calculate the pairwise_att of every pair of inputs
    output_size: (x.size(0), x.size(1)/2)
    '''
    x = x.detach()
    x = self.adp_metric_embedding1(x)
    x = self.adp_metric_embedding1_bn(x)
    x = F.relu(x)
    x = self.adp_metric_embedding2(x)
    x = self.adp_metric_embedding2_bn(x)
    x = F.relu(x)
    x = self.adp_metric_embedding3(x)
    x = self.adp_metric_embedding3_bn(x)
    x = F.relu(x)
    pairwise_att = F.sigmoid(self.adp_metric_embedding4(x))
    # x = self.adp_metric_embedding2_bn(x)
    diag_matrix1 = []
    diag_matrix2 = []
    # use integer division so the slice index stays an int under Python 3
    for i in range(x.size(0)):
        diag_matrix1.append(torch.diag(pairwise_att[i, :x.size(1) // 2]))
    for i in range(x.size(0)):
        diag_matrix2.append(torch.diag(pairwise_att[i, x.size(1) // 2:]))
    pairwise_att1 = torch.stack(diag_matrix1)
    # stack the second set of diagonal matrices (the original snippet stacked diag_matrix1 twice)
    pairwise_att2 = torch.stack(diag_matrix2)
    return pairwise_att1, pairwise_att2
Example 7: predict
def predict(self, x, attn_type="hard"):
    # predict with greedy decoding
    emb = self.embedding(x)
    h = Variable(torch.zeros(1, x.size(0), self.hidden_dim))
    c = Variable(torch.zeros(1, x.size(0), self.hidden_dim))
    enc_h, _ = self.encoder(emb, (h, c))
    y = [Variable(torch.zeros(x.size(0)).long())]
    self.attn = []
    for t in range(x.size(1)):
        emb_t = self.embedding(y[-1])
        dec_h, (h, c) = self.decoder(emb_t.unsqueeze(1), (h, c))
        scores = torch.bmm(enc_h, dec_h.transpose(1, 2)).squeeze(2)
        attn_dist = F.softmax(scores, dim=1)
        self.attn.append(attn_dist.data)
        if attn_type == "hard":
            _, argmax = attn_dist.max(1)
            one_hot = Variable(torch.zeros_like(attn_dist.data).scatter_(-1, argmax.data.unsqueeze(1), 1))
            context = torch.bmm(one_hot.unsqueeze(1), enc_h).squeeze(1)
        else:
            context = torch.bmm(attn_dist.unsqueeze(1), enc_h).squeeze(1)
        pred = self.vocab_layer(torch.cat([dec_h.squeeze(1), context], 1))
        _, next_token = pred.max(1)
        y.append(next_token)
    self.attn = torch.stack(self.attn, 0).transpose(0, 1)
    return torch.stack(y, 0).transpose(0, 1)
Example 8: __getitem__
def __getitem__(self, index):
    if self.mode == 'test':
        img_path, img_name = self.imgs[index]
        img = Image.open(os.path.join(img_path, img_name + '.jpg')).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img_name, img

    img_path, mask_path = self.imgs[index]
    img = Image.open(img_path).convert('RGB')
    if self.mode == 'train':
        mask = sio.loadmat(mask_path)['GTcls']['Segmentation'][0][0]
        mask = Image.fromarray(mask.astype(np.uint8))
    else:
        mask = Image.open(mask_path)
    if self.joint_transform is not None:
        img, mask = self.joint_transform(img, mask)
    if self.sliding_crop is not None:
        img_slices, mask_slices, slices_info = self.sliding_crop(img, mask)
        if self.transform is not None:
            img_slices = [self.transform(e) for e in img_slices]
        if self.target_transform is not None:
            mask_slices = [self.target_transform(e) for e in mask_slices]
        img, mask = torch.stack(img_slices, 0), torch.stack(mask_slices, 0)
        return img, mask, torch.LongTensor(slices_info)
    else:
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return img, mask
Example 9: predict
def predict(self, x_de, x_en):
    bs = x_de.size(0)
    emb_de = self.embedding_de(x_de)  # bs,n_de,word_dim
    emb_en = self.embedding_en(x_en)  # bs,n_en,word_dim
    h = Variable(torch.zeros(self.n_layers*self.directions, bs, self.hidden_dim).cuda())
    c = Variable(torch.zeros(self.n_layers*self.directions, bs, self.hidden_dim).cuda())
    enc_h, _ = self.encoder(emb_de, (h, c))
    dec_h, _ = self.decoder(emb_en, (h, c))
    # all the same. enc_h is bs,n_de,hiddensz*n_directions. h and c are both n_layers*n_directions,bs,hiddensz
    if self.directions == 2:
        enc_h = self.dim_reduce(enc_h)  # bs,n_de,hiddensz
    scores = torch.bmm(enc_h, dec_h.transpose(1, 2))
    # (bs,n_de,hiddensz) * (bs,hiddensz,n_en) = (bs,n_de,n_en)
    y = [Variable(torch.cuda.LongTensor([sos_token]*bs))]  # bs
    self.attn = []
    for t in range(x_en.size(1)-1):  # iterate over english words, with teacher forcing
        attn_dist = F.softmax(scores[:, :, t], dim=1)  # bs,n_de
        self.attn.append(attn_dist.data)
        if self.attn_type == "hard":
            _, argmax = attn_dist.max(1)  # bs. for each batch, select most likely german word to pay attention to
            one_hot = Variable(torch.zeros_like(attn_dist.data).scatter_(-1, argmax.data.unsqueeze(1), 1).cuda())
            context = torch.bmm(one_hot.unsqueeze(1), enc_h).squeeze(1)
        else:
            context = torch.bmm(attn_dist.unsqueeze(1), enc_h).squeeze(1)
        # the difference btwn hard and soft is just whether we use a one_hot or a distribution
        # context is bs,hiddensz
        pred = self.vocab_layer(torch.cat([dec_h[:, t, :], context], 1))  # bs,len(EN.vocab)
        _, next_token = pred.max(1)  # bs
        y.append(next_token)
    self.attn = torch.stack(self.attn, 0).transpose(0, 1)  # bs,n_en,n_de (for visualization!)
    y = torch.stack(y, 0).transpose(0, 1)  # bs,n_en
    return y, self.attn
Example 10: forward
def forward(self, z_seq, a_seq, term_seq):
    # x: [B,2,84,84]
    # T = x.size()[0]
    h = torch.zeros(1, self.h_size).cuda()
    z_losses = []
    term_losses = []
    for t in range(len(term_seq)-1):
        inter = self.encode_az(a_seq[t], z_seq[t])
        h = self.update_h(h, inter)
        z_pred, term_pred = self.predict_output(h, inter)
        z_loss = torch.mean((z_seq[t+1] - z_pred)**2)
        term_loss = F.binary_cross_entropy_with_logits(input=term_pred, target=term_seq[t+1])
        z_losses.append(z_loss)
        term_losses.append(term_loss)
    z_loss = torch.mean(torch.stack(z_losses))
    term_loss = torch.mean(torch.stack(term_losses))
    loss = z_loss + term_loss
    return loss, z_loss, term_loss
Example 11: __getitem__
def __getitem__(self, index):
    img_path, mask_path = self.imgs[index]
    img, mask = Image.open(img_path).convert('RGB'), Image.open(mask_path)
    mask = np.array(mask)
    mask_copy = mask.copy()
    for k, v in self.id_to_trainid.items():
        mask_copy[mask == k] = v
    mask = Image.fromarray(mask_copy.astype(np.uint8))
    if self.joint_transform is not None:
        img, mask = self.joint_transform(img, mask)
    if self.sliding_crop is not None:
        img_slices, mask_slices, slices_info = self.sliding_crop(img, mask)
        if self.transform is not None:
            img_slices = [self.transform(e) for e in img_slices]
        if self.target_transform is not None:
            mask_slices = [self.target_transform(e) for e in mask_slices]
        img, mask = torch.stack(img_slices, 0), torch.stack(mask_slices, 0)
        return img, mask, torch.LongTensor(slices_info)
    else:
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return img, mask
Example 12: setUp
def setUp(self, size=(2, 5), batch=3, dtype=torch.float64, device=None,
          seed=None, mu=None, cov=None, A=None, b=None):
    '''Test the correctness of batch implementation of mean().

    This function will stack `[1 * mu, 2 * mu, ..., batch * mu]`.
    Then, it will see whether the batch output is accurate or not.

    Args:
        size: Tuple size of matrix A.
        batch: The batch size > 0.
        dtype: data type.
        device: In which device.
        seed: Seed for the random number generator.
        mu: To test a specific mean mu.
        cov: To test a specific covariance matrix.
        A: To test a specific A matrix.
        b: To test a specific bias b.
    '''
    if seed is not None:
        torch.manual_seed(seed)
    if A is None:
        A = torch.rand(size, dtype=dtype, device=device)
    if b is None:
        b = torch.rand(size[0], dtype=dtype, device=device)
    if mu is None:
        mu = torch.rand(size[1], dtype=dtype, device=device)
    if cov is None:
        cov = rand.definite(size[1], dtype=dtype, device=device,
                            positive=True, semi=False, norm=10**2)
    self.A = A
    self.b = b
    var = torch.diag(cov)
    self.batch_mean = torch.stack([(i + 1) * mu for i in range(batch)])
    self.batch_cov = torch.stack([(i + 1) * cov for i in range(batch)])
    self.batch_var = torch.stack([(i + 1) * var for i in range(batch)])
Example 13: default_collate
def default_collate(batch):
    "Puts each data field into a tensor with outer dimension batch size"
    if torch.is_tensor(batch[0]):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif type(batch[0]).__module__ == 'numpy':
        elem = batch[0]
        if type(elem).__name__ == 'ndarray':
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.Mapping):
        return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]
    raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}"
                     .format(type(batch[0]))))
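As a usage sketch (not part of the snippet above), the default_collate shipped with PyTorch behaves this way; the import path below is the one used by the 0.4/1.x releases, and the sample data is made up for illustration:

import torch
from torch.utils.data.dataloader import default_collate

samples = [
    {'image': torch.randn(3, 32, 32), 'label': 1},
    {'image': torch.randn(3, 32, 32), 'label': 0},
]
batch = default_collate(samples)
print(batch['image'].shape)  # torch.Size([2, 3, 32, 32]) -- images stacked along a new batch dimension
print(batch['label'])        # tensor([1, 0])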
Example 14: plot_rec
def plot_rec(x, netEC, netEP, netD):
    x_c = x[0]
    x_p = x[np.random.randint(1, opt.max_step)]
    h_c = netEC(x_c)
    h_p = netEP(x_p)
    # print('h_c shape: ', h_c.shape)
    # print('h_p shape: ', h_p.shape)
    rec = netD([h_c, h_p])
    x_c, x_p, rec = x_c.data, x_p.data, rec.data
    fname = '%s/rec/rec_test.png' % (opt.log_dir)
    comparison = None
    for i in range(len(x_c)):
        if comparison is None:
            comparison = torch.stack([x_c[i], x_p[i], rec[i]])
        else:
            new_comparison = torch.stack([x_c[i], x_p[i], rec[i]])
            comparison = torch.cat([comparison, new_comparison])
    print('comparison: ', comparison.shape)
    # row_sz = 5
    # nplot = 20
    # for i in range(0, nplot - row_sz, row_sz):
    #     row = [[xc, xp, xr] for xc, xp, xr in zip(x_c[i:i + row_sz], x_p[i:i + row_sz], rec[i:i + row_sz])]
    #     print('row: ', row)
    #     to_plot.append(list(itertools.chain(*row)))
    # print(len(to_plot[0]))
    # utils.save_tensors_image(fname, comparison)
    if not os.path.exists(os.path.dirname(fname)):
        os.makedirs(os.path.dirname(fname))
    save_image(comparison.cpu(), fname, nrow=3)
Example 15: forward
def forward(self, input_):
    # init hidden state with xavier
    vert_state = torch.zeros(input_[0].size(1), self.vert_state_dim).cuda()
    edge_state = torch.zeros(input_[1].size(1), self.edge_state_dim).cuda()
    '''if self.gpu_mode >= 0:
        vert_state = torch.tensor(vert_state.cuda())
        edge_state = torch.tensor(edge_state.cuda())'''
    batch_size = input_[0].size(0)
    vert_input = input_[0]
    edge_input = input_[1]
    # print('vert and edge input', vert_input.size(), edge_input.size())
    vert_state_list = []
    edge_state_list = []
    # todo: can this be parallelized?
    for i in range(batch_size):
        torch.nn.init.xavier_uniform(vert_state)
        torch.nn.init.xavier_uniform(edge_state)
        vert_state = self.vert_gru(vert_input[i], vert_state)
        edge_state = self.edge_gru(edge_input[i], edge_state)
        # todo: check whether this way is correct, TF code uses a separate global var to keep hidden state
        for i in range(self.num_steps):
            edge_context = self.get_edge_context(edge_state, vert_state)
            vert_context = self.get_vert_context(vert_state, edge_state)
            edge_state = self.edge_gru(edge_context, edge_state)
            vert_state = self.vert_gru(vert_context, vert_state)
        vert_state_list.append(vert_state)
        edge_state_list.append(edge_state)
    return torch.stack(vert_state_list), torch.stack(edge_state_list)