This article collects typical usage examples of the torch.argsort method in Python. If you are wondering how exactly to use torch.argsort, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the torch module that this method belongs to.
The following shows 15 code examples of torch.argsort, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
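Before diving into the examples, here is a minimal self-contained sketch of the call itself (the tensors below are made up for illustration): torch.argsort returns the indices that would sort a tensor along a given dimension, ascending by default or descending with descending=True.
import torch

x = torch.tensor([3.0, 1.0, 2.0])
idx = torch.argsort(x)                         # tensor([1, 2, 0]); ascending by default
idx_desc = torch.argsort(x, descending=True)   # tensor([0, 2, 1])
sorted_x = x[idx]                              # tensor([1., 2., 3.])

m = torch.tensor([[3.0, 1.0], [0.0, 2.0]])
row_idx = torch.argsort(m, dim=1)              # sort indices within each row: [[1, 0], [0, 1]]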
Example 1: order_points
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
# Also needed here: import numpy as np (for np.newaxis)
def order_points(pts):
    pts_reorder = []
    for idx, pt in enumerate(pts):
        # Sort the four points by x-coordinate and split into left/right pairs
        idx = torch.argsort(pt[:, 0])
        xSorted = pt[idx, :]
        leftMost = xSorted[:2, :]
        rightMost = xSorted[2:, :]
        # Sort the left pair by y-coordinate: top-left, then bottom-left
        leftMost = leftMost[torch.argsort(leftMost[:, 1]), :]
        (tl, bl) = leftMost
        # The right point farthest from the top-left corner is the bottom-right
        D = torch.cdist(tl[np.newaxis], rightMost)[0]
        (br, tr) = rightMost[torch.argsort(D, descending=True), :]
        pts_reorder.append(torch.stack([tl, tr, br, bl]))
    return torch.stack([p for p in pts_reorder])
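A minimal usage sketch for order_points with a made-up quadrilateral (it assumes numpy is imported as np, as noted in the example): the four corners come back in top-left, top-right, bottom-right, bottom-left order.
import numpy as np
import torch

# One quadrilateral whose corners are given in arbitrary order
pts = torch.tensor([[[3.0, 0.0],
                     [0.0, 2.0],
                     [0.0, 0.0],
                     [3.0, 2.0]]])
ordered = order_points(pts)
# ordered[0] == [[0., 0.], [3., 0.], [3., 2.], [0., 2.]]  i.e. (tl, tr, br, bl)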
Example 2: get_mask
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx):
    assert wrapper_idx is not None
    activations = self.pruner.collected_activation[wrapper_idx]
    if len(activations) < self.statistics_batch_num:
        return None
    mean_activation = self._cal_mean_activation(activations)
    # Prune the channels with the smallest mean activation
    prune_indices = torch.argsort(mean_activation)[:num_prune]
    for idx in prune_indices:
        base_mask['weight_mask'][idx] = 0.
        if base_mask['bias_mask'] is not None:
            base_mask['bias_mask'][idx] = 0.
    if len(activations) >= self.statistics_batch_num and self.pruner.hook_id in self.pruner._fwd_hook_handles:
        self.pruner.remove_activation_collector(self.pruner.hook_id)
    return base_mask
Example 3: compute
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def compute(self, pred: torch.Tensor, target: torch.Tensor) \
        -> torch.Tensor:
    """Computes the recall @ k.

    Parameters
    ----------
    pred: Tensor
        input logits of shape (B x N)
    target: LongTensor
        target tensor of shape (B) or (B x N)

    Returns
    -------
    recall: torch.Tensor
        mean single-label recall @ k over the batch (scalar)
    """
    # If 2-dimensional, select the highest score in each row
    if len(target.size()) == 2:
        target = target.argmax(dim=1)
    # Indices of the top-k highest-scoring classes per row
    ranked_scores = torch.argsort(pred, dim=1)[:, -self.top_k:]
    recalled = torch.sum((target.unsqueeze(1) == ranked_scores).float(), dim=1)
    return recalled.mean()
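The heart of this metric is the argsort-based top-k lookup; here is a hedged, standalone sketch of the same logic with made-up logits and top_k fixed to 2:
import torch

pred = torch.tensor([[0.1, 0.7, 0.2],
                     [0.5, 0.1, 0.4]])
target = torch.tensor([1, 2])
top_k = 2

ranked_scores = torch.argsort(pred, dim=1)[:, -top_k:]   # top-2 class indices per row
recalled = torch.sum((target.unsqueeze(1) == ranked_scores).float(), dim=1)
recall_at_2 = recalled.mean()                             # 1.0: both targets fall in their row's top-2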
Example 4: _get_augmented_pareto_front_indices
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def _get_augmented_pareto_front_indices(self) -> Tensor:
    r"""Get indices of augmented pareto front."""
    pf_idx = torch.argsort(self._pareto_Y, dim=0)
    return torch.cat(
        [
            torch.zeros(
                1, self.num_outcomes, dtype=torch.long, device=self.Y.device
            ),
            # Add 1 because index zero is used for the ideal point
            pf_idx + 1,
            torch.full(
                torch.Size([1, self.num_outcomes]),
                self._pareto_Y.shape[0] + 1,
                dtype=torch.long,
                device=self.Y.device,
            ),
        ],
        dim=0,
    )
Example 5: _run_encoder
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def _run_encoder(self, batch):
    src, src_lengths = batch.src if isinstance(batch.src, tuple) \
        else (batch.src, None)
    # added by @memray, re-sort examples in the batch by length first
    sort_idx = torch.argsort(src_lengths, descending=True)
    sorted_src = src[:, sort_idx, :]
    sorted_src_lengths = src_lengths[sort_idx]
    # argsort of the sort indices recovers the original batch order
    unsort_idx = torch.argsort(sort_idx)
    enc_states, memory_bank, src_lengths = self.model.encoder(
        sorted_src, sorted_src_lengths)
    enc_states = enc_states[:, unsort_idx, :]
    memory_bank = memory_bank[:, unsort_idx, :]
    src_lengths = src_lengths[unsort_idx]
    if src_lengths is None:
        assert not isinstance(memory_bank, tuple), \
            'Ensemble decoding only supported for text data'
        src_lengths = torch.Tensor(batch.batch_size) \
            .type_as(memory_bank) \
            .long() \
            .fill_(memory_bank.size(0))
    return src, enc_states, memory_bank, src_lengths
Example 6: _inverse_permutation
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def _inverse_permutation(self):
    # argsort of a permutation yields its inverse permutation
    return torch.argsort(self._permutation)
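This one-liner works because the argsort of a permutation is its inverse permutation; a quick illustration with a made-up permutation:
import torch

perm = torch.tensor([2, 0, 3, 1])
inv = torch.argsort(perm)      # tensor([1, 3, 0, 2])

x = torch.tensor([10., 20., 30., 40.])
shuffled = x[perm]             # tensor([30., 10., 40., 20.])
restored = shuffled[inv]       # tensor([10., 20., 30., 40.])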
Example 7: test_compute_edge_score_tanh
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def test_compute_edge_score_tanh():
    edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5],
                               [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]])
    raw = torch.randn(edge_index.size(1))
    e = EdgePooling.compute_edge_score_tanh(raw, edge_index, 6)
    assert torch.all(e >= -1) and torch.all(e <= 1)
    # tanh is monotonic, so the score ordering must match the raw ordering
    assert torch.all(torch.argsort(raw) == torch.argsort(e))
Example 8: test_compute_edge_score_sigmoid
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def test_compute_edge_score_sigmoid():
    edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5],
                               [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]])
    raw = torch.randn(edge_index.size(1))
    e = EdgePooling.compute_edge_score_sigmoid(raw, edge_index, 6)
    assert torch.all(e >= 0) and torch.all(e <= 1)
    # sigmoid is monotonic, so the score ordering must match the raw ordering
    assert torch.all(torch.argsort(raw) == torch.argsort(e))
Example 9: build_part_with_score_torch
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def build_part_with_score_torch(score_threshold, local_max_radius, scores):
    lmd = 2 * local_max_radius + 1
    # Keep only local maxima that clear the score threshold
    max_vals = F.max_pool2d(scores, lmd, stride=1, padding=1)
    max_loc = (scores == max_vals) & (scores >= score_threshold)
    max_loc_idx = max_loc.nonzero()
    scores_vec = scores[max_loc]
    # Return the surviving scores and their locations, highest score first
    sort_idx = torch.argsort(scores_vec, descending=True)
    return scores_vec[sort_idx], max_loc_idx[sort_idx]

# FIXME leaving here as reference for now
# def build_part_with_score_fast(score_threshold, local_max_radius, scores):
#     parts = []
#     num_keypoints = scores.shape[0]
#     lmd = 2 * local_max_radius + 1
#
#     # NOTE it seems faster to iterate over the keypoints and perform maximum_filter
#     # on each subarray vs doing the op on the full score array with size=(lmd, lmd, 1)
#     for keypoint_id in range(num_keypoints):
#         kp_scores = scores[keypoint_id, :, :].copy()
#         kp_scores[kp_scores < score_threshold] = 0.
#         max_vals = ndi.maximum_filter(kp_scores, size=lmd, mode='constant')
#         max_loc = np.logical_and(kp_scores == max_vals, kp_scores > 0)
#         max_loc_idx = max_loc.nonzero()
#         for y, x in zip(*max_loc_idx):
#             parts.append((
#                 scores[keypoint_id, y, x],
#                 keypoint_id,
#                 np.array((y, x))
#             ))
#
#     return parts
Example 10: argsort
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def argsort(input, dim, descending):
    # Thin wrapper that forwards to torch.argsort (torch is imported as th here)
    return th.argsort(input, dim=dim, descending=descending)
Example 11: get_rank
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def get_rank(batch_score, dim=0):
    # Double argsort converts scores into rank positions (0 = lowest score)
    rank = torch.argsort(batch_score, dim=dim)
    rank = torch.argsort(rank, dim=dim)
    # Flip so the highest score gets rank 1, then normalize to (0, 1]
    rank = (rank * -1) + batch_score.size(dim)
    rank = rank.float()
    rank = rank / batch_score.size(dim)
    return rank
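A small usage sketch for get_rank with a made-up score vector; after the double argsort, the highest score maps to 1/N and the lowest to 1.0:
import torch

scores = torch.tensor([0.1, 0.9, 0.5])
get_rank(scores)
# tensor([1.0000, 0.3333, 0.6667]): 0.9 is rank 1 of 3, 0.5 is rank 2 of 3, 0.1 is rank 3 of 3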
Example 12: __getitem__
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def __getitem__(self, index):
    rand_seq = get_rand_seq(self.seq_len, self.dist)
    # Rank each element of the random sequence (this example uses numpy's argsort;
    # get_rand_seq comes from the surrounding module)
    zipp_sort_ind = zip(np.argsort(rand_seq)[::-1], range(self.seq_len))
    ranks = [((y[1] + 1) / float(self.seq_len)) for y in sorted(zipp_sort_ind, key=lambda x: x[0])]
    return torch.FloatTensor(rand_seq), torch.FloatTensor(ranks)
Example 13: get_rank_single
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def get_rank_single(batch_score):
    # Same double-argsort ranking as above, fixed to dim 0
    rank = torch.argsort(batch_score, dim=0)
    rank = torch.argsort(rank, dim=0)
    rank = (rank * -1) + batch_score.size(0)
    rank = rank.float()
    rank = rank / batch_score.size(0)
    return rank
Example 14: energy_spectrum
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def energy_spectrum(vel):
    """
    Compute energy spectrum given a velocity field
    :param vel: tensor of shape (N, 3, res, res, res)
    :return spec: tensor of shape (N, res/2)
    :return k: tensor of shape (res/2,), frequencies corresponding to spec
    """
    device = vel.device
    res = vel.shape[-2:]
    assert(res[0] == res[1])
    r = res[0]
    k_end = int(r/2)
    vel_ = pad_rfft3(vel, onesided=False)  # (N, 3, res, res, res, 2)
    uu_ = (torch.norm(vel_, dim=-1) / r**3)**2
    e_ = torch.sum(uu_, dim=1)  # (N, res, res, res)
    k = fftfreqs(res).to(device)  # (3, res, res, res)
    rad = torch.norm(k, dim=0)  # (res, res, res)
    k_bin = torch.arange(k_end, device=device).float()+1
    bins = torch.zeros(k_end+1).to(device)
    bins[1:-1] = (k_bin[1:]+k_bin[:-1])/2
    bins[-1] = k_bin[-1]
    bins = bins.unsqueeze(0)
    bins[1:] += 1e-3
    inds = searchsorted(bins, rad.flatten().unsqueeze(0)).squeeze().int()
    # bincount = torch.histc(inds.cpu(), bins=bins.shape[1]+1).to(device)
    bincount = torch.bincount(inds)
    # Sort the energy values by their frequency bin, then use cumulative sums
    # to obtain per-bin energy totals
    asort = torch.argsort(inds.squeeze())
    sorted_e_ = e_.view(e_.shape[0], -1)[:, asort]
    csum_e_ = torch.cumsum(sorted_e_, dim=1)
    binloc = torch.cumsum(bincount, dim=0).long()-1
    spec_ = csum_e_[:, binloc[1:]] - csum_e_[:, binloc[:-1]]
    spec_ = spec_[:, :-1]
    spec_ = spec_ * 2 * np.pi * (k_bin.float()**2) / bincount[1:-1].float()
    return spec_, k_bin
Example 15: word_shuffle
# Required import: import torch [as alias]
# Or: from torch import argsort [as alias]
def word_shuffle(x, l, shuffle_len):
    if not shuffle_len:
        return x
    noise = torch.rand(x.size(), dtype=torch.float).to(x.device)
    pos_idx = torch.arange(x.size(1)).unsqueeze(0).expand_as(x).to(x.device)
    # Mark padding positions (positions at or beyond each sequence's length)
    pad_mask = (pos_idx >= l.unsqueeze(1)).float()
    # Add bounded noise to token positions; sorting by the noisy positions
    # shuffles each token by at most shuffle_len places, keeping padding last
    scores = pos_idx.float() + ((1 - pad_mask) * noise + pad_mask) * shuffle_len
    x2 = x.clone()
    x2 = x2.gather(1, scores.argsort(1))
    return x2
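A usage sketch for word_shuffle on a made-up padded batch of token ids (0 is assumed to be the padding id): with shuffle_len=3 each real token moves by only a few positions, and because padded positions receive the largest scores they stay at the end of each row.
import torch

x = torch.tensor([[5, 6, 7, 8, 0, 0],
                  [9, 4, 3, 0, 0, 0]])   # 0 = padding id
l = torch.tensor([4, 3])                 # true sequence lengths per row
shuffled = word_shuffle(x, l, shuffle_len=3)
# e.g. tensor([[6, 5, 8, 7, 0, 0], [9, 3, 4, 0, 0, 0]]) -- padding stays at the end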