This article compiles typical usage examples of the Python method torch.randperm. If you have been wondering how exactly torch.randperm works, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the containing module, torch.
The following presents 15 code examples of the torch.randperm method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
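Before the examples, a minimal sketch of what torch.randperm itself does: it returns a random permutation of the integers from 0 to n - 1 as a LongTensor, and accepts an optional torch.Generator for reproducibility.

import torch

perm = torch.randperm(5)
print(perm)        # e.g. tensor([2, 0, 4, 1, 3])
print(perm.dtype)  # torch.int64

g = torch.Generator()
g.manual_seed(0)
print(torch.randperm(5, generator=g))  # same output on every run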
Example 1: __iter__
# Required module: import torch
# Or: from torch import randperm
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
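A quick check of the strided subsampling above (a minimal sketch; num_replicas, total_size, and rank stand in for the sampler's attributes): since every rank seeds the generator with the same epoch value, all ranks compute the same permutation, and the stride slicing splits it into disjoint per-rank shards.

import torch

g = torch.Generator()
g.manual_seed(0)  # same seed (= epoch) on every rank
num_replicas, total_size = 2, 8
indices = torch.randperm(total_size, generator=g).tolist()
for rank in range(num_replicas):
    print(rank, indices[rank:total_size:num_replicas])  # disjoint shards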
Example 2: random_choice
# Required module: import torch
# Or: from torch import randperm
def random_choice(self, gallery, num):
"""Random select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
gallery = torch.tensor(
gallery, dtype=torch.long, device=torch.cuda.current_device())
perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
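The core pattern, stripped of the device handling (a minimal sketch, not the original method; note the original allocates on torch.cuda.current_device(), which requires a GPU): torch.randperm over the pool size, truncated to num, yields num distinct indices, i.e. sampling without replacement.

import torch

gallery = torch.arange(10)                  # indices pool
perm = torch.randperm(gallery.numel())[:3]  # 3 distinct random positions
print(gallery[perm])                        # e.g. tensor([7, 2, 5])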
Example 3: random_choice
# Required module: import torch
# Or: from torch import randperm
def random_choice(gallery, num):
"""Randomly select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
gallery = torch.tensor(
gallery, dtype=torch.long, device=torch.cuda.current_device())
perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
Example 4: train
# Required module: import torch
# Or: from torch import randperm
def train(self, dataset):
self.model.train()
self.optimizer.zero_grad()
total_loss = 0.0
indices = torch.randperm(len(dataset), dtype=torch.long, device='cpu')
        for idx in tqdm(range(len(dataset)), desc='Training epoch ' + str(self.epoch + 1)):
ltree, linput, rtree, rinput, label = dataset[indices[idx]]
target = utils.map_label_to_target(label, dataset.num_classes)
linput, rinput = linput.to(self.device), rinput.to(self.device)
target = target.to(self.device)
output = self.model(ltree, linput, rtree, rinput)
loss = self.criterion(output, target)
total_loss += loss.item()
loss.backward()
if idx % self.args.batchsize == 0 and idx > 0:
self.optimizer.step()
self.optimizer.zero_grad()
self.epoch += 1
return total_loss / len(dataset)
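Two details worth isolating from this loop (a generic sketch under assumed names, not the original repository's code): torch.randperm shuffles the sample order once per epoch, and optimizer.step() fires only every batchsize iterations, so gradients accumulate across single-sample backward passes.

import torch
import torch.nn.functional as F

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
data, targets = torch.randn(10, 4), torch.randint(0, 2, (10,))
batchsize = 4

indices = torch.randperm(len(data))  # fresh permutation each epoch
optimizer.zero_grad()
for step, idx in enumerate(indices, start=1):
    loss = F.cross_entropy(model(data[idx].unsqueeze(0)),
                           targets[idx].unsqueeze(0))
    loss.backward()                  # gradients accumulate across samples
    if step % batchsize == 0:
        optimizer.step()
        optimizer.zero_grad()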
Example 5: shem
# Required module: import torch
# Or: from torch import randperm
def shem(roi_probs_neg, negative_count, ohem_poolsize):
"""
    stochastic hard example mining: from a list of indices (referring to non-matched predictions),
    determine a pool of the highest-scoring predictions (the worst false positives) of size
    negative_count * ohem_poolsize, then sample negative_count predictions from this pool as
    negative examples for the loss.
    :param roi_probs_neg: tensor of shape (n_predictions, n_classes).
    :param negative_count: int.
    :param ohem_poolsize: int.
    :return: (negative_count,) indices referring to positions in roi_probs_neg. If the pool is
        smaller than expected due to a limited number of available negative proposals, this
        function returns fewer than negative_count sampled indices without raising an error.
    """
    # sort according to the highest foreground score.
probs, order = roi_probs_neg[:, 1:].max(1)[0].sort(descending=True)
    # cap the pool size at the number of available negatives
    select = min(ohem_poolsize * int(negative_count), order.size(0))
pool_indices = order[:select]
rand_idx = torch.randperm(pool_indices.size()[0])
return pool_indices[rand_idx[:negative_count].cuda()]
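A shape-level way to exercise the function (a sketch with random scores; the trailing .cuda() in the original means a GPU is assumed, so the input must live on the GPU too):

import torch

# scores for 20 hypothetical negative predictions over 3 classes
roi_probs_neg = torch.softmax(torch.randn(20, 3, device='cuda'), dim=1)
neg_inds = shem(roi_probs_neg, negative_count=4, ohem_poolsize=2)
print(neg_inds.shape)  # torch.Size([4]); positions into roi_probs_neg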
Example 6: mixup_data
# Required module: import torch
# Or: from torch import randperm
def mixup_data(x, y, alpha=1.0, use_cuda=True):
'''Returns mixed inputs, pairs of targets, and lambda'''
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
if use_cuda:
index = torch.randperm(batch_size).cuda()
else:
index = torch.randperm(batch_size)
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
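mixup's targets come in pairs, so the loss must be mixed with the same lambda. The companion commonly paired with this function (as in the mixup reference implementation, though not part of the snippet above):

def mixup_criterion(criterion, pred, y_a, y_b, lam):
    # mix the two losses with the same lambda used to mix the inputs
    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)

# typical usage inside a training loop (sketch):
# mixed_x, y_a, y_b, lam = mixup_data(x, y, alpha=1.0, use_cuda=True)
# loss = mixup_criterion(criterion, model(mixed_x), y_a, y_b, lam)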
Example 7: __iter__
# Required module: import torch
# Or: from torch import randperm
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset : offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
Example 8: __iter__
# Required module: import torch
# Or: from torch import randperm
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = self._get_epoch_indices(g)
randperm = torch.randperm(len(indices), generator=g).tolist()
indices = indices[randperm]
else:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = self._get_epoch_indices(g)
# indices = torch.arange(len(self.dataset)).tolist()
        # when balancing, len(indices) may differ from the dataset's image count
self.total_size = len(indices)
logging_rank('balance sample total_size: {}'.format(self.total_size), distributed=1, local_rank=self.rank)
# subsample
self.num_samples = int(len(indices) / self.num_replicas)
offset = self.num_samples * self.rank
indices = indices[offset: offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
Example 9: __iter__
# Required module: import torch
# Or: from torch import randperm
def __iter__(self):
batches = self._generate_batches()
g = torch.Generator()
g.manual_seed(self._epoch)
indices = list(torch.randperm(len(batches), generator=g))
# add extra samples to make it evenly divisible
indices += indices[:(self.num_batches * self.num_replicas - len(indices))]
assert len(indices) == self.num_batches * self.num_replicas
# subsample
offset = self.num_batches * self.rank
indices = indices[offset:offset + self.num_batches]
assert len(indices) == self.num_batches
for idx in indices:
batch = sorted(batches[idx], key=lambda i: i["ar"])
batch = [i["id"] for i in batch]
yield batch
Example 10: __iter__
# Required module: import torch
# Or: from torch import randperm
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = list(torch.randperm(len(self.dataset), generator=g))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset:offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
Example 11: __iter__
# Required module: import torch
# Or: from torch import randperm
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset: offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
Example 12: __call__
# Required module: import torch
# Or: from torch import randperm
def __call__(self, img):
if self.transforms is None:
return img
        group = min(self.group, img.shape[0] // 3)
        same_group = self.same_group and (group > 1)
        range_img = group * 3
        if same_group:
self.order = torch.randperm(len(self.transforms))
for i in self.order:
img[:range_img] = self.transforms[i](img[:range_img])
else:
for grp in range(group):
idx = 3*grp
self.order = torch.randperm(len(self.transforms))
for i in self.order:
img[idx:idx+3] = self.transforms[i](img[idx:idx+3])
img[:range_img] = img[:range_img].clamp(0, 1)
return img
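The pattern at the heart of this transform, isolated (a minimal sketch with toy lambda transforms, not the class itself): torch.randperm over the transform list gives a fresh random application order on every call.

import torch

transforms = [lambda t: t * 0.5, lambda t: t + 0.1, lambda t: t.flip(-1)]
img = torch.rand(6, 8, 8)        # two 3-channel groups stacked
order = torch.randperm(len(transforms))
for i in order:                  # apply the transforms in random order
    img[:3] = transforms[i](img[:3])
img = img.clamp(0, 1)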
Example 13: __call__
# Required module: import torch
# Or: from torch import randperm
def __call__(self, data):
num_nodes = data.num_nodes
if self.replace:
choice = np.random.choice(num_nodes, self.num, replace=True)
choice = torch.from_numpy(choice).to(torch.long)
elif not self.allow_duplicates:
choice = torch.randperm(num_nodes)[:self.num]
else:
choice = torch.cat([
torch.randperm(num_nodes)
for _ in range(math.ceil(self.num / num_nodes))
], dim=0)[:self.num]
for key, item in data:
if bool(re.search('edge', key)):
continue
if torch.is_tensor(item) and item.size(0) == num_nodes:
data[key] = item[choice]
return data
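The three branches reduce to two sampling regimes (a sketch): np.random.choice for sampling with replacement, and torch.randperm for sampling without replacement, tiled via torch.cat when num exceeds num_nodes and duplicates are allowed.

import math
import torch

num_nodes, num = 5, 8
choice = torch.cat([
    torch.randperm(num_nodes) for _ in range(math.ceil(num / num_nodes))
])[:num]
print(choice)  # 8 indices; each node appears at most ceil(8 / 5) = 2 times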
Example 14: __call__
# Required module: import torch
# Or: from torch import randperm
def __call__(self, data):
pos = data.pos
if self.max_points > 0 and pos.size(0) > self.max_points:
perm = torch.randperm(pos.size(0))
pos = pos[perm[:self.max_points]]
pos = pos - pos.mean(dim=0, keepdim=True)
C = torch.matmul(pos.t(), pos)
e, v = torch.eig(C, eigenvectors=True) # v[:,j] is j-th eigenvector
data.pos = torch.matmul(data.pos, v)
if 'norm' in data:
data.norm = F.normalize(torch.matmul(data.norm, v))
return data
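torch.eig has been removed from recent PyTorch releases; since C = pos.t() @ pos is symmetric, torch.linalg.eigh reproduces the rotation (a sketch of the modern equivalent, not the original transform; eigh sorts eigenvalues in ascending order, so column order and signs may differ from torch.eig's output, but the result is still a principal-axis alignment):

import torch

pos = torch.randn(100, 3)
pos = pos - pos.mean(dim=0, keepdim=True)
C = pos.t() @ pos                      # symmetric, so eigh applies
eigval, eigvec = torch.linalg.eigh(C)  # eigvec[:, j] is the j-th eigenvector
pos = pos @ eigvec                     # align points to their principal axes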
Example 15: barabasi_albert_graph
# Required module: import torch
# Or: from torch import randperm
def barabasi_albert_graph(num_nodes, num_edges):
r"""Returns the :obj:`edge_index` of a Barabasi-Albert preferential
attachment model, where a graph of :obj:`num_nodes` nodes grows by
attaching new nodes with :obj:`num_edges` edges that are preferentially
attached to existing nodes with high degree.
Args:
num_nodes (int): The number of nodes.
num_edges (int): The number of edges from a new node to existing nodes.
"""
assert num_edges > 0 and num_edges < num_nodes
row, col = torch.arange(num_edges), torch.randperm(num_edges)
for i in range(num_edges, num_nodes):
row = torch.cat([row, torch.full((num_edges, ), i, dtype=torch.long)])
choice = np.random.choice(torch.cat([row, col]).numpy(), num_edges)
col = torch.cat([col, torch.from_numpy(choice)])
edge_index = torch.stack([row, col], dim=0)
edge_index, _ = remove_self_loops(edge_index)
edge_index = to_undirected(edge_index, num_nodes)
return edge_index
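A quick call (a sketch; remove_self_loops and to_undirected come from torch_geometric.utils, as in the source):

edge_index = barabasi_albert_graph(num_nodes=20, num_edges=3)
print(edge_index.shape)  # torch.Size([2, E]); undirected, no self-loops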