This article collects typical usage examples of the torch.bool method in Python. If you have been wondering what exactly torch.bool does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing module, torch.
The following presents 15 code examples of torch.bool, sorted by popularity by default.
Example 1: tensor2imgs
# Required import: import torch
# Alternatively: from torch import bool
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
    """Convert tensor to images.

    Args:
        tensor (torch.Tensor): Tensor that contains multiple images.
        mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0).
        std (tuple[float], optional): Standard deviation of images.
            Defaults to (1, 1, 1).
        to_rgb (bool, optional): Whether to convert the images to RGB format.
            Defaults to True.

    Returns:
        list[np.ndarray]: A list that contains multiple images.
    """
    num_imgs = tensor.size(0)
    mean = np.array(mean, dtype=np.float32)
    std = np.array(std, dtype=np.float32)
    imgs = []
    for img_id in range(num_imgs):
        img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
        img = mmcv.imdenormalize(
            img, mean, std, to_bgr=to_rgb).astype(np.uint8)
        imgs.append(np.ascontiguousarray(img))
    return imgs
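A small usage sketch (assuming mmcv and numpy are installed and imported as in the snippet, alongside torch):

import numpy as np
import torch
import mmcv

# A fake batch of two 3x32x32 images with values in [0, 1].
batch = torch.rand(2, 3, 32, 32)
imgs = tensor2imgs(batch, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
assert len(imgs) == 2
assert imgs[0].shape == (32, 32, 3) and imgs[0].dtype == np.uint8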
Example 2: _meshgrid
# Required import: import torch
# Alternatively: from torch import bool
def _meshgrid(self, x, y, row_major=True):
    """Generate mesh grid of x and y.

    Args:
        x (torch.Tensor): Grids of x dimension.
        y (torch.Tensor): Grids of y dimension.
        row_major (bool, optional): Whether to return y grids first.
            Defaults to True.

    Returns:
        tuple[torch.Tensor]: The mesh grids of x and y.
    """
    xx = x.repeat(len(y))
    yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
    if row_major:
        return xx, yy
    else:
        return yy, xx
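For illustration, here is what the repeat/view trick inside _meshgrid produces on two small tensors (a standalone sketch, outside the anchor-generator class the method comes from):

import torch

x = torch.tensor([0, 1, 2])
y = torch.tensor([10, 20])
xx = x.repeat(len(y))                          # tensor([ 0,  1,  2,  0,  1,  2])
yy = y.view(-1, 1).repeat(1, len(x)).view(-1)  # tensor([10, 10, 10, 20, 20, 20])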
Example 3: __call__
# Required import: import torch
# Alternatively: from torch import bool
def __call__(self, proposals, keypoint_logits):
    heatmaps = []
    valid = []
    for proposals_per_image in proposals:
        kp = proposals_per_image.get_field("keypoints")
        heatmaps_per_image, valid_per_image = project_keypoints_to_heatmap(
            kp, proposals_per_image, self.discretization_size
        )
        heatmaps.append(heatmaps_per_image.view(-1))
        valid.append(valid_per_image.view(-1))

    keypoint_targets = cat(heatmaps, dim=0)
    valid = cat(valid, dim=0).to(dtype=torch.bool)
    valid = torch.nonzero(valid).squeeze(1)

    # torch.mean (in binary_cross_entropy_with_logits) doesn't
    # accept empty tensors, so handle it separately
    if keypoint_targets.numel() == 0 or len(valid) == 0:
        return keypoint_logits.sum() * 0

    N, K, H, W = keypoint_logits.shape
    keypoint_logits = keypoint_logits.view(N * K, H * W)

    keypoint_loss = F.cross_entropy(keypoint_logits[valid], keypoint_targets[valid])
    return keypoint_loss
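The cast to torch.bool followed by torch.nonzero(...).squeeze(1) is the core selection pattern here; a minimal standalone sketch:

import torch

valid = torch.tensor([1, 0, 1, 1, 0]).to(dtype=torch.bool)
idx = torch.nonzero(valid).squeeze(1)   # tensor([0, 2, 3])
logits = torch.randn(5, 10)
selected = logits[idx]                  # keeps only the rows flagged as valid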
Example 4: __cat_dim__
# Required import: import torch
# Alternatively: from torch import bool
def __cat_dim__(self, key, value):
    r"""Returns the dimension for which :obj:`value` of attribute
    :obj:`key` will get concatenated when creating batches.

    .. note::
        This method is for internal use only, and should only be overridden
        if the batch concatenation process is corrupted for a specific data
        attribute.
    """
    # Concatenate `*index*` and `*face*` attributes in the last dimension.
    if bool(re.search('(index|face)', key)):
        return -1
    # By default, concatenate sparse matrices diagonally.
    elif isinstance(value, SparseTensor):
        return (0, 1)
    return 0
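The key test reduces to a regular-expression check; a quick illustration of which attribute names end up concatenated along the last dimension:

import re

for key in ['edge_index', 'face', 'x', 'pos']:
    dim = -1 if re.search('(index|face)', key) else 0
    print(key, dim)   # edge_index -1, face -1, x 0, pos 0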
Example 5: __init__
# Required import: import torch
# Alternatively: from torch import bool
def __init__(self, edge_index: torch.Tensor, sizes: List[int],
             node_idx: Optional[torch.Tensor] = None,
             num_nodes: Optional[int] = None,
             flow: str = "source_to_target", **kwargs):

    N = int(edge_index.max() + 1) if num_nodes is None else num_nodes
    edge_attr = torch.arange(edge_index.size(1))
    adj = SparseTensor(row=edge_index[0], col=edge_index[1],
                       value=edge_attr, sparse_sizes=(N, N),
                       is_sorted=False)
    adj = adj.t() if flow == 'source_to_target' else adj
    self.adj = adj.to('cpu')

    if node_idx is None:
        node_idx = torch.arange(N)
    elif node_idx.dtype == torch.bool:
        node_idx = node_idx.nonzero().view(-1)

    self.sizes = sizes
    self.flow = flow
    assert self.flow in ['source_to_target', 'target_to_source']

    super(NeighborSampler, self).__init__(node_idx.tolist(),
                                          collate_fn=self.sample, **kwargs)
Example 6: k_fold
# Required import: import torch
# Alternatively: from torch import bool
def k_fold(dataset, folds):
    skf = StratifiedKFold(folds, shuffle=True, random_state=12345)

    test_indices, train_indices = [], []
    for _, idx in skf.split(torch.zeros(len(dataset)), dataset.data.y):
        test_indices.append(torch.from_numpy(idx).to(torch.long))

    val_indices = [test_indices[i - 1] for i in range(folds)]

    for i in range(folds):
        train_mask = torch.ones(len(dataset), dtype=torch.bool)
        train_mask[test_indices[i]] = 0
        train_mask[val_indices[i]] = 0
        train_indices.append(train_mask.nonzero().view(-1))

    return train_indices, test_indices, val_indices
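The training split is derived by knocking the test and validation indices out of an all-True torch.bool mask; the same trick on toy data:

import torch

n = 10
test_idx = torch.tensor([2, 5, 7])
val_idx = torch.tensor([0, 1])
train_mask = torch.ones(n, dtype=torch.bool)
train_mask[test_idx] = 0
train_mask[val_idx] = 0
train_idx = train_mask.nonzero().view(-1)   # tensor([3, 4, 6, 8, 9])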
Example 7: test_subgraph
# Required import: import torch
# Alternatively: from torch import bool
def test_subgraph():
    edge_index = torch.tensor([
        [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6],
        [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5],
    ])
    edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])

    idx = torch.tensor([3, 4, 5], dtype=torch.long)
    mask = torch.tensor([0, 0, 0, 1, 1, 1, 0], dtype=torch.bool)
    indices = [3, 4, 5]

    for subset in [idx, mask, indices]:
        out = subgraph(subset, edge_index, edge_attr)
        assert out[0].tolist() == [[3, 4, 4, 5], [4, 3, 5, 4]]
        assert out[1].tolist() == [7, 8, 9, 10]

        out = subgraph(subset, edge_index, edge_attr, relabel_nodes=True)
        assert out[0].tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]]
        assert out[1].tolist() == [7, 8, 9, 10]
Example 8: test_batched_negative_sampling
# Required import: import torch
# Alternatively: from torch import bool
def test_batched_negative_sampling():
    edge_index = torch.as_tensor([[0, 0, 1, 2], [0, 1, 2, 3]])
    edge_index = torch.cat([edge_index, edge_index + 4], dim=1)
    batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1])

    neg_edge_index = batched_negative_sampling(edge_index, batch)
    assert neg_edge_index.size(1) <= edge_index.size(1)

    adj = torch.zeros(8, 8, dtype=torch.bool)
    adj[edge_index[0], edge_index[1]] = True

    neg_adj = torch.zeros(8, 8, dtype=torch.bool)
    neg_adj[neg_edge_index[0], neg_edge_index[1]] = True
    assert (adj & neg_adj).sum() == 0
    assert neg_adj[:4, 4:].sum() == 0
    assert neg_adj[4:, :4].sum() == 0
Example 9: clean
# Required import: import torch
# Alternatively: from torch import bool
def clean(self, edges_mask, groups):
    # Keep only the edges selected by the boolean mask and remap the
    # remaining edge indices.
    edges_mask = edges_mask.astype(bool)
    torch_mask = torch.from_numpy(edges_mask.copy())
    self.gemm_edges = self.gemm_edges[edges_mask]
    self.edges = self.edges[edges_mask]
    self.sides = self.sides[edges_mask]
    new_ve = []
    edges_mask = np.concatenate([edges_mask, [False]])
    new_indices = np.zeros(edges_mask.shape[0], dtype=np.int32)
    new_indices[-1] = -1
    new_indices[edges_mask] = np.arange(0, np.ma.where(edges_mask)[0].shape[0])
    self.gemm_edges[:, :] = new_indices[self.gemm_edges[:, :]]
    for v_index, ve in enumerate(self.ve):
        update_ve = []
        # if self.v_mask[v_index]:
        for e in ve:
            update_ve.append(new_indices[e])
        new_ve.append(update_ve)
    self.ve = new_ve
    self.__clean_history(groups, torch_mask)
    self.pool_count += 1
    self.export()
Example 10: load_ogb
# Required import: import torch
# Alternatively: from torch import bool
def load_ogb(name):
    from ogb.nodeproppred import DglNodePropPredDataset

    data = DglNodePropPredDataset(name=name)
    splitted_idx = data.get_idx_split()
    graph, labels = data[0]
    labels = labels[:, 0]

    graph.ndata['features'] = graph.ndata['feat']
    graph.ndata['labels'] = labels
    in_feats = graph.ndata['features'].shape[1]
    num_labels = len(th.unique(labels))

    # Find the node IDs in the training, validation, and test set.
    train_nid, val_nid, test_nid = splitted_idx['train'], splitted_idx['valid'], splitted_idx['test']
    train_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
    train_mask[train_nid] = True
    val_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
    val_mask[val_nid] = True
    test_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
    test_mask[test_nid] = True
    graph.ndata['train_mask'] = train_mask
    graph.ndata['val_mask'] = val_mask
    graph.ndata['test_mask'] = test_mask
    return graph, len(th.unique(graph.ndata['labels']))
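A hypothetical call (it requires the ogb and dgl packages, downloads the dataset on first use, and assumes torch is imported as th, as in the snippet above; the dataset name is just an example):

graph, n_classes = load_ogb('ogbn-products')
print(graph.ndata['train_mask'].dtype)   # torch.bool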
Example 11: add_insertion_noise
# Required import: import torch
# Alternatively: from torch import bool
def add_insertion_noise(self, tokens, p):
    if p == 0.0:
        return tokens

    num_tokens = len(tokens)
    n = int(math.ceil(num_tokens * p))

    noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1
    noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
    noise_mask[noise_indices] = 1
    result = torch.LongTensor(n + len(tokens)).fill_(-1)

    num_random = int(math.ceil(n * self.random_ratio))
    result[noise_indices[num_random:]] = self.mask_idx
    result[noise_indices[:num_random]] = torch.randint(low=1, high=len(self.vocab), size=(num_random,))

    result[~noise_mask] = tokens

    assert (result >= 0).all()
    return result
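Stripped of the fairseq-specific pieces (self.vocab, self.mask_idx), the boolean-mask scatter at the heart of this method looks like this (a hypothetical standalone sketch):

import torch

num_tokens, n = 8, 3
tokens = torch.arange(num_tokens)
noise_mask = torch.zeros(num_tokens + n, dtype=torch.bool)
noise_mask[torch.randperm(num_tokens + n - 2)[:n] + 1] = 1
result = torch.full((num_tokens + n,), -1, dtype=torch.long)
result[noise_mask] = 99                 # placeholder for inserted noise tokens
result[~noise_mask] = tokens            # original tokens keep their relative order
assert (result >= 0).all()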
Example 12: torch_dtype_to_np_dtype
# Required import: import torch
# Alternatively: from torch import bool
def torch_dtype_to_np_dtype(dtype):
    dtype_dict = {
        torch.bool    : np.dtype(np.bool_),  # np.bool was removed in NumPy 1.24
        torch.uint8   : np.dtype(np.uint8),
        torch.int8    : np.dtype(np.int8),
        torch.int16   : np.dtype(np.int16),
        torch.short   : np.dtype(np.int16),
        torch.int32   : np.dtype(np.int32),
        torch.int     : np.dtype(np.int32),
        torch.int64   : np.dtype(np.int64),
        torch.long    : np.dtype(np.int64),
        torch.float16 : np.dtype(np.float16),
        torch.half    : np.dtype(np.float16),
        torch.float32 : np.dtype(np.float32),
        torch.float   : np.dtype(np.float32),
        torch.float64 : np.dtype(np.float64),
        torch.double  : np.dtype(np.float64),
    }
    return dtype_dict[dtype]
# ---------------------- InferenceEngine internal types ------------------------
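A couple of sanity checks for the mapping (assuming numpy is imported as np and torch as above):

assert torch_dtype_to_np_dtype(torch.bool) == np.dtype(np.bool_)
assert torch_dtype_to_np_dtype(torch.half) == np.dtype(np.float16)
assert torch_dtype_to_np_dtype(torch.long) == np.dtype(np.int64)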
Example 13: lacks_value
# Required import: import torch
# Alternatively: from torch import bool
def lacks_value(ty) -> bool:
    ty = ty.deref()

    if isinstance(ty, TyNone):
        return False
    if isinstance(ty, TyNum):
        return ty.value is None
    if isinstance(ty, TyString):
        return ty.value is None
    if isinstance(ty, TyList):
        return True
    if isinstance(ty, TyTuple):
        if not ty.is_fixed_len:
            return True
        return any([lacks_value(t) for t in ty.get_tys()])
    if isinstance(ty, TyDict):
        return True
    if isinstance(ty, TyTensor):
        return ty.shape is None or any([not i.has_value() for i in ty.shape])
    if isinstance(ty, TyDType):
        return ty.t is None
Example 14: create_buffers
# Required import: import torch
# Alternatively: from torch import bool
def create_buffers(flags, obs_shape, num_actions) -> Buffers:
    T = flags.unroll_length
    specs = dict(
        frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
        reward=dict(size=(T + 1,), dtype=torch.float32),
        done=dict(size=(T + 1,), dtype=torch.bool),
        episode_return=dict(size=(T + 1,), dtype=torch.float32),
        episode_step=dict(size=(T + 1,), dtype=torch.int32),
        policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32),
        baseline=dict(size=(T + 1,), dtype=torch.float32),
        last_action=dict(size=(T + 1,), dtype=torch.int64),
        action=dict(size=(T + 1,), dtype=torch.int64),
    )
    buffers: Buffers = {key: [] for key in specs}
    for _ in range(flags.num_buffers):
        for key in buffers:
            buffers[key].append(torch.empty(**specs[key]).share_memory_())
    return buffers
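A hypothetical invocation; the flags object below stands in for torchbeast's argparse namespace, and Buffers is assumed to be its Dict[str, List[torch.Tensor]] alias:

import torch
from types import SimpleNamespace

flags = SimpleNamespace(unroll_length=80, num_buffers=2)
buffers = create_buffers(flags, obs_shape=(4, 84, 84), num_actions=6)
assert buffers['done'][0].dtype == torch.bool
assert buffers['frame'][0].shape == (81, 4, 84, 84)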
Example 15: calculate_pos_recall
# Required import: import torch
# Alternatively: from torch import bool
def calculate_pos_recall(self, cls_scores, labels_list, pos_inds):
    """Calculate positive recall with score threshold.

    Args:
        cls_scores (list[Tensor]): Classification scores at all fpn levels.
            Each tensor is in shape (N, num_classes * num_anchors, H, W).
        labels_list (list[Tensor]): The label that each anchor is assigned
            to. Shape (N * H * W * num_anchors, ).
        pos_inds (list[Tensor]): List of bool tensors indicating whether
            the anchor is assigned to a positive label.
            Shape (N * H * W * num_anchors, ).

    Returns:
        Tensor: A single float number indicating the positive recall.
    """
    with torch.no_grad():
        num_class = self.num_classes
        scores = [
            cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos]
            for cls, pos in zip(cls_scores, pos_inds)
        ]
        labels = [
            label.reshape(-1)[pos]
            for label, pos in zip(labels_list, pos_inds)
        ]
        scores = torch.cat(scores, dim=0)
        labels = torch.cat(labels, dim=0)
        if self.use_sigmoid_cls:
            scores = scores.sigmoid()
        else:
            scores = scores.softmax(dim=1)

        return accuracy(scores, labels, thresh=self.score_threshold)