This article collects typical usage examples of the Python method torch.is_tensor. If you are unsure exactly what torch.is_tensor does or how to call it, the curated examples below should help; you can also explore further usage examples of the torch module that the method belongs to.
The 15 code examples of torch.is_tensor shown below are taken from open-source projects and are ordered by popularity.
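Before the project examples, here is a minimal standalone sketch of what the check itself reports (the values below are dummy data chosen purely for illustration):
import numpy as np
import torch

x = torch.zeros(3)
print(torch.is_tensor(x))              # True: x is a torch.Tensor
print(torch.is_tensor(np.zeros(3)))    # False: NumPy arrays are not torch tensors
print(isinstance(x, torch.Tensor))     # equivalent check, also friendly to static type checkers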
Example 1: class_balanced_weight
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
import numbers

import numpy as np
import torch


def class_balanced_weight(beta, samples_per_class):
    assert 0 <= beta < 1, 'Wrong range of beta {}'.format(beta)
    if not isinstance(samples_per_class, np.ndarray):
        if isinstance(samples_per_class, (list, tuple)):
            samples_per_class = np.array(samples_per_class)
        elif torch.is_tensor(samples_per_class):
            samples_per_class = samples_per_class.numpy()
        else:
            raise NotImplementedError(
                'Type of samples_per_class should be {}, {} or {} but got {}'.format(
                    (list, tuple), np.ndarray, torch.Tensor, type(samples_per_class)))
    assert isinstance(samples_per_class, np.ndarray) \
        and isinstance(beta, numbers.Number)
    balanced_matrix = (1 - beta) / (1 - np.power(beta, samples_per_class))
    return torch.Tensor(balanced_matrix)
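A hedged usage sketch with made-up class counts; with beta close to 1, rarer classes receive larger weights:
counts = [1000, 100, 10]                        # hypothetical samples per class
weights = class_balanced_weight(0.999, counts)
print(weights)                                   # one weight per class, largest for the rarest class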
Example 2: calc_pdparam
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
def calc_pdparam(state, algorithm, body):
    '''
    Prepare the state and run algorithm.calc_pdparam to get pdparam for action_pd
    @param tensor:state For pdparam = net(state)
    @param algorithm The algorithm containing self.net
    @param body Body which links the algorithm to the env the action is for
    @returns tensor:pdparam
    @example
    pdparam = calc_pdparam(state, algorithm, body)
    action_pd = ActionPD(logits=pdparam)  # e.g. ActionPD is Categorical
    action = action_pd.sample()
    '''
    if not torch.is_tensor(state):  # only cast when state is not already a tensor
        state = guard_tensor(state, body)
        state = state.to(algorithm.net.device)
    pdparam = algorithm.calc_pdparam(state)
    return pdparam
Example 3: detection_collate
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
import numpy as np
import torch


def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for _, sample in enumerate(batch):
        for _, tup in enumerate(sample):
            if torch.is_tensor(tup):
                imgs.append(tup)
            elif isinstance(tup, np.ndarray):  # equivalent to the original isinstance(tup, type(np.empty(0)))
                annos = torch.from_numpy(tup).float()
                targets.append(annos)
    return (torch.stack(imgs, 0), targets)
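In practice this function is passed as collate_fn to a torch.utils.data.DataLoader; here is a self-contained sanity check with a hand-built two-image batch (all shapes and box values are dummy data):
import numpy as np
import torch

batch = [
    (torch.zeros(3, 32, 32), np.array([[0.1, 0.1, 0.5, 0.5, 1.0]])),
    (torch.zeros(3, 32, 32), np.array([[0.2, 0.2, 0.6, 0.6, 2.0],
                                       [0.0, 0.0, 1.0, 1.0, 3.0]])),
]
images, targets = detection_collate(batch)
print(images.shape)                   # torch.Size([2, 3, 32, 32])
print([t.shape[0] for t in targets])  # [1, 2]: a different number of boxes per image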
Example 4: forward
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
def forward(self, nodes):
    if torch.is_tensor(nodes):
        if self.neighbor_dict is not None:
            neighbors = [random.sample(self.neighbor_dict[idx.item()], self.max_degree)
                         if len(self.neighbor_dict[idx.item()]) > self.max_degree
                         else self.neighbor_dict[idx.item()]
                         for idx in nodes]
    else:
        if self.neighbor_dict is not None:
            neighbors = [random.sample(self.neighbor_dict[idx], self.max_degree)
                         if len(self.neighbor_dict[idx]) > self.max_degree
                         else self.neighbor_dict[idx]
                         for idx in nodes]
        nodes = torch.tensor(nodes, dtype=torch.long, device=self.flag.device)
    if self.neighbor_dict is not None:
        degrees = torch.tensor(list(map(len, neighbors)), dtype=torch.long, device=self.flag.device)
        neighbors = list2tensor(neighbors, self.padding_idx, device=self.flag.device)
        return nodes, neighbors, degrees
    else:
        return (nodes,)
Example 5: __merge_states
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
def __merge_states(self, state_list, type_state='hidden'):
    if state_list is None:
        return None
    if isinstance(state_list[0], State):
        return State().from_list(state_list)
    if isinstance(state_list[0], tuple):
        return tuple([self.__merge_states(s, type_state) for s in zip(*state_list)])
    else:
        if torch.is_tensor(state_list[0]):
            if type_state == 'hidden':
                batch_dim = 0 if state_list[0].dim() < 3 else 1
            else:
                batch_dim = 0 if self.batch_first else 1
            return torch.cat(state_list, batch_dim)
        else:
            assert state_list[1:] == state_list[:-1]  # all items are equal
            return state_list[0]
Author: nadavbh12, Project: Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch, Lines: 19, Source file: state.py
Example 6: move_to_cuda
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
import torch


def move_to_cuda(sample):
    # copy-pasted from
    # https://github.com/pytorch/fairseq/blob/master/fairseq/utils.py
    if len(sample) == 0:
        return {}

    def _move_to_cuda(maybe_tensor):
        if torch.is_tensor(maybe_tensor):
            return maybe_tensor.cuda()
        elif isinstance(maybe_tensor, dict):
            return {
                key: _move_to_cuda(value)
                for key, value in maybe_tensor.items()
            }
        elif isinstance(maybe_tensor, list):
            return [_move_to_cuda(x) for x in maybe_tensor]
        else:
            return maybe_tensor

    return _move_to_cuda(sample)
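A minimal usage sketch, guarded so it also runs on machines without a GPU (the nested sample dict is made up):
import torch

sample = {'id': [0, 1], 'net_input': {'tokens': torch.ones(2, 5, dtype=torch.long)}}
if torch.cuda.is_available():
    sample = move_to_cuda(sample)     # every tensor in the nested structure is moved to the GPU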
Example 7: flip
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
import torch


def flip(tensor, is_label=False):
    """Flip an image or a set of heatmaps left-right.

    Arguments:
        tensor {numpy.array or torch.tensor} -- the input image or heatmaps

    Keyword Arguments:
        is_label {bool} -- whether the input is a set of heatmaps rather than an image (default: {False})
    """
    if not torch.is_tensor(tensor):
        tensor = torch.from_numpy(tensor)
    if is_label:
        # shuffle_lr is a project-specific helper that reorders left/right heatmap channels
        tensor = shuffle_lr(tensor).flip(tensor.ndimension() - 1)
    else:
        tensor = tensor.flip(tensor.ndimension() - 1)
    return tensor
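A quick standalone check of the image branch (the array is dummy data; the heatmap branch additionally needs the project's shuffle_lr helper):
import numpy as np

img = np.arange(12).reshape(3, 4)
flipped = flip(img)                   # converted to a torch tensor and flipped along the last (width) axis
print(flipped[:, 0].tolist())         # [3, 7, 11]: the former last column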
Example 8: scatter
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
def scatter(inputs, target_gpus, dim=0, chunk_sizes=None):
    r"""
    Slices variables into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not variables. Does not
    support Tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
        assert not torch.is_tensor(obj), "Tensors not supported in scatter."
        if isinstance(obj, tuple):
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list):
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict):
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for targets in target_gpus]  # replicate plain objects, one reference per target GPU

    return scatter_map(inputs)
Example 9: normalize_u
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
def normalize_u(u, codomain, out=None):
    if not torch.is_tensor(codomain) and codomain == 2:
        u = F.normalize(u, p=2, dim=0, out=out)
    elif codomain == float('inf'):
        u = projmax_(u)
    else:
        uabs = torch.abs(u)
        uph = u / uabs
        uph[torch.isnan(uph)] = 1
        uabs = uabs / torch.max(uabs)
        uabs = uabs**(codomain - 1)
        if codomain == 1:
            u = uph * uabs / vector_norm(uabs, float('inf'))
        else:
            u = uph * uabs / vector_norm(uabs, codomain / (codomain - 1))
    return u
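A minimal sketch of the L2 branch only, assuming torch.nn.functional is imported as F in the same module as normalize_u (the other branches also need the project's projmax_ and vector_norm helpers):
import torch
import torch.nn.functional as F

u = torch.randn(8)
u = normalize_u(u, codomain=2)
print(float(u.norm()))                # ~1.0 after L2 normalization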
Example 10: __call__
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
def __call__(self, data):
    num_nodes = data.num_nodes

    if self.replace:
        choice = np.random.choice(num_nodes, self.num, replace=True)
        choice = torch.from_numpy(choice).to(torch.long)
    elif not self.allow_duplicates:
        choice = torch.randperm(num_nodes)[:self.num]
    else:
        choice = torch.cat([
            torch.randperm(num_nodes)
            for _ in range(math.ceil(self.num / num_nodes))
        ], dim=0)[:self.num]

    for key, item in data:
        if bool(re.search('edge', key)):
            continue
        if torch.is_tensor(item) and item.size(0) == num_nodes:
            data[key] = item[choice]

    return data
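The heart of this transform is plain tensor indexing with a random permutation; the same step in isolation (sizes are arbitrary):
import torch

num_nodes, num = 10, 4
choice = torch.randperm(num_nodes)[:num]   # `num` distinct node indices
x = torch.randn(num_nodes, 3)              # a per-node attribute tensor
print(x[choice].shape)                     # torch.Size([4, 3])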
Example 11: get
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
def get(self, idx):
    data = self.data.__class__()
    if hasattr(self.data, '__num_nodes__'):
        data.num_nodes = self.data.__num_nodes__[idx]

    for key in self.data.keys:
        item, slices = self.data[key], self.slices[key]
        start, end = slices[idx].item(), slices[idx + 1].item()
        # print(slices[idx], slices[idx + 1])
        if torch.is_tensor(item):
            s = list(repeat(slice(None), item.dim()))
            s[self.data.__cat_dim__(key, item)] = slice(start, end)
        elif start + 1 == end:
            s = slices[start]
        else:
            s = slice(start, end)
        data[key] = item[s]

    return data
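The tensor branch builds a list of slice(None) objects and then restricts only the concatenation dimension; the same trick shown standalone:
from itertools import repeat

import torch

item = torch.arange(24).view(6, 4)
s = list(repeat(slice(None), item.dim()))
s[0] = slice(2, 5)                    # restrict only dimension 0
print(item[tuple(s)].shape)           # torch.Size([3, 4])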
Example 12: __apply__
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
def __apply__(self, item, func):
    if torch.is_tensor(item):
        return func(item)
    elif isinstance(item, SparseTensor):
        # Not all apply methods are supported for `SparseTensor`, e.g.,
        # `contiguous()`. We can get around it by capturing the exception.
        try:
            return func(item)
        except AttributeError:
            return item
    elif isinstance(item, (tuple, list)):
        return [self.__apply__(v, func) for v in item]
    elif isinstance(item, dict):
        return {k: self.__apply__(v, func) for k, v in item.items()}
    else:
        return item
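The same recursion works outside the class; a standalone sketch (the function name apply_to_tensors is illustrative, not from the original project, and the SparseTensor special case is omitted):
import torch

def apply_to_tensors(item, func):
    """Recursively apply func to every tensor inside nested lists, tuples and dicts."""
    if torch.is_tensor(item):
        return func(item)
    if isinstance(item, (tuple, list)):
        return [apply_to_tensors(v, func) for v in item]
    if isinstance(item, dict):
        return {k: apply_to_tensors(v, func) for k, v in item.items()}
    return item

nested = {'x': torch.ones(2), 'meta': [torch.zeros(3), 'label']}
print(apply_to_tensors(nested, lambda t: t * 2))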
Example 13: string
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
def string(self, tensor, bpe_symbol=None, escape_unk=False):
    """Helper for converting a tensor of token indices to a string.

    Can optionally remove BPE symbols or escape <unk> words.
    """
    if torch.is_tensor(tensor) and tensor.dim() == 2:
        return '\n'.join(self.string(t) for t in tensor)

    def token_string(i):
        if i == self.unk():
            return self.unk_string(escape_unk)
        else:
            return self[i]

    sent = ' '.join(token_string(i) for i in tensor if i != self.eos())
    if bpe_symbol is not None:
        sent = (sent + ' ').replace(bpe_symbol, '').rstrip()
    return sent
Example 14: move_to_cuda
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
def move_to_cuda(sample):
    if len(sample) == 0:
        return {}

    def _move_to_cuda(maybe_tensor):
        if torch.is_tensor(maybe_tensor):
            return maybe_tensor.cuda()
        elif isinstance(maybe_tensor, dict):
            return {
                key: _move_to_cuda(value)
                for key, value in maybe_tensor.items()
            }
        elif isinstance(maybe_tensor, list):
            return [_move_to_cuda(x) for x in maybe_tensor]
        else:
            return maybe_tensor

    return _move_to_cuda(sample)
Example 15: get_gray_and_color_flow
# Required import: import torch [as alias]
# Or: from torch import is_tensor [as alias]
def get_gray_and_color_flow(self, Flow, max_rad=None):
    assert isinstance(Flow, (np.ndarray, torch.Tensor))
    if torch.is_tensor(Flow):
        Flow = Flow.clone().detach().cpu()
    if len(Flow.shape) == 4:
        Flow = Flow[0, :, :, :]
    # [2, H, W] -> [H, W, 2]
    Flow = chw_to_hwc(Flow)
    # [H, W, 2]
    grayFlow = Flow.copy()
    # [H, W, 3]
    colorFlow = flow_to_color(Flow.copy(), max_rad=max_rad)
    return grayFlow, colorFlow