本文整理汇总了Python中torch.ByteTensor方法的典型用法代码示例。如果您正苦于以下问题:Python torch.ByteTensor方法的具体用法?Python torch.ByteTensor怎么用?Python torch.ByteTensor使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 torch 的用法示例。
在下文中一共展示了torch.ByteTensor方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: to_tensor
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def to_tensor(pic):
    """Convert a ``numpy.ndarray`` image to a CHW tensor.

    Grayscale (2-D) inputs are first expanded to 3-channel RGB. Byte images
    are scaled to floats in ``[0, 1]``; other dtypes pass through unchanged.

    Args:
        pic (numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.

    Raises:
        TypeError: If ``pic`` is not a numpy ndarray image.
    """
    if not _is_numpy_image(pic):
        raise TypeError('pic should be ndarray. Got {}.'.format(type(pic)))
    if pic.ndim == 2:
        # promote single-channel images to 3 channels before HWC -> CHW
        pic = cv2.cvtColor(pic, cv2.COLOR_GRAY2RGB)
    tensor = torch.from_numpy(pic.transpose((2, 0, 1)))
    # backward compatibility: normalize uint8 images to [0, 1]
    return tensor.float().div(255) if isinstance(tensor, torch.ByteTensor) else tensor
示例2: decode
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def decode(self, emissions: torch.Tensor,
           mask: Optional[torch.ByteTensor] = None) -> List[List[int]]:
    """Find the most likely tag sequence using the Viterbi algorithm.

    Args:
        emissions (`~torch.Tensor`): Emission score tensor of size
            ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is
            ``False``, ``(batch_size, seq_length, num_tags)`` otherwise.
        mask (`~torch.ByteTensor`): Mask tensor of size
            ``(seq_length, batch_size)`` if ``batch_first`` is ``False``,
            ``(batch_size, seq_length)`` otherwise. Defaults to all ones.

    Returns:
        List of lists containing the best tag sequence for each batch entry.
    """
    self._validate(emissions, mask=mask)
    scores = emissions
    pad_mask = mask
    if pad_mask is None:
        # no mask supplied: treat every position as valid
        pad_mask = scores.new_ones(scores.shape[:2], dtype=torch.uint8)
    if self.batch_first:
        # internal computation is time-first
        scores = scores.transpose(0, 1)
        pad_mask = pad_mask.transpose(0, 1)
    return self._viterbi_decode(scores, pad_mask)
示例3: to_tensor
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def to_tensor(pic):
    """Convert a ``numpy.ndarray`` image (HWC) to a CHW tensor.

    Byte images are scaled to floats in ``[0, 1]``; other dtypes pass
    through unchanged.

    Args:
        pic (numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.

    Raises:
        TypeError: If ``pic`` is not a numpy ndarray image.
    """
    if not _is_numpy_image(pic):
        raise TypeError('pic should be ndarray. Got {}'.format(type(pic)))
    # handle numpy array: HWC -> CHW
    tensor = torch.from_numpy(pic.transpose((2, 0, 1)))
    # backward compatibility: normalize uint8 images to [0, 1]
    is_byte = isinstance(tensor, torch.ByteTensor) or tensor.dtype == torch.uint8
    if is_byte:
        return tensor.float().div(255)
    return tensor
示例4: __getitem__
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def __getitem__(self, item):
    """Index into the stored UV fields.

    Args:
        item: either a ``torch.ByteTensor`` boolean mask over ``self.dp_uvs``
            or an iterable of integer indices.

    Returns:
        DenseposeUVs: a new instance wrapping the selected UV entries.
    """
    if isinstance(item, torch.ByteTensor):
        if item.sum() == len(item):
            # all-ones mask: reuse the full list unchanged
            selected = self.dp_uvs
        else:
            selected = [uv for keep, uv in zip(item, self.dp_uvs) if keep]
    else:
        # integer (fancy) indexing
        selected = [self.dp_uvs[idx] for idx in item]
    return DenseposeUVs(selected, self.size, self.flip)
示例5: fit_positive
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def fit_positive(rows, cols, yx_min, yx_max, anchors):
    """Mark the positive (cell, anchor) assignment for each ground-truth box.

    Args:
        rows, cols: grid dimensions of the detection feature map.
        yx_min, yx_max: (batch_size, num, 2) box corners -- assumes
            coordinates are already in grid-cell units (TODO confirm).
        anchors: (num_anchors, 2) anchor extents.

    Returns:
        torch.ByteTensor of shape (batch_size, rows * cols, num_anchors)
        with 1 where a box's center cell meets its best-IoU anchor.
    """
    device_id = anchors.get_device() if torch.cuda.is_available() else None
    batch_size, num, _ = yx_min.size()
    num_anchors, _ = anchors.size()
    # a box is valid only if it has positive extent in both y and x
    valid = torch.prod(yx_min < yx_max, -1)
    center = (yx_min + yx_max) / 2
    ij = torch.floor(center)
    i, j = torch.unbind(ij.long(), -1)
    # flattened index of the grid cell containing each box center
    index = i * cols + j
    anchors2 = anchors / 2
    # IoU between boxes re-centered at the origin and origin-centered anchors
    iou_matrix = utils.iou.torch.iou_matrix((yx_min - center).view(-1, 2), (yx_max - center).view(-1, 2), -anchors2, anchors2).view(batch_size, -1, num_anchors)
    # best-matching anchor per box
    iou, index_anchor = iou_matrix.max(-1)
    _positive = []
    cells = rows * cols
    # NOTE: the loop variables deliberately shadow the batch tensors above;
    # each iteration handles one batch element
    for valid, index, index_anchor in zip(torch.unbind(valid), torch.unbind(index), torch.unbind(index_anchor)):
        # keep only the cell/anchor indices of valid boxes
        index, index_anchor = (t[valid] for t in (index, index_anchor))
        t = utils.ensure_device(torch.ByteTensor(cells, num_anchors).zero_(), device_id)
        t[index, index_anchor] = 1
        _positive.append(t)
    return torch.stack(_positive)
示例6: __call__
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def __call__(self, gray_image):
    """Colorize a label image using the palette in ``self.cmap``.

    Args:
        gray_image: a ``1 x H x W`` tensor of integer class labels
            (assumed single-channel -- only channel 0 is read).

    Returns:
        torch.ByteTensor: a ``3 x H x W`` color image; pixels whose label
        is outside the palette stay black.
    """
    shape = gray_image.size()
    color_image = torch.ByteTensor(3, shape[1], shape[2]).fill_(0)
    for label, color in enumerate(self.cmap):
        mask = gray_image[0] == label
        # paint all three channels of the pixels carrying this label
        for channel in range(3):
            color_image[channel][mask] = color[channel]
    return color_image
示例7: to_tensor
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def to_tensor(pic):
    """Convert a ``numpy.ndarray`` image (HWC) to a CHW tensor.

    Unlike the usual ``to_tensor``, byte images are cast to float WITHOUT
    dividing by 255 -- values keep their original 0..255 range.

    Args:
        pic (numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.

    Raises:
        TypeError: If ``pic`` is not a numpy ndarray image.
    """
    if not _is_numpy_image(pic):
        raise TypeError('pic should be ndarray. Got {}'.format(type(pic)))
    # handle numpy array: HWC -> CHW
    tensor = torch.from_numpy(pic.transpose((2, 0, 1)))
    # backward compatibility: uint8 images become float (no [0, 1] scaling)
    if isinstance(tensor, torch.ByteTensor) or tensor.dtype == torch.uint8:
        return tensor.float()
    return tensor
示例8: decode
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def decode(self, emissions):
    """Run CPU Viterbi decoding over a batch of emission scores.

    Args:
        emissions: ``(B, T, N)`` tensor of per-step label scores.

    Returns:
        A list of length ``B``; each entry is a one-element list holding a
        dict with the decoded ``tokens`` and a placeholder ``score`` of 0.
    """
    batch, time, num_labels = emissions.size()
    if self.asg_transitions is None:
        # no learned ASG transitions: use an all-zero transition matrix
        transitions = torch.FloatTensor(num_labels, num_labels).zero_()
    else:
        transitions = torch.FloatTensor(self.asg_transitions).view(num_labels, num_labels)
    viterbi_path = torch.IntTensor(batch, time)
    # scratch space required by the C++ Viterbi implementation
    workspace = torch.ByteTensor(
        CpuViterbiPath.get_workspace_size(batch, time, num_labels))
    CpuViterbiPath.compute(
        batch,
        time,
        num_labels,
        get_data_ptr_as_bytes(emissions),
        get_data_ptr_as_bytes(transitions),
        get_data_ptr_as_bytes(viterbi_path),
        get_data_ptr_as_bytes(workspace),
    )
    hypos = []
    for b in range(batch):
        tokens = self.get_tokens(viterbi_path[b].tolist())
        hypos.append([{"tokens": tokens, "score": 0}])
    return hypos
示例9: _get_whole_word_mask
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def _get_whole_word_mask(self):
    """Build a whole-word-beginning mask over the source vocabulary.

    Returns:
        torch.ByteTensor of length ``len(self.source_dictionary)`` where
        entry ``i`` is 1 when vocabulary item ``i`` starts a new word, or
        ``None`` when whole-word masking is disabled or no BPE is
        configured.
    """
    # create masked input and targets
    if not self.args.mask_whole_words:
        return None
    bpe = encoders.build_bpe(self.args)
    if bpe is None:
        # Fix: the original left ``mask_whole_words`` unbound on this path
        # and the final return raised UnboundLocalError; treat "no BPE" as
        # "no mask", consistent with get_whole_word_mask().
        return None

    def is_beginning_of_word(i):
        if i < self.source_dictionary.nspecial:
            # special elements are always considered beginnings
            return True
        tok = self.source_dictionary[i]
        if tok.startswith('madeupword'):
            return True
        try:
            return bpe.is_beginning_of_word(tok)
        except ValueError:
            # token the BPE cannot classify: err toward word start
            return True

    return torch.ByteTensor(list(
        map(is_beginning_of_word, range(len(self.source_dictionary)))
    ))
示例10: forward
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def forward(self, emissions, targets, masks, beam=None):
    """Compute the conditional log-likelihood of target tokens given emissions.

    Args:
        emissions (`~torch.Tensor`): Unnormalized decoder output of shape
            ``(batch_size, seq_len, vocab_size)``; batch-first is assumed.
        targets (`~torch.LongTensor`): Target token indices of shape
            ``(batch_size, seq_len)``.
        masks (`~torch.ByteTensor`): Mask tensor with the same size as
            ``targets``.
        beam: optional beam size forwarded to the normalizer approximation.

    Returns:
        `~torch.Tensor`: approximated log-likelihood (score minus normalizer).
    """
    score = self._compute_score(emissions, targets, masks)
    normalizer = self._compute_normalizer(emissions, targets, masks, beam)
    return score - normalizer
示例11: get_whole_word_mask
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def get_whole_word_mask(args, dictionary):
    """Return a byte mask marking which vocabulary ids start a new word.

    Args:
        args: configuration used to build the BPE encoder.
        dictionary: vocabulary supporting ``len``, indexing and ``nspecial``.

    Returns:
        torch.ByteTensor of length ``len(dictionary)``, or ``None`` when no
        BPE encoder can be built from ``args``.
    """
    bpe = encoders.build_bpe(args)
    if bpe is None:
        return None

    def starts_word(idx):
        if idx < dictionary.nspecial:
            # special elements are always considered beginnings
            return True
        token = dictionary[idx]
        if token.startswith('madeupword'):
            return True
        try:
            return bpe.is_beginning_of_word(token)
        except ValueError:
            # token the BPE cannot classify: err toward word start
            return True

    return torch.ByteTensor([starts_word(i) for i in range(len(dictionary))])
示例12: forward
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def forward(self, x, lengths):
    """Apply each sub-module, zeroing time steps beyond each sequence length.

    :param x: The input of size BxCxDxT
    :param lengths: The actual length of each sequence in the batch
    :return: Masked output from the module
    """
    for layer in self.seq_module:
        x = layer(x)
        # build a padding mask matching the (possibly resized) output
        mask = torch.ByteTensor(x.size()).fill_(0)
        if x.is_cuda:
            mask = mask.cuda()
        for i, length in enumerate(lengths):
            valid = length.item()
            pad = mask[i].size(2) - valid
            if pad > 0:
                # mark the trailing padded time steps of sample i
                mask[i].narrow(2, valid, pad).fill_(1)
        x = x.masked_fill(mask, 0)
    return x, lengths
示例13: test_masked_global_attention
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def test_masked_global_attention(self):
    """Smoke-test GlobalAttention with variable-length memory banks."""
    src_lengths = torch.IntTensor([7, 3, 5, 2])
    # Positions that must receive zero attention weight (kept for the
    # pytorch-0.3 fix below):
    # illegal_weights_mask = torch.ByteTensor([
    #     [0, 0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 1, 1, 1, 1],
    #     [0, 0, 0, 0, 0, 1, 1],
    #     [0, 0, 1, 1, 1, 1, 1]])
    n_batch = src_lengths.size(0)
    n_dim = 20
    memory = Variable(torch.randn(n_batch, src_lengths.max(), n_dim))
    query = Variable(torch.randn(n_batch, n_dim))
    attention = onmt.modules.GlobalAttention(n_dim)
    _, alignments = attention(query, memory,
                              memory_lengths=src_lengths)
    # TODO: fix for pytorch 0.3
    # illegal_weights = alignments.masked_select(illegal_weights_mask)
    # self.assertEqual(0.0, illegal_weights.data.sum())
示例14: broadcast_object
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def broadcast_object(obj: Any) -> Any:
    """Broadcast an arbitrary picklable object from the master process to all
    other distributed workers.

    The master serializes ``obj`` with ``torch.save``, broadcasts its byte
    length first (so receivers can allocate), then broadcasts the payload.
    Non-master ranks receive and deserialize; every rank returns the object.

    NOTE(review): ``torch.load`` unpickles arbitrary data -- safe only among
    trusted workers.
    """
    if is_master():
        buffer = io.BytesIO()
        torch.save(obj, buffer)
        data = bytearray(buffer.getbuffer())
        # announce the payload size before sending the bytes
        length_tensor = torch.LongTensor([len(data)])
        length_tensor = broadcast(length_tensor)
        data_tensor = torch.ByteTensor(data)
        data_tensor = broadcast(data_tensor)
    else:
        # receive the size, allocate a matching byte buffer, receive the bytes
        length_tensor = torch.LongTensor([0])
        length_tensor = broadcast(length_tensor)
        data_tensor = torch.empty([length_tensor.item()], dtype=torch.uint8)
        data_tensor = broadcast(data_tensor)
        buffer = io.BytesIO(data_tensor.numpy())
        obj = torch.load(buffer)
    return obj
示例15: forward
# 需要导入模块: import torch [as 别名]
# 或者: from torch import ByteTensor [as 别名]
def forward(self, feats, tags, mask):
    r"""Compute the CRF forward loss.

    Returns a FloatTensor of size ``(batch_size,)``; callers typically take
    ``mean()`` to obtain a scalar loss.

    :param torch.FloatTensor feats: batch_size x max_len x num_tags,
        feature (emission) matrix.
    :param torch.LongTensor tags: batch_size x max_len, tag matrix.
    :param torch.ByteTensor mask: batch_size x max_len; positions equal to 0
        are treated as padding.
    :return: torch.FloatTensor, (batch_size,)
    """
    # internal computation is time-first
    feats = feats.transpose(0, 1)
    tags = tags.transpose(0, 1).long()
    mask = mask.transpose(0, 1).float()
    normalizer = self._normalizer_likelihood(feats, mask)
    gold = self._gold_score(feats, tags, mask)
    # log(sum over all paths) minus log(score of the gold path)
    return normalizer - gold