This article collects and summarizes typical usage examples of torch.CharTensor in Python. If you are unsure what torch.CharTensor does, how to use it, or simply want to see it in context, the curated method examples below may help; you can also explore further usage examples from the torch module.
The section below presents 8 code examples of torch.CharTensor, sorted by popularity by default.
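Before the examples, a quick refresher: torch.CharTensor is the legacy name for a CPU tensor holding signed 8-bit integers (int8). A minimal sketch of how it relates to the modern dtype-based API (the values are purely illustrative):

import torch

a = torch.CharTensor([1, 2, 3])            # legacy constructor, int8 on CPU
print(a.dtype)                             # torch.int8
print(a.type())                            # 'torch.CharTensor'

b = torch.tensor([1, 2, 3], dtype=torch.int8)  # modern equivalent
print(b.type())                            # 'torch.CharTensor'
print(torch.equal(a, b))                   # True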
Example 1: _worker_loop
# Required imports: import torch [as alias]
# Or: from torch import CharTensor [as alias]
def _worker_loop(dataset, index_queue, data_queue, collate_fn):
    global _use_shared_memory
    _use_shared_memory = True
    # torch.set_num_threads(1)
    while True:
        r = index_queue.get()
        if r is None:
            data_queue.put(None)
            break
        idx, batch_indices = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))

# numpy_type_map = {
#     'float64': torch.DoubleTensor,
#     'float32': torch.FloatTensor,
#     'float16': torch.HalfTensor,
#     'int64': torch.LongTensor,
#     'int32': torch.IntTensor,
#     'int16': torch.ShortTensor,
#     'int8': torch.CharTensor,
#     'uint8': torch.ByteTensor,
# }
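The commented-out numpy_type_map above is the table that old DataLoader collation code used to map NumPy dtype names onto the legacy tensor constructors, with 'int8' mapping to torch.CharTensor. A small illustrative sketch of that correspondence (the array contents are arbitrary):

import numpy as np
import torch

# excerpt of the dtype-name -> constructor table shown above
numpy_type_map = {'int8': torch.CharTensor, 'uint8': torch.ByteTensor}

arr = np.array([1, -2, 3], dtype=np.int8)
print(numpy_type_map[arr.dtype.name] is torch.CharTensor)   # True
t = torch.from_numpy(arr)     # the dtype is preserved across the NumPy bridge
print(t.dtype)                # torch.int8
print(t.type())               # 'torch.CharTensor'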
Example 2: is_integer_tensor
# Required imports: import torch [as alias]
# Or: from torch import CharTensor [as alias]
def is_integer_tensor(tensor: torch.Tensor) -> bool:
    return (
        isinstance(tensor, torch.ByteTensor)
        or isinstance(tensor, torch.CharTensor)
        or isinstance(tensor, torch.ShortTensor)
        or isinstance(tensor, torch.IntTensor)
        or isinstance(tensor, torch.LongTensor)
    )
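A quick usage sketch of the helper above. Note that it checks against the legacy CPU tensor types only, so as written a CUDA tensor such as torch.cuda.CharTensor would not be recognized:

print(is_integer_tensor(torch.CharTensor([1, 2, 3])))     # True  (int8)
print(is_integer_tensor(torch.ByteTensor([1, 2, 3])))     # True  (uint8)
print(is_integer_tensor(torch.FloatTensor([1.0, 2.0])))   # False (float32)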
Example 3: test_horovod_allgather
# Required imports: import torch [as alias]
# Or: from torch import CharTensor [as alias]
def test_horovod_allgather(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor, torch.cuda.FloatTensor,
                   torch.cuda.DoubleTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        tensor = tensor.type(dtype)
        gathered = hvd.allgather(tensor)

        assert list(gathered.shape) == [17 * size] + [17] * (dim - 1)

        for i in range(size):
            rank_tensor = gathered[i * 17:(i + 1) * 17]
            assert list(rank_tensor.shape) == [17] * dim, \
                'hvd.allgather produces incorrect gathered shape'
            assert rank_tensor.data.min() == i, 'hvd.allgather produces incorrect gathered tensor'
            assert rank_tensor.data.max() == i, 'hvd.allgather produces incorrect gathered tensor'
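For intuition about what the assertions above check: allgather concatenates every worker's tensor along the first dimension, so each rank's 17-row slice holds only that rank's value. A single-process sketch of the same layout using plain torch.cat, with no Horovod involved and a made-up worker count of 4:

import torch

size = 4                                                             # pretend there are 4 workers
pieces = [torch.CharTensor(17, 17).fill_(r) for r in range(size)]    # one piece per "rank"
gathered = torch.cat(pieces, dim=0)                                  # what allgather would return
assert list(gathered.shape) == [17 * size, 17]
assert gathered[17:34].min().item() == 1                             # rank 1's slice holds only 1s
assert gathered[17:34].max().item() == 1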
Example 4: test_horovod_allgather_variable_size
# Required imports: import torch [as alias]
# Or: from torch import CharTensor [as alias]
def test_horovod_allgather_variable_size(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors,
    even if those tensors have different sizes along the first dim."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor, torch.cuda.FloatTensor,
                   torch.cuda.DoubleTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Support tests up to MPI Size of 35
        if size > 35:
            break

        tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
        tensor_sizes = tensor_sizes[:size]

        tensor = torch.FloatTensor(
            *([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
        tensor = tensor.type(dtype)
        gathered = hvd.allgather(tensor)

        expected_size = sum(tensor_sizes)
        assert list(gathered.shape) == [expected_size] + [17] * (dim - 1)

        for i in range(size):
            rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
            rank_tensor = gathered[sum(
                tensor_sizes[:i]):sum(tensor_sizes[:i + 1])]
            assert list(rank_tensor.shape) == rank_size
            assert rank_tensor.data.min() == i
            assert rank_tensor.data.max() == i
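Both Horovod tests above sweep torch.CharTensor (and the other types) through a single code path with the same idiom: build a FloatTensor, then cast it with Tensor.type(). A minimal illustration of that cast, with values chosen to stay well inside the int8 range:

import torch

dtype = torch.CharTensor
t = torch.FloatTensor(3, 3).fill_(1).mul_(5)   # float32 tensor, all fives
t = t.type(dtype)                              # cast to an int8 CharTensor
print(t.type())                                # 'torch.CharTensor'
print(int(t.min()), int(t.max()))              # 5 5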
Example 5: test_horovod_broadcast
# Required imports: import torch [as alias]
# Or: from torch import CharTensor [as alias]
def test_horovod_broadcast(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        return

    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor, torch.cuda.FloatTensor,
                   torch.cuda.DoubleTensor]
    dims = [1, 2, 3]
    root_ranks = list(range(size))
    for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
        tensor = tensor.type(dtype)
        root_tensor = root_tensor.type(dtype)
        broadcasted_tensor = hvd.broadcast(tensor, root_rank)
        if rank != root_rank:
            assert (tensor == root_tensor).max() == 0, \
                'hvd.broadcast modifies source tensor'
        assert (broadcasted_tensor.data == root_tensor).min() == 1, \
            'hvd.broadcast produces incorrect broadcasted tensor'
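The assertions above use a pre-torch.equal idiom: an element-wise comparison yields a tensor of ones and zeros, so (a == b).min() == 1 means "all elements equal" and (a == b).max() == 0 means "no element equal". A small standalone illustration with CharTensors:

import torch

a = torch.CharTensor([1, 2, 3])
b = torch.CharTensor([1, 2, 3])
c = torch.CharTensor([4, 5, 6])

assert (a == b).min() == 1     # every position matches
assert (a == c).max() == 0     # no position matches
assert torch.equal(a, b)       # the modern, more readable spelling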
Example 6: test_horovod_broadcast_inplace
# Required imports: import torch [as alias]
# Or: from torch import CharTensor [as alias]
def test_horovod_broadcast_inplace(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        return

    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor, torch.cuda.FloatTensor,
                   torch.cuda.DoubleTensor]
    dims = [1, 2, 3]
    root_ranks = list(range(size))
    for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
        tensor = tensor.type(dtype)
        root_tensor = root_tensor.type(dtype)
        broadcasted_tensor = hvd.broadcast_(tensor, root_rank)
        assert (tensor == broadcasted_tensor).min() == 1, \
            'hvd.broadcast does not modify source tensor'
        assert (broadcasted_tensor == root_tensor).min() == 1, \
            'hvd.broadcast produces incorrect broadcasted tensor'
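Examples 5 and 6 differ only in the broadcast flavor: hvd.broadcast returns a new tensor and leaves the local one untouched, while hvd.broadcast_ overwrites the local tensor in place (the trailing underscore follows PyTorch's in-place naming convention). A purely local analogy, with no Horovod involved and made-up values:

import torch

root = torch.CharTensor([7, 7, 7])     # pretend this lives on the root rank
local = torch.CharTensor([1, 2, 3])    # our rank's original data

out = local.clone().copy_(root)        # like hvd.broadcast: result equals root, local untouched
assert torch.equal(out, root) and not torch.equal(local, root)

local.copy_(root)                      # like hvd.broadcast_: local itself now equals root
assert torch.equal(local, root)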
Example 7: default_collate_with_string
# Required imports: import torch [as alias]
# Or: from torch import CharTensor [as alias]
import collections
import numpy as np
import torch

def default_collate_with_string(batch):
    "Puts each data field into a tensor with outer dimension batch size"
    _use_shared_memory = False
    numpy_type_map = {
        'float64': torch.DoubleTensor,
        'float32': torch.FloatTensor,
        'float16': torch.HalfTensor,
        'int64': torch.LongTensor,
        'int32': torch.IntTensor,
        'int16': torch.ShortTensor,
        'int8': torch.CharTensor,
        'uint8': torch.ByteTensor,
    }
    string_classes = (str, bytes)
    if torch.is_tensor(batch[0]):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif type(batch[0]).__module__ == 'numpy':
        elem = batch[0]
        if type(elem).__name__ == 'ndarray':
            if elem.dtype.kind in {'U', 'S'}:
                # string arrays stay in NumPy; torch has no string tensor type
                return np.stack(batch, 0)
            else:
                return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.FloatTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    # collections.abc is required on Python 3.10+ (collections.Mapping was removed)
    elif isinstance(batch[0], collections.abc.Mapping):
        return {key: default_collate_with_string([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.abc.Sequence):
        transposed = zip(*batch)
        return [default_collate_with_string(samples) for samples in transposed]

    raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}"
                     .format(type(batch[0]))))
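A hypothetical usage sketch of the collate function above, on a batch of dicts mixing integers, strings, and int8 NumPy arrays (the keys and values are made up for illustration):

import numpy as np

batch = [
    {"label": 0, "name": "cat", "feat": np.arange(4, dtype=np.int8)},
    {"label": 1, "name": "dog", "feat": np.ones(4, dtype=np.int8)},
]
out = default_collate_with_string(batch)
print(out["label"])        # tensor([0, 1])      (torch.LongTensor)
print(out["name"])         # ['cat', 'dog']      (strings are passed through as a list)
print(out["feat"].type())  # 'torch.CharTensor'  (int8 arrays stack into an int8 tensor)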
Example 8: test_horovod_allgather_grad
# Required imports: import torch [as alias]
# Or: from torch import CharTensor [as alias]
def test_horovod_allgather_grad(self):
    """Test the correctness of the allgather gradient."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor, torch.cuda.FloatTensor,
                   torch.cuda.DoubleTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Support tests up to MPI Size of 35
        if size > 35:
            break

        tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5
        tensor_sizes = tensor_sizes[:size]

        tensor = torch.FloatTensor(
            *([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
        tensor = tensor.type(dtype)
        tensor = torch.autograd.Variable(tensor, requires_grad=True)

        # Build the upstream gradient: each rank's slice is filled with that rank's index.
        # Note: the loop variable below shadows the `size` read from hvd.size() above.
        grad_list = []
        for r, size in enumerate(tensor_sizes):
            grad_list.append(torch.ones([size] + [17] * (dim - 1)) * r)
        grad_ys = torch.cat(grad_list, dim=0)

        gathered = hvd.allgather(tensor)
        gathered.backward(grad_ys)
        grad_out = tensor.grad.data.numpy()

        expected = np.ones(
            [tensor_sizes[rank]] + [17] * (dim - 1)
        ) * rank * size
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))