This article collects typical usage examples of the Python method torch.utils.data.dataloader._use_shared_memory. If you are wondering how dataloader._use_shared_memory is used, or what it does, the curated code examples below should help. You can also explore further usage examples from the module that contains this attribute, torch.utils.data.dataloader.
The following presents 4 code examples of dataloader._use_shared_memory, ordered by popularity by default.
Example 1: use_shared_memory
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import _use_shared_memory [as alias]
def use_shared_memory():
    # The shared-memory flag moved between modules across PyTorch
    # releases; from 1.2 on, running inside a worker process is what
    # determines whether collate allocates into shared memory.
    if torch.__version__ < '1.1':
        import torch.utils.data.dataloader as torchdl
        return torchdl._use_shared_memory
    elif torch.__version__ < '1.2':
        import torch.utils.data._utils.collate as torch_collate
        return torch_collate._use_shared_memory
    else:
        return torch.utils.data.get_worker_info() is not None
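A minimal sketch of how this helper might be used inside a custom collate_fn; the function and names below are illustrative, not part of the quoted example:

import torch

def my_collate(batch):
    out = None
    if use_shared_memory():  # version-agnostic check from Example 1
        # In a worker process, allocate the output in shared memory so
        # the batch reaches the main process without an extra copy.
        numel = sum(x.numel() for x in batch)
        storage = batch[0].storage()._new_shared(numel)
        out = batch[0].new(storage)
    return torch.stack(batch, 0, out=out)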
Example 2: default_collate_override
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import _use_shared_memory [as alias]
def default_collate_override(batch):
    dataloader._use_shared_memory = False
    return default_collate_func(batch)
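For context, a minimal sketch of how this override is typically wired up on the older PyTorch versions the snippet targets (where the module-level flag exists); the setup lines are an assumption, not part of the quoted example:

import torch.utils.data.dataloader as dataloader

# Assumed setup: capture the original collate before installing the
# override defined above, so it can still be delegated to.
default_collate_func = dataloader.default_collate
# Monkey-patch so DataLoaders that use the default collate_fn go
# through default_collate_override.
dataloader.default_collate = default_collate_override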
Example 3: concat_collate
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import _use_shared_memory [as alias]
def concat_collate(batch):
    # type: (List[torch.Tensor]) -> torch.Tensor
    """
    Puts each data field into a tensor, concatenating along the first
    dimension. This differs from the default PyTorch collate, which
    stacks samples rather than concatenating them.
    :param batch: the input batch to be collated.
    """
    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.cat(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))
            return torch.cat([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.Mapping):
        return {key: concat_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [concat_collate(samples) for samples in transposed]
    raise TypeError((error_msg.format(type(batch[0]))))
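A quick usage sketch, assuming concat_collate and its _use_shared_memory flag (per the import note above) are already in scope; the variable-length dataset is a made-up stand-in:

import torch
from torch.utils.data import DataLoader, Dataset

class VarLengthDataset(Dataset):
    # Hypothetical dataset: sample i is a (i + 1, 3) tensor.
    def __len__(self):
        return 8

    def __getitem__(self, i):
        return torch.randn(i + 1, 3)

loader = DataLoader(VarLengthDataset(), batch_size=4,
                    collate_fn=concat_collate)
batch = next(iter(loader))
# Samples are concatenated along dim 0 rather than stacked: lengths
# 1 + 2 + 3 + 4 yield one (10, 3) tensor instead of a padded (4, ?, 3).
print(batch.shape)  # torch.Size([10, 3])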
Example 4: collate
# Required import: from torch.utils.data import dataloader [as alias]
# Or: from torch.utils.data.dataloader import _use_shared_memory [as alias]
def collate(batch):
    r"""Puts each data field into a tensor with outer dimension batch size"""
    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.Mapping):
        # Collate every key normally except 'instance_mask', which may hold
        # a variable number of objects per sample and is zero-padded below.
        res = {key: collate([d[key] for d in batch]) for key in batch[0] if key != 'instance_mask'}
        if 'instance_mask' in batch[0]:
            max_obj = max([d['instance_mask'].shape[0] for d in batch])
            instance_mask = torch.zeros(len(batch), max_obj, *(batch[0]['instance_mask'].shape[1:]))
            for i in range(len(batch)):
                num_obj = batch[i]['instance_mask'].shape[0]
                instance_mask[i, :num_obj] = torch.from_numpy(batch[i]['instance_mask'])
            res.update({'instance_mask': instance_mask})
        return res
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [collate(samples) for samples in transposed]
    raise TypeError((error_msg.format(type(batch[0]))))
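A small sketch of the instance_mask padding this collate performs; the sample dicts are illustrative, and it assumes collate and the module-level names it uses (including _use_shared_memory) are in scope:

import numpy as np
import torch

# Two hypothetical samples with 2 and 3 instance masks of size 4x4.
batch = [
    {'image': torch.randn(3, 4, 4),
     'instance_mask': np.zeros((2, 4, 4), dtype=np.float32)},
    {'image': torch.randn(3, 4, 4),
     'instance_mask': np.ones((3, 4, 4), dtype=np.float32)},
]
out = collate(batch)
print(out['image'].shape)          # torch.Size([2, 3, 4, 4])
# Masks are zero-padded along dim 0 up to the batch maximum (max_obj = 3).
print(out['instance_mask'].shape)  # torch.Size([2, 3, 4, 4])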