This article collects typical usage examples of the Python method torch._six.int_classes. If you are unsure what _six.int_classes does or how to use it in practice, the curated code examples below may help. You can also explore other members of the torch._six module.
The following presents 12 code examples of the _six.int_classes method, ordered by popularity by default.
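Note that torch._six is a private compatibility module that has been removed from recent PyTorch releases; on Python 3 it simply aliased the built-in types. A minimal defensive import, shown here as a common workaround rather than an official API:

import torch

# torch._six was removed in recent PyTorch releases; on Python 3 it merely
# aliased the built-ins, so fall back to those when the import fails.
try:
    from torch._six import int_classes, string_classes
except ImportError:
    int_classes = int
    string_classes = (str, bytes)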
Example 1: collate
# Required import: from torch import _six [as alias]
# Or: from torch._six import int_classes [as alias]
def collate(self, batch):
elem = batch[0]
if isinstance(elem, Data):
return Batch.from_data_list(batch, self.follow_batch)
elif isinstance(elem, torch.Tensor):
return default_collate(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float)
elif isinstance(elem, int_classes):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, container_abcs.Mapping):
return {key: self.collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'):
return type(elem)(*(self.collate(s) for s in zip(*batch)))
elif isinstance(elem, container_abcs.Sequence):
return [self.collate(s) for s in zip(*batch)]
raise TypeError('DataLoader found invalid type: {}'.format(type(elem)))
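This collate method follows the torch_geometric Collater pattern; a minimal usage sketch (the Collater class name, the dataset, and the follow_batch argument are assumptions for illustration):

import torch
from torch.utils.data import DataLoader

# Hypothetical wiring: hand the bound method to a vanilla DataLoader so Data
# objects are merged via Batch.from_data_list and everything else falls
# through to the recursive branches above.
loader = DataLoader(dataset, batch_size=32,
                    collate_fn=Collater(follow_batch=[]).collate)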
Example 2: fast_batch_collator
# Required import: from torch import _six [as alias]
# Or: from torch._six import int_classes [as alias]
def fast_batch_collator(batched_inputs):
"""
A simple batch collator for most common reid tasks
"""
elem = batched_inputs[0]
if isinstance(elem, torch.Tensor):
out = torch.zeros((len(batched_inputs), *elem.size()), dtype=elem.dtype)
for i, tensor in enumerate(batched_inputs):
out[i] += tensor
return out
elif isinstance(elem, container_abcs.Mapping):
return {key: fast_batch_collator([d[key] for d in batched_inputs]) for key in elem}
elif isinstance(elem, float):
return torch.tensor(batched_inputs, dtype=torch.float64)
elif isinstance(elem, int_classes):
return torch.tensor(batched_inputs)
elif isinstance(elem, string_classes):
return batched_inputs
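A quick sketch of what the collator produces (inputs invented for illustration); note that the function falls off the end and returns None for element types it does not handle:

import torch

# Two samples, each a dict of an image tensor and an integer label.
batched = fast_batch_collator([
    {"images": torch.rand(3, 256, 128), "targets": 1},
    {"images": torch.rand(3, 256, 128), "targets": 0},
])
# batched["images"].shape == (2, 3, 256, 128); batched["targets"] == tensor([1, 0])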
Example 3: concatenate_cache
# Required import: from torch import _six [as alias]
# Or: from torch._six import int_classes [as alias]
def concatenate_cache(batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
elem_type = type(batch[0])
if isinstance(batch[0], torch.Tensor):
out = None
return torch.cat(batch, 0, out=out) # the main difference is here
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(error_msg_fmt.format(elem.dtype))
return concatenate_cache([torch.from_numpy(b) for b in batch])
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(batch[0], int_classes):
return torch.tensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], container_abcs.Mapping):
return {key: concatenate_cache([d[key] for d in batch])
for key in batch[0]}
elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'):
return type(batch[0])(*(concatenate_cache(samples)
for samples in zip(*batch)))
elif isinstance(batch[0], container_abcs.Sequence): # also some diffs here
# just unpack
return [s_ for s in batch for s_ in s]
raise TypeError((error_msg_fmt.format(type(batch[0]))))
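The practical difference from default_collate: tensors are concatenated along dim 0 rather than stacked into a new batch dimension, and nested sequences are flattened instead of transposed. A tiny illustration:

import torch

a, b = torch.ones(2, 4), torch.zeros(3, 4)
out = concatenate_cache([a, b])
# out.shape == (5, 4): cat reuses dim 0, so leading sizes may differ, whereas
# torch.stack (used by default_collate) would require identical shapes.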
Example 4: default_collate
# Required import: from torch import _six [as alias]
# Or: from torch._six import int_classes [as alias]
def default_collate(batch):
"Puts each data field into a tensor with outer dimension batch size"
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if torch.is_tensor(batch[0]):
out = None
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int_classes):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
    elif isinstance(batch[0], collections.abc.Mapping):
        return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.abc.Sequence):
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0]))))
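A quick sanity check of the recursion (values invented for illustration; assumes the module-level _use_shared_memory flag is defined, False outside worker processes):

import torch

batch = [{"x": torch.ones(2), "y": 1, "z": 0.5},
         {"x": torch.zeros(2), "y": 2, "z": 1.5}]
out = default_collate(batch)
# out["x"].shape == (2, 2); out["y"] == LongTensor([1, 2]);
# out["z"] == DoubleTensor([0.5, 1.5])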
Example 5: default_collate
# Required import: from torch import _six [as alias]
# Or: from torch._six import int_classes [as alias]
def default_collate(batch):
"""Puts each data field into a tensor with outer dimension batch size"""
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if isinstance(batch[0], torch.Tensor):
return torch.stack(batch, 0)
elif (
elem_type.__module__ == "numpy"
and elem_type.__name__ != "str_"
and elem_type.__name__ != "string_"
): # pragma: no cover
elem = batch[0]
if elem_type.__name__ == "ndarray":
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith("float") else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int_classes): # pragma: no cover
return torch.LongTensor(batch)
elif isinstance(batch[0], float): # pragma: no cover
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes): # pragma: no cover
return batch
elif isinstance(batch[0], container_abcs.Mapping): # pragma: no cover
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], container_abcs.Sequence): # pragma: no cover
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0]))))
Example 6: mt_collate
# Required import: from torch import _six [as alias]
# Or: from torch._six import int_classes [as alias]
def mt_collate(batch):
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if torch.is_tensor(batch[0]):
stacked = torch.stack(batch, 0)
return stacked
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return __numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int_classes):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
    elif isinstance(batch[0], collections.abc.Mapping):
        return {key: mt_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.abc.Sequence):
        transposed = zip(*batch)
        return [mt_collate(samples) for samples in transposed]
return batch
Example 7: __init__
# Required import: from torch import _six [as alias]
# Or: from torch._six import int_classes [as alias]
def __init__(self, sampler, batch_size, drop_last):
if not isinstance(sampler, torch_sampler.Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
Example 8: __init__
# Required import: from torch import _six [as alias]
# Or: from torch._six import int_classes [as alias]
def __init__(self, weights, num_samples=None, replacement=True):
# NOTE: Adapted `WeightedRandomSampler` to accept `num_samples=0` and `num_samples=None`.
if num_samples is None:
num_samples = len(weights)
        if not isinstance(num_samples, _int_classes) or isinstance(num_samples, bool) or \
                num_samples < 0:
            raise ValueError("num_samples should be a non-negative integer "
                             "value, but got num_samples={}".format(num_samples))
if not isinstance(replacement, bool):
raise ValueError("replacement should be a boolean value, but got "
"replacement={}".format(replacement))
self.weights = torch.as_tensor(weights, dtype=torch.double)
self.num_samples = num_samples
self.replacement = replacement
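A brief sketch, assuming the class (named WeightedRandomSampler here, matching the NOTE above) otherwise behaves like torch.utils.data.WeightedRandomSampler:

# num_samples=None defaults to len(weights); num_samples=0 is also accepted.
sampler = WeightedRandomSampler(weights=[0.1, 0.3, 0.6])
assert sampler.num_samples == 3
assert sampler.replacement is True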
Example 9: collate
# Required import: from torch import _six [as alias]
# Or: from torch._six import int_classes [as alias]
def collate(batch, *, root=True):
"Puts each data field into a tensor with outer dimension batch size"
if len(batch) == 0:
return batch
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if torch.is_tensor(batch[0]):
return default_collate(batch)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
return default_collate(batch)
elif isinstance(batch[0], int_classes):
return batch
elif isinstance(batch[0], float):
return batch
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], CameraIntrinsics):
return batch
elif isinstance(batch[0], Mapping):
if root:
return {key: collate([d[key] for d in batch], root=False) for key in batch[0]}
else:
return batch
elif isinstance(batch[0], Sequence):
return [collate(e, root=False) for e in batch]
raise TypeError((error_msg.format(type(batch[0]))))
Example 10: __init__
# Required import: from torch import _six [as alias]
# Or: from torch._six import int_classes [as alias]
def __init__(self, weights, num_samples, replacement=True):
if not isinstance(num_samples, _int_classes) or isinstance(num_samples, bool) or \
num_samples <= 0:
raise ValueError("num_samples should be a positive integeral "
"value, but got num_samples={}".format(num_samples))
if not isinstance(replacement, bool):
raise ValueError("replacement should be a boolean value, but got "
"replacement={}".format(replacement))
self.weights = weights / weights.sum()
self.num_samples = num_samples
self.replacement = replacement
Example 11: __init__
# Required import: from torch import _six [as alias]
# Or: from torch._six import int_classes [as alias]
def __init__(self, sampler, batch_size, drop_last, cfg):
if not isinstance(sampler, Sampler):
raise ValueError(
"sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}".format(sampler)
)
if (
not isinstance(batch_size, _int_classes)
or isinstance(batch_size, bool)
or batch_size <= 0
):
raise ValueError(
"batch_size should be a positive integer value, "
"but got batch_size={}".format(batch_size)
)
if not isinstance(drop_last, bool):
raise ValueError(
"drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last)
)
self.sampler = sampler
self.drop_last = drop_last
bs_factor = [
int(
round(
(
float(cfg.DATA.TRAIN_CROP_SIZE)
/ (s * cfg.MULTIGRID.DEFAULT_S)
)
** 2
)
)
for s in cfg.MULTIGRID.SHORT_CYCLE_FACTORS
]
self.batch_sizes = [
batch_size * bs_factor[0],
batch_size * bs_factor[1],
batch_size,
]
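The bs_factor arithmetic enlarges the batch size as the short-cycle spatial scale shrinks; a worked example with typical multigrid settings (values assumed for illustration):

# TRAIN_CROP_SIZE=224, MULTIGRID.DEFAULT_S=224, SHORT_CYCLE_FACTORS=(0.5, 0.5 ** 0.5)
crop, default_s = 224, 224
bs_factor = [int(round((crop / (s * default_s)) ** 2)) for s in (0.5, 0.5 ** 0.5)]
# bs_factor == [4, 2], so batch_sizes == [4 * batch_size, 2 * batch_size, batch_size]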
Example 12: __init__
# Required import: from torch import _six [as alias]
# Or: from torch._six import int_classes [as alias]
def __init__(self, sampler, batch_size, max_iteration=100000000, drop_last=True):
"""
数据加载,默认循环加载1亿次,几近无限迭代.
每次迭代输出一个批次的数据.
:param sampler: 采样器,传入 不同采样器 实现 不同的采样策略, RandomSampler随机采样,SequentialSampler顺序采样
:param batch_size: 批次大小
:param max_iteration: 迭代次数
:param drop_last: 是否弃掉最后的不够一批次的数据。True则弃掉;False保留,并返回,但是这一批次会小于指定批次大小。
"""
if not isinstance(sampler, Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integer value, "
"but got batch_size={}".format(batch_size))
if not isinstance(max_iteration, _int_classes) or isinstance(max_iteration, bool) or \
max_iteration <= 0:
raise ValueError("max_iter should be a positive integer value, "
"but got max_iter={}".format(max_iteration))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.max_iteration = max_iteration
self.drop_last = drop_last
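Assuming the enclosing class (called InfiniteBatchSampler here, a hypothetical name) iterates like a regular BatchSampler until max_iteration batches have been produced, usage would look like:

from torch.utils.data import RandomSampler

# Near-infinite stream of shuffled index batches (defaults shown explicitly).
batch_sampler = InfiniteBatchSampler(RandomSampler(range(1000)),
                                     batch_size=16,
                                     max_iteration=100000000,
                                     drop_last=True)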