This article collects typical code examples of torch.Storage in Python. If you have been wondering what torch.Storage is for, how to call it, or what real usage looks like, the curated examples below may help. You can also explore further usage examples of the torch module it belongs to.
Six torch.Storage code examples are shown below, sorted by popularity by default.
Example 1: __init__
# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def __init__(self, in_num, out_num, layer_num, max_link, storage_size=1024):
    input_storage_1 = torch.Storage(storage_size)
    input_storage_2 = torch.Storage(storage_size)
    self.shared_allocation_1 = _SharedAllocation(input_storage_1)
    self.shared_allocation_2 = _SharedAllocation(input_storage_2)
    max_in_num = in_num + out_num * max_link
    self.final_num_features = max_in_num
    self.saved_features = []
    self.max_link = max_link
    super(_IntermediaBlock, self).__init__()
    print('creating intermedia block ...')
    self.adapters = []
    for i in range(0, layer_num - 1):
        if i < max_link:
            tmp_in_num = in_num + (i + 1) * out_num
        else:
            tmp_in_num = max_in_num
        print('intermedia layer %d input channel number is %d' % (i, tmp_in_num))
        self.adapters.append(_EfficientDensenetBottleneck(self.shared_allocation_1,
                                                          self.shared_allocation_2,
                                                          tmp_in_num, out_num))
    self.adapters = nn.ModuleList(self.adapters)
    print('intermedia layer output channel number is %d' % out_num)
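The two torch.Storage(storage_size) buffers above are handed to _SharedAllocation objects that every bottleneck layer reuses. Below is a minimal sketch of that buffer-sharing idea, assuming an older PyTorch where torch.Storage aliases torch.FloatStorage; the shared_view helper is illustrative and not part of the snippet above.

import torch

storage = torch.Storage(1024)                  # one pre-allocated float buffer

def shared_view(shape):
    # Grow the shared buffer only when a larger view is requested, then return
    # a tensor that aliases the buffer instead of allocating new memory.
    numel = int(torch.Size(shape).numel())
    if storage.size() < numel:
        storage.resize_(numel)
    t = torch.Tensor()
    t.set_(storage, 0, torch.Size(shape))       # alias the buffer, no copy
    return t

a = shared_view((2, 3, 4))                      # both views share the same memory
b = shared_view((4, 6))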
Example 2: _map_location
# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def _map_location(xpu, storage, location):
    """
    Helper for `xpu.load` used when calling `torch.load`

    Args:
        storage (torch.Storage): the initial deserialization of the
            storage of the data read by `torch.load`, residing on the CPU.
        location (str): tag identifying the location the data being read
            by `torch.load` was originally saved from.

    Returns:
        torch.Storage: the storage
    """
    if xpu.is_gpu():
        return storage.cuda(xpu._main_device_id)
    else:
        return storage
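When a helper like this is passed as map_location, torch.load calls it once per deserialized storage. A hedged sketch of that contract, using plain callables instead of the helper above (the checkpoint file name is a placeholder):

import torch

# torch.load calls map_location(storage, location) for every storage it
# deserializes and expects a storage on the target device in return.
to_cpu = lambda storage, location: storage            # keep everything on the CPU
to_gpu0 = lambda storage, location: storage.cuda(0)   # move everything to GPU 0

state = torch.load('checkpoint.pt', map_location=to_cpu)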
Example 3: device_mapping
# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def device_mapping(cuda_device: int):
    """
    In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
    you have to supply a `map_location` function. Call this with
    the desired `cuda_device` to get the function that `torch.load()` needs.
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:
        if cuda_device >= 0:
            return storage.cuda(cuda_device)
        else:
            return storage
    return inner_device_mapping
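A short usage sketch for device_mapping (the checkpoint path is a placeholder): passing -1 keeps all storages on the CPU, while a non-negative index moves them to that GPU.

import torch

state_dict = torch.load('model.th', map_location=device_mapping(-1))    # load onto the CPU
# state_dict = torch.load('model.th', map_location=device_mapping(0))   # or onto GPU 0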
Example 4: device_mapping
# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def device_mapping(cuda_device: int):
    """
    In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
    you have to supply a `map_location` function. Call this with
    the desired `cuda_device` to get the function that `torch.load()` needs.
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:  # pylint: disable=unused-argument
        if cuda_device >= 0:
            return storage.cuda(cuda_device)
        else:
            return storage
    return inner_device_mapping
Example 5: __init__
# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, storage_size=1024):
    input_storage_1 = torch.Storage(storage_size)
    input_storage_2 = torch.Storage(storage_size)
    self.final_num_features = num_input_features + (growth_rate * num_layers)
    self.shared_allocation_1 = _SharedAllocation(input_storage_1)
    self.shared_allocation_2 = _SharedAllocation(input_storage_2)
    super(_DenseBlock, self).__init__()
    for i in range(num_layers):
        layer = _DenseLayer(self.shared_allocation_1, self.shared_allocation_2,
                            num_input_features + i * growth_rate,
                            growth_rate, bn_size, drop_rate)
        self.add_module('denselayer%d' % (i + 1), layer)
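A hedged construction sketch for the block above, with illustrative hyper-parameter values; the expected channel count follows directly from the formula in __init__.

block = _DenseBlock(num_layers=6, num_input_features=64, bn_size=4,
                    growth_rate=12, drop_rate=0.0, storage_size=1024)
# final_num_features == num_input_features + growth_rate * num_layers
assert block.final_num_features == 64 + 12 * 6   # 136 concatenated channels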
Example 6: __init__
# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def __init__(self, size):
    self._cpu_storage = torch.Storage(size)
    self._gpu_storages = []
    if torch.cuda.is_available():
        for device_idx in range(torch.cuda.device_count()):
            with torch.cuda.device(device_idx):
                self._gpu_storages.append(torch.Storage(size).cuda())
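The snippet above allocates one CPU buffer plus one buffer per visible GPU. A minimal sketch of how such a pool might be looked up later; the class name SharedPool and the storage_for method are illustrative, not from the original code.

import torch

class SharedPool(object):
    def __init__(self, size):
        self._cpu_storage = torch.Storage(size)
        self._gpu_storages = []
        if torch.cuda.is_available():
            for device_idx in range(torch.cuda.device_count()):
                with torch.cuda.device(device_idx):
                    self._gpu_storages.append(torch.Storage(size).cuda())

    def storage_for(self, tensor):
        # Pick the pre-allocated buffer that lives on the same device as `tensor`.
        if tensor.is_cuda:
            return self._gpu_storages[tensor.get_device()]
        return self._cpu_storage

pool = SharedPool(1024)
buf = pool.storage_for(torch.randn(4, 4))   # returns the CPU storage here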