This article collects typical usage examples of the torch.Storage method in Python. If you are wondering what exactly torch.Storage does or how to use it, the curated method code examples below may help. You can also explore further usage examples from the torch module to which this method belongs.
The following presents 6 code examples of the torch.Storage method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
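Before the examples, here is a minimal illustrative sketch of what torch.Storage itself provides: a flat, resizable buffer that tensors view into. The variable names are hypothetical, and it assumes a PyTorch version where torch.Storage aliases torch.FloatStorage, as the examples on this page do.

import torch

storage = torch.Storage(16)                 # flat buffer holding 16 float elements
storage.fill_(0.0)                          # storages support simple in-place helpers
tensor = torch.Tensor(storage).view(4, 4)   # a tensor is a view over a storage
print(len(storage), tensor.shape)           # 16, torch.Size([4, 4])
storage.resize_(1024)                       # resize_ re-allocates the buffer in place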
Example 1: __init__
# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def __init__(self, in_num, out_num, layer_num, max_link, storage_size=1024):
    # Two shared scratch buffers, reused by every bottleneck layer in this block.
    input_storage_1 = torch.Storage(storage_size)
    input_storage_2 = torch.Storage(storage_size)
    self.shared_allocation_1 = _SharedAllocation(input_storage_1)
    self.shared_allocation_2 = _SharedAllocation(input_storage_2)
    max_in_num = in_num + out_num * max_link
    self.final_num_features = max_in_num
    self.saved_features = []
    self.max_link = max_link
    super(_IntermediaBlock, self).__init__()
    print('creating intermedia block ...')
    self.adapters = []
    for i in range(0, layer_num - 1):
        # Input channels grow with depth until capped by `max_link`.
        if i < max_link:
            tmp_in_num = in_num + (i + 1) * out_num
        else:
            tmp_in_num = max_in_num
        print('intermedia layer %d input channel number is %d' % (i, tmp_in_num))
        self.adapters.append(_EfficientDensenetBottleneck(self.shared_allocation_1,
                                                          self.shared_allocation_2,
                                                          tmp_in_num, out_num))
    self.adapters = nn.ModuleList(self.adapters)
    print('intermedia layer output channel number is %d' % out_num)
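The _SharedAllocation helper referenced above is not shown on this page. As a rough idea of the pattern (a hypothetical sketch, not the repository's actual code), it is typically a thin wrapper that lazily grows a single torch.Storage so every bottleneck layer can reuse the same scratch buffer:

class _SharedAllocation(object):
    """Hypothetical sketch: one shared, lazily grown torch.Storage."""

    def __init__(self, storage):
        self.storage = storage

    def resize_(self, size):
        # Only grow, never shrink, so earlier users keep a valid buffer.
        if self.storage.size() < size:
            self.storage.resize_(size)
        return self

    def type_as(self, tensor):
        # Keep the scratch buffer on the same device/dtype as the inputs.
        self.storage = self.storage.type(tensor.storage().type())
        return self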
Example 2: _map_location
# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def _map_location(xpu, storage, location):
    """
    Helper for `xpu.load` used when calling `torch.load`.

    Args:
        storage (torch.Storage): the initial deserialization of the
            storage of the data read by `torch.load`, residing on the CPU.
        location (str): tag identifying the location from which the data
            read by `torch.load` was originally saved.

    Returns:
        torch.Storage: the storage, moved to this XPU's device if it is a GPU.
    """
    if xpu.is_gpu():
        return storage.cuda(xpu._main_device_id)
    else:
        return storage
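Because `_map_location` takes the extra `xpu` argument, it has to be bound before being handed to `torch.load` as the two-argument `map_location` callback. A minimal, hypothetical usage sketch (the checkpoint path and the `xpu` instance are assumptions, not part of the original example):

import functools
import torch

# Hypothetical: `xpu` is an instance of the device-wrapper class that owns
# `_map_location`; 'checkpoint.pt' is a placeholder path.
snapshot = torch.load('checkpoint.pt',
                      map_location=functools.partial(_map_location, xpu))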
Example 3: device_mapping
# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def device_mapping(cuda_device: int):
    """
    In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
    you have to supply a `map_location` function. Call this with
    the desired `cuda_device` to get the function that `torch.load()` needs.
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:
        if cuda_device >= 0:
            return storage.cuda(cuda_device)
        else:
            return storage
    return inner_device_mapping
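The docstring explains the intended call pattern; a short usage sketch follows (the checkpoint path is a placeholder, not part of the original example):

import torch

# Load a GPU-trained checkpoint onto the CPU; pass a non-negative index
# such as 0 to target a specific GPU instead.
state_dict = torch.load('model.pt', map_location=device_mapping(-1))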
Example 4: device_mapping
# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def device_mapping(cuda_device: int):
    """
    In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
    you have to supply a `map_location` function. Call this with
    the desired `cuda_device` to get the function that `torch.load()` needs.
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:  # pylint: disable=unused-argument
        if cuda_device >= 0:
            return storage.cuda(cuda_device)
        else:
            return storage
    return inner_device_mapping
Example 5: __init__
# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, storage_size=1024):
    input_storage_1 = torch.Storage(storage_size)
    input_storage_2 = torch.Storage(storage_size)
    self.final_num_features = num_input_features + (growth_rate * num_layers)
    self.shared_allocation_1 = _SharedAllocation(input_storage_1)
    self.shared_allocation_2 = _SharedAllocation(input_storage_2)
    super(_DenseBlock, self).__init__()
    for i in range(num_layers):
        layer = _DenseLayer(self.shared_allocation_1, self.shared_allocation_2,
                            num_input_features + i * growth_rate,
                            growth_rate, bn_size, drop_rate)
        self.add_module('denselayer%d' % (i + 1), layer)
Example 6: __init__
# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def __init__(self, size):
    self._cpu_storage = torch.Storage(size)
    self._gpu_storages = []
    if torch.cuda.is_available():
        for device_idx in range(torch.cuda.device_count()):
            with torch.cuda.device(device_idx):
                self._gpu_storages.append(torch.Storage(size).cuda())
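Example 6 only shows the constructor: it mirrors one CPU scratch buffer with one buffer per visible GPU. A companion method that keeps all of the mirrors at least a given size might look like the following (a hypothetical sketch, not the repository's actual code):

def resize_(self, size):
    # Grow every mirrored buffer so any device can use the shared scratch space.
    if self._cpu_storage.size() < size:
        self._cpu_storage.resize_(size)
    for device_idx, gpu_storage in enumerate(self._gpu_storages):
        if gpu_storage.size() < size:
            with torch.cuda.device(device_idx):
                gpu_storage.resize_(size)
    return self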