

Python torch.Storage Code Examples

This article collects typical code examples of torch.Storage in Python. If you are unsure what torch.Storage does, or how it is used in practice, the curated examples below may help. You can also explore further usage examples from the torch package.


The following presents 6 code examples of torch.Storage, drawn from open-source projects and ordered by popularity.
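Before the project examples, here is a minimal sketch of what torch.Storage is: a contiguous, one-dimensional block of memory that backs a tensor. It assumes an older PyTorch release (as the examples below do), where torch.Storage is an alias for torch.FloatStorage:

import torch

storage = torch.Storage(8)        # historically an alias for torch.FloatStorage
print(storage.size())             # 8 (elements, not bytes)

tensor = torch.Tensor(storage)    # a 1-D tensor view over the same memory
tensor.fill_(1.0)
print(storage[0])                 # 1.0 -- the tensor and the storage share memory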

Example 1: __init__

# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
# (this example also relies on torch.nn as nn and on the _SharedAllocation and
#  _EfficientDensenetBottleneck helpers defined in the CU-Net project)
def __init__(self, in_num, out_num, layer_num, max_link, storage_size=1024):
    input_storage_1 = torch.Storage(storage_size)
    input_storage_2 = torch.Storage(storage_size)
    self.shared_allocation_1 = _SharedAllocation(input_storage_1)
    self.shared_allocation_2 = _SharedAllocation(input_storage_2)
    max_in_num = in_num + out_num * max_link
    self.final_num_features = max_in_num
    self.saved_features = []
    self.max_link = max_link
    super(_IntermediaBlock, self).__init__()
    print('creating intermedia block ...')
    self.adapters = []
    for i in range(0, layer_num-1):
        if i < max_link:
            tmp_in_num = in_num + (i+1) * out_num
        else:
            tmp_in_num = max_in_num
        print('intermedia layer %d input channel number is %d' % (i, tmp_in_num))
        self.adapters.append(_EfficientDensenetBottleneck(self.shared_allocation_1,
                                                          self.shared_allocation_2,
                                                          tmp_in_num, out_num))
    self.adapters = nn.ModuleList(self.adapters)
    print('intermedia layer output channel number is %d' % out_num)
Author: zhiqiangdon | Project: CU-Net | Code lines: 25 | Source: cu_net_prev_version.py
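The constructor pre-allocates two flat buffers that all bottleneck layers write into, which is the memory-sharing trick behind the "efficient DenseNet" implementations. A hedged illustration of that idea follows; the view_into helper is hypothetical (the real logic lives in _SharedAllocation), but the Storage calls are standard:

import torch

shared = torch.Storage(1024)

def view_into(shared_storage, shape):
    # Hypothetical helper: return a tensor of `shape` backed by the shared
    # buffer, growing the buffer in place if it is too small.
    numel = 1
    for d in shape:
        numel *= d
    if shared_storage.size() < numel:
        shared_storage.resize_(numel)
    return torch.Tensor(shared_storage)[:numel].view(shape)

a = view_into(shared, (4, 8))
b = view_into(shared, (2, 16))   # b overlaps a's memory -- the point of sharing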

Example 2: _map_location

# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def _map_location(xpu, storage, location):
    """
    Helper for `xpu.load` used when calling `torch.load`

    Args:
        storage (torch.Storage): the initial deserialization of the
            storage of the data read by `torch.load`, residing on the CPU.
        location (str): tag identifying the location where the data read
            by `torch.load` was originally saved.

    Returns:
        torch.Storage: the storage
    """
    if xpu.is_gpu():
        return storage.cuda(xpu._main_device_id)
    else:
        return storage
Author: Erotemic | Project: netharn | Code lines: 19 | Source: device.py
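A hedged usage sketch of the same mechanism, independent of netharn's xpu wrapper: torch.load calls map_location(storage, location) once per storage it deserializes, so any two-argument callable works (the checkpoint filename below is hypothetical):

import torch

# Pin every deserialized storage to GPU 0; returning it unchanged keeps it on CPU.
state = torch.load('checkpoint.pt',
                   map_location=lambda storage, location: storage.cuda(0))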

Example 3: device_mapping

# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def device_mapping(cuda_device: int):
    """
    In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
    you have to supply a `map_location` function. Call this with
    the desired `cuda_device` to get the function that `torch.load()` needs.
    """

    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:
        if cuda_device >= 0:
            return storage.cuda(cuda_device)
        else:
            return storage

    return inner_device_mapping 
Author: allenai | Project: allennlp | Code lines: 16 | Source: util.py
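A hedged usage sketch (the filename is illustrative): pass the returned closure to torch.load; cuda_device=-1 maps every storage to the CPU, while a non-negative index moves everything onto that GPU:

import torch

model_state = torch.load('model.th', map_location=device_mapping(-1))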

Example 4: device_mapping

# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def device_mapping(cuda_device: int):
    """
    In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
    you have to supply a `map_location` function. Call this with
    the desired `cuda_device` to get the function that `torch.load()` needs.
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:  # pylint: disable=unused-argument
        if cuda_device >= 0:
            return storage.cuda(cuda_device)
        else:
            return storage
    return inner_device_mapping 
Author: jcyk | Project: gtos | Code lines: 14 | Source: nn.py

Example 5: __init__

# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
# (this example also relies on the _DenseLayer and _SharedAllocation helpers
#  defined alongside it in the project)
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, storage_size=1024):
    input_storage_1 = torch.Storage(storage_size)
    input_storage_2 = torch.Storage(storage_size)
    self.final_num_features = num_input_features + (growth_rate * num_layers)
    self.shared_allocation_1 = _SharedAllocation(input_storage_1)
    self.shared_allocation_2 = _SharedAllocation(input_storage_2)

    super(_DenseBlock, self).__init__()
    for i in range(num_layers):
        layer = _DenseLayer(self.shared_allocation_1, self.shared_allocation_2,
                            num_input_features + i * growth_rate,
                            growth_rate, bn_size, drop_rate)
        self.add_module('denselayer%d' % (i + 1), layer)
Author: phybrain | Project: efficientdensenet_crnn | Code lines: 14 | Source: efficient_densecrnn.py
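A hedged instantiation sketch, using the constructor signature above (the argument values are illustrative only):

block = _DenseBlock(num_layers=4, num_input_features=64,
                    bn_size=4, growth_rate=32, drop_rate=0.0)
# final_num_features = 64 + 32 * 4 = 192: each of the 4 layers appends
# growth_rate channels to the block's concatenated feature map.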

Example 6: __init__

# Required module: import torch [as alias]
# Or: from torch import Storage [as alias]
def __init__(self, size):
    self._cpu_storage = torch.Storage(size)
    self._gpu_storages = []
    if torch.cuda.is_available():
        for device_idx in range(torch.cuda.device_count()):
            with torch.cuda.device(device_idx):
                self._gpu_storages.append(torch.Storage(size).cuda())
Author: ebagdasa | Project: backdoor_federated_learning | Code lines: 9 | Source: dense_efficient.py
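This constructor pre-allocates one buffer on the CPU and one on every visible GPU, so later callers can grab whichever buffer matches their tensor's device. A hedged sketch of that lookup follows; the accessor function is an assumption based on the efficient-DenseNet code this file derives from, not part of the example above:

import torch

def storage_for(alloc, tensor):
    # Hypothetical accessor: pick the pre-allocated buffer that lives on
    # the same device as `tensor`.
    if tensor.is_cuda:
        return alloc._gpu_storages[tensor.get_device()]
    return alloc._cpu_storage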


Note: the torch.Storage examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects; copyright remains with the original authors. Follow each project's license when using or redistributing the code, and do not republish without permission.