This article collects and summarizes typical usage examples of the torch.short method in Python. If you have been wondering what exactly torch.short does, how to use it, or where to find examples of it, the curated code samples here may help. You can also explore further usage examples from the torch module it belongs to.
The sections below present 6 code examples of the torch.short method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
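As background for the examples below, torch.short is simply PyTorch's alias for the 16-bit signed integer dtype torch.int16. A minimal sketch of typical usage (not taken from any of the snippets below):

import torch

# torch.short and torch.int16 name the same dtype
assert torch.short == torch.int16

x = torch.tensor([1, 2, 3], dtype=torch.short)  # construct directly as int16
y = torch.zeros(4).short()                      # Tensor.short() casts to int16
print(x.dtype, y.dtype)                         # torch.int16 torch.int16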
Example 1: torch_dtype_to_np_dtype
# Required module: import torch [as alias]
# Or: from torch import short [as alias]
# (this snippet also needs: import numpy as np)
def torch_dtype_to_np_dtype(dtype):
    dtype_dict = {
        torch.bool: np.dtype(np.bool_),  # np.bool_: the np.bool alias was removed in NumPy 1.24
        torch.uint8: np.dtype(np.uint8),
        torch.int8: np.dtype(np.int8),
        torch.int16: np.dtype(np.int16),
        torch.short: np.dtype(np.int16),
        torch.int32: np.dtype(np.int32),
        torch.int: np.dtype(np.int32),
        torch.int64: np.dtype(np.int64),
        torch.long: np.dtype(np.int64),
        torch.float16: np.dtype(np.float16),
        torch.half: np.dtype(np.float16),
        torch.float32: np.dtype(np.float32),
        torch.float: np.dtype(np.float32),
        torch.float64: np.dtype(np.float64),
        torch.double: np.dtype(np.float64),
    }
    return dtype_dict[dtype]
# ---------------------- InferenceEngine internal types ------------------------
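A hypothetical call, just to illustrate the mapping above (not part of the original snippet): torch.short resolves to NumPy's int16.

import numpy as np
import torch

print(torch_dtype_to_np_dtype(torch.short))   # int16
print(torch_dtype_to_np_dtype(torch.double))  # float64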
Example 2: test_canonical_heat_type
# Required module: import torch [as alias]
# Or: from torch import short [as alias]
def test_canonical_heat_type(self):
    self.assertEqual(ht.core.types.canonical_heat_type(ht.float32), ht.float32)
    self.assertEqual(ht.core.types.canonical_heat_type("?"), ht.bool)
    self.assertEqual(ht.core.types.canonical_heat_type(int), ht.int32)
    self.assertEqual(ht.core.types.canonical_heat_type("u1"), ht.uint8)
    self.assertEqual(ht.core.types.canonical_heat_type(np.int8), ht.int8)
    self.assertEqual(ht.core.types.canonical_heat_type(torch.short), ht.int16)
    with self.assertRaises(TypeError):
        ht.core.types.canonical_heat_type({})
    with self.assertRaises(TypeError):
        ht.core.types.canonical_heat_type(object)
    with self.assertRaises(TypeError):
        ht.core.types.canonical_heat_type(1)
    with self.assertRaises(TypeError):
        ht.core.types.canonical_heat_type("i7")
Example 3: test_int16
# Required module: import torch [as alias]
# Or: from torch import short [as alias]
def test_int16(self):
    self.assert_is_instantiable_heat_type(ht.int16, torch.int16)
    self.assert_is_instantiable_heat_type(ht.short, torch.int16)
Example 4: short
# Required module: import torch [as alias]
# Or: from torch import short [as alias]
def short(self):
    return self.type_as(
        torch.tensor(0, dtype=torch.short, device=self.device()))
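For reference, the plain-tensor analogue of the method above is a type_as cast against a zero-dimensional torch.short tensor; this sketch assumes an ordinary torch.Tensor rather than whatever wrapper class the snippet belongs to:

import torch

x = torch.arange(4, dtype=torch.float32)
ref = torch.tensor(0, dtype=torch.short, device=x.device)
print(x.type_as(ref).dtype)  # torch.int16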
Example 5: __init__
# Required module: import torch [as alias]
# Or: from torch import short [as alias]
# (this snippet also needs: import collections)
def __init__(
    self,
    linear_types=(torch.nn.Linear, torch.nn.Bilinear,),
    convolution_types=(torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d,),
    linear_inputs=None,
    linear_outputs=None,
    convolution_inputs=None,
    convolution_outputs=None,
    float_types=(torch.half,),
    integer_types=(torch.short,),
):
    self.linear_types = linear_types
    self.convolution_types = convolution_types

    if linear_inputs is None:
        self.linear_inputs = collections.defaultdict(lambda: ("in_features",))
        self.linear_inputs[torch.nn.Bilinear] = ("in_features1", "in_features2")
    else:
        self.linear_inputs = linear_inputs

    if linear_outputs is None:
        self.linear_outputs = collections.defaultdict(lambda: ("out_features",))
    else:
        self.linear_outputs = linear_outputs

    if convolution_inputs is None:
        self.convolution_inputs = collections.defaultdict(lambda: ("in_channels",))
    else:
        self.convolution_inputs = convolution_inputs

    if convolution_outputs is None:
        self.convolution_outputs = collections.defaultdict(
            lambda: ("out_channels",)
        )
    else:
        self.convolution_outputs = convolution_outputs

    self.float_types = float_types
    self.integer_types = integer_types
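The defaultdict pattern above means unknown module types fall back to a single attribute name, while torch.nn.Bilinear gets an explicit override. A small stand-alone illustration of that design choice:

import collections
import torch

linear_inputs = collections.defaultdict(lambda: ("in_features",))
linear_inputs[torch.nn.Bilinear] = ("in_features1", "in_features2")

print(linear_inputs[torch.nn.Linear])    # ('in_features',)  -- default factory kicks in
print(linear_inputs[torch.nn.Bilinear])  # ('in_features1', 'in_features2')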
Example 6: tips
# Required module: import torch [as alias]
# Or: from torch import short [as alias]
def tips(self, module: torch.nn.Module) -> str:
    r"""**Return** `str` **representation of** `modules()` **method.**

    It is advised to use this function to get tips in order to easily fix
    possible performance issues related to Tensor Cores.

    Parameters
    ----------
    module : torch.nn.Module
        Module to be scanned

    Returns
    -------
    str
        String representing tips related to Tensor Cores.

    """
    data = self.modules(module)

    def types():
        _types = data["type"]

        def parse_type(is_float: bool, goal):
            key = "float" if is_float else "integer"
            if _types[key]:
                return "\nModules where {} type is not {}:\n".format(
                    key, goal
                ) + str(_types[key])
            return ""

        return parse_type(True, "torch.half") + parse_type(False, "torch.short")

    def shape():
        def parse_shape(dictionary, is_input: bool, goal):
            key = "inputs" if is_input else "outputs"
            if dictionary[key]:
                return "\nModules where {} shape should be divisible by {}:\n".format(
                    key, goal
                ) + str(
                    dictionary[key]
                )
            return ""

        _shapes = data["shape"]

        def floating():
            _floats = _shapes["float"]
            return parse_shape(_floats, True, 8) + parse_shape(_floats, False, 8)

        def integer():
            _integers = _shapes["integer"]
            return parse_shape(_integers, True, 16) + parse_shape(
                _integers, False, 16
            )

        return floating() + integer()

    output = types() + shape()
    if output != "":
        output = "TensorCores incompatible modules:" + output
    return output
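A hedged usage sketch, assuming the tips() and __init__ snippets above come from the same analyzer class (here called Analyzer purely as a placeholder) and that its modules() method returns the "type"/"shape" dictionary the code indexes into:

import torch

model = torch.nn.Sequential(
    torch.nn.Linear(100, 30),  # float32 weights and out_features=30: both likely flagged
    torch.nn.ReLU(),
    torch.nn.Linear(30, 10),
)
analyzer = Analyzer()        # placeholder name, see note above
print(analyzer.tips(model))  # e.g. "TensorCores incompatible modules: ..."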