This article collects typical usage examples of the torch.short method in Python. If you are unsure what torch.short does or how to use it, the curated code samples below may help. You can also explore further usage examples from the torch module itself.
The following presents 6 code examples of torch.short, sorted by popularity by default.
Example 1: torch_dtype_to_np_dtype
# Required module: import torch [as alias]
# Or alternatively: from torch import short [as alias]
# Also required: import numpy as np
def torch_dtype_to_np_dtype(dtype):
    # Maps a torch dtype (including aliases such as torch.short == torch.int16)
    # to the corresponding NumPy dtype.
    dtype_dict = {
        torch.bool    : np.dtype(np.bool_),  # np.bool was removed in NumPy >= 1.24
        torch.uint8   : np.dtype(np.uint8),
        torch.int8    : np.dtype(np.int8),
        torch.int16   : np.dtype(np.int16),
        torch.short   : np.dtype(np.int16),
        torch.int32   : np.dtype(np.int32),
        torch.int     : np.dtype(np.int32),
        torch.int64   : np.dtype(np.int64),
        torch.long    : np.dtype(np.int64),
        torch.float16 : np.dtype(np.float16),
        torch.half    : np.dtype(np.float16),
        torch.float32 : np.dtype(np.float32),
        torch.float   : np.dtype(np.float32),
        torch.float64 : np.dtype(np.float64),
        torch.double  : np.dtype(np.float64),
    }
    return dtype_dict[dtype]
# ---------------------- InferenceEngine internal types ------------------------
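A minimal usage sketch (not part of the original snippet), assuming the function above is in scope and torch and numpy are imported as noted:

import numpy as np
import torch

print(torch_dtype_to_np_dtype(torch.short))    # int16
print(torch_dtype_to_np_dtype(torch.float32))  # float32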
Example 2: test_canonical_heat_type
# Required module: import torch [as alias]
# Or alternatively: from torch import short [as alias]
def test_canonical_heat_type(self):
    self.assertEqual(ht.core.types.canonical_heat_type(ht.float32), ht.float32)
    self.assertEqual(ht.core.types.canonical_heat_type("?"), ht.bool)
    self.assertEqual(ht.core.types.canonical_heat_type(int), ht.int32)
    self.assertEqual(ht.core.types.canonical_heat_type("u1"), ht.uint8)
    self.assertEqual(ht.core.types.canonical_heat_type(np.int8), ht.int8)
    self.assertEqual(ht.core.types.canonical_heat_type(torch.short), ht.int16)
    with self.assertRaises(TypeError):
        ht.core.types.canonical_heat_type({})
    with self.assertRaises(TypeError):
        ht.core.types.canonical_heat_type(object)
    with self.assertRaises(TypeError):
        ht.core.types.canonical_heat_type(1)
    with self.assertRaises(TypeError):
        ht.core.types.canonical_heat_type("i7")
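The behaviour exercised by this test can be reproduced directly; a small sketch, assuming Heat is installed and imported as ht:

import heat as ht
import torch

# canonical_heat_type resolves NumPy/torch/builtin type specifiers to Heat types,
# so torch.short (a 16-bit signed integer) maps to ht.int16.
heat_type = ht.core.types.canonical_heat_type(torch.short)
assert heat_type == ht.int16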
Example 3: test_int16
# Required module: import torch [as alias]
# Or alternatively: from torch import short [as alias]
def test_int16(self):
    self.assert_is_instantiable_heat_type(ht.int16, torch.int16)
    self.assert_is_instantiable_heat_type(ht.short, torch.int16)
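A brief sketch of the aliasing this test verifies (an illustration, assuming Heat is importable as ht):

import heat as ht
import torch

# ht.short is an alias for ht.int16, which is backed by torch.int16.
x = ht.zeros((2, 2), dtype=ht.short)
print(x.dtype)  # the Heat int16 type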
Example 4: short
# Required module: import torch [as alias]
# Or alternatively: from torch import short [as alias]
def short(self):
    # Cast to 16-bit signed integers; torch.short is an alias for torch.int16.
    return self.type_as(
        torch.tensor(0, dtype=torch.short, device=self.device())
    )
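The same cast pattern can be sketched with plain torch tensors (an illustration only, not the original wrapper class):

import torch

x = torch.randn(3)
proto = torch.tensor(0, dtype=torch.short, device=x.device)
x_short = x.type_as(proto)   # same effect as x.short() or x.to(torch.short)
print(x_short.dtype)         # torch.int16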
Example 5: __init__
# Required module: import torch [as alias]
# Or alternatively: from torch import short [as alias]
# Also required: import collections
def __init__(
    self,
    linear_types=(torch.nn.Linear, torch.nn.Bilinear),
    convolution_types=(torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d),
    linear_inputs=None,
    linear_outputs=None,
    convolution_inputs=None,
    convolution_outputs=None,
    float_types=(torch.half,),
    integer_types=(torch.short,),
):
    self.linear_types = linear_types
    self.convolution_types = convolution_types

    if linear_inputs is None:
        self.linear_inputs = collections.defaultdict(lambda: ("in_features",))
        self.linear_inputs[torch.nn.Bilinear] = ("in_features1", "in_features2")
    else:
        self.linear_inputs = linear_inputs

    if linear_outputs is None:
        self.linear_outputs = collections.defaultdict(lambda: ("out_features",))
    else:
        self.linear_outputs = linear_outputs

    if convolution_inputs is None:
        self.convolution_inputs = collections.defaultdict(lambda: ("in_channels",))
    else:
        self.convolution_inputs = convolution_inputs

    if convolution_outputs is None:
        self.convolution_outputs = collections.defaultdict(lambda: ("out_channels",))
    else:
        self.convolution_outputs = convolution_outputs

    self.float_types = float_types
    self.integer_types = integer_types
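The defaultdict pattern above means unknown layer types fall back to a common attribute name while torch.nn.Bilinear gets its two input names. A small sketch of that lookup (illustrative, reusing the same names as the constructor):

import collections
import torch

linear_inputs = collections.defaultdict(lambda: ("in_features",))
linear_inputs[torch.nn.Bilinear] = ("in_features1", "in_features2")

print(linear_inputs[torch.nn.Linear])    # ('in_features',)  via the default factory
print(linear_inputs[torch.nn.Bilinear])  # ('in_features1', 'in_features2')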
Example 6: tips
# Required module: import torch [as alias]
# Or alternatively: from torch import short [as alias]
def tips(self, module: torch.nn.Module) -> str:
    r"""**Return** `str` **representation of** `modules()` **method.**

    It is advised to use this function to get tips in order to easily fix
    possible performance issues related to Tensor Cores.

    Parameters
    ----------
    module : torch.nn.Module
        Module to be scanned

    Returns
    -------
    str
        String representing tips related to Tensor Cores.

    """
    data = self.modules(module)

    def types():
        _types = data["type"]

        def parse_type(is_float: bool, goal):
            key = "float" if is_float else "integer"
            if _types[key]:
                return "\nModules where {} type is not {}:\n".format(
                    key, goal
                ) + str(_types[key])
            return ""

        return parse_type(True, "torch.half") + parse_type(False, "torch.short")

    def shape():
        def parse_shape(dictionary, is_input: bool, goal):
            key = "inputs" if is_input else "outputs"
            if dictionary[key]:
                return "\nModules where {} shape should be divisible by {}:\n".format(
                    key, goal
                ) + str(dictionary[key])
            return ""

        _shapes = data["shape"]

        def floating():
            _floats = _shapes["float"]
            return parse_shape(_floats, True, 8) + parse_shape(_floats, False, 8)

        def integer():
            _integers = _shapes["integer"]
            return parse_shape(_integers, True, 16) + parse_shape(
                _integers, False, 16
            )

        return floating() + integer()

    output = types() + shape()
    if output != "":
        output = "TensorCores incompatible modules:" + output
    return output
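A hypothetical end-to-end sketch of how these methods fit together; TensorCoresChecker is a placeholder name for the class that defines the __init__ from Example 5 and this tips method (the real class and module names are not shown in these snippets):

import torch

model = torch.nn.Sequential(
    torch.nn.Linear(100, 30),  # float32 weights and out_features not divisible by 8
    torch.nn.Linear(30, 10),
)

checker = TensorCoresChecker()  # hypothetical class combining Examples 5 and 6
print(checker.tips(model))      # prints the tips, or "" if nothing is incompatible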