

Python torch.int16 Method Code Examples

This article collects typical usage examples of torch.int16 in Python (strictly speaking, torch.int16 is a dtype attribute of the torch module rather than a method). If you have been wondering what torch.int16 is for, how to use it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the torch module.


Below are 15 code examples of torch.int16, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
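
For reference, torch.int16 is the 16-bit signed integer dtype of the torch module. A minimal sketch of constructing a tensor with it and querying its numeric limits (public torch API only):

import torch

x = torch.tensor([1, 2, 3], dtype=torch.int16)
print(x.dtype)                   # torch.int16
print(torch.iinfo(torch.int16))  # iinfo(min=-32768, max=32767, dtype=int16)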

Example 1: normalize_wav

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def normalize_wav(tensor: torch.Tensor) -> torch.Tensor:
    if tensor.dtype == torch.float32:
        pass
    elif tensor.dtype == torch.int32:
        tensor = tensor.to(torch.float32)
        tensor[tensor > 0] /= 2147483647.
        tensor[tensor < 0] /= 2147483648.
    elif tensor.dtype == torch.int16:
        tensor = tensor.to(torch.float32)
        tensor[tensor > 0] /= 32767.
        tensor[tensor < 0] /= 32768.
    elif tensor.dtype == torch.uint8:
        tensor = tensor.to(torch.float32) - 128
        tensor[tensor > 0] /= 127.
        tensor[tensor < 0] /= 128.
    return tensor 
Developer ID: pytorch, Project: audio, Lines of code: 18, Source file: wav_utils.py
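
A hypothetical usage sketch (not part of the original file), assuming normalize_wav as defined above: integer samples are mapped into the range [-1.0, 1.0].

import torch

waveform = torch.tensor([-32768, 0, 32767], dtype=torch.int16)
print(normalize_wav(waveform))  # tensor([-1., 0., 1.])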

Example 2: encode_uniform

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def encode_uniform(self, dmll, S, fout):
        """ encode coarsest scale, for which we assume a uniform prior. """
        write_shape(S.shape, fout)
        r = ArithmeticCoder(dmll.L)

        entropy_coding_bytes = 0
        with self.times.prefix_scope('uniform encode'):
            c_uniform = self._get_uniform_cdf(S.shape, dmll.L)
            for c in range(S.shape[1]):
                S_c = S[:, c, ...].to(torch.int16)
                encoded = r.range_encode(S_c, c_uniform, self.times)
                write_num_bytes_encoded(len(encoded), fout)
                entropy_coding_bytes += len(encoded)
                fout.write(encoded)

        return entropy_coding_bytes 
Developer ID: fab-jul, Project: L3C-PyTorch, Lines of code: 18, Source file: bitcoding.py

Example 3: range_decode

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def range_decode(self, encoded_bytes, cdf, time_logger: StackTimeLogger = no_op.NoOp):
        """
        :param encoded_bytes: bytes encoded by range_encode
        :param cdf: cdf to use, either a NHWLp matrix or instance of CDFOut
        :return: decoded matrix as np.int16, NHW
        """
        if isinstance(cdf, CDFOut):
            logit_probs_c_sm, means_c, log_scales_c, K, targets = cdf

            N, _, H, W = means_c.shape

            with time_logger.run('ac.decode'):
                decoded = torchac.decode_logistic_mixture(
                        targets, means_c, log_scales_c, logit_probs_c_sm, encoded_bytes)

        else:
            N, H, W, Lp = cdf.shape
            assert Lp == self.L + 1, (Lp, self.L)

            with time_logger.run('ac.decode'):
                decoded = torchac.decode_cdf(cdf, encoded_bytes)

        return decoded.reshape(N, H, W) 
Developer ID: fab-jul, Project: L3C-PyTorch, Lines of code: 25, Source file: coders.py

Example 4: torch_dtype_to_np_dtype

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def torch_dtype_to_np_dtype(dtype):
    dtype_dict = {
            torch.bool    : np.dtype(np.bool_),  # np.bool_ here: bare np.bool was removed in NumPy 1.24
            torch.uint8   : np.dtype(np.uint8),
            torch.int8    : np.dtype(np.int8),
            torch.int16   : np.dtype(np.int16),
            torch.short   : np.dtype(np.int16),
            torch.int32   : np.dtype(np.int32),
            torch.int     : np.dtype(np.int32),
            torch.int64   : np.dtype(np.int64),
            torch.long    : np.dtype(np.int64),
            torch.float16 : np.dtype(np.float16),
            torch.half    : np.dtype(np.float16),
            torch.float32 : np.dtype(np.float32),
            torch.float   : np.dtype(np.float32),
            torch.float64 : np.dtype(np.float64),
            torch.double  : np.dtype(np.float64),
            }
    return dtype_dict[dtype]


# ---------------------- InferenceEngine internal types ------------------------ 
Developer ID: pfnet-research, Project: chainer-compiler, Lines of code: 24, Source file: types.py
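
A quick sanity check of the mapping (hypothetical usage, not part of the original file). Note that torch.short is the same object as torch.int16, so the dict's duplicate keys are harmless:

import numpy as np
import torch

assert torch_dtype_to_np_dtype(torch.int16) == np.dtype(np.int16)
assert torch.short is torch.int16  # dtype aliases resolve to the same dict entry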

Example 5: update_dtype

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def update_dtype(self, old_dtype):
        updated = {}
        for k, v in old_dtype.items():
            if v == np.float32:
                dt = torch.float32
            elif v == np.float64:
                dt = torch.float64
            elif v == np.float16:
                dt = torch.float16
            elif v == np.uint8:
                dt = torch.uint8
            elif v == np.int8:
                dt = torch.int8
            elif v == np.int16:
                dt = torch.int16
            elif v == np.int32:
                dt = torch.int32
            elif v == np.int64:
                dt = torch.int64
            else:
                raise ValueError("Unsupported dtype {}".format(v))
            updated[k] = dt
        return updated 
Developer ID: heronsystems, Project: adeptRL, Lines of code: 25, Source file: ops.py
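
The branching above amounts to a NumPy-to-torch lookup table. A minimal standalone sketch (a rewrite for illustration, not the adeptRL API), assuming the dict values are NumPy scalar types as in the method:

import numpy as np
import torch

# Same mapping as the if/elif chain above, expressed as a dict
NP_TO_TORCH = {
    np.float32: torch.float32, np.float64: torch.float64, np.float16: torch.float16,
    np.uint8: torch.uint8, np.int8: torch.int8, np.int16: torch.int16,
    np.int32: torch.int32, np.int64: torch.int64,
}

old_dtype = {"observation": np.int16, "reward": np.float32}
updated = {k: NP_TO_TORCH[v] for k, v in old_dtype.items()}
print(updated)  # {'observation': torch.int16, 'reward': torch.float32}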

Example 6: sanitize_infinity

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def sanitize_infinity(dtype):
    """
    Returns largest possible value for the specified dtype.

    Parameters:
    -----------
    dtype: torch dtype

    Returns:
    --------
    large_enough: largest possible value for the given dtype
    """
    if dtype is torch.int8:
        large_enough = (1 << 7) - 1
    elif dtype is torch.int16:
        large_enough = (1 << 15) - 1
    elif dtype is torch.int32:
        large_enough = (1 << 31) - 1
    elif dtype is torch.int64:
        large_enough = (1 << 63) - 1
    else:
        large_enough = float("inf")

    return large_enough 
Developer ID: helmholtz-analytics, Project: heat, Lines of code: 26, Source file: constants.py
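
A hypothetical check (not from the heat test suite), assuming sanitize_infinity as defined above; for integer dtypes, torch.iinfo yields the same bound:

import torch

assert sanitize_infinity(torch.int16) == 32767
assert sanitize_infinity(torch.int16) == torch.iinfo(torch.int16).max
assert sanitize_infinity(torch.float32) == float("inf")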

Example 7: test_canonical_heat_type

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def test_canonical_heat_type(self):
        self.assertEqual(ht.core.types.canonical_heat_type(ht.float32), ht.float32)
        self.assertEqual(ht.core.types.canonical_heat_type("?"), ht.bool)
        self.assertEqual(ht.core.types.canonical_heat_type(int), ht.int32)
        self.assertEqual(ht.core.types.canonical_heat_type("u1"), ht.uint8)
        self.assertEqual(ht.core.types.canonical_heat_type(np.int8), ht.int8)
        self.assertEqual(ht.core.types.canonical_heat_type(torch.short), ht.int16)

        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type({})
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type(object)
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type(1)
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type("i7") 
Developer ID: helmholtz-analytics, Project: heat, Lines of code: 18, Source file: test_types.py

Example 8: pytorch_dtype_to_type

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def pytorch_dtype_to_type(dtype):
    """Map a pytorch dtype to a myia type."""
    import torch

    _type_map = {
        torch.int8: Int[8],
        torch.int16: Int[16],
        torch.int32: Int[32],
        torch.int64: Int[64],
        torch.uint8: UInt[8],
        torch.float16: Float[16],
        torch.float32: Float[32],
        torch.float64: Float[64],
        torch.bool: Bool,
    }
    if dtype not in _type_map:
        raise TypeError(f"Unsupported dtype {dtype}")
    return _type_map[dtype] 
Developer ID: mila-iqia, Project: myia, Lines of code: 20, Source file: pytorch_abstract_types.py

Example 9: _convert_dtype_value

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def _convert_dtype_value(val):
    """converts a PyTorch the PyTorch numeric type id to a torch scalar type."""
    convert_torch_dtype_map = {7:"torch.float64",
                               6:"torch.float32",
                               5:"torch.float16",
                               4:"torch.int64",
                               3:"torch.int32",
                               2:"torch.int16",
                               1:"torch.int8",
                               0:"torch.unit8",
                               None:"torch.int64"} # Default is torch.int64
    if val in convert_torch_dtype_map:
        return _convert_data_type(convert_torch_dtype_map[val])
    else:
        msg = "Torch data type value %d is not handled yet." % (val)
        raise NotImplementedError(msg) 
Developer ID: apache, Project: incubator-tvm, Lines of code: 18, Source file: pytorch.py

Example 10: _create_typed_const

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def _create_typed_const(data, dtype):
    """create a (scalar) constant of given value and dtype.
       dtype should be a TVM dtype"""

    if dtype == "float64":
        typed_data = _expr.const(np.float64(data), dtype=dtype)
    elif dtype == "float32":
        typed_data = _expr.const(np.float32(data), dtype=dtype)
    elif dtype == "float16":
        typed_data = _expr.const(np.float16(data), dtype=dtype)
    elif dtype == "int64":
        typed_data = _expr.const(np.int64(data), dtype=dtype)
    elif dtype == "int32":
        typed_data = _expr.const(np.int32(data), dtype=dtype)
    elif dtype == "int16":
        typed_data = _expr.const(np.int16(data), dtype=dtype)
    elif dtype == "int8":
        typed_data = _expr.const(np.int8(data), dtype=dtype)
    elif dtype == "uint8":
        typed_data = _expr.const(np.uint8(data), dtype=dtype)
    else:
        raise NotImplementedError("input_type {} is not handled yet".format(dtype))
    return typed_data 
Developer ID: apache, Project: incubator-tvm, Lines of code: 25, Source file: pytorch.py

Example 11: _generate

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def _generate(num_channels, compression_level, bitrate):
    org_path = 'original.wav'
    ops_path = f'{bitrate}_{compression_level}_{num_channels}ch.opus'

    # Note: ffmpeg forces a 48 kHz sample rate for opus https://stackoverflow.com/a/39186779
    # 1. generate original wav
    data = torch.linspace(-32768, 32767, 32768, dtype=torch.int16).repeat([num_channels, 1]).t()
    scipy.io.wavfile.write(org_path, 48000, data.numpy())
    # 2. convert to opus
    convert_to_opus(org_path, ops_path, bitrate=bitrate, compression_level=compression_level) 
Developer ID: pytorch, Project: audio, Lines of code: 12, Source file: generate_opus.py
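
The sample-generation step above runs on its own without ffmpeg or convert_to_opus; a minimal sketch of just the torch part, producing a full-range int16 ramp in (time, channels) layout:

import torch

num_channels = 2
data = torch.linspace(-32768, 32767, 32768, dtype=torch.int16).repeat([num_channels, 1]).t()
print(data.shape, data.dtype)  # torch.Size([32768, 2]) torch.int16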

Example 12: encode_cdf

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def encode_cdf(cdf, sym):
    """
    :param cdf: CDF as 1HWLp, as int16, on CPU!
    :param sym: the symbols to encode, as int16, on CPU
    :return: byte-string, encoding `sym`
    """
    if cdf.is_cuda or sym.is_cuda:
        raise ValueError('CDF and symbols must be on CPU for `encode_cdf`')
    # encode_cdf is defined in both backends, so doesn't matter which one we use!
    return any_backend.encode_cdf(cdf, sym) 
Developer ID: fab-jul, Project: L3C-PyTorch, Lines of code: 12, Source file: torchac.py

Example 13: decode_cdf

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def decode_cdf(cdf, input_string):
    """
    :param cdf: CDF as 1HWLp, as int16, on CPU
    :param input_string: byte-string, encoding some symbols `sym`.
    :return: decoded `sym`.
    """
    if cdf.is_cuda:
        raise ValueError('CDF must be on CPU for `decode_cdf`')
    # decode_cdf is defined in both backends, so it doesn't matter which one we use!
    return any_backend.decode_cdf(cdf, input_string) 
Developer ID: fab-jul, Project: L3C-PyTorch, Lines of code: 12, Source file: torchac.py

Example 14: _renorm_cast_cdf_

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def _renorm_cast_cdf_(cdf, precision):
    Lp = cdf.shape[-1]
    finals = 1  # NHW1
    # RENORMALIZATION_FACTOR in cuda
    f = torch.tensor(2, dtype=torch.float32, device=cdf.device).pow_(precision)
    cdf = cdf.mul((f - (Lp - 1)) / finals)  # TODO
    cdf = cdf.round()
    cdf = cdf.to(dtype=torch.int16, non_blocking=True)
    r = torch.arange(Lp, dtype=torch.int16, device=cdf.device)
    cdf.add_(r)
    return cdf 
Developer ID: fab-jul, Project: L3C-PyTorch, Lines of code: 13, Source file: torchac.py
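
A hypothetical call (not from the original file), assuming _renorm_cast_cdf_ as defined above: a float CDF of shape NHWLp is scaled by 2**precision, rounded, cast to int16 (values above 32767 wrap around in the cast), and offset by the bin index:

import torch

cdf = torch.linspace(0, 1, steps=9).repeat(1, 2, 2, 1)  # NHWLp = 1 x 2 x 2 x 9
cdf_int = _renorm_cast_cdf_(cdf, precision=16)
print(cdf_int.shape, cdf_int.dtype)  # torch.Size([1, 2, 2, 9]) torch.int16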

Example 15: encode_scale

# Required module: import torch [as alias]
# Or: from torch import int16 [as alias]
def encode_scale(self, scale, dmll, out, img, fout):
        """ Encode scale `scale`. """
        l = out.P[scale]
        bn = out.bn[scale] if scale != 0 else img
        S = out.S[scale]

        # shape used for all!
        write_shape(S.shape, fout)
        overhead_bytes = 5
        overhead_bytes += 4 * S.shape[1]

        r = ArithmeticCoder(dmll.L)

        # We encode channel by channel, because that's what's needed for the RGB scale. For s > 0, this could be done
        # in parallel for all channels
        def encoder(c, C_cur):
            S_c = S[:, c, ...].to(torch.int16)
            encoded = r.range_encode(S_c, cdf=C_cur, time_logger=self.times)
            write_num_bytes_encoded(len(encoded), fout)
            fout.write(encoded)
            # yielding always bottleneck and extra_info
            return bn[:, c, ...], len(encoded)

        with self.times.prefix_scope('encode scale'):
            with self.times.run('total'):
                _, entropy_coding_bytes_per_c = \
                    self.code_with_cdf(l, bn.shape, encoder, dmll)

        # --- cleanup
        out.P[scale] = None
        out.bn[scale] = None
        out.S[scale] = None
        # ---

        return sum(entropy_coding_bytes_per_c) 
Developer ID: fab-jul, Project: L3C-PyTorch, Lines of code: 37, Source file: bitcoding.py


Note: The torch.int16 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not reproduce without permission.