

Python torch.int16 Code Examples

This article collects typical usage examples of torch.int16 in Python (strictly speaking a dtype rather than a method). If you are wondering how to use torch.int16, what it does, or what real code that uses it looks like, the curated examples below may help. You can also explore other usage examples from the torch package.


The following presents 15 code examples of torch.int16, sorted by popularity by default.

Example 1: normalize_wav

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def normalize_wav(tensor: torch.Tensor) -> torch.Tensor:
    if tensor.dtype == torch.float32:
        pass
    elif tensor.dtype == torch.int32:
        tensor = tensor.to(torch.float32)
        tensor[tensor > 0] /= 2147483647.
        tensor[tensor < 0] /= 2147483648.
    elif tensor.dtype == torch.int16:
        tensor = tensor.to(torch.float32)
        tensor[tensor > 0] /= 32767.
        tensor[tensor < 0] /= 32768.
    elif tensor.dtype == torch.uint8:
        tensor = tensor.to(torch.float32) - 128
        tensor[tensor > 0] /= 127.
        tensor[tensor < 0] /= 128.
    return tensor 
Developer: pytorch | Project: audio | Lines: 18 | Source: wav_utils.py
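
A quick sanity check of the int16 branch (a hypothetical usage sketch, not part of the torchaudio source): full-scale int16 samples should map to roughly [-1.0, 1.0].

import torch

samples = torch.tensor([-32768, 0, 32767], dtype=torch.int16)
normalized = normalize_wav(samples)
print(normalized.dtype)  # torch.float32
print(normalized)        # tensor([-1., 0., 1.])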

Example 2: encode_uniform

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def encode_uniform(self, dmll, S, fout):
        """ encode coarsest scale, for which we assume a uniform prior. """
        write_shape(S.shape, fout)
        r = ArithmeticCoder(dmll.L)

        entropy_coding_bytes = 0
        with self.times.prefix_scope('uniform encode'):
            c_uniform = self._get_uniform_cdf(S.shape, dmll.L)
            for c in range(S.shape[1]):
                S_c = S[:, c, ...].to(torch.int16)
                encoded = r.range_encode(S_c, c_uniform, self.times)
                write_num_bytes_encoded(len(encoded), fout)
                entropy_coding_bytes += len(encoded)
                fout.write(encoded)

        return entropy_coding_bytes 
Developer: fab-jul | Project: L3C-PyTorch | Lines: 18 | Source: bitcoding.py

Example 3: range_decode

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def range_decode(self, encoded_bytes, cdf, time_logger: StackTimeLogger = no_op.NoOp):
        """
        :param encoded_bytes: bytes encoded by range_encode
        :param cdf: cdf to use, either a NHWLp matrix or instance of CDFOut
        :return: decoded matrix as np.int16, NHW
        """
        if isinstance(cdf, CDFOut):
            logit_probs_c_sm, means_c, log_scales_c, K, targets = cdf

            N, _, H, W = means_c.shape

            with time_logger.run('ac.decode'):
                decoded = torchac.decode_logistic_mixture(
                        targets, means_c, log_scales_c, logit_probs_c_sm, encoded_bytes)

        else:
            N, H, W, Lp = cdf.shape
            assert Lp == self.L + 1, (Lp, self.L)

            with time_logger.run('ac.decode'):
                decoded = torchac.decode_cdf(cdf, encoded_bytes)

        return decoded.reshape(N, H, W) 
Developer: fab-jul | Project: L3C-PyTorch | Lines: 25 | Source: coders.py

Example 4: torch_dtype_to_np_dtype

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def torch_dtype_to_np_dtype(dtype):
    dtype_dict = {
            torch.bool    : np.dtype(np.bool_),
            torch.uint8   : np.dtype(np.uint8),
            torch.int8    : np.dtype(np.int8),
            torch.int16   : np.dtype(np.int16),
            torch.short   : np.dtype(np.int16),
            torch.int32   : np.dtype(np.int32),
            torch.int     : np.dtype(np.int32),
            torch.int64   : np.dtype(np.int64),
            torch.long    : np.dtype(np.int64),
            torch.float16 : np.dtype(np.float16),
            torch.half    : np.dtype(np.float16),
            torch.float32 : np.dtype(np.float32),
            torch.float   : np.dtype(np.float32),
            torch.float64 : np.dtype(np.float64),
            torch.double  : np.dtype(np.float64),
            }
    return dtype_dict[dtype]


# ---------------------- InferenceEngine internal types ------------------------ 
Developer: pfnet-research | Project: chainer-compiler | Lines: 24 | Source: types.py
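
A minimal usage sketch of the lookup (assuming the table above is in scope). Note that torch.short is just an alias for torch.int16, so both keys resolve to np.int16.

import numpy as np
import torch

assert torch_dtype_to_np_dtype(torch.int16) == np.dtype(np.int16)
assert torch.short == torch.int16  # alias, same dictionary entry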

Example 5: update_dtype

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def update_dtype(self, old_dtype):
        updated = {}
        for k, v in old_dtype.items():
            if v == np.float32:
                dt = torch.float32
            elif v == np.float64:
                dt = torch.float64
            elif v == np.float16:
                dt = torch.float16
            elif v == np.uint8:
                dt = torch.uint8
            elif v == np.int8:
                dt = torch.int8
            elif v == np.int16:
                dt = torch.int16
            elif v == np.int32:
                dt = torch.int32
            elif v == np.int64:
                dt = torch.int64
            else:
                raise ValueError("Unsupported dtype {}".format(v))
            updated[k] = dt
        return updated 
Developer: heronsystems | Project: adeptRL | Lines: 25 | Source: ops.py
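
For reference, the same numpy-to-torch dtype mapping can be recovered without a hand-written chain of elif branches, since torch.from_numpy preserves the dtype. A small sketch (not part of the adeptRL source):

import numpy as np
import torch

# An empty array is enough to probe the dtype mapping.
assert torch.from_numpy(np.empty(0, dtype=np.int16)).dtype == torch.int16
assert torch.from_numpy(np.empty(0, dtype=np.float32)).dtype == torch.float32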

Example 6: sanitize_infinity

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def sanitize_infinity(dtype):
    """
    Returns largest possible value for the specified dtype.

    Parameters:
    -----------
    dtype: torch dtype

    Returns:
    --------
    large_enough: largest possible value for the given dtype
    """
    if dtype is torch.int8:
        large_enough = (1 << 7) - 1
    elif dtype is torch.int16:
        large_enough = (1 << 15) - 1
    elif dtype is torch.int32:
        large_enough = (1 << 31) - 1
    elif dtype is torch.int64:
        large_enough = (1 << 63) - 1
    else:
        large_enough = float("inf")

    return large_enough 
Developer: helmholtz-analytics | Project: heat | Lines: 26 | Source: constants.py
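
A quick check of the int16 branch (hypothetical usage): the returned value matches what torch.iinfo reports.

import torch

assert sanitize_infinity(torch.int16) == (1 << 15) - 1  # 32767
assert sanitize_infinity(torch.int16) == torch.iinfo(torch.int16).max
assert sanitize_infinity(torch.float32) == float("inf")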

Example 7: test_canonical_heat_type

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def test_canonical_heat_type(self):
        self.assertEqual(ht.core.types.canonical_heat_type(ht.float32), ht.float32)
        self.assertEqual(ht.core.types.canonical_heat_type("?"), ht.bool)
        self.assertEqual(ht.core.types.canonical_heat_type(int), ht.int32)
        self.assertEqual(ht.core.types.canonical_heat_type("u1"), ht.uint8)
        self.assertEqual(ht.core.types.canonical_heat_type(np.int8), ht.int8)
        self.assertEqual(ht.core.types.canonical_heat_type(torch.short), ht.int16)

        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type({})
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type(object)
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type(1)
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type("i7") 
Developer: helmholtz-analytics | Project: heat | Lines: 18 | Source: test_types.py

Example 8: pytorch_dtype_to_type

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def pytorch_dtype_to_type(dtype):
    """Map a pytorch dtype to a myia type."""
    import torch

    _type_map = {
        torch.int8: Int[8],
        torch.int16: Int[16],
        torch.int32: Int[32],
        torch.int64: Int[64],
        torch.uint8: UInt[8],
        torch.float16: Float[16],
        torch.float32: Float[32],
        torch.float64: Float[64],
        torch.bool: Bool,
    }
    if dtype not in _type_map:
        raise TypeError(f"Unsupported dtype {dtype}")
    return _type_map[dtype] 
Developer: mila-iqia | Project: myia | Lines: 20 | Source: pytorch_abstract_types.py
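
Hypothetical usage, assuming myia's Int, UInt, Float, and Bool types are imported as in the mapping above:

import torch

assert pytorch_dtype_to_type(torch.int16) == Int[16]
# Anything missing from the table raises TypeError, e.g. a complex dtype.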

Example 9: _convert_dtype_value

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def _convert_dtype_value(val):
    """converts a PyTorch the PyTorch numeric type id to a torch scalar type."""
    convert_torch_dtype_map = {7:"torch.float64",
                               6:"torch.float32",
                               5:"torch.float16",
                               4:"torch.int64",
                               3:"torch.int32",
                               2:"torch.int16",
                               1:"torch.int8",
                               0:"torch.unit8",
                               None:"torch.int64"} # Default is torch.int64
    if val in convert_torch_dtype_map:
        return _convert_data_type(convert_torch_dtype_map[val])
    else:
        msg = "Torch data type value %d is not handled yet." % (val)
        raise NotImplementedError(msg) 
Developer: apache | Project: incubator-tvm | Lines: 18 | Source: pytorch.py

Example 10: _create_typed_const

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def _create_typed_const(data, dtype):
    """create a (scalar) constant of given value and dtype.
       dtype should be a TVM dtype"""

    if dtype == "float64":
        typed_data = _expr.const(np.float64(data), dtype=dtype)
    elif dtype == "float32":
        typed_data = _expr.const(np.float32(data), dtype=dtype)
    elif dtype == "float16":
        typed_data = _expr.const(np.float16(data), dtype=dtype)
    elif dtype == "int64":
        typed_data = _expr.const(np.int64(data), dtype=dtype)
    elif dtype == "int32":
        typed_data = _expr.const(np.int32(data), dtype=dtype)
    elif dtype == "int16":
        typed_data = _expr.const(np.int16(data), dtype=dtype)
    elif dtype == "int8":
        typed_data = _expr.const(np.int8(data), dtype=dtype)
    elif dtype == "uint8":
        typed_data = _expr.const(np.uint8(data), dtype=dtype)
    else:
        raise NotImplementedError("input_type {} is not handled yet".format(dtype))
    return typed_data 
Developer: apache | Project: incubator-tvm | Lines: 25 | Source: pytorch.py

Example 11: _generate

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def _generate(num_channels, compression_level, bitrate):
    org_path = 'original.wav'
    ops_path = f'{bitrate}_{compression_level}_{num_channels}ch.opus'

    # Note: ffmpeg forces a 48 kHz sample rate for opus https://stackoverflow.com/a/39186779
    # 1. generate original wav
    data = torch.linspace(-32768, 32767, 32768, dtype=torch.int16).repeat([num_channels, 1]).t()
    scipy.io.wavfile.write(org_path, 48000, data.numpy())
    # 2. convert to opus
    convert_to_opus(org_path, ops_path, bitrate=bitrate, compression_level=compression_level) 
Developer: pytorch | Project: audio | Lines: 12 | Source: generate_opus.py
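
A standalone sketch of just the signal-generation step (assuming scipy is installed): torch.linspace supports integer dtypes, so this produces a full-scale int16 ramp shaped (num_samples, num_channels).

import torch
import scipy.io.wavfile

data = torch.linspace(-32768, 32767, 32768, dtype=torch.int16).repeat([2, 1]).t()
scipy.io.wavfile.write('ramp.wav', 48000, data.numpy())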

Example 12: encode_cdf

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def encode_cdf(cdf, sym):
    """
    :param cdf: CDF as 1HWLp, as int16, on CPU!
    :param sym: the symbols to encode, as int16, on CPU
    :return: byte-string, encoding `sym`
    """
    if cdf.is_cuda or sym.is_cuda:
        raise ValueError('CDF and symbols must be on CPU for `encode_cdf`')
    # encode_cdf is defined in both backends, so it doesn't matter which one we use!
    return any_backend.encode_cdf(cdf, sym) 
Developer: fab-jul | Project: L3C-PyTorch | Lines: 12 | Source: torchac.py
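
A hypothetical call shape, assuming one of the compiled torchac backends is importable as any_backend: the CDF is 1HWLp with Lp = L + 1 quantization boundaries, the symbols are 1HW, and both must be int16 tensors on the CPU.

import torch

cdf = torch.zeros(1, 4, 4, 26, dtype=torch.int16)   # 1HWLp, L = 25
sym = torch.zeros(1, 4, 4, dtype=torch.int16)       # 1HW
# byte_string = encode_cdf(cdf, sym)  # requires the compiled backend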

Example 13: decode_cdf

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def decode_cdf(cdf, input_string):
    """
    :param cdf: CDF as 1HWLp, as int16, on CPU
    :param input_string: byte-string, encoding some symbols `sym`.
    :return: decoded `sym`.
    """
    if cdf.is_cuda:
        raise ValueError('CDF must be on CPU for `decode_cdf`')
    # decode_cdf is defined in both backends, so it doesn't matter which one we use!
    return any_backend.decode_cdf(cdf, input_string) 
Developer: fab-jul | Project: L3C-PyTorch | Lines: 12 | Source: torchac.py

Example 14: _renorm_cast_cdf_

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def _renorm_cast_cdf_(cdf, precision):
    Lp = cdf.shape[-1]
    finals = 1  # NHW1
    # RENORMALIZATION_FACTOR in cuda
    f = torch.tensor(2, dtype=torch.float32, device=cdf.device).pow_(precision)
    cdf = cdf.mul((f - (Lp - 1)) / finals)  # TODO
    cdf = cdf.round()
    cdf = cdf.to(dtype=torch.int16, non_blocking=True)
    r = torch.arange(Lp, dtype=torch.int16, device=cdf.device)
    cdf.add_(r)
    return cdf 
Developer: fab-jul | Project: L3C-PyTorch | Lines: 13 | Source: torchac.py
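
To see what _renorm_cast_cdf_ produces, a toy sketch (precision 14 is chosen here only so the renormalized values stay well inside the int16 range; it is not necessarily the precision torchac uses):

import torch

# Toy CDF over L = 4 symbols: cumulative probabilities 0, .25, .5, .75, 1 (Lp = 5).
cdf = torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]).view(1, 1, 1, 5)
out = _renorm_cast_cdf_(cdf, 14)
print(out)  # tensor([[[[0, 4096, 8192, 12288, 16384]]]], dtype=torch.int16)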

Example 15: encode_scale

# Required import: import torch [as alias]
# Or: from torch import int16 [as alias]
def encode_scale(self, scale, dmll, out, img, fout):
        """ Encode scale `scale`. """
        l = out.P[scale]
        bn = out.bn[scale] if scale != 0 else img
        S = out.S[scale]

        # shape used for all!
        write_shape(S.shape, fout)
        overhead_bytes = 5
        overhead_bytes += 4 * S.shape[1]

        r = ArithmeticCoder(dmll.L)

        # We encode channel by channel, because that's what's needed for the RGB scale. For s > 0, this could be done
        # in parallel for all channels
        def encoder(c, C_cur):
            S_c = S[:, c, ...].to(torch.int16)
            encoded = r.range_encode(S_c, cdf=C_cur, time_logger=self.times)
            write_num_bytes_encoded(len(encoded), fout)
            fout.write(encoded)
            # yielding always bottleneck and extra_info
            return bn[:, c, ...], len(encoded)

        with self.times.prefix_scope('encode scale'):
            with self.times.run('total'):
                _, entropy_coding_bytes_per_c = \
                    self.code_with_cdf(l, bn.shape, encoder, dmll)

        # --- cleanup
        out.P[scale] = None
        out.bn[scale] = None
        out.S[scale] = None
        # ---

        return sum(entropy_coding_bytes_per_c) 
Developer: fab-jul | Project: L3C-PyTorch | Lines: 37 | Source: bitcoding.py


Note: the torch.int16 examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.