This article collects typical usage examples of torch.int32 in Python. If you are unsure what torch.int32 is for, how to use it, or want to see it in real code, the curated examples below may help. You can also browse further usage examples from the torch module.
The following 15 code examples of torch.int32 are listed, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
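As a quick orientation before the collected examples, here is a minimal sketch of torch.int32 itself (plain PyTorch API; the values are purely illustrative):

import torch

# Create an int32 tensor explicitly.
idx = torch.tensor([1, 2, 3], dtype=torch.int32)
print(idx.dtype)           # torch.int32

# Cast an existing tensor to int32 (torch.int is an alias for torch.int32).
x = torch.arange(5, dtype=torch.float32)
x_int = x.to(torch.int32)  # equivalent to x.int()

# Cast back to float32 when fractional arithmetic is needed.
y = x_int.to(torch.float32) / 2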
Example 1: normalize_wav
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def normalize_wav(tensor: torch.Tensor) -> torch.Tensor:
    if tensor.dtype == torch.float32:
        pass
    elif tensor.dtype == torch.int32:
        tensor = tensor.to(torch.float32)
        tensor[tensor > 0] /= 2147483647.
        tensor[tensor < 0] /= 2147483648.
    elif tensor.dtype == torch.int16:
        tensor = tensor.to(torch.float32)
        tensor[tensor > 0] /= 32767.
        tensor[tensor < 0] /= 32768.
    elif tensor.dtype == torch.uint8:
        tensor = tensor.to(torch.float32) - 128
        tensor[tensor > 0] /= 127.
        tensor[tensor < 0] /= 128.
    return tensor
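A hypothetical usage sketch (the sample values are made up): a 16-bit PCM tensor is mapped into [-1.0, 1.0].

wav_int16 = torch.tensor([-32768, -16384, 0, 16384, 32767], dtype=torch.int16)
wav_float = normalize_wav(wav_int16)
print(wav_float.dtype)  # torch.float32
print(wav_float)        # tensor([-1.0000, -0.5000,  0.0000,  0.5000,  1.0000])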
Example 2: generate_iters_indices
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
# (this snippet also uses numpy as np and torch.distributions.Categorical)
def generate_iters_indices(self, num_of_iters):
    from_iter = len(self.iter_indices_per_iteration)
    for iter_num in range(from_iter, from_iter + num_of_iters):
        # Get a random number of samples per task (according to the iteration distribution)
        tsks = Categorical(probs=self.tasks_probs_over_iterations[iter_num]).sample(torch.Size([self.samples_in_batch]))
        # Generate sample indices for iter_num
        iter_indices = torch.zeros(0, dtype=torch.int32)
        for task_idx in range(self.num_of_tasks):
            if self.tasks_probs_over_iterations[iter_num][task_idx] > 0:
                num_samples_from_task = (tsks == task_idx).sum().item()
                self.samples_distribution_over_time[task_idx].append(num_samples_from_task)
                # Randomize indices for each task (to allow creation of a random task batch)
                tasks_inner_permute = np.random.permutation(len(self.tasks_samples_indices[task_idx]))
                rand_indices_of_task = tasks_inner_permute[:num_samples_from_task]
                iter_indices = torch.cat([iter_indices, self.tasks_samples_indices[task_idx][rand_indices_of_task]])
            else:
                self.samples_distribution_over_time[task_idx].append(0)
        self.iter_indices_per_iteration.append(iter_indices.tolist())
Example 3: compute_logits
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def compute_logits(self, token_ids: torch.Tensor) -> torch.Tensor:
    """
    Implements a language model, where each output is conditioned on the current
    input and the inputs processed so far.

    Args:
        token_ids: int32 tensor of shape [B, T], storing integer IDs of tokens.

    Returns:
        torch.float32 tensor of shape [B, T, V], storing the distribution over output symbols
        for each timestep of each batch element.
    """
    # TODO 5# 1) Embed tokens
    # TODO 5# 2) Run RNN on embedded tokens
    # TODO 5# 3) Project RNN outputs onto the vocabulary to obtain logits.
    return rnn_output_logits
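The body is left as an exercise in the original snippet; one possible way to fill in the three TODOs is sketched below. It assumes the model defines self.embedding (an nn.Embedding), self.rnn (a batch_first recurrent layer such as nn.GRU) and self.projection (an nn.Linear onto the vocabulary), none of which appear above.

    # Inside compute_logits, replacing the TODO block:
    embedded = self.embedding(token_ids.long())      # 1) [B, T] int32 IDs -> [B, T, D]; cast to int64 for the lookup
    rnn_output, _ = self.rnn(embedded)               # 2) [B, T, D] -> [B, T, H] recurrent features
    rnn_output_logits = self.projection(rnn_output)  # 3) [B, T, H] -> [B, T, V] vocabulary logits
    return rnn_output_logits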
Example 4: test_one_hot
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def test_one_hot(self):
    """
    Tests a torch one hot function.
    """
    if get_backend() == "pytorch":
        # Flat action array.
        inputs = torch.tensor([0, 1], dtype=torch.int32)
        one_hot = pytorch_one_hot(inputs, depth=2)
        expected = torch.tensor([[1., 0.], [0., 1.]])
        recursive_assert_almost_equal(one_hot, expected)

        # Container space.
        inputs = torch.tensor([[0, 3, 2], [1, 2, 0]], dtype=torch.int32)
        one_hot = pytorch_one_hot(inputs, depth=4)
        expected = torch.tensor([[[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]],
                                 [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]],
                                dtype=torch.int32)
        recursive_assert_almost_equal(one_hot, expected)
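pytorch_one_hot is not defined in this snippet; as a rough sketch (not the project's actual helper), it could be built on torch.nn.functional.one_hot, which requires int64 indices, hence the cast from torch.int32:

import torch

def pytorch_one_hot(tensor, depth=0):
    # F.one_hot only accepts int64 indices, so cast the int32 input first.
    return torch.nn.functional.one_hot(tensor.long(), num_classes=depth)

pytorch_one_hot(torch.tensor([0, 1], dtype=torch.int32), depth=2)
# tensor([[1, 0],
#         [0, 1]])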
Example 5: _graph_fn_call
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def _graph_fn_call(self, inputs):
    if self.backend == "python" or get_backend() == "python":
        if isinstance(inputs, list):
            inputs = np.asarray(inputs)
        return inputs.astype(dtype=util.convert_dtype(self.to_dtype, to="np"))
    elif get_backend() == "pytorch":
        torch_dtype = util.convert_dtype(self.to_dtype, to="pytorch")
        if torch_dtype == torch.float or torch_dtype == torch.float32:
            return inputs.float()
        elif torch_dtype == torch.int or torch_dtype == torch.int32:
            return inputs.int()
        elif torch_dtype == torch.uint8:
            return inputs.byte()
    elif get_backend() == "tf":
        in_space = get_space_from_op(inputs)
        to_dtype = util.convert_dtype(self.to_dtype, to="tf")
        if inputs.dtype != to_dtype:
            ret = tf.cast(x=inputs, dtype=to_dtype)
            if in_space.has_batch_rank is True:
                ret._batch_rank = 0 if in_space.time_major is False else 1
            if in_space.has_time_rank is True:
                ret._time_rank = 0 if in_space.time_major is True else 1
            return ret
        else:
            return inputs
Example 6: torch_dtype_to_np_dtype
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def torch_dtype_to_np_dtype(dtype):
    dtype_dict = {
        torch.bool:    np.dtype(np.bool_),
        torch.uint8:   np.dtype(np.uint8),
        torch.int8:    np.dtype(np.int8),
        torch.int16:   np.dtype(np.int16),
        torch.short:   np.dtype(np.int16),
        torch.int32:   np.dtype(np.int32),
        torch.int:     np.dtype(np.int32),
        torch.int64:   np.dtype(np.int64),
        torch.long:    np.dtype(np.int64),
        torch.float16: np.dtype(np.float16),
        torch.half:    np.dtype(np.float16),
        torch.float32: np.dtype(np.float32),
        torch.float:   np.dtype(np.float32),
        torch.float64: np.dtype(np.float64),
        torch.double:  np.dtype(np.float64),
    }
    return dtype_dict[dtype]

# ---------------------- InferenceEngine internal types ------------------------
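Usage is a direct lookup, for example:

torch_dtype_to_np_dtype(torch.int32)    # np.dtype('int32')
torch_dtype_to_np_dtype(torch.float16)  # np.dtype('float16')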
Example 7: certify_inputs
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def certify_inputs(log_probs, labels, lengths, label_lengths):
    check_type(log_probs, torch.float32, "log_probs")
    check_type(labels, torch.int32, "labels")
    check_type(label_lengths, torch.int32, "label_lengths")
    check_type(lengths, torch.int32, "lengths")
    check_contiguous(labels, "labels")
    check_contiguous(label_lengths, "label_lengths")
    check_contiguous(lengths, "lengths")

    if lengths.shape[0] != log_probs.shape[0]:
        raise ValueError("must have a length per example.")
    if label_lengths.shape[0] != log_probs.shape[0]:
        raise ValueError("must have a label length per example.")

    check_dim(log_probs, 4, "log_probs")
    check_dim(labels, 1, "labels")
    check_dim(lengths, 1, "lengths")
    check_dim(label_lengths, 1, "label_lengths")
    max_T = torch.max(lengths)
    max_U = torch.max(label_lengths)
    T, U = log_probs.shape[1:3]
    if T != max_T:
        raise ValueError("Input length mismatch")
    if U != max_U + 1:
        raise ValueError("Output length mismatch")
Example 8: make_numpy_ndarray
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def make_numpy_ndarray(**kwargs):
    np_array = numpy.random.random((2, 2))

    def compare(detailed, original):
        """Compare numpy arrays"""
        assert numpy.array_equal(detailed, original)
        return True

    return [
        {
            "value": np_array,
            "simplified": (
                CODE[type(np_array)],
                (
                    np_array.tobytes(),          # (bytes) serialized bin
                    (CODE[tuple], (2, 2)),       # (tuple) shape
                    (CODE[str], (b"float64",)),  # (str) dtype.name
                ),
            ),
            "cmp_detailed": compare,
        }
    ]

# numpy.float32, numpy.float64, numpy.int32, numpy.int64
Example 9: conv2d_rounding
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def conv2d_rounding(A, B):
    """
    Chunked conv2d that converts data types and saturates values,
    clipping anything > 127 to 127 and anything < -128 to -128.
    """
    # Reference (naive loop) implementation:
    # C = np.zeros((N, P, Q, K)).astype("int32")  # output
    # for b in range(N):
    #     for p in range(P):
    #         for q in range(Q):
    #             for k in range(K):
    #                 for rc in range(RC):
    #                     for rr in range(R):
    #                         for rs in range(S):
    #                             C[b, p, q, k] += A[b, p+rr, q+rs, rc] * B[rr, rs, rc, k]
    import torch
    A = torch.tensor(A, dtype=torch.int32).permute(0, 3, 1, 2)  # [N, H, W, C] -> [N, C, H, W]
    B = torch.tensor(B, dtype=torch.int32).permute(3, 2, 0, 1)  # [R, S, C, K] -> [K, C, R, S]
    C = torch.nn.functional.conv2d(A, B, bias=None, stride=1, padding=0, dilation=1, groups=1)
    C = C.permute(0, 2, 3, 1).numpy()                           # back to [N, P, Q, K]
    C[C > 127] = 127
    C[C < -128] = -128
    return C.astype(np.int8)
Example 10: __init__
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def __init__(
    self, func, y0, rtol, atol, first_step=None, safety=0.9, ifactor=10.0, dfactor=0.2, max_num_steps=2**31 - 1,
    **unused_kwargs
):
    _handle_unused_kwargs(self, unused_kwargs)
    del unused_kwargs

    self.func = func
    self.y0 = y0
    self.rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
    self.atol = atol if _is_iterable(atol) else [atol] * len(y0)
    self.first_step = first_step
    self.safety = _convert_to_tensor(safety, dtype=torch.float64, device=y0[0].device)
    self.ifactor = _convert_to_tensor(ifactor, dtype=torch.float64, device=y0[0].device)
    self.dfactor = _convert_to_tensor(dfactor, dtype=torch.float64, device=y0[0].device)
    self.max_num_steps = _convert_to_tensor(max_num_steps, dtype=torch.int32, device=y0[0].device)
Example 11: __init__
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def __init__(
    self, func, y0, rtol, atol, first_step=None, safety=0.9, ifactor=10.0, dfactor=0.2, max_num_steps=2**31 - 1,
    **unused_kwargs
):
    _handle_unused_kwargs(self, unused_kwargs)
    del unused_kwargs

    self.func = func
    self.y0 = y0
    self.rtol = rtol
    self.atol = atol
    self.first_step = first_step
    self.safety = _convert_to_tensor(safety, dtype=torch.float64, device=y0[0].device)
    self.ifactor = _convert_to_tensor(ifactor, dtype=torch.float64, device=y0[0].device)
    self.dfactor = _convert_to_tensor(dfactor, dtype=torch.float64, device=y0[0].device)
    self.max_num_steps = _convert_to_tensor(max_num_steps, dtype=torch.int32, device=y0[0].device)
Example 12: create_buffers
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def create_buffers(flags, obs_shape, num_actions) -> Buffers:
    T = flags.unroll_length
    specs = dict(
        frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
        reward=dict(size=(T + 1,), dtype=torch.float32),
        done=dict(size=(T + 1,), dtype=torch.bool),
        episode_return=dict(size=(T + 1,), dtype=torch.float32),
        episode_step=dict(size=(T + 1,), dtype=torch.int32),
        policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32),
        baseline=dict(size=(T + 1,), dtype=torch.float32),
        last_action=dict(size=(T + 1,), dtype=torch.int64),
        action=dict(size=(T + 1,), dtype=torch.int64),
    )
    buffers: Buffers = {key: [] for key in specs}
    for _ in range(flags.num_buffers):
        for key in buffers:
            buffers[key].append(torch.empty(**specs[key]).share_memory_())
    return buffers
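A hypothetical call (flag values and shapes are invented; Buffers, as used in the annotation above, is assumed to be a dict-of-lists type alias):

from types import SimpleNamespace

flags = SimpleNamespace(unroll_length=80, num_buffers=2)
buffers = create_buffers(flags, obs_shape=(4, 84, 84), num_actions=6)
print(buffers["episode_step"][0].dtype)  # torch.int32
print(buffers["frame"][0].shape)         # torch.Size([81, 4, 84, 84])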
Example 13: initial
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def initial(self):
    initial_reward = torch.zeros(1, 1)
    # This supports only single-tensor actions ATM.
    initial_last_action = torch.zeros(1, 1, dtype=torch.int64)
    self.episode_return = torch.zeros(1, 1)
    self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
    initial_done = torch.ones(1, 1, dtype=torch.uint8)
    initial_frame = _format_frame(self.gym_env.reset())
    return dict(
        frame=initial_frame,
        reward=initial_reward,
        done=initial_done,
        episode_return=self.episode_return,
        episode_step=self.episode_step,
        last_action=initial_last_action,
    )
Example 14: step
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def step(self, action):
    frame, reward, done, unused_info = self.gym_env.step(action.item())
    self.episode_step += 1
    self.episode_return += reward
    episode_step = self.episode_step
    episode_return = self.episode_return
    if done:
        frame = self.gym_env.reset()
        self.episode_return = torch.zeros(1, 1)
        self.episode_step = torch.zeros(1, 1, dtype=torch.int32)

    frame = _format_frame(frame)
    reward = torch.tensor(reward).view(1, 1)
    done = torch.tensor(done).view(1, 1)
    return dict(
        frame=frame,
        reward=reward,
        done=done,
        episode_return=episode_return,
        episode_step=episode_step,
        last_action=action,
    )
Example 15: update_dtype
# Required imports: import torch [as alias]
# Or: from torch import int32 [as alias]
def update_dtype(self, old_dtype):
    updated = {}
    for k, v in old_dtype.items():
        if v == np.float32:
            dt = torch.float32
        elif v == np.float64:
            dt = torch.float64
        elif v == np.float16:
            dt = torch.float16
        elif v == np.uint8:
            dt = torch.uint8
        elif v == np.int8:
            dt = torch.int8
        elif v == np.int16:
            dt = torch.int16
        elif v == np.int32:
            dt = torch.int32
        elif v == np.int64:
            dt = torch.int64
        else:
            raise ValueError("Unsupported dtype {}".format(v))
        updated[k] = dt
    return updated
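A small illustrative call (the dict keys and the converter variable are made up; update_dtype is a method, so it is called on an instance of its owning class):

np_dtypes = {"obs": np.uint8, "action": np.int32, "reward": np.float32}
torch_dtypes = converter.update_dtype(np_dtypes)
# {'obs': torch.uint8, 'action': torch.int32, 'reward': torch.float32}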