本文整理汇总了Python中torch.Tensor.permute方法的典型用法代码示例。如果您正苦于以下问题:Python Tensor.permute方法的具体用法?Python Tensor.permute怎么用?Python Tensor.permute使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.Tensor
的用法示例。
在下文中一共展示了Tensor.permute方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: split_heads
# 需要导入模块: from torch import Tensor [as 别名]
# 或者: from torch.Tensor import permute [as 别名]
def split_heads(self, x: torch.Tensor, k: bool = False):
    """Split the trailing feature dimension of ``x`` into attention heads.

    The last axis is factored into ``(n_head, head_dim)`` and the head axis is
    moved in front of the sequence axis.  For keys (``k=True``) the per-head
    feature axis is additionally swapped with the sequence axis, matching the
    ``split_states`` function of the TensorFlow implementation.

    NOTE(review): assumes x is (batch, seq, n_head * head_dim) — confirm with
    the caller.
    """
    head_dim = x.size(-1) // self.n_head
    x = x.view(*x.size()[:-1], self.n_head, head_dim)
    # Keys get their last two axes transposed relative to queries/values.
    axes = (0, 2, 3, 1) if k else (0, 2, 1, 3)
    return x.permute(*axes)
示例2: multioutput_to_batch_mode_transform
# 需要导入模块: from torch import Tensor [as 别名]
# 或者: from torch.Tensor import permute [as 别名]
def multioutput_to_batch_mode_transform(
    train_X: Tensor,
    train_Y: Tensor,
    num_outputs: int,
    train_Yvar: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
    r"""Transform training inputs for a multi-output model.

    Used for multi-output models that are internally represented by a batched
    single-output model, where each output is modeled as an independent batch.

    Args:
        train_X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of
            training features.
        train_Y: A `n x (o)` or `batch_shape x n x (o)` (batch mode) tensor of
            training observations.
        num_outputs: Number of outputs.
        train_Yvar: A `batch_shape x n x (o)` tensor of observed measurement
            noise.

    Returns:
        3-element tuple containing

        - A `(o) x batch_shape x n x d` tensor of training features.
        - A `(o) x batch_shape x n` tensor of training observations.
        - A `(o) x batch_shape x n` tensor of observed measurement noise.
    """
    batch_shape = train_X.shape[:-2]
    if num_outputs > 1:
        # Bring the trailing output axis to the front: `o x batch_shape x n`.
        out_first = [-1] + list(range(train_Y.dim() - 1))
        train_Y = train_Y.permute(*out_first)
        # Share the same features across all outputs without copying:
        # `o x batch_shape x n x d` via a broadcasted expand.
        expand_shape = torch.Size([num_outputs]) + torch.Size([-1] * train_X.dim())
        train_X = train_X.unsqueeze(0).expand(expand_shape)
        if train_Yvar is not None:
            # Same output-first layout for the noise: `o x batch_shape x n`.
            train_Yvar = train_Yvar.permute(-1, *range(train_Yvar.dim() - 1))
    elif train_Y.dim() > 1:
        # Single output: drop the trailing singleton -> `batch_shape x n`.
        flat_shape = batch_shape + torch.Size([-1])
        train_Y = train_Y.view(flat_shape)
        if train_Yvar is not None:
            train_Yvar = train_Yvar.view(flat_shape)
    return train_X, train_Y, train_Yvar
示例3: forward
# 需要导入模块: from torch import Tensor [as 别名]
# 或者: from torch.Tensor import permute [as 别名]
def forward(self, input: torch.Tensor) -> torch.Tensor:
    """Split dimension ``src_dim`` into ``(part_a, part_b)``, relocate one of
    the two new axes, and merge the pair that ends up adjacent at the target
    position back into a single dimension.

    NOTE(review): ``low``/``high`` appear to be the intended final positions of
    the two halves — confirm against the module's constructor.
    """
    # Factor dimension `src_dim` into two axes of sizes (part_a, part_b).
    head = input.shape[:self.src_dim]
    tail = input.shape[self.src_dim + 1:]
    input = input.reshape(head + (self.part_a, self.part_b) + tail)
    # Pick which of the two new axes moves and where it should land.
    src = self.src_dim
    if self.high == src:
        dest, src = self.low, src + 1
    else:
        dest = self.high
    # Rotate the moving axis from `src` to `dest`; a pop/insert on the
    # permutation list is equivalent to the pairwise-swap rotation.
    perm = list(range(input.dim()))
    perm.insert(dest, perm.pop(src))
    input = input.permute(perm)
    # Fuse the now-adjacent pair at `dest` into one dimension.
    merged = input.shape[dest] * input.shape[dest + 1]
    return input.reshape(input.shape[:dest] + (merged,) + input.shape[dest + 2:])
示例4: merge_heads
# 需要导入模块: from torch import Tensor [as 别名]
# 或者: from torch.Tensor import permute [as 别名]
def merge_heads(self, x: torch.Tensor):
    # pylint: disable=no-self-use
    """Collapse the per-head layout back into a flat feature dimension.

    The head axis is moved back next to the per-head features and the last two
    axes are flattened into one, mirroring ``merge_states`` in the TensorFlow
    implementation (the inverse of the non-key ``split_heads`` layout).
    """
    # transpose(1, 2) on a 4-D tensor is the same move as permute(0, 2, 1, 3).
    y = x.transpose(1, 2).contiguous()
    flat = y.size(-2) * y.size(-1)
    return y.view(y.size()[:-2] + (flat,))