

Python Tensor.unsqueeze Method Code Examples

This article collects typical usage examples of the torch.Tensor.unsqueeze method in Python. If you have been wondering what exactly Tensor.unsqueeze does, how to use it, or where to find concrete examples, the curated code samples below should help. You can also browse further usage examples of torch.Tensor, the class this method belongs to.


The following 15 code examples of the Tensor.unsqueeze method are shown below, sorted by popularity by default.
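
As a quick orientation before the examples: unsqueeze returns a view of the input with a dimension of size one inserted at the given position (negative indices count from the end). A minimal sketch:

import torch

x = torch.arange(6).reshape(2, 3)  # shape (2, 3)
print(x.unsqueeze(0).shape)        # torch.Size([1, 2, 3]) -- new leading dim
print(x.unsqueeze(1).shape)        # torch.Size([2, 1, 3]) -- new middle dim
print(x.unsqueeze(-1).shape)       # torch.Size([2, 3, 1]) -- new trailing dim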

Example 1: forward

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
    def forward(self,  # pylint: disable=arguments-differ
                matrix_1: torch.Tensor,
                matrix_2: torch.Tensor) -> torch.Tensor:
        combined_tensors = util.combine_tensors_and_multiply(self._combination,
                                                             [matrix_1.unsqueeze(2), matrix_2.unsqueeze(1)],
                                                             self._weight_vector)
        return self._activation(combined_tensors + self._bias)
Developer: apmoore1, Project: allennlp, Lines: 9, Source: linear_matrix_attention.py
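
The unsqueeze(2)/unsqueeze(1) pair above sets up broadcasting so every row of matrix_1 is combined with every row of matrix_2. A minimal sketch of the shape mechanics, with made-up sizes:

import torch

batch, len1, len2, dim = 2, 4, 5, 7
matrix_1 = torch.randn(batch, len1, dim)
matrix_2 = torch.randn(batch, len2, dim)

a = matrix_1.unsqueeze(2)  # (batch, len1, 1, dim)
b = matrix_2.unsqueeze(1)  # (batch, 1, len2, dim)
print((a * b).shape)       # torch.Size([2, 4, 5, 7]) -- all pairs combined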

Example 2: PeepholeLSTMCell

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
def PeepholeLSTMCell(input: torch.Tensor,
                     hidden: Tuple[torch.Tensor, torch.Tensor],
                     w_ih: torch.Tensor,
                     w_hh: torch.Tensor,
                     w_ip: torch.Tensor,
                     w_fp: torch.Tensor,
                     w_op: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    An LSTM cell with peephole connections without biases.

    Mostly ripped from the pytorch autograd lstm implementation.
    """
    hx, cx = hidden
    gates = F.linear(input, w_ih) + F.linear(hx, w_hh)

    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    peep_i = w_ip.unsqueeze(0).expand_as(cx) * cx
    ingate = ingate + peep_i
    peep_f = w_fp.unsqueeze(0).expand_as(cx) * cx
    forgetgate = forgetgate + peep_f

    ingate = torch.sigmoid(ingate)
    forgetgate = torch.sigmoid(forgetgate)
    cellgate = torch.tanh(cellgate)
    cy = (forgetgate * cx) + (ingate * cellgate)
    peep_o = w_op.unsqueeze(0).expand_as(cy) * cy
    outgate = outgate + peep_o
    hy = outgate * torch.tanh(cy)

    return hy, cy
Developer: mittagessen, Project: kraken, Lines: 32, Source: layers.py
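
A smoke test for the cell above, assuming the usual stacked-gate layout: w_ih and w_hh stack the four gates along a 4 * hidden_size leading dimension, and each peephole weight is a per-hidden-unit vector (all sizes here are made up):

import torch
import torch.nn.functional as F  # needed by PeepholeLSTMCell's F.linear calls

batch, input_size, hidden = 3, 10, 8
inp = torch.randn(batch, input_size)
hx, cx = torch.zeros(batch, hidden), torch.zeros(batch, hidden)
w_ih = torch.randn(4 * hidden, input_size)
w_hh = torch.randn(4 * hidden, hidden)
w_ip, w_fp, w_op = (torch.randn(hidden) for _ in range(3))

hy, cy = PeepholeLSTMCell(inp, (hx, cx), w_ih, w_hh, w_ip, w_fp, w_op)
print(hy.shape, cy.shape)  # torch.Size([3, 8]) torch.Size([3, 8])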

Example 3: neg_hartmann6

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
def neg_hartmann6(X: Tensor) -> Tensor:
    r"""Negative Hartmann6 test function.

    Six-dimensional function (typically evaluated on `[0, 1]^6`)

        `H(x) = - sum_{i=1}^4 ALPHA_i exp( - sum_{j=1}^6 A_ij (x_j - P_ij)**2 )`

    H has 6 local minima and a global minimum at

        `z = (0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573)`

    with `H(z) = -3.32237`

    Args:
        X: A Tensor of size `6` or `k x 6` (k batch evaluations).

    Returns:
        `-H(X)`, the negative value of the standard Hartmann6 function.
    """
    batch = X.ndimension() > 1
    X = X if batch else X.unsqueeze(0)
    inner_sum = torch.sum(X.new(A) * (X.unsqueeze(1) - 0.0001 * X.new(P)) ** 2, dim=2)
    H = -torch.sum(X.new(ALPHA) * torch.exp(-inner_sum), dim=1)
    result = -H
    return result if batch else result.squeeze(0)
Developer: saschwan, Project: botorch, Lines: 27, Source: hartmann6.py
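
To run the snippet, the module-level constants A, P, and ALPHA it references can be filled in from the standard Hartmann6 definition (the function scales the integer-valued P by 1e-4); treat these values as an assumption about the source module:

import torch

ALPHA = [1.0, 1.2, 3.0, 3.2]
A = [[10, 3, 17, 3.5, 1.7, 8],
     [0.05, 10, 17, 0.1, 8, 14],
     [3, 3.5, 1.7, 10, 17, 8],
     [17, 8, 0.05, 10, 0.1, 14]]
P = [[1312, 1696, 5569, 124, 8283, 5886],
     [2329, 4135, 8307, 3736, 1004, 9991],
     [2348, 1451, 3522, 2883, 3047, 6650],
     [4047, 8828, 8732, 5743, 1091, 381]]

z = torch.tensor([0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573])
print(neg_hartmann6(z))  # tensor(3.3224) -- the negated global minimum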

Example 4: forward

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
    def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:
        tiled_matrix_1 = matrix_1.unsqueeze(2).expand(matrix_1.size()[0],
                                                      matrix_1.size()[1],
                                                      matrix_2.size()[1],
                                                      matrix_1.size()[2])
        tiled_matrix_2 = matrix_2.unsqueeze(1).expand(matrix_2.size()[0],
                                                      matrix_1.size()[1],
                                                      matrix_2.size()[1],
                                                      matrix_2.size()[2])
        return self._similarity_function(tiled_matrix_1, tiled_matrix_2)
Developer: apmoore1, Project: allennlp, Lines: 12, Source: legacy_matrix_attention.py

Example 5: forward

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
    def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:

        if self._use_input_biases:
            bias1 = matrix_1.new_ones(matrix_1.size()[:-1] + (1,))
            bias2 = matrix_2.new_ones(matrix_2.size()[:-1] + (1,))

            matrix_1 = torch.cat([matrix_1, bias1], -1)
            matrix_2 = torch.cat([matrix_2, bias2], -1)
        intermediate = torch.matmul(matrix_1.unsqueeze(1), self._weight_matrix.unsqueeze(0))
        final = torch.matmul(intermediate, matrix_2.unsqueeze(1).transpose(2, 3))
        return self._activation(final.squeeze(1) + self._bias)
Developer: pyknife, Project: allennlp, Lines: 13, Source: bilinear_matrix_attention.py
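
A shape walk-through of the bilinear product above, with made-up sizes and a plain tensor standing in for self._weight_matrix (input biases disabled):

import torch

batch, len1, len2, d1, d2 = 2, 4, 5, 7, 9
matrix_1 = torch.randn(batch, len1, d1)
matrix_2 = torch.randn(batch, len2, d2)
weight = torch.randn(d1, d2)  # stands in for self._weight_matrix

intermediate = torch.matmul(matrix_1.unsqueeze(1), weight.unsqueeze(0))    # (batch, 1, len1, d2)
final = torch.matmul(intermediate, matrix_2.unsqueeze(1).transpose(2, 3))  # (batch, 1, len1, len2)
print(final.squeeze(1).shape)  # torch.Size([2, 4, 5]) -- pairwise scores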

Example 6: forward

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
    def forward(self, tokens: torch.Tensor, mask: torch.Tensor = None):  # pylint: disable=arguments-differ
        if mask is not None:
            tokens = tokens * mask.unsqueeze(-1).float()

        # Our input has shape `(batch_size, num_tokens, embedding_dim)`, so we sum out the `num_tokens`
        # dimension.
        summed = tokens.sum(1)

        if self._averaged:
            if mask is not None:
                lengths = get_lengths_from_binary_sequence_mask(mask)
                length_mask = (lengths > 0)

                # Set any length 0 to 1, to avoid dividing by zero.
                lengths = torch.max(lengths, Variable(lengths.data.new().resize_(1).fill_(1)))
            else:
                lengths = Variable(tokens.data.new().resize_(1).fill_(tokens.size(1)), requires_grad=False)
                length_mask = None

            summed = summed / lengths.unsqueeze(-1).float()

            if length_mask is not None:
                summed = summed * (length_mask > 0).float().unsqueeze(-1)

        return summed
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 27, Source: boe_encoder.py

Example 7: add_output_dim

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
def add_output_dim(X: Tensor, original_batch_shape: torch.Size) -> Tuple[Tensor, int]:
    r"""Inserts the output dimension at the correct location. The trailing batch
        dimensions of X must match the original batch dimensions of the training
        inputs, but can also include extra batch dimensions.

    Args:
        X: A `(new_batch_shape) x (original_batch_shape) x n x d` tensor of features.
        original_batch_shape: the batch shape of the model's training inputs.

    Returns:
        2-element tuple containing

        - A `(new_batch_shape) x o x (original_batch_shape) x n x d` tensor of
        features.
        - The index corresponding to the output dimension.
    """
    num_original_batch_dims = len(original_batch_shape)
    if X.shape[-(num_original_batch_dims + 2) : -2] != original_batch_shape:
        raise ValueError(
            "The trailing batch dimensions of X must match the batch dimensions of the"
            " training inputs."
        )
    # insert the output dimension
    output_dim_idx = len(X.shape) - (num_original_batch_dims + 2)
    X = X.unsqueeze(output_dim_idx)
    return X, output_dim_idx
Developer: saschwan, Project: botorch, Lines: 28, Source: utils.py
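
A quick usage sketch with made-up shapes: the training inputs had batch shape (3,), and X prepends a new batch dimension of size 2:

import torch

X = torch.randn(2, 3, 5, 4)  # (new_batch) x (original_batch) x n x d
X_out, idx = add_output_dim(X, original_batch_shape=torch.Size([3]))
print(X_out.shape, idx)      # torch.Size([2, 1, 3, 5, 4]) 1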

Example 8: neg_branin

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
def neg_branin(X: Tensor) -> Tensor:
    r"""Negative Branin test function.

    Two-dimensional function (usually evaluated on `[-5, 10] x [0, 15]`):

        `B(x) = (x_2 - b x_1^2 + c x_1 - r)^2 + 10 (1 - t) cos(x_1) + 10`

    B has 3 minimizers for its global minimum at

        `z_1 = (-pi, 12.275), z_2 = (pi, 2.275), z_3 = (9.42478, 2.475)`

    with `B(z_i) = 0.397887`

    Args:
        X: A Tensor of size `2` or `k x 2` (`k` batch evaluations).

    Returns:
        `-B(X)`, the negative value of the standard Branin function.
    """
    batch = X.ndimension() > 1
    X = X if batch else X.unsqueeze(0)
    t1 = X[:, 1] - 5.1 / (4 * math.pi ** 2) * X[:, 0] ** 2 + 5 / math.pi * X[:, 0] - 6
    t2 = 10 * (1 - 1 / (8 * math.pi)) * torch.cos(X[:, 0])
    B = t1 ** 2 + t2 + 10
    result = -B
    return result if batch else result.squeeze(0)
Developer: saschwan, Project: botorch, Lines: 28, Source: branin.py
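
The three standard minimizers from the docstring give a quick check:

import math
import torch

Z = torch.tensor([[-math.pi, 12.275], [math.pi, 2.275], [9.42478, 2.475]])
print(neg_branin(Z))  # tensor([-0.3979, -0.3979, -0.3979])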

Example 9: dup_innermost

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
def dup_innermost(t: Tensor, curr_dims: int):
    """ Unsqueeze a new innermost dimension and duplicate values along it.
    :param curr_dims: the current number of dimensions of ``t``; a new last
        dimension is added after them and expanded to size 2.
    """
    sizes = [-1] * (curr_dims + 1)
    sizes[-1] = 2
    return t.unsqueeze(dim=curr_dims).expand(*sizes)
Developer: AndriyLin, Project: Utils, Lines: 9, Source: pytorch.py
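
A quick check that the innermost value really is duplicated:

import torch

t = torch.tensor([[1., 2.], [3., 4.]])  # shape (2, 2), so curr_dims=2
d = dup_innermost(t, 2)                 # shape (2, 2, 2)
print(d[..., 0].equal(d[..., 1]))       # True -- both slices equal t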

Example 10: forward

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
    def forward(self, tokens: torch.Tensor, mask: torch.Tensor):  # pylint: disable=arguments-differ
        if mask is not None:
            tokens = tokens * mask.unsqueeze(-1).float()

        # Our input is expected to have shape `(batch_size, num_tokens, embedding_dim)`.  The
        # convolution layers expect input of shape `(batch_size, in_channels, sequence_length)`,
        # where the conv layer `in_channels` is our `embedding_dim`.  We thus need to transpose the
        # tensor first.
        tokens = torch.transpose(tokens, 1, 2)
        # Each convolution layer returns output of size `(batch_size, num_filters, pool_length)`,
        # where `pool_length = num_tokens - ngram_size + 1`.  We then do an activation function,
        # then do max pooling over each filter for the whole input sequence.  Because our max
        # pooling is simple, we just use `torch.max`.  The resultant tensor has shape
        # `(batch_size, num_conv_layers * num_filters)`, which then gets projected using the
        # projection layer, if requested.

        filter_outputs = []
        for i in range(len(self._convolution_layers)):
            convolution_layer = getattr(self, 'conv_layer_{}'.format(i))
            filter_outputs.append(
                    self._activation(convolution_layer(tokens)).max(dim=2)[0]
            )

        # Now we have a list of `num_conv_layers` tensors of shape `(batch_size, num_filters)`.
        # Concatenating them gives us a tensor of shape `(batch_size, num_filters * num_conv_layers)`.
        maxpool_output = torch.cat(filter_outputs, dim=1) if len(filter_outputs) > 1 else filter_outputs[0]

        if self.projection_layer:
            result = self.projection_layer(maxpool_output)
        else:
            result = maxpool_output
        return result
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 34, Source: cnn_encoder.py
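
A hypothetical usage sketch, assuming this is AllenNLP's CnnEncoder (the module path and constructor arguments below are an assumption based on that library):

import torch
from allennlp.modules.seq2vec_encoders import CnnEncoder  # assumed location

encoder = CnnEncoder(embedding_dim=7, num_filters=4, ngram_filter_sizes=(2, 3))
tokens = torch.randn(2, 6, 7)  # (batch_size, num_tokens, embedding_dim)
mask = torch.ones(2, 6)
mask[1, 4:] = 0                # mark the last two tokens of row 1 as padding
print(encoder(tokens, mask).shape)  # torch.Size([2, 8]) -- 2 ngram sizes x 4 filters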

Example 11: forward

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
    def forward(self,  # pylint: disable=arguments-differ
                matrix_1: torch.Tensor,
                matrix_2: torch.Tensor) -> torch.Tensor:
        # TODO(mattg): Remove the need for this tiling.
        # https://github.com/allenai/allennlp/pull/1235#issuecomment-391540133
        tiled_matrix_1 = matrix_1.unsqueeze(2).expand(matrix_1.size()[0],
                                                      matrix_1.size()[1],
                                                      matrix_2.size()[1],
                                                      matrix_1.size()[2])
        tiled_matrix_2 = matrix_2.unsqueeze(1).expand(matrix_2.size()[0],
                                                      matrix_1.size()[1],
                                                      matrix_2.size()[1],
                                                      matrix_2.size()[2])

        combined_tensors = util.combine_tensors(self._combination, [tiled_matrix_1, tiled_matrix_2])
        dot_product = torch.matmul(combined_tensors, self._weight_vector)
        return self._activation(dot_product + self._bias)
Developer: pyknife, Project: allennlp, Lines: 19, Source: linear_matrix_attention.py

Example 12: __call__

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
    def __call__(self,
                 predictions: torch.Tensor,
                 gold_labels: torch.Tensor,
                 mask: Optional[torch.Tensor] = None):
        """
        Parameters
        ----------
        predictions : ``torch.Tensor``, required.
            A tensor of predictions of shape (batch_size, ..., num_classes).
        gold_labels : ``torch.Tensor``, required.
            A tensor of integer class label of shape (batch_size, ...). It must be the same
            shape as the ``predictions`` tensor without the ``num_classes`` dimension.
        mask: ``torch.Tensor``, optional (default = None).
            A masking tensor the same size as ``gold_labels``.
        """
        predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)

        # Some sanity checks.
        num_classes = predictions.size(-1)
        if gold_labels.dim() != predictions.dim() - 1:
            raise ConfigurationError("gold_labels must have dimension == predictions.dim() - 1 but "
                                     "found tensor of shape: {}".format(predictions.size()))
        if (gold_labels >= num_classes).any():
            raise ConfigurationError("A gold label passed to Categorical Accuracy contains an id >= {}, "
                                     "the number of classes.".format(num_classes))

        predictions = predictions.view((-1, num_classes))
        gold_labels = gold_labels.view(-1).long()
        if not self._tie_break:
            # Top K indexes of the predictions (or fewer, if there aren't K of them).
            # Special case topk == 1, because it's common and .max() is much faster than .topk().
            if self._top_k == 1:
                top_k = predictions.max(-1)[1].unsqueeze(-1)
            else:
                top_k = predictions.topk(min(self._top_k, predictions.shape[-1]), -1)[1]

            # This is of shape (batch_size, ..., top_k).
            correct = top_k.eq(gold_labels.unsqueeze(-1)).float()
        else:
            # A prediction is correct if the gold label falls on any of the max scores;
            # distribute the score by tie_counts.
            max_predictions = predictions.max(-1)[0]
            max_predictions_mask = predictions.eq(max_predictions.unsqueeze(-1))
            # max_predictions_mask is (rows x num_classes) and gold_labels is (batch_size,);
            # the ith entry of gold_labels gives the class index (0..num_classes-1) for the ith row.
            # For each row, check whether the index pointed to by gold_label is among the max-scored classes.
            correct = max_predictions_mask[torch.arange(gold_labels.numel()).long(), gold_labels].float()
            tie_counts = max_predictions_mask.sum(-1)
            correct /= tie_counts.float()
            correct.unsqueeze_(-1)

        if mask is not None:
            correct *= mask.view(-1, 1).float()
            self.total_count += mask.sum()
        else:
            self.total_count += gold_labels.numel()
        self.correct_count += correct.sum()
Developer: apmoore1, Project: allennlp, Lines: 58, Source: categorical_accuracy.py
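
A hypothetical usage sketch, assuming this is AllenNLP's CategoricalAccuracy metric (the import path is an assumption based on that library):

import torch
from allennlp.training.metrics import CategoricalAccuracy  # assumed location

metric = CategoricalAccuracy(top_k=1)
predictions = torch.tensor([[0.1, 0.8, 0.1],
                            [0.7, 0.2, 0.1]])  # (batch_size, num_classes)
gold_labels = torch.tensor([1, 2])             # second label is not the argmax
metric(predictions, gold_labels)
print(metric.get_metric())                     # 0.5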

Example 13: multi_perspective_match_pairwise

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
def multi_perspective_match_pairwise(vector1: torch.Tensor,
                                     vector2: torch.Tensor,
                                     weight: torch.Tensor,
                                     eps: float = 1e-8) -> torch.Tensor:
    """
    Calculate multi-perspective cosine matching between each time step of
    one vector and each time step of another vector.

    Parameters
    ----------
    vector1 : ``torch.Tensor``
        A tensor of shape ``(batch, seq_len1, hidden_size)``
    vector2 : ``torch.Tensor``
        A tensor of shape ``(batch, seq_len2, hidden_size)``
    weight : ``torch.Tensor``
        A tensor of shape ``(num_perspectives, hidden_size)``
    eps : ``float``, optional (default = 1e-8)
        A small value to avoid division by zero.

    Returns
    -------
    A tensor of shape (batch, seq_len1, seq_len2, num_perspectives) consisting of
    the multi-perspective matching results
    """
    num_perspectives = weight.size(0)

    # (1, num_perspectives, 1, hidden_size)
    weight = weight.unsqueeze(0).unsqueeze(2)

    # (batch, num_perspectives, seq_len*, hidden_size)
    vector1 = weight * vector1.unsqueeze(1).expand(-1, num_perspectives, -1, -1)
    vector2 = weight * vector2.unsqueeze(1).expand(-1, num_perspectives, -1, -1)

    # (batch, num_perspectives, seq_len*, 1)
    vector1_norm = vector1.norm(p=2, dim=3, keepdim=True)
    vector2_norm = vector2.norm(p=2, dim=3, keepdim=True)

    # (batch, num_perspectives, seq_len1, seq_len2)
    mul_result = torch.matmul(vector1, vector2.transpose(2, 3))
    norm_value = vector1_norm * vector2_norm.transpose(2, 3)

    # (batch, seq_len1, seq_len2, num_perspectives)
    return (mul_result / norm_value.clamp(min=eps)).permute(0, 2, 3, 1)
Developer: apmoore1, Project: allennlp, Lines: 45, Source: bimpm_matching.py
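
A shape check with made-up sizes, since the function above is self-contained apart from torch:

import torch

batch, seq_len1, seq_len2, hidden, num_perspectives = 2, 4, 5, 8, 3
v1 = torch.randn(batch, seq_len1, hidden)
v2 = torch.randn(batch, seq_len2, hidden)
w = torch.randn(num_perspectives, hidden)

out = multi_perspective_match_pairwise(v1, v2, w)
print(out.shape)  # torch.Size([2, 4, 5, 3])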

Example 14: forward

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
    def forward(self, tensor: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        # pylint: disable=arguments-differ
        broadcast_mask = mask.unsqueeze(-1).float()
        num_elements = broadcast_mask.sum() * self.size
        mean = (tensor * broadcast_mask).sum() / num_elements
        masked_centered = (tensor - mean) * broadcast_mask
        std = torch.sqrt(
                (masked_centered * masked_centered).sum() / num_elements + self.eps
        )
        return self.gamma * (tensor - mean) / (std + self.eps) + self.beta
Developer: apmoore1, Project: allennlp, Lines: 12, Source: masked_layer_norm.py

Example 15: forward

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import unsqueeze [as alias]
    def forward(self, X: Tensor) -> Tensor:
        r"""Evaluate Expected Improvement on the candidate set X.

        Args:
            X: A `b1 x ... bk x 1 x d`-dim batched tensor of `d`-dim design points.

        Returns:
            A `b1 x ... bk`-dim tensor of Noisy Expected Improvement values at
            the given design points `X`.
        """
        # add batch dimension for broadcasting to fantasy models
        return super().forward(X.unsqueeze(-3)).mean(dim=-1)
Developer: saschwan, Project: botorch, Lines: 14, Source: analytic.py


Note: The torch.Tensor.unsqueeze examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.