

Python Tensor.ndimension Method Code Examples

This article collects typical usage examples of the torch.Tensor.ndimension method in Python. If you are wondering how to use Tensor.ndimension, what it does, or where to find working examples of it, the code samples selected below should help. You can also explore further usage examples of torch.Tensor, the class this method belongs to.


The following presents 5 code examples of the Tensor.ndimension method, sorted by popularity by default. All of them come from the botorch project.
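All five examples below rely on the same idiom: Tensor.ndimension() (an alias for Tensor.dim()) returns the number of dimensions of a tensor, which the functions use either to promote a single point to a batch of size one or to validate the input shape. A minimal standalone sketch of that dispatch pattern (the function name and toy objective here are illustrative, not taken from botorch):

import torch
from torch import Tensor

def batched_eval(X: Tensor) -> Tensor:
    """Toy objective (sum of squares) accepting inputs of shape `d` or `k x d`."""
    batch = X.ndimension() > 1          # ndimension() == dim(): number of tensor axes
    X = X if batch else X.unsqueeze(0)  # promote a single point to a 1 x d batch
    result = (X ** 2).sum(dim=-1)       # shape: k
    return result if batch else result.squeeze(0)  # drop the synthetic batch dim again

print(torch.rand(6).ndimension())            # 1 -> a single point
print(torch.rand(4, 6).ndimension())         # 2 -> a batch of 4 points
print(batched_eval(torch.rand(6)).shape)     # torch.Size([])
print(batched_eval(torch.rand(4, 6)).shape)  # torch.Size([4])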

Example 1: neg_hartmann6

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import ndimension [as alias]
# Note: this excerpt also assumes `import torch` and the module-level constants
# A, P and ALPHA defined in botorch's hartmann6.py (not reproduced here).
def neg_hartmann6(X: Tensor) -> Tensor:
    r"""Negative Hartmann6 test function.

    Six-dimensional function (typically evaluated on `[0, 1]^6`)

        `H(x) = - sum_{i=1}^4 ALPHA_i exp( - sum_{j=1}^6 A_ij (x_j - P_ij)**2 )`

    H has 6 local minima and a global minimum at

        `z = (0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573)`

    with `H(z) = -3.32237`

    Args:
        X: A Tensor of size `6` or `k x 6` (k batch evaluations).

    Returns:
        `-H(X)`, the negative value of the standard Hartmann6 function.
    """
    batch = X.ndimension() > 1
    X = X if batch else X.unsqueeze(0)
    inner_sum = torch.sum(X.new(A) * (X.unsqueeze(1) - 0.0001 * X.new(P)) ** 2, dim=2)
    H = -torch.sum(X.new(ALPHA) * torch.exp(-inner_sum), dim=1)
    result = -H
    return result if batch else result.squeeze(0)
Developer: saschwan, Project: botorch, Lines of code: 27, Source file: hartmann6.py
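
Because the function branches on X.ndimension(), it accepts either a single 6-dimensional point or a k x 6 batch. A usage sketch, assuming neg_hartmann6 and the constants it needs are imported from botorch's hartmann6 module:

import torch

z = torch.tensor([0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573])
print(neg_hartmann6(z))                       # scalar tensor, approximately 3.32237
print(neg_hartmann6(torch.rand(4, 6)).shape)  # torch.Size([4]): one value per batch row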

Example 2: neg_branin

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import ndimension [as alias]
# Note: this excerpt also assumes `import math` and `import torch`.
def neg_branin(X: Tensor) -> Tensor:
    r"""Negative Branin test function.

    Two-dimensional function (usually evaluated on `[-5, 10] x [0, 15]`):

        `B(x) = (x_2 - b x_1^2 + c x_1 - r)^2 + 10 (1 - t) cos(x_1) + 10`

    where `b = 5.1 / (4 pi^2)`, `c = 5 / pi`, `r = 6`, and `t = 1 / (8 pi)`,
    matching the constants hard-coded in the function body.

    B has 3 minimizers for its global minimum at

        `z_1 = (-pi, 12.275), z_2 = (pi, 2.275), z_3 = (9.42478, 2.475)`

    with `B(z_i) = 0.397887`

    Args:
        X: A Tensor of size `2` or `k x 2` (`k` batch evaluations).

    Returns:
        `-B(X)`, the negative value of the standard Branin function.
    """
    batch = X.ndimension() > 1
    X = X if batch else X.unsqueeze(0)
    t1 = X[:, 1] - 5.1 / (4 * math.pi ** 2) * X[:, 0] ** 2 + 5 / math.pi * X[:, 0] - 6
    t2 = 10 * (1 - 1 / (8 * math.pi)) * torch.cos(X[:, 0])
    B = t1 ** 2 + t2 + 10
    result = -B
    return result if batch else result.squeeze(0)
Developer: saschwan, Project: botorch, Lines of code: 28, Source file: branin.py
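
A usage sketch, assuming neg_branin is imported from the module above; it evaluates the function at one of the known minimizers and at a small batch:

import math
import torch

z1 = torch.tensor([-math.pi, 12.275])
print(neg_branin(z1))                      # scalar tensor, approximately -0.397887
print(neg_branin(torch.rand(3, 2)).shape)  # torch.Size([3])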

Example 3: neg_michalewicz

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import ndimension [as alias]
# Note: this excerpt also assumes `import math` and `import torch`.
def neg_michalewicz(X: Tensor) -> Tensor:
    r"""Negative 10-dim Michalewicz test function.

    10-dim function (usually evaluated on the hypercube `[0, pi]^10`):

        `M(x) = - sum_{i=1}^{10} sin(x_i) sin(i x_i^2 / pi)^20`

    Args:
        X: A Tensor of size `10` or `k x 10` (`k` batch evaluations).

    Returns:
        `-M(X)`, the negative value of the Michalewicz function.
    """
    batch = X.ndimension() > 1
    X = X if batch else X.unsqueeze(0)
    a = 1 + torch.arange(10, device=X.device, dtype=X.dtype)
    result = torch.sum(torch.sin(X) * torch.sin(a * X ** 2 / math.pi) ** 20, dim=-1)
    return result if batch else result.squeeze(0)
Developer: saschwan, Project: botorch, Lines of code: 20, Source file: michalewicz.py
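
A usage sketch, assuming neg_michalewicz is imported alongside torch and math:

import math
import torch

X = torch.rand(10) * math.pi                               # a single point in [0, pi]^10
print(neg_michalewicz(X))                                  # scalar tensor
print(neg_michalewicz(torch.rand(7, 10) * math.pi).shape)  # torch.Size([7])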

Example 4: neg_styblinski_tang

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import ndimension [as alias]
# Note: this excerpt also assumes `import torch`.
def neg_styblinski_tang(X: Tensor) -> Tensor:
    r"""Negative Styblinski-Tang test function.

    d-dimensional function (usually evaluated on the hypercube `[-5, 5]^d`):

        `H(x) = 0.5 * sum_{i=1}^d (x_i^4 - 16 * x_i^2 + 5 * x_i)`

    H has a single global minimum `H(z) = -39.166166 * d` at `z = [-2.903534]^d`

    Args:
        X: A Tensor of size `d` or `k x d` (`k` batch evaluations)

    Returns:
        `-H(X)`, the negative value of the standard Styblinski-Tang function.
    """
    batch = X.ndimension() > 1
    X = X if batch else X.unsqueeze(0)
    H = 0.5 * (X ** 4 - 16 * X ** 2 + 5 * X).sum(dim=1)
    result = -H
    return result if batch else result.squeeze(0)
Developer: saschwan, Project: botorch, Lines of code: 22, Source file: styblinski_tang.py
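
Unlike the fixed-dimension functions above, this one works for any d, since it sums over the feature dimension. A usage sketch, assuming neg_styblinski_tang is imported:

import torch

d = 4
z = torch.full((d,), -2.903534)
print(neg_styblinski_tang(z))                                # approximately 39.166166 * d
print(neg_styblinski_tang(torch.rand(8, d) * 10 - 5).shape)  # torch.Size([8])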

Example 5: __init__

# Required import: from torch import Tensor [as alias]
# Or: from torch.Tensor import ndimension [as alias]
# Note: this is the constructor of botorch's MultiTaskGP (multitask.py); the excerpt
# also assumes `torch`, `Optional`/`List` from typing, and the gpytorch classes
# GaussianLikelihood, GammaPrior, ConstantMean, ScaleKernel, MaternKernel and IndexKernel.
    def __init__(
        self,
        train_X: Tensor,
        train_Y: Tensor,
        task_feature: int,
        output_tasks: Optional[List[int]] = None,
        rank: Optional[int] = None,
    ) -> None:
        r"""Multi-Task GP model using an ICM kernel, inferring observation noise.

        Args:
            train_X: A `n x (d + 1)` or `b x n x (d + 1)` (batch mode) tensor
                of training data. One of the columns should contain the task
                features (see `task_feature` argument).
            train_Y: A `n` or `b x n` (batch mode) tensor of training
                observations.
            task_feature: The index of the task feature
                (`-d <= task_feature <= d`).
            output_tasks: A list of task indices for which to compute model
                outputs. If omitted, outputs are returned for all task indices.
            rank: The rank to be used for the index kernel. If omitted, use a
                full rank (i.e. number of tasks) kernel.

        Example:
            >>> X1, X2 = torch.rand(10, 2), torch.rand(20, 2)
            >>> i1, i2 = torch.zeros(10, 1), torch.ones(20, 1)
            >>> train_X = torch.cat([
            >>>     torch.cat([X1, i1], -1), torch.cat([X2, i2], -1),
            >>> ])
            >>> train_Y = torch.cat([f1(X1), f2(X2)])
            >>> model = MultiTaskGP(train_X, train_Y, task_feature=-1)
        """
        if train_X.ndimension() != 2:
            # Currently, batch mode MTGPs are blocked upstream in GPyTorch
            raise ValueError(f"Unsupported shape {train_X.shape} for train_X.")
        d = train_X.shape[-1] - 1
        if not (-d <= task_feature <= d):
            raise ValueError(f"Must have that -{d} <= task_feature <= {d}")
        all_tasks = train_X[:, task_feature].unique().to(dtype=torch.long).tolist()
        if output_tasks is None:
            output_tasks = all_tasks
        else:
            if any(t not in all_tasks for t in output_tasks):
                raise RuntimeError("All output tasks must be present in input data.")
        self._output_tasks = output_tasks

        # TODO (T41270962): Support task-specific noise levels in likelihood
        likelihood = GaussianLikelihood(noise_prior=GammaPrior(1.1, 0.05))

        # construct indexer to be used in forward
        self._task_feature = task_feature
        self._base_idxr = torch.arange(d)
        self._base_idxr[task_feature:] += 1  # exclude task feature

        super().__init__(
            train_inputs=train_X, train_targets=train_Y, likelihood=likelihood
        )
        self.mean_module = ConstantMean()
        self.covar_module = ScaleKernel(
            base_kernel=MaternKernel(
                nu=2.5, ard_num_dims=d, lengthscale_prior=GammaPrior(3.0, 6.0)
            ),
            outputscale_prior=GammaPrior(2.0, 0.15),
        )
        num_tasks = len(all_tasks)
        self._rank = rank if rank is not None else num_tasks
        # TODO: Add LKJ prior for the index kernel
        self.task_covar_module = IndexKernel(num_tasks=num_tasks, rank=self._rank)
        self.to(train_X)
Developer: saschwan, Project: botorch, Lines of code: 71, Source file: multitask.py
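
To make the docstring example concrete (with the torch.cat fixes above), here is a sketch of the training-data layout this version of the constructor expects, where train_Y has shape n; the objectives f1 and f2 are replaced by stand-ins, which are not part of the original:

import torch

X1, X2 = torch.rand(10, 2), torch.rand(20, 2)
i1, i2 = torch.zeros(10, 1), torch.ones(20, 1)  # task-feature column: task 0 / task 1
train_X = torch.cat([torch.cat([X1, i1], -1), torch.cat([X2, i2], -1)])  # shape 30 x 3
train_Y = torch.cat([X1.sum(dim=-1), X2.prod(dim=-1)])  # stand-ins for f1(X1), f2(X2); shape 30
assert train_X.ndimension() == 2  # the constructor rejects batched train_X
model = MultiTaskGP(train_X, train_Y, task_feature=-1)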


Note: The torch.Tensor.ndimension examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please consult each project's license before using or redistributing the code, and do not reproduce this article without permission.