

Python torch.logspace Method Code Examples

This article collects typical usage examples of the torch.logspace method in Python. If you are wondering what exactly torch.logspace does, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples from torch, the module this method belongs to.


The following presents 8 code examples of the torch.logspace method, ordered by popularity by default.
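Before turning to the examples, a minimal sketch of the method itself may help: torch.logspace(start, end, steps, base=10.0) returns steps values evenly spaced on a logarithmic scale from base**start to base**end. The values below are illustrative only.

import torch

# Five values evenly spaced on a log scale from 10**-2 to 10**2:
x = torch.logspace(start=-2, end=2, steps=5)
# -> [0.01, 0.1, 1.0, 10.0, 100.0]

# The base is configurable; base=2 spans 2**0 .. 2**10:
y = torch.logspace(start=0, end=10, steps=11, base=2)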

Example 1: make_vec_eps

# Required module: import torch [as alias]
# Or: from torch import logspace [as alias]
def make_vec_eps(self, global_B, env_ranks):
        """Construct log-spaced epsilon values and select local assignments
        from the global number of sampler environment instances (for SyncRl
        and AsyncRl)."""
        if (self.eps_final_min is not None and
                self.eps_final_min != self._eps_final_scalar):  # vector epsilon.
            if self.alternating:  # In FF case, sampler sets agent.alternating.
                assert global_B % 2 == 0
                global_B = global_B // 2  # Env pairs will share epsilon.
                env_ranks = list(set([i // 2 for i in env_ranks]))
            self.eps_init = self._eps_init_scalar * torch.ones(len(env_ranks))
            global_eps_final = torch.logspace(
                torch.log10(torch.tensor(self.eps_final_min)),
                torch.log10(torch.tensor(self._eps_final_scalar)),
                global_B)
            self.eps_final = global_eps_final[env_ranks]
        self.eps_sample = self.eps_init 
Author: astooke | Project: rlpyt | Lines: 19 | Source: epsilon_greedy.py
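The pattern worth noting above is that log10 of the desired endpoint values is passed to torch.logspace, so the returned vector runs between the endpoints themselves; indexing the global vector with env_ranks then selects this process's share. A self-contained sketch of that pattern, with hypothetical values outside of rlpyt:

import math
import torch

eps_final_min, eps_final = 0.01, 0.1   # hypothetical endpoint values
global_B = 8                           # total number of environment instances
env_ranks = [2, 3]                     # env indices owned by this (hypothetical) process

# Passing log10 of the endpoints yields 8 values log-spaced from 0.01 to 0.1.
global_eps_final = torch.logspace(
    math.log10(eps_final_min), math.log10(eps_final), global_B)
local_eps_final = global_eps_final[env_ranks]  # this process's epsilon values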

Example 2: collect_maxiou

# Required module: import torch [as alias]
# Or: from torch import logspace [as alias]
def collect_maxiou(outdir, model, segloader, segrunner):
    '''
    Returns maxiou and maxiou_level across the data set, one per layer.

    This is a performance-sensitive function.  Best performance is
    achieved with a counting scheme which assumes a segloader with
    batch_size 1.
    '''
    device = next(model.parameters()).device
    conditional_quantiles, label_fracs = collect_cond_quantiles(
            outdir, model, segloader, segrunner)

    labelcat, categories = segrunner.get_label_and_category_names()
    label_category = [categories.index(c) if c in categories else 0
                for l, c in labelcat]
    num_labels, num_categories = (len(n) for n in [labelcat, categories])

    label_list = [('label', i) for i in range(num_labels)]
    category_list = [('all',)] if num_categories <= 1 else (
            [('cat', i) for i in range(num_categories)])
    max_iou, max_iou_level, max_iou_quantile = {}, {}, {}
    fracs = torch.logspace(-3, 0, 100)
    progress = default_progress()
    for layer, cq in progress(conditional_quantiles.items(), desc='Maxiou'):
        levels = cq.conditional(('all',)).quantiles(1 - fracs)
        denoms = 1 - cq.collected_normalize(category_list, levels)
        isects = (1 - cq.collected_normalize(label_list, levels)) * label_fracs
        unions = label_fracs + denoms[label_category, :, :] - isects
        iou = isects / unions
        # TODO: erase any for which threshold is bad
        max_iou[layer], level_bucket = iou.max(2)
        max_iou_level[layer] = levels[
                torch.arange(levels.shape[0])[None,:], level_bucket]
        max_iou_quantile[layer] = fracs[level_bucket]
    for layer in model.retained_features():
        numpy.savez(os.path.join(outdir, safe_dir_name(layer), 'max_iou.npz'),
            max_iou=max_iou[layer].cpu().numpy(),
            max_iou_level=max_iou_level[layer].cpu().numpy(),
            max_iou_quantile=max_iou_quantile[layer].cpu().numpy())
    return (max_iou, max_iou_level, max_iou_quantile) 
Author: CSAILVision | Project: gandissect | Lines: 42 | Source: dissection.py
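The single torch.logspace call above, fracs = torch.logspace(-3, 0, 100), produces 100 fractions between 0.001 and 1 that are dense near the small end, so the quantile levels 1 - fracs concentrate near 1. A short sketch of that idea on synthetic data (the activation sample is hypothetical):

import torch

fracs = torch.logspace(-3, 0, 100)   # 100 fractions from 1e-3 to 1
quantile_levels = 1 - fracs          # levels concentrated near 1

# Illustrative only: thresholds of a hypothetical activation sample
# taken at those quantile levels (requires torch >= 1.7 for torch.quantile).
activations = torch.randn(10000).abs()
levels = torch.quantile(activations, quantile_levels)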

Example 3: log_ap_loss

# Required module: import torch [as alias]
# Or: from torch import logspace [as alias]
def log_ap_loss(logvar, sqr_dists, num_thresh=10):


    print('dists', float(sqr_dists.min()), float(sqr_dists.max()))
    print('logvar', float(logvar.min()), float(logvar.max()))

    def hook(grad):
        print('grad', float(grad.min()), float(grad.max()), float(grad.sum()))
    logvar.register_hook(hook)

    variance = torch.exp(logvar).view(-1, 1)
    stdev = torch.sqrt(variance)
    print('stdev', float(stdev.min()), float(stdev.max()))

    max_dist = math.sqrt(float(sqr_dists.max()))
    minvar, maxvar = float(stdev.min()), float(stdev.max())
    thresholds = torch.logspace(
        math.log10(1 / maxvar), math.log10(max_dist / minvar), num_thresh).type_as(stdev)
    
    print('maxdist: {:.2e} minvar: {:.2e} maxvar: {:.2e}'.format(max_dist, minvar, maxvar))
    print('thresholds {:.2e} - {:.2e}'.format(thresholds.min(), thresholds.max()))

    k_sigma = stdev * thresholds
    k_sigma_sqr = variance * thresholds ** 2
    mask = (sqr_dists.view(-1, 1) < k_sigma_sqr).float()

    erf = torch.erf(k_sigma)
    masked_erf = erf * mask
    masked_exp = stdev * torch.exp(-k_sigma_sqr) * mask

    loss = masked_exp.sum(0) * masked_erf.sum(0) / erf.sum(0)
    loss = (loss[0] + loss[-1]) / 2. + loss[1:-1].sum()
    return -torch.log(loss * CONST / len(variance)) 
Author: tom-roddick | Project: oft | Lines: 35 | Source: loss.py

Example 4: compute_ls_grid

# Required module: import torch [as alias]
# Or: from torch import logspace [as alias]
def compute_ls_grid(As, y, sns_vec, m, ks, n_ls, l_eps, dtype):
    """Compute l values for each given k and return a dictionary mapping
    k to a list (in decreasing order) of lambda values.

    Arguments have the same meaning as in gel_paths2. sns_vec is a vector of
    sns_j values as opposed to the matrix computed in gel_paths2.
    """
    ls_grid = {}
    # The bound is given by max{||A_j'@(y - b_0)||/(m*sqrt{n_j}*k)} where b_0 =
    # 1'@y/m. So most things can be precomputed.
    l_max_b_0 = y.mean()
    l_max_unscaled = max(
        (A_j.t() @ (y - l_max_b_0)).norm(p=2) / (m * sns_j)
        for A_j, sns_j in zip(As, sns_vec)
    )
    for k in ks:
        l_max = l_max_unscaled / k
        if n_ls == 1:
            ls_grid[k] = [l_max]
        else:
            l_min = l_max * l_eps
            ls = torch.logspace(
                math.log10(l_min), math.log10(l_max), steps=n_ls, dtype=dtype
            )
            ls = sorted([l.item() for l in ls], reverse=True)
            ls_grid[k] = ls
    return ls_grid 
Author: jayanthkoushik | Project: torch-gel | Lines: 29 | Source: gelpaths.py
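A standalone sketch of the path construction above, with hypothetical bounds: the grid spans l_min = l_max * l_eps up to l_max on a log scale and is then sorted in decreasing order, so a solver can be warm-started from the most heavily regularized end of the path.

import math
import torch

l_max, l_eps, n_ls = 2.0, 1e-3, 10   # hypothetical path parameters
l_min = l_max * l_eps

ls = torch.logspace(math.log10(l_min), math.log10(l_max), steps=n_ls,
                    dtype=torch.float64)
ls = sorted((l.item() for l in ls), reverse=True)   # decreasing lambda path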

Example 5: logspace

# Required module: import torch [as alias]
# Or: from torch import logspace [as alias]
def logspace(*args, **kwargs):
    """
    Creates a 1D :class:`Tensor` with logarithmically spaced values (see PyTorch's `logspace`).

    :param args: positional arguments forwarded to torch.logspace
    :param kwargs: keyword arguments forwarded to torch.logspace

    :return: a 1D :class:`Tensor`
    """

    return tn.Tensor([torch.logspace(*args, **kwargs)[None, :, None]]) 
Author: rballester | Project: tntorch | Lines: 13 | Source: create.py

Example 6: plot_delta_measure

# Required module: import torch [as alias]
# Or: from torch import logspace [as alias]
def plot_delta_measure(self, start, end, steps=50):
        """
        Plot delta measure
        :param start:
        :param end:
        :return:
        """
        # Gamma values
        gamma_values = torch.logspace(start=start, end=end, steps=steps)

        # Log10 of gamma values
        gamma_log_values = torch.log10(gamma_values)

        # Delta measures
        C_norms = torch.zeros(steps)
        delta_scores = torch.zeros(steps)

        # For each gamma measure
        for i, gamma in enumerate(gamma_values):
            delta_scores[i], C_norms[i] = self.delta_measure(float(gamma), epsilon=0.1)
        # end for

        # Plot
        plt.plot(gamma_log_values.numpy(), delta_scores.numpy())
        plt.plot(gamma_log_values.numpy(), C_norms.numpy())
        plt.show()
    # end plot_delta_measure

    # Compute Delta measure 
Author: nschaetti | Project: EchoTorch | Lines: 31 | Source: Conceptor.py
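Since torch.logspace interprets start and end as base-10 exponents, the log10 of its output is simply an evenly spaced grid; the plotting code above relies on this when it draws curves against gamma_log_values. A small sketch of that equivalence over a hypothetical range:

import torch

start, end, steps = -3, 3, 50                      # hypothetical sweep range (exponents)
gamma_values = torch.logspace(start, end, steps)   # 10**-3 .. 10**3
gamma_log_values = torch.log10(gamma_values)       # evenly spaced again

assert torch.allclose(gamma_log_values,
                      torch.linspace(start, end, steps), atol=1e-4)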

Example 7: __regularize_residual_volume

# Required module: import torch [as alias]
# Or: from torch import logspace [as alias]
def __regularize_residual_volume(self, JtJ, Jt, JtR, weights, pose,
        invD0, invD1, x0, x1, K, sample_range):
        """ regularize the approximate with residual volume

        :param JtJ, the approximated Hessian JtJ
        :param Jt, the trasposed Jacobian
        :param JtR, the Right-hand size residual
        :param weights, the weight matrix
        :param pose, the initial estimated pose
        :param invD0, the template inverse depth map
        :param invD1, the image inverse depth map
        :param K, the intrinsic parameters
        :param x0, the template feature map
        :param x1, the image feature map
        :param sample_range, the numerb of samples

        ---------------
        :return the damped Hessian matrix
        """
        # the following current support only single scale
        JtR_volumes = []

        B, C, H, W = x0.shape
        px, py = geometry.generate_xy_grid(B, H, W, K)

        diag_mask = torch.eye(6).view(1,6,6).type_as(JtJ)
        diagJtJ = diag_mask * JtJ
        traceJtJ = torch.sum(diagJtJ, (2,1))
        epsilon = (traceJtJ * 1e-6).view(B,1,1) * diag_mask
        n = sample_range
        lambdas = torch.logspace(-5, 5, n).type_as(JtJ)

        for s in range(n):
            # the epsilon is to prevent the matrix to be too ill-conditioned
            D = lambdas[s] * diagJtJ + epsilon
            Hessian = JtJ + D
            pose_s = inverse_update_pose(Hessian, JtR, pose)

            res_s,_= compute_warped_residual(pose_s, invD0, invD1, x0, x1, px, py, K)
            JtR_s = torch.bmm(Jt, (weights * res_s).view(B,-1,1))
            JtR_volumes.append(JtR_s)

        JtR_flat = torch.cat(tuple(JtR_volumes), dim=2).view(B,-1)
        JtJ_flat = JtJ.view(B,-1)
        damp_est = self.net(torch.cat((JtR_flat, JtJ_flat), dim=1))
        R = diag_mask * damp_est.view(B,6,1) + epsilon # also lift-up

        return JtJ + R 
Author: lvzhaoyang | Project: DeeperInverseCompositionalAlgorithm | Lines: 50 | Source: algorithms.py
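The line lambdas = torch.logspace(-5, 5, n).type_as(JtJ) sweeps damping factors over ten orders of magnitude while matching the dtype and device of the Hessian, in the spirit of Levenberg-Marquardt. A minimal, self-contained sketch of such a damping sweep on a hypothetical 6x6 system (the matrices below are made up; torch.linalg.solve needs torch >= 1.8):

import torch

JtJ = torch.eye(6) * 2.0               # hypothetical approximate Hessian (6x6)
JtR = torch.randn(6, 1)                # hypothetical right-hand-side residual
diagJtJ = torch.eye(6) * JtJ           # keep only the diagonal of JtJ

n = 8
lambdas = torch.logspace(-5, 5, n).type_as(JtJ)   # damping factors 1e-5 .. 1e5
updates = []
for s in range(n):
    damped = JtJ + lambdas[s] * diagJtJ           # Levenberg-Marquardt style damping
    updates.append(torch.linalg.solve(damped, JtR))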

Example 8: get_precision_recall

# Required module: import torch [as alias]
# Or: from torch import logspace [as alias]
def get_precision_recall(args, score, label, num_samples, beta=1.0, sampling='log', predicted_score=None):
    '''
    :param args: runtime arguments (provide the target device)
    :param score: anomaly scores
    :param label: anomaly labels
    :param num_samples: the number of threshold samples
    :param beta: weight of recall in the F-score
    :param sampling: threshold sampling scheme, 'log' (log-spaced) or linear
    :param predicted_score: optional predicted scores subtracted from score
    :return: precision, recall, and F-beta score tensors
    '''
    if predicted_score is not None:
        score = score - torch.FloatTensor(predicted_score).squeeze().to(args.device)

    maximum = score.max()
    if sampling=='log':
        # Sample thresholds logarithmically
        # The sampled thresholds are logarithmically spaced between 10**start and 10**end.
        th = torch.logspace(0, torch.log10(torch.tensor(maximum)), num_samples).to(args.device)
    else:
        # Sample thresholds equally
        # The sampled thresholds are equally spaced points between start and end.
        th = torch.linspace(0, maximum, num_samples).to(args.device)

    precision = []
    recall = []

    for i in range(len(th)):
        anomaly = (score > th[i]).float()
        idx = anomaly * 2 + label
        tn = (idx == 0.0).sum().item()  # tn
        fn = (idx == 1.0).sum().item()  # fn
        fp = (idx == 2.0).sum().item()  # fp
        tp = (idx == 3.0).sum().item()  # tp

        p = tp / (tp + fp + 1e-7)
        r = tp / (tp + fn + 1e-7)

        if p != 0 and r != 0:
            precision.append(p)
            recall.append(r)

    precision = torch.FloatTensor(precision)
    recall = torch.FloatTensor(recall)


    f1 = (1 + beta ** 2) * (precision * recall).div(beta ** 2 * precision + recall + 1e-7)

    return precision, recall, f1 
Author: chickenbestlover | Project: RNN-Time-series-Anomaly-Detection | Lines: 50 | Source: anomalyDetector.py
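The sampling choice above is where torch.logspace appears: with sampling='log', the thresholds start at 10**0 = 1 and are denser near the low end of the score range, while the linspace branch spreads them uniformly from 0. A compact sketch on synthetic scores (values are made up):

import torch

scores = torch.rand(1000) * 100        # hypothetical anomaly scores
maximum = scores.max()
num_samples = 50

# Log-spaced thresholds between 10**0 = 1 and the maximum score:
th_log = torch.logspace(0, torch.log10(maximum).item(), num_samples)
# Equally spaced thresholds between 0 and the maximum score:
th_lin = torch.linspace(0, maximum.item(), num_samples)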


Note: The torch.logspace examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; please consult each project's license before redistributing or reusing the code. Do not reproduce this compilation without permission.