

Python norm.ppf Method Code Examples

This article collects typical usage examples of the scipy.stats.norm.ppf method in Python. If you are wondering how to call norm.ppf, what it is used for, or what real-world code using it looks like, the curated examples below should help. You can also explore more usage examples from the module this method belongs to, scipy.stats.norm.


The following shows 15 code examples of the norm.ppf method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
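
Before diving into the examples, here is a minimal standalone sketch (not taken from any of the projects below) showing what norm.ppf does: it is the quantile function of the normal distribution, i.e. the inverse of norm.cdf.

from scipy.stats import norm

# 97.5% quantile of the standard normal: roughly 1.96
print(norm.ppf(0.975))                    # ~1.95996

# ppf inverts cdf (up to floating-point error)
print(norm.cdf(norm.ppf(0.975)))          # ~0.975

# loc and scale give the quantile of N(mu, sigma): mu + sigma * ppf(q)
print(norm.ppf(0.975, loc=10, scale=2))   # ~13.92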

Example 1: _t_value

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def _t_value(self):
        r"""
        Returns the critical t-statistic given the input alpha-level (defaults to 0.05).

        Returns
        -------
        tval : float
            The critical t-value for use in computing the Least Significant Difference.

        Notes
        -----
        Scipy's :code:`t.ppf` method is used to compute the critical t-value.

        """
        tval = t.ppf(1 - self.alpha / 2, self.n - self.k)

        return tval 
Author: aschleg, Project: hypothetical, Lines: 19, Source: nonparametric.py
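
For comparison, a small standalone sketch (alpha = 0.05 is an assumed value, not part of the project above): the two-sided critical value from the standard normal is obtained with norm.ppf, and t.ppf approaches it as the degrees of freedom grow.

from scipy.stats import norm, t

alpha = 0.05
z_crit = norm.ppf(1 - alpha / 2)      # two-sided critical z-value, ~1.96
print(t.ppf(1 - alpha / 2, df=10))    # ~2.23
print(t.ppf(1 - alpha / 2, df=1000))  # ~1.96, close to z_crit
print(z_crit)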

Example 2: test_calc_bias_correction_bca

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def test_calc_bias_correction_bca(self):
        # There are 100 bootstrap replicates, already in ascending order for
        # each column. If we take row 51 to be the mle, then 50% of the
        # replicates are less than the mle, and we should have bias = 0.
        expected_result = np.zeros(self.mle_params.size)

        # Alias the function to be tested.
        func = bc.calc_bias_correction_bca

        # Perform the desired test
        func_result = func(self.bootstrap_replicates, self.mle_params)
        self.assertIsInstance(func_result, np.ndarray)
        self.assertEqual(func_result.shape, expected_result.shape)
        npt.assert_allclose(func_result, expected_result)

        # Create a fake mle that should be higher than 95% of the results
        fake_mle = self.bootstrap_replicates[95]
        expected_result_2 = norm.ppf(0.95) * np.ones(self.mle_params.size)
        func_result_2 = func(self.bootstrap_replicates, fake_mle)

        self.assertIsInstance(func_result_2, np.ndarray)
        self.assertEqual(func_result_2.shape, expected_result_2.shape)
        npt.assert_allclose(func_result_2, expected_result_2)
        return None 
Author: timothyb0912, Project: pylogit, Lines: 26, Source: test_bootstrap_calcs.py

Example 3: update

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def update(self, model):
        self.model = model

        self.sn2 = self.model.get_noise()

        # Sample representer points
        self.sampling_acquisition.update(model)
        self.sample_representer_points()

        # Omega values which are needed for the innovations
        # by sampling from a uniform grid
        self.W = norm.ppf(np.linspace(1. / (self.Np + 1),
                                      1 - 1. / (self.Np + 1),
                                      self.Np))[np.newaxis, :]

        # Compute current posterior belief at the representer points
        self.Mb, self.Vb = self.model.predict(self.zb, full_cov=True)
        self.pmin = mc_part.joint_pmin(self.Mb, self.Vb, self.Nf)
        self.logP = np.log(self.pmin) 
Author: automl, Project: RoBO, Lines: 21, Source: information_gain_mc.py

Example 4: testSecurityNormInvValueHolder

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def testSecurityNormInvValueHolder(self):
        mm1 = SecurityNormInvValueHolder('open')
        mm2 = SecurityNormInvValueHolder('open', fullAcc=True)

        for i in range(len(self.aapl['close'])):
            data = dict(aapl=dict(open=norm.cdf(self.aapl['open'][i])),
                        ibm=dict(open=norm.cdf(self.ibm['open'][i])))
            mm1.push(data)
            mm2.push(data)

            value1 = mm1.value
            value2 = mm2.value
            for name in value1.index():
                expected = norm.ppf(data[name]['open'])
                calculated = value1[name]
                self.assertAlmostEqual(expected, calculated, 6, 'at index {0}\n'
                                                                'expected:   {1:.12f}\n'
                                                                'calculated: {2:.12f}'
                                       .format(i, expected, calculated))

                calculated = value2[name]
                self.assertAlmostEqual(expected, calculated, 12, 'at index {0}\n'
                                                                 'expected:   {1:.12f}\n'
                                                                 'calculated: {2:.12f}'
                                       .format(i, expected, calculated)) 
Author: alpha-miner, Project: Finance-Python, Lines: 27, Source: testStatelessTechnicalAnalysers.py

Example 5: liptak

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def liptak(pvalues):
    r"""
    Apply Liptak's combining function

    .. math:: \sum_i \Phi^{-1}(1-p_i)

    where $\Phi^{-1}$ is the inverse CDF of the standard normal distribution.

    Parameters
    ----------
    pvalues : array_like
        Array of p-values to combine

    Returns
    -------
    float
        Liptak's combined test statistic
    """
    return np.sum(norm.ppf(1 - pvalues)) 
Author: statlab, Project: permute, Lines: 21, Source: npc.py
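
A quick usage sketch of the liptak function above on made-up p-values (the numbers are illustrative only):

import numpy as np
from scipy.stats import norm

pvalues = np.array([0.01, 0.04, 0.20])
# Liptak / Stouffer-style combination: sum of inverse-normal transforms
stat = np.sum(norm.ppf(1 - pvalues))
print(stat)  # ~4.92  (2.33 + 1.75 + 0.84)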

Example 6: predictions

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def predictions(self, image, forward_batch_size=32):
        from scipy.stats import norm
        image, _ = self._process_input(image)
        image_batch = np.vstack([[image]] * self._iterations)
        noise = np.random.normal(scale=self._std, size=image_batch.shape).astype(np.float32)
        image_batch += noise
        predictions = self._model.batch_predictions(image_batch)
        logits = np.argmax(predictions, axis=1)
        one_hot = np.zeros([self._iterations, self._num_classes])
        logits_one_hot = np.eye(self._num_classes)[logits]
        one_hot += logits_one_hot
        one_hot = np.sum(one_hot, axis=0)
        ranks = sorted(one_hot / np.sum(one_hot))[::-1]
        qi = ranks[0] - 1e-9
        qj = ranks[1] + 1e-9
        bound = self._std / 2. * (norm.ppf(qi) - norm.ppf(qj))
        return np.argmax(one_hot), bound 
Author: advboxes, Project: perceptron-benchmark, Lines: 19, Source: gaussian.py

Example 7: std_mad

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def std_mad(x):
    """Robust estimation of the standard deviation, based on the Corrected Median
    Absolute Deviation (MAD) of x.
    This computes the MAD of x, and applies the Gaussian distribution
    correction, making it a consistent estimator of the standard-deviation
    (when the sample looks Gaussian with outliers).

    Parameters
    ----------
    x : `np.ndarray`
        Input vector

    Returns
    -------
    output : `float`
        A robust estimation of the standard deviation
    """
    from scipy.stats import norm
    correction = 1 / norm.ppf(3 / 4)
    return correction * np.median(np.abs(x - np.median(x))) 
Author: X-DataInitiative, Project: tick, Lines: 22, Source: robust.py
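
The correction factor used above is the reciprocal of the 75% quantile of the standard normal. A standalone check on synthetic data (illustrative values, assuming a true standard deviation of 2.0):

import numpy as np
from scipy.stats import norm

print(1 / norm.ppf(3 / 4))  # ~1.4826, the usual MAD-to-sigma factor

rng = np.random.default_rng(0)
x = rng.normal(loc=0.0, scale=2.0, size=100_000)
mad_sigma = (1 / norm.ppf(3 / 4)) * np.median(np.abs(x - np.median(x)))
print(mad_sigma)            # close to the true standard deviation of 2.0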

Example 8: bias_corrected_ci

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def bias_corrected_ci(estimate, samples, conf=95):
    """
    Return the bias-corrected bootstrap confidence interval for an estimate
    :param estimate: Numerical estimate in the original sample
    :param samples: Nx1 array of bootstrapped estimates
    :param conf: Level of the desired confidence interval
    :return: Bias-corrected bootstrapped LLCI and ULCI for the estimate.
    """
    # noinspection PyUnresolvedReferences
    ptilde = ((samples < estimate) * 1).mean()
    Z = norm.ppf(ptilde)
    Zci = z_score(conf)
    Zlow, Zhigh = -Zci + 2 * Z, Zci + 2 * Z
    plow, phigh = norm.cdf(Zlow), norm.cdf(Zhigh)
    llci = np.percentile(samples, plow * 100, interpolation="lower")
    ulci = np.percentile(samples, phigh * 100, interpolation="higher")
    return llci, ulci 
Author: QuentinAndre, Project: pyprocessmacro, Lines: 19, Source: utils.py
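
A usage sketch of bias_corrected_ci with synthetic bootstrap estimates. The z_score helper below is an assumption standing in for the one pyprocessmacro provides in the same utils module; it simply returns the two-sided critical z-value for the requested confidence level.

import numpy as np
from scipy.stats import norm

def z_score(conf):
    # assumed helper: two-sided critical z-value for a conf% confidence level
    return norm.ppf(1 - (1 - conf / 100) / 2)

rng = np.random.default_rng(1)
samples = rng.normal(loc=1.0, scale=0.5, size=5000)  # fake bootstrap estimates
estimate = 1.1                                       # fake original-sample estimate

ptilde = (samples < estimate).mean()
Z = norm.ppf(ptilde)
Zci = z_score(95)
plow, phigh = norm.cdf(-Zci + 2 * Z), norm.cdf(Zci + 2 * Z)
llci, ulci = np.percentile(samples, [plow * 100, phigh * 100])
print(llci, ulci)  # bias-corrected bootstrap confidence bounds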

Example 9: _fisher_confint

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def _fisher_confint(self, alpha: float, observed: bool = False) -> List[float]:
        """Compute the Fisher information confidence interval for the MLE of the previous run.

        Args:
            alpha: Specifies the (1 - alpha) confidence level (0 < alpha < 1).
            observed: If True, the observed Fisher information is used to construct the
                confidence interval, otherwise the expected Fisher information.

        Returns:
            The Fisher information confidence interval.
        """
        shots = self._ret['shots']
        mle = self._ret['ml_value']

        # approximate the standard deviation of the MLE and construct the confidence interval
        std = np.sqrt(shots * self._compute_fisher_information(observed))
        ci = mle + norm.ppf(1 - alpha / 2) / std * np.array([-1, 1])

        # transform the confidence interval from [0, 1] to the target interval
        return [self.a_factory.value_to_estimation(bound) for bound in ci] 
Author: Qiskit, Project: qiskit-aqua, Lines: 22, Source: ae.py

Example 10: _init_model

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def _init_model(self, C, eta):
        """
        Initialize model.
        """
        logger.info("init model starts")
        self.model["mu"] = defaultdict()  # model parameter mean
        self.model["S"] = defaultdict()     # model parameter covariance
        self.model["C"] = C                        # PA parameter
        self.model["eta"] = eta                  # confidence parameter
        self.model["phi"] = norm.ppf(norm.cdf(eta))      # inverse of cdf(eta)
        self.model["phi_2"] = np.power(self.model["phi"], 2)
        self.model["psi"] = 1 + self.model["phi_2"] / 2
        self.model["zeta"] = 1 + self.model["phi_2"]
        logger.info("init model finished")

        pass 
Author: kzky, Project: python-online-machine-learning-library, Lines: 18, Source: multiclass_soft_confidence_weighted_2_diag.py

Example 11: prewarp

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def prewarp(self, xx):
        """Extra work needed to get variables into the Gaussian space
        representation."""
        xxw = {}
        for arg_name, vv in xx.items():
            assert np.isscalar(vv)
            space = self.space[arg_name]

            if space is not None:
                # Warp so we think it is apriori uniform in [a, b]
                vv = space.warp(vv)
                assert vv.size == 1

                # Now make uniform on [0, 1], also unpack warped to scalar
                (lb, ub), = space.get_bounds()
                vv = linear_rescale(vv.item(), lb, ub, 0, 1)

                # Now make std Gaussian apriori
                vv = norm.ppf(vv)
            assert np.isscalar(vv)
            xxw[arg_name] = vv
        return xxw 
Author: uber, Project: bayesmark, Lines: 24, Source: nevergrad_optimizer.py

Example 12: _confidence_interval_by_alpha

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def _confidence_interval_by_alpha(cls, p_hat, n, alpha, method='wald'):
        """Compute confidence interval for estimate of Bernoulli parameter p.

        Args:
            p_hat: maximum likelihood estimate of p
            n: samples observed
            alpha: the probability that the true p falls outside the CI
            method: 'wald' for the Wald interval; any other value selects the Wilson interval

        Returns:
            left, right
        """
        prob = 1 - 0.5 * alpha
        z = norm.ppf(prob)

        compute_ci = cls._confidence_interval_by_z_wald if method == 'wald' else cls._confidence_interval_by_z_wilson

        return compute_ci(p_hat, n, z) 
Author: kelvinguu, Project: lang2program, Lines: 19, Source: evaluation.py
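
For reference, a minimal standalone Wald interval built the same way (the formula is standard; p_hat, n, and alpha below are illustrative values):

import numpy as np
from scipy.stats import norm

p_hat, n, alpha = 0.3, 200, 0.05
z = norm.ppf(1 - 0.5 * alpha)                  # ~1.96
half_width = z * np.sqrt(p_hat * (1 - p_hat) / n)
print(p_hat - half_width, p_hat + half_width)  # Wald confidence interval for p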

Example 13: test_conditional_value_at_risk_mc

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def test_conditional_value_at_risk_mc(self):
    for mu, sigma, alpha in [(1, 1, 0.05), (0.4, 0.1, 0.02), (0.1, 2, 0.01)]:
      # prepare estimator dummy
      mu1 = np.array([mu])
      sigma1 = np.identity(n=1) * sigma
      est = GaussianDummy(mean=mu1, cov=sigma1**2, ndim_x=1, ndim_y=1, has_pdf=True)
      est.fit(None, None)

      CVaR_true = mu - sigma/alpha * norm.pdf(norm.ppf(alpha))
      CVaR_est = est.conditional_value_at_risk(x_cond=np.array([[0],[1]]), alpha=alpha)

      print("CVaR True (%.2f, %.2f):"%(mu, sigma), CVaR_true)
      print("CVaR_est (%.2f, %.2f):"%(mu, sigma), CVaR_est)
      print("VaR (%.2f, %.2f):"%(mu, sigma), est.value_at_risk(x_cond=np.array([[0],[1]]), alpha=alpha))

      self.assertAlmostEqual(CVaR_est[0], CVaR_true, places=2)
      self.assertAlmostEqual(CVaR_est[1], CVaR_true, places=2) 
Author: freelunchtheorem, Project: Conditional_Density_Estimation, Lines: 19, Source: unittests_evaluations.py

Example 14: value_at_risk

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def value_at_risk(self, x_cond, alpha=0.01, **kwargs):
    """ Computes the Value-at-Risk (VaR) of the fitted distribution. Only if ndim_y = 1

    Args:
      x_cond: different x values to condition on - numpy array of shape (n_values, ndim_x)
      alpha: quantile percentage of the distribution

    Returns:
       VaR values for each x to condition on - numpy array of shape (n_values)
    """
    assert self.ndim_y == 1, "Value at Risk can only be computed when ndim_y = 1"
    assert x_cond.ndim == 2

    VaR = norm.ppf(alpha, loc=x_cond, scale=self._std(x_cond))[:,0]
    assert VaR.shape == (x_cond.shape[0],)
    return VaR 
Author: freelunchtheorem, Project: Conditional_Density_Estimation, Lines: 18, Source: EconDensity.py

Example 15: value_at_risk

# Required import: from scipy.stats import norm [as alias]
# Alternatively: from scipy.stats.norm import ppf [as alias]
def value_at_risk(self, x_cond, alpha=0.01, **kwargs):
    """ Computes the Value-at-Risk (VaR) of the fitted distribution. Only if ndim_y = 1

    Args:
      x_cond: different x values to condition on - numpy array of shape (n_values, ndim_x)
      alpha: quantile percentage of the distribution

    Returns:
       VaR values for each x to condition on - numpy array of shape (n_values)
    """
    assert self.ndim_y == 1, "Value at Risk can only be computed when ndim_y = 1"
    assert x_cond.ndim == 2

    VaR = norm.ppf(alpha, loc=self._mean(x_cond), scale=self._std(x_cond))[:,0]
    assert VaR.shape == (x_cond.shape[0],)
    return VaR 
Author: freelunchtheorem, Project: Conditional_Density_Estimation, Lines: 18, Source: LinearGaussian.py
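
As a sanity check on the two value_at_risk implementations above: for a Gaussian, the alpha-level VaR is simply the alpha-quantile, so it can be written out directly (a standalone sketch with illustrative parameters, not project code):

from scipy.stats import norm

alpha, mu, sigma = 0.01, 0.0, 1.0
var_closed_form = mu + sigma * norm.ppf(alpha)    # mu + sigma * Phi^{-1}(alpha)
var_scipy = norm.ppf(alpha, loc=mu, scale=sigma)  # same value via loc/scale
print(var_closed_form, var_scipy)                 # both ~ -2.326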


Note: the scipy.stats.norm.ppf examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to each project's license. Do not repost without permission.