

Python norm.logcdf Method Code Examples

This article collects typical usage examples of the Python method scipy.stats.norm.logcdf. If you are wondering what norm.logcdf does, how to call it, or where to find real-world examples of it, the curated code samples below may help. You can also explore further usage examples of scipy.stats.norm, the distribution object this method belongs to.


The following presents 9 code examples of norm.logcdf, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
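Before the examples, a minimal usage sketch: norm.logcdf returns the natural log of the normal CDF, and it stays numerically stable deep in the lower tail, where norm.cdf underflows to 0 and taking its log would give -inf.

import numpy as np
from scipy.stats import norm

print(norm.logcdf(0.0))                      # log(0.5) = -0.6931...
print(norm.logcdf(-40.0))                    # ~ -804.6, while np.log(norm.cdf(-40.0)) is -inf
print(norm.logcdf(1.0, loc=2.0, scale=0.5))  # log P(X <= 1) for X ~ N(2, 0.5**2)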

Example 1: _setup

# Required import: from scipy.stats import norm [as alias]
# Or alternatively: from scipy.stats.norm import logcdf [as alias]
def _setup(self):
        super(MinValueEntropySearch, self)._setup()

        # Apply Gumbel sampling
        m = self.models[0]
        valid = self.feasible_data_index()

        # Work with feasible data
        X = self.data[0][valid, :]
        N = np.shape(X)[0]
        Xrand = RandomDesign(self.gridsize, self._domain).generate()
        fmean, fvar = m.predict_f(np.vstack((X, Xrand)))
        idx = np.argmin(fmean[:N])
        right = fmean[idx].flatten()  # + 2*np.sqrt(fvar[idx]).flatten()
        left = right
        probf = lambda x: np.exp(np.sum(norm.logcdf(-(x - fmean) / np.sqrt(fvar)), axis=0))

        i = 0
        while probf(left) < 0.75:
            left = 2. ** i * np.min(fmean - 5. * np.sqrt(fvar)) + (1. - 2. ** i) * right
            i += 1

        # Binary search for 3 percentiles
        q1, med, q2 = map(lambda val: bisect(lambda x: probf(x) - val, left, right, maxiter=10000, xtol=0.01),
                          [0.25, 0.5, 0.75])
        beta = (q1 - q2) / (np.log(np.log(4. / 3.)) - np.log(np.log(4.)))
        alpha = med + beta * np.log(np.log(2.))

        # obtain samples from y*
        mins = -np.log(-np.log(np.random.rand(self.num_samples).astype(np_float_type))) * beta + alpha
        self.samples.set_data(mins) 
Developer ID: GPflow, Project: GPflowOpt, Lines: 33, Source file: mes.py
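The probf lambda above is the key logcdf trick: for independent Gaussian marginals f_i ~ N(mu_i, var_i), Pr(min_i f_i > y) = prod_i Pr(f_i > y), and summing log-CDFs before exponentiating avoids underflow when many factors are tiny. A small self-contained check of that identity (toy mu/var values of my own, not from the example):

import numpy as np
from scipy.stats import norm

mu = np.array([[0.0], [1.0], [2.0]])   # column vectors, shaped like fmean/fvar above
var = np.array([[1.0], [4.0], [0.25]])
probf = lambda y: np.exp(np.sum(norm.logcdf(-(y - mu) / np.sqrt(var)), axis=0))

# direct product of survival probabilities agrees where nothing underflows
direct = np.prod(norm.sf((-1.0 - mu) / np.sqrt(var)), axis=0)
assert np.allclose(probf(-1.0), direct)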

Example 2: invLogCDF

# Required import: from scipy.stats import norm [as alias]
# Or alternatively: from scipy.stats.norm import logcdf [as alias]
def invLogCDF(x, mu, sigma):  # log of (1 - cdf) for a normal distribution
    x = (x - mu) / sigma
    return norm.logcdf(-x)  # note: negating x after normalization computes log(1 - cdf(x)) accurately
Developer ID: ymirsky, Project: KitNET-py, Lines: 5, Source file: utils.py
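By the symmetry of the standard normal, norm.logcdf(-z) equals log(1 - Φ(z)), which scipy also exposes directly as norm.logsf; a quick check of the equivalence (my own sketch with toy values):

import numpy as np
from scipy.stats import norm

x = np.linspace(-5.0, 5.0, 11)
mu, sigma = 1.0, 2.0
z = (x - mu) / sigma
assert np.allclose(norm.logcdf(-z), norm.logsf(z))  # invLogCDF(x, mu, sigma) == norm.logsf(z)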

Example 3: update_parameters

# Required import: from scipy.stats import norm [as alias]
# Or alternatively: from scipy.stats.norm import logcdf [as alias]
def update_parameters(self):
        # apply Gumbel sampling to obtain samples from y*
        # we approximate Pr(y* < y) by a Gumbel(alpha, beta) distribution
        # generate grid
        N = self.model.model.X.shape[0]

        random_design = RandomDesign(self.space)
        grid = random_design.get_samples(self.grid_size)
        fmean, fvar = self.model.model.predict(np.vstack([self.model.model.X, grid]), include_likelihood=False)
        fsd = np.sqrt(fvar)
        idx = np.argmin(fmean[:N])

        # scaling so that gumbel scale is proportional to IQ range of cdf Pr(y*<z)
        # find quantiles Pr(y*<y1)=r1 and Pr(y*<y2)=r2
        right = fmean[idx].flatten()
        left = right
        probf = lambda x: np.exp(np.sum(norm.logcdf(-(x - fmean) / fsd), axis=0))
        i = 0
        while probf(left) < 0.75:
            left = 2. ** i * np.min(fmean - 5. * fsd) + (1. - 2. ** i) * right
            i += 1
        i = 0
        while probf(right) > 0.25:
            right = -2. ** i * np.min(fmean - 5. * fsd) + (1. + 2. ** i) * fmean[idx].flatten()
            i += 1

        # Binary search for 3 percentiles
        q1, med, q2 = map(lambda val: bisect(lambda x: probf(x) - val, left, right, maxiter=10000, xtol=0.00001),
                            [0.25, 0.5, 0.75])

        # solve for gumbel params
        beta = (q1 - q2) / (np.log(np.log(4. / 3.)) - np.log(np.log(4.)))
        alpha = med + beta * np.log(np.log(2.))

        # sample K length vector from unif([0,1])
        # return K Y* samples
        self.mins = -np.log(-np.log(np.random.rand(self.num_samples))) * beta + alpha 
Developer ID: amzn, Project: emukit, Lines: 39, Source file: max_value_entropy_search.py
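A sanity check of the closed-form solve above (my own sketch), under the assumption that y* follows a minimum-Gumbel law with survival function S(y) = exp(-exp((y - a)/b)): q1, med and q2 are then the points where S equals 0.25, 0.5 and 0.75 (the three bisection targets), the recovered beta equals -b, and that sign is exactly what the sampling line -log(-log(U)) * beta + alpha needs to reproduce S.

import numpy as np

a, b = 1.0, 2.0
q1 = a + b * np.log(np.log(4.))        # S(q1) = 0.25
med = a + b * np.log(np.log(2.))       # S(med) = 0.5
q2 = a + b * np.log(np.log(4. / 3.))   # S(q2) = 0.75

beta = (q1 - q2) / (np.log(np.log(4. / 3.)) - np.log(np.log(4.)))
alpha = med + beta * np.log(np.log(2.))
assert np.isclose(alpha, a) and np.isclose(beta, -b)

u = np.random.rand(200000)
mins = -np.log(-np.log(u)) * beta + alpha
print(np.mean(mins > med))             # ~ 0.5, matching S(med)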

Example 4: evaluate

# Required import: from scipy.stats import norm [as alias]
# Or alternatively: from scipy.stats.norm import logcdf [as alias]
def evaluate(self, x: np.ndarray) -> np.ndarray:
        """
        Evaluates the penalization function value
        """

        if self.x_batch is None:
            return np.ones((x.shape[0], 1))

        distances = _distance_calculation(x, self.x_batch)
        normalized_distance = (distances - self.radius) / self.scale
        return norm.logcdf(normalized_distance).sum(axis=1, keepdims=True) 
Developer ID: amzn, Project: emukit, Lines: 13, Source file: local_penalization.py

Example 5: evaluate_with_gradients

# Required import: from scipy.stats import norm [as alias]
# Or alternatively: from scipy.stats.norm import logcdf [as alias]
def evaluate_with_gradients(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Evaluates the penalization function value and gradients with respect to x
        """

        if self.x_batch is None:
            return np.ones((x.shape[0], 1)), np.zeros(x.shape)

        distances, d_dist_dx = _distance_with_gradient(x, self.x_batch)
        normalized_distance = (distances - self.radius) / self.scale
        h_func = norm.cdf(normalized_distance)
        d_value_dx = 0.5 * (1 / h_func[:, :, None]) \
                     * norm.pdf(normalized_distance)[:, :, None] \
                     * d_dist_dx / self.scale[None, :, None]
        return norm.logcdf(normalized_distance).sum(1, keepdims=True), d_value_dx.sum(1) 
Developer ID: amzn, Project: emukit, Lines: 17, Source file: local_penalization.py
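The gradient above relies on the identity d/dz log Φ(z) = φ(z)/Φ(z), which is where the pdf-over-cdf factor in d_value_dx comes from. A standalone finite-difference check of that identity, independent of the class:

import numpy as np
from scipy.stats import norm

z = np.linspace(-3.0, 3.0, 13)
eps = 1e-6
numeric = (norm.logcdf(z + eps) - norm.logcdf(z - eps)) / (2 * eps)
analytic = norm.pdf(z) / norm.cdf(z)
assert np.allclose(numeric, analytic, atol=1e-5)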

Example 6: integrateNormalDensity

# Required import: from scipy.stats import norm [as alias]
# Or alternatively: from scipy.stats.norm import logcdf [as alias]
def integrateNormalDensity(lb, ub, mu=0, sigma=1):
    import math
    import numpy as np
    from scipy.stats import norm
    assert not (ub < lb)
    lessThanUpper = norm.logcdf(ub, loc=mu, scale=sigma)
    lessThanLower = norm.logcdf(lb, loc=mu, scale=sigma)
    # log P(lb < X < ub) = logcdf(ub) + log1p(-exp(logcdf(lb) - logcdf(ub)))
    return lessThanUpper + np.log1p(-math.exp(lessThanLower - lessThanUpper))
Developer ID: ellisk42, Project: TikZ, Lines: 9, Source file: utilities.py
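A usage sketch (assuming the function above is in scope): integrateNormalDensity returns log P(lb < X < ub) entirely in log space, which survives lower-tail intervals where the naive difference of CDFs underflows to zero.

import numpy as np
from scipy.stats import norm

print(integrateNormalDensity(-1.0, 1.0))          # ~ log(0.6827) = -0.3817
print(np.log(norm.cdf(1.0) - norm.cdf(-1.0)))     # same value, computed naively

print(integrateNormalDensity(-41.0, -40.0))       # finite, ~ -804.6
print(np.log(norm.cdf(-40.0) - norm.cdf(-41.0)))  # -inf: both CDFs underflow to 0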

Example 7: _scores

# Required import: from scipy.stats import norm [as alias]
# Or alternatively: from scipy.stats.norm import logcdf [as alias]
def _scores(self, range_, k, cumsum, m_log_combs, n_log_combs):
        """Calculates the score function for all possible numbers of rows (or columns)."""
        avgs = cumsum / (range_ * k)
        log_probs = norm.logcdf(-avgs * np.sqrt(range_ * k))
        return - log_probs - m_log_combs - n_log_combs[k-1] 
Developer ID: padilha, Project: biclustlib, Lines: 7, Source file: las.py

Example 8: mle_censored_mean

# Required import: from scipy.stats import norm [as alias]
# Or alternatively: from scipy.stats.norm import logcdf [as alias]
def mle_censored_mean(cmpd_df, std_est, value_col='PIC50', relation_col='relation'):
    """
    Compute a maximum likelihood estimate of the true mean value underlying the distribution of replicate assay measurements for a
    single compound. The data may be a mix of censored and uncensored measurements, as indicated by the 'relation' column in the input
    data frame cmpd_df. std_est is an estimate for the standard deviation of the distribution, which is assumed to be Gaussian;
    we typically compute a common estimate for the whole dataset using replicate_rmsd().
    """
    left_censored = np.array(cmpd_df[relation_col].values == '<', dtype=bool)
    right_censored = np.array(cmpd_df[relation_col].values == '>', dtype=bool)
    not_censored = ~(left_censored | right_censored)
    n_left_cens = sum(left_censored)
    n_right_cens = sum(right_censored)
    nreps = cmpd_df.shape[0]
    values = cmpd_df[value_col].values
    nan = float('nan')

    relation = ''
    # If all the replicate values are left- or right-censored, return the smallest or largest reported (threshold) value accordingly.
    if n_left_cens == nreps:
        mle_value = min(values)
        relation = '<'
    elif n_right_cens == nreps:
        mle_value = max(values)
        relation = '>'
    elif n_left_cens + n_right_cens == 0:
        # If no values are censored, the MLE is the actual mean.
        mle_value = np.mean(values)
    else:
        # Some, but not all observations are censored.
        # First, define the negative log likelihood function
        def loglik(mu):
            ll = -sum(norm.logpdf(values[not_censored], loc=mu, scale=std_est))
            if n_left_cens > 0:
                ll -= sum(norm.logcdf(values[left_censored], loc=mu, scale=std_est))
            if n_right_cens > 0:
                ll -= sum(norm.logsf(values[right_censored], loc=mu, scale=std_est))
            return ll

        # Then minimize it
        opt_res = minimize_scalar(loglik, method='brent')
        if not opt_res.success:
            print('Likelihood maximization failed, message is: "%s"' % opt_res.message)
            mle_value = nan
        else:
            mle_value = opt_res.x
    return mle_value, relation


# ****************************************************************************************************************************************** 
Developer ID: ATOMconsortium, Project: AMPL, Lines: 51, Source file: curate_data.py
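A hypothetical usage sketch with toy data (assuming the function above and its imports — pandas, numpy, scipy.stats.norm, scipy.optimize.minimize_scalar — are in scope): three replicates for one compound, one of them left-censored, so the MLE is pulled below the mean of the two uncensored values.

import pandas as pd

cmpd_df = pd.DataFrame({'PIC50': [6.2, 6.5, 5.0],
                        'relation': ['', '', '<']})  # '<' marks a left-censored replicate
mle_value, relation = mle_censored_mean(cmpd_df, std_est=0.5)
print(mle_value, relation)  # estimate below mean(6.2, 6.5) = 6.35; relation is ''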

Example 9: log_likelihood

# Required import: from scipy.stats import norm [as alias]
# Or alternatively: from scipy.stats.norm import logcdf [as alias]
def log_likelihood(self, smis, *, log_0=-1000.0, **targets):
        def _avoid_overflow(ll_):
            # log(UP - LOW) = log(exp(log(UP) - log(C)) - exp(log(LOW) - log(C))) + log(C)
            # where log(C) = max(log(UP), log(LOW))
            ll_c = np.max(ll_)
            ll_ = np.log(np.exp(ll_[1] - ll_c) - np.exp(ll_[0] - ll_c)) + ll_c
            return ll_

        # self.update_targets(reset=False, **targets):
        for k, v in targets.items():
            if not isinstance(v, tuple) or len(v) != 2 or v[1] <= v[0]:
                raise ValueError('must be a tuple with (low, up) boundary')
            self._targets[k] = v

        if not self._targets:
            raise RuntimeError('<targets> is empty')

        ll = pd.DataFrame(np.full((len(smis), len(self._mdl)), log_0), columns=self._mdl.keys())

        # 1. apply prediction on given sims
        # 2. reset returns' index to [0, 1, ..., len(smis) - 1], this should be consistent with ll's index
        # 3. drop all rows which have NaN value(s)
        pred = self.predict(smis).reset_index(drop=True).dropna(axis='index', how='any')

        # because pred only contains available data
        # 'pred.index.values' should eq to the previous implementation
        idx = pred.index.values

        # calculate likelihood
        for k, (low, up) in self._targets.items():  # k: target; v: (low, up)

            # predict mean, std for all smiles
            mean, std = pred[k + ': mean'], pred[k + ': std']

            # calculate low likelihood
            low_ll = norm.logcdf(low, loc=np.asarray(mean), scale=np.asarray(std))

            # calculate up likelihood
            up_ll = norm.logcdf(up, loc=np.asarray(mean), scale=np.asarray(std))

            # zip low and up likelihood to a 1-dim array then save it.
            # like: [(tar_low_smi1, tar_up_smi1),  (tar_low_smi2, tar_up_smi2), ..., (tar_low_smiN, tar_up_smiN)]
            lls = zip(low_ll, up_ll)
            ll[k].iloc[idx] = np.array([*map(_avoid_overflow, list(lls))])

        return ll 
Developer ID: yoshida-lab, Project: XenonPy, Lines: 48, Source file: estimator.py
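The _avoid_overflow helper is the standard max-subtraction trick for computing log(CDF(up) - CDF(low)) from the two log-CDFs without over- or underflow. A standalone sketch of the same computation (my own wrapper, not XenonPy API):

import numpy as np
from scipy.stats import norm

def log_cdf_interval(low, up, mean, std):
    ll = np.array([norm.logcdf(low, loc=mean, scale=std),
                   norm.logcdf(up, loc=mean, scale=std)])
    c = np.max(ll)  # here c = ll[1], since the CDF is monotone
    return np.log(np.exp(ll[1] - c) - np.exp(ll[0] - c)) + c

print(log_cdf_interval(-1.0, 1.0, 0.0, 1.0))   # ~ -0.3817
print(np.log(norm.cdf(1.0) - norm.cdf(-1.0)))  # agrees where nothing underflows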


Note: The scipy.stats.norm.logcdf examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use should follow each project's License. Do not republish without permission.