

Python chi2.pdf Method Code Examples

This article collects typical usage examples of the scipy.stats.chi2.pdf method in Python. If you are wondering what exactly chi2.pdf does, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples of scipy.stats.chi2, the distribution object this method belongs to.


The sections below present 5 code examples of chi2.pdf, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Python code examples.
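
As a warm-up before the examples, here is a minimal, self-contained sketch of calling chi2.pdf directly. The degrees of freedom and the evaluation grid are arbitrary choices for illustration:

import numpy as np
from scipy.stats import chi2

k = 4                              # degrees of freedom (arbitrary choice)
x = np.linspace(0.0, 20.0, 200)    # evaluation grid
density = chi2.pdf(x, k)           # chi-square density evaluated at each grid point
print(x[np.argmax(density)])       # the mode should sit near k - 2 = 2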

Example 1: visualize_pruning

# Required import: from scipy.stats import chi2 [as alias]
# Or alias the method directly: pdf = chi2.pdf
# Also assumes: import numpy as np, import matplotlib.pyplot as plt
def visualize_pruning(w_norm, n_retained,
                      title='Initial model weights vs theoretical for pruning'):
    fig, ax1 = plt.subplots()
    ax1.set_title(title)
    ax1.hist(w_norm, density=True, bins=200, alpha=0.6, histtype='stepfilled',
             range=[0, n_retained * 5])
    ax1.axvline(x=n_retained, linewidth=1, color='r')
    ax1.set_ylabel('PDF', color='b')

    ax2 = ax1.twinx()
    ax2.set_ylabel('Survival Function', color='r')

    ax1.set_xlabel('w_norm')

    x = np.linspace(chi2.ppf(0.001, n_retained),
                    chi2.ppf(0.999, n_retained), 100)
    ax2.plot(x, chi2.sf(x, n_retained),
             'g-', lw=1, alpha=0.6, label='chi2 sf')
    ax1.plot(x, chi2.pdf(x, n_retained),
             'r-', lw=1, alpha=0.6, label='chi2 pdf') 
Developer: menpo, Project: lsfm, Lines of code: 22, Source file: visualize.py
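
A hypothetical way to exercise this function: if the weight vectors are standard normal, their squared norms follow a chi-square distribution with n_retained degrees of freedom, so synthetic data such as the following should line up with the theoretical curves (the sample size and n_retained below are arbitrary):

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import chi2     # needed by visualize_pruning itself

n_retained = 30                                  # assumed number of retained dimensions
weights = np.random.randn(10000, n_retained)     # synthetic standard-normal weights
w_norm = (weights ** 2).sum(axis=1)              # squared norms, distributed as chi2(n_retained)
visualize_pruning(w_norm, n_retained)
plt.show()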

Example 2: compute_treatment_likelihood_ratio

# Required import: from scipy.stats import chi2 [as alias]
# Or alias the method directly: pdf = chi2.pdf
# Also assumes: import numpy as np, from scipy.stats import ttest_1samp
def compute_treatment_likelihood_ratio(df, term_counts, word_index):
	null_model = train_treatment_classifier(df, term_counts, word_index, occurence=None)
	model_st_word = train_treatment_classifier(df, term_counts, word_index)
	model_st_no_word = train_treatment_classifier(df, term_counts, word_index, occurence=0)

	word_occurence = term_counts[:,word_index].toarray()
	mask_word = (word_occurence == 1).flatten()
	mask_no_word = (word_occurence == 0).flatten()
	p_ind = np.arange(term_counts.shape[0])
	word_occurs = df[df.post_index.isin(p_ind[mask_word])]
	word_no_occurs = df[df.post_index.isin(p_ind[mask_no_word])]

	null_given_word = predict_treatment_prob_from_model(null_model, word_occurs)
	null_given_no_word = predict_treatment_prob_from_model(null_model, word_no_occurs)
	p_t_given_word = predict_treatment_prob_from_model(model_st_word, word_occurs)
	p_t_given_no_word = predict_treatment_prob_from_model(model_st_no_word, word_no_occurs)

	ratio_word = np.log(null_given_word / p_t_given_word)
	ratio_no_word = np.log(null_given_no_word / p_t_given_no_word)
	ratio = np.hstack([ratio_word, ratio_no_word])
	print("Mean and std of log likelihood ratios", ratio.mean(), ratio.std())
	# return -2*ratio.sum(), chi2.pdf(-2*ratio.sum(), df=2)
	return ttest_1samp(ratio, 0.0) 
Developer: blei-lab, Project: causal-text-embeddings, Lines of code: 25, Source file: test_cond_indep.py
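
The commented-out return statement points at Wilks' likelihood-ratio test, in which -2 times the summed log-ratio is compared against a chi-square distribution. A standalone sketch of that idea, with made-up per-observation log-ratios and an assumed 2 degrees of freedom (note that the tail probability comes from chi2.sf, while chi2.pdf gives the density at the statistic):

import numpy as np
from scipy.stats import chi2

# hypothetical per-observation log-likelihood ratios log(L_null / L_alternative)
log_ratios = np.random.normal(loc=-0.05, scale=0.2, size=500)

statistic = -2.0 * log_ratios.sum()     # Wilks' statistic
dof = 2                                 # assumed degrees of freedom
p_value = chi2.sf(statistic, dof)       # upper-tail probability of the test
density = chi2.pdf(statistic, dof)      # density at the statistic, as in the comment above
print(statistic, p_value, density)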

Example 3: likelihood_ratio_hypothesis_test

# Required import: from scipy.stats import chi2 [as alias]
# Or alias the method directly: pdf = chi2.pdf
# Also assumes: import numpy as np, from scipy.stats import ttest_1samp
def likelihood_ratio_hypothesis_test(df, term_counts, word_index):
	treated = df[df.treatment==1]
	control = df[df.treatment==0]
	null_model = train_classifier(df, term_counts, word_index, treat_index=None)
	model_st_treated = train_classifier(df, term_counts, word_index)
	model_st_not_treated = train_classifier(df, term_counts, word_index, treat_index=0)

	null_st_treated = compute_word_occur_prob(null_model, treated)
	null_st_not_treated = compute_word_occur_prob(null_model, control)
	prob_st_treated = compute_word_occur_prob(model_st_treated, treated)
	prob_st_not_treated = compute_word_occur_prob(model_st_not_treated, control)

	ratio_treated = np.log(null_st_treated / prob_st_treated)
	ratio_control = np.log(null_st_not_treated / prob_st_not_treated)
	ratios = np.hstack([ratio_treated,ratio_control])

	print("Mean and std. of log likelihood ratios:", ratios.mean(), ratios.std())
	return ttest_1samp(ratios, 0.0)
	
	# statistic = -2 * ratios.sum()
	# p_value = chi2.pdf(statistic, df=2)
	# return statistic, p_value 
Developer: blei-lab, Project: causal-text-embeddings, Lines of code: 24, Source file: test_cond_indep.py

Example 4: calculate_bartlett_sphericity

# Required import: from scipy.stats import chi2 [as alias]
# Or alias the method directly: pdf = chi2.pdf
# Also assumes: import numpy as np (corr is the library's own correlation helper)
def calculate_bartlett_sphericity(x):
    """
    Test the hypothesis that the correlation matrix
    is equal to the identity matrix.

    H0: The matrix of population correlations is equal to I.
    H1: The matrix of population correlations is not equal to I.

    The formula for Bartlett's Sphericity test is:

    .. math:: \chi^2 = -\left(n - 1 - \frac{2p + 5}{6}\right) \ln\bigl(\det(R)\bigr)

    where det(R) is the determinant of the correlation matrix R,
    n is the number of observations, and p is the number of variables.

    Parameters
    ----------
    x : array-like
        The array from which to calculate sphericity.

    Returns
    -------
    statistic : float
        The chi-square value.
    p_value : float
        The associated p-value for the test.
    """
    n, p = x.shape
    x_corr = corr(x)

    corr_det = np.linalg.det(x_corr)
    statistic = -np.log(corr_det) * (n - 1 - (2 * p + 5) / 6)
    degrees_of_freedom = p * (p - 1) / 2
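    # Note: an upper-tail p-value is conventionally taken from the survival
    # function, chi2.sf(statistic, degrees_of_freedom); the version shown
    # here evaluates the chi-square density at the statistic instead.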
    p_value = chi2.pdf(statistic, degrees_of_freedom)
    return statistic, p_value 
Developer: EducationalTestingService, Project: factor_analyzer, Lines of code: 37, Source file: factor_analyzer.py
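
A small self-contained check of the same statistic, substituting np.corrcoef for the library's corr helper (an assumption of this sketch). With independent random columns, det(R) stays close to 1 and the statistic behaves like a draw from a chi-square distribution with p(p - 1)/2 degrees of freedom:

import numpy as np
from scipy.stats import chi2

rng = np.random.default_rng(0)
x = rng.normal(size=(300, 5))            # 300 observations of 5 uncorrelated variables
n, p = x.shape

r = np.corrcoef(x, rowvar=False)         # p x p sample correlation matrix
statistic = -np.log(np.linalg.det(r)) * (n - 1 - (2 * p + 5) / 6)
dof = p * (p - 1) / 2
print(statistic, chi2.sf(statistic, dof), chi2.pdf(statistic, dof))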

Example 5: art_qi2

# Required import: from scipy.stats import chi2 [as alias]
# Or alias the method directly: pdf = chi2.pdf
# Also assumes: import numpy as np, import os.path as op
def art_qi2(img, airmask, min_voxels=int(1e3), max_voxels=int(3e5), save_plot=True):
    r"""
    Calculates :math:`\text{QI}_2`, based on the goodness-of-fit of a centered
    :math:`\chi^2` distribution onto the intensity distribution of
    non-artifactual background (within the "hat" mask):


    .. math ::

        \chi^2_n = \frac{2}{(\sigma \sqrt{2})^{2n} \, (n - 1)!}\, x^{2n - 1}\, e^{-\frac{x^2}{2\sigma^2}}

    where :math:`n` is the number of coil elements.

    :param numpy.ndarray img: input data
    :param numpy.ndarray airmask: input air mask without artifacts

    """

    from sklearn.neighbors import KernelDensity
    from scipy.stats import chi2
    from mriqc.viz.misc import plot_qi2

    # S. Ogawa was born
    np.random.seed(1191935)

    data = img[airmask > 0]
    data = data[data > 0]

    # Write out figure of the fitting
    out_file = op.abspath("error.svg")
    with open(out_file, "w") as ofh:
        ofh.write("<p>Background noise fitting could not be plotted.</p>")

    if len(data) < min_voxels:
        return 0.0, out_file

    modelx = data if len(data) < max_voxels else np.random.choice(data, size=max_voxels)

    x_grid = np.linspace(0.0, np.percentile(data, 99), 1000)

    # Estimate data pdf with KDE on a random subsample
    kde_skl = KernelDensity(
        bandwidth=0.05 * np.percentile(data, 98), kernel="gaussian"
    ).fit(modelx[:, np.newaxis])
    kde = np.exp(kde_skl.score_samples(x_grid[:, np.newaxis]))

    # Find cutoff
    kdethi = np.argmax(kde[::-1] > kde.max() * 0.5)

    # Fit X^2
    param = chi2.fit(modelx[modelx < np.percentile(data, 95)], 32)
    chi_pdf = chi2.pdf(x_grid, *param[:-2], loc=param[-2], scale=param[-1])

    # Compute goodness-of-fit (gof)
    gof = float(np.abs(kde[-kdethi:] - chi_pdf[-kdethi:]).mean())
    if save_plot:
        out_file = plot_qi2(x_grid, kde, chi_pdf, modelx, kdethi)

    return gof, out_file 
Developer: poldracklab, Project: mriqc, Lines of code: 61, Source file: anatomical.py
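
The fit-then-evaluate pattern at the core of this example can be reproduced on synthetic data: draw chi-square distributed samples, recover the parameters with chi2.fit, and evaluate chi2.pdf on a grid. All values below are arbitrary illustrations rather than MRIQC defaults:

import numpy as np
from scipy.stats import chi2

rng = np.random.default_rng(42)
samples = chi2.rvs(32, scale=10.0, size=20000, random_state=rng)   # synthetic "background" intensities

df_hat, loc_hat, scale_hat = chi2.fit(samples, 32)    # 32 is the initial guess for the df parameter
x_grid = np.linspace(0.0, np.percentile(samples, 99), 1000)
chi_pdf = chi2.pdf(x_grid, df_hat, loc=loc_hat, scale=scale_hat)
print(df_hat, loc_hat, scale_hat)                     # fitted df, loc, and scale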


Note: The scipy.stats.chi2.pdf method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors, who retain the copyright; please consult each project's License before redistributing or reusing the code. Do not reproduce this article without permission.