This page collects typical usage examples of the Python method scipy.stats.expon. If you have been wondering what stats.expon does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the containing module, scipy.stats.

Below are 15 code examples of stats.expon, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
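As a quick orientation before the examples: scipy.stats.expon is parameterized by scale, which equals 1/lambda for rate lambda. The following is a minimal standalone sketch (illustrative only, not taken from the examples below):

import numpy as np
from scipy import stats

lam = 2.0                             # rate parameter lambda
dist = stats.expon(scale=1.0 / lam)   # frozen distribution with scale = 1/lambda

x = np.array([0.1, 0.5, 1.0])
print(dist.pdf(x))                    # density lambda * exp(-lambda * x)
print(dist.cdf(x))                    # 1 - exp(-lambda * x)
print(dist.mean(), dist.var())        # 1/lambda and 1/lambda**2
samples = dist.rvs(size=1000, random_state=0)   # draw random variates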
Example 1: testExponentialLogPDF
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def testExponentialLogPDF(self):
    with tf.Session():
        batch_size = 6
        lam = tf.constant([2.0] * batch_size)
        lam_v = 2.0
        x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
        exponential = tf.contrib.distributions.Exponential(lam=lam)

        expected_log_pdf = stats.expon.logpdf(x, scale=1 / lam_v)
        log_pdf = exponential.log_pdf(x)
        self.assertEqual(log_pdf.get_shape(), (6,))
        self.assertAllClose(log_pdf.eval(), expected_log_pdf)

        pdf = exponential.pdf(x)
        self.assertEqual(pdf.get_shape(), (6,))
        self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf))
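Example 1 relies on stats.expon.logpdf with scale = 1/lam matching the density of an Exponential(lam) distribution. A small sketch (illustrative, not part of the test suite) verifying the closed form log(lam) - lam*x:

import numpy as np
from scipy import stats

lam = 2.0
x = np.array([0.1, 1.0, 2.5])
expected = np.log(lam) - lam * x     # analytic log-density of Exponential(lam)
np.testing.assert_allclose(stats.expon.logpdf(x, scale=1 / lam), expected)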
Example 2: setUp_configure
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def setUp_configure(self):
    from scipy import stats
    self.dist = distributions.Exponential
    self.scipy_dist = stats.expon

    self.test_targets = set([
        'batch_shape', 'cdf', 'entropy', 'event_shape', 'icdf', 'log_prob',
        'mean', 'sample', 'support', 'variance'])

    lam = numpy.exp(numpy.random.uniform(
        -1, 1, self.shape)).astype(numpy.float32)
    lam = numpy.asarray(lam)
    self.params = {'lam': lam}
    self.scipy_params = {'scale': 1 / lam}

    self.support = 'positive'
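Example 2 pairs the rate parameter lam with scipy's scale = 1/lam and lists 'icdf' among the tested targets. A standalone sketch (illustrative only, not part of the Chainer test) showing that ppf, scipy's inverse CDF, inverts cdf under that parameterization:

import numpy as np
from scipy import stats

lam = np.array([0.5, 1.0, 2.0])
dist = stats.expon(scale=1 / lam)    # scipy's scale is the inverse rate

q = np.array([0.1, 0.5, 0.9])
x = dist.ppf(q)                      # inverse CDF (the test's "icdf")
np.testing.assert_allclose(dist.cdf(x), q, rtol=1e-10)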
Example 3: testExponentialSampleMultiDimensional
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def testExponentialSampleMultiDimensional(self):
    with self.test_session():
        batch_size = 2
        lam_v = [3.0, 22.0]
        lam = tf.constant([lam_v] * batch_size)

        exponential = tf.contrib.distributions.Exponential(lam=lam)

        n = 100000
        samples = exponential.sample(n, seed=138)
        self.assertEqual(samples.get_shape(), (n, batch_size, 2))

        sample_values = samples.eval()

        self.assertFalse(np.any(sample_values < 0.0))
        for i in range(2):
            self.assertLess(
                stats.kstest(
                    sample_values[:, 0, i], stats.expon(scale=1.0/lam_v[i]).cdf)[0],
                0.01)
            self.assertLess(
                stats.kstest(
                    sample_values[:, 1, i], stats.expon(scale=1.0/lam_v[i]).cdf)[0],
                0.01)
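Example 3 validates samples with a Kolmogorov-Smirnov test against the stats.expon CDF. The same check works outside TensorFlow; here is a minimal sketch (illustrative, using scipy's own sampler):

import numpy as np
from scipy import stats

lam = 3.0
samples = stats.expon(scale=1.0 / lam).rvs(100000, random_state=138)
ks_stat, p_value = stats.kstest(samples, stats.expon(scale=1.0 / lam).cdf)
assert ks_stat < 0.01    # a small KS statistic means the samples match the CDF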
Example 4: test_fit
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def test_fit():
    p1 = Normal(mu=T.constant(0.0), sigma=T.constant(2.0))
    p2 = Normal(mu=T.constant(3.0), sigma=T.constant(2.0))
    p3 = Exponential(inverse_scale=T.constant(0.5))
    g = theano.shared(0.5)
    m = Mixture(components=[p1, p2, p3], weights=[g, g*g])

    X = np.concatenate([st.norm(loc=0.0, scale=2.0).rvs(300, random_state=0),
                        st.norm(loc=3.0, scale=2.0).rvs(100, random_state=1),
                        st.expon(scale=1. / 0.5).rvs(500, random_state=2)])
    X = X.reshape(-1, 1)

    s0 = m.score(X)
    m.fit(X)
    assert np.abs(g.eval() - 1. / 3.) < 0.05
    assert m.score(X) >= s0
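Example 4 builds its reference data as a mixture of two normals and an exponential by concatenating fixed-size draws. A hedged sketch (illustrative; it samples component counts instead of hard-coding them) of drawing the same kind of mixture with numpy and scipy only:

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
weights = [1/3, 1/3, 1/3]                        # mixture weights
components = [stats.norm(loc=0.0, scale=2.0),
              stats.norm(loc=3.0, scale=2.0),
              stats.expon(scale=1. / 0.5)]       # rate 0.5 -> scale 2

counts = rng.multinomial(900, weights)           # how many draws per component
X = np.concatenate([c.rvs(n, random_state=rng) for c, n in zip(components, counts)])
rng.shuffle(X)
X = X.reshape(-1, 1)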
Example 5: diff_exp_dis
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def diff_exp_dis():
    """
    Exponential distributions with different parameters.
    :return:
    """
    exp_dis_0_5 = stats.expon(scale=0.5)
    exp_dis_1 = stats.expon(scale=1)
    exp_dis_2 = stats.expon(scale=2)

    x1 = np.linspace(exp_dis_0_5.ppf(0.001), exp_dis_0_5.ppf(0.9999), 100)
    x2 = np.linspace(exp_dis_1.ppf(0.001), exp_dis_1.ppf(0.999), 100)
    x3 = np.linspace(exp_dis_2.ppf(0.001), exp_dis_2.ppf(0.99), 100)

    fig, ax = plt.subplots(1, 1)
    ax.plot(x1, exp_dis_0_5.pdf(x1), 'b-', lw=2, label=r'lambda = 2')
    ax.plot(x2, exp_dis_1.pdf(x2), 'g-', lw=2, label='lambda = 1')
    ax.plot(x3, exp_dis_2.pdf(x3), 'r-', lw=2, label='lambda = 0.5')
    plt.ylabel('Probability')
    plt.title(r'PDF of Exponential Distribution')

    ax.legend(loc='best', frameon=False)
    plt.show()

# diff_exp_dis()
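Example 5 plots the PDF for scale values 0.5, 1 and 2; the legend uses the rate lambda = 1/scale, which is why scale=0.5 is labelled lambda = 2. A one-line sketch (illustrative) confirming that the mean of stats.expon equals its scale:

from scipy import stats
for scale in (0.5, 1, 2):
    assert abs(stats.expon(scale=scale).mean() - scale) < 1e-12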
Example 6: test_random_search_cv_results
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def test_random_search_cv_results():
    X, y = make_classification(n_samples=50, n_features=4, random_state=42)

    n_splits = 3
    n_search_iter = 30

    params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
    param_keys = ('param_C', 'param_gamma')
    score_keys = ('mean_test_score', 'mean_train_score',
                  'rank_test_score',
                  'split0_test_score', 'split1_test_score',
                  'split2_test_score',
                  'split0_train_score', 'split1_train_score',
                  'split2_train_score',
                  'std_test_score', 'std_train_score',
                  'mean_fit_time', 'std_fit_time',
                  'mean_score_time', 'std_score_time')
    n_cand = n_search_iter

    for iid in (False, True):
        search = RandomizedSearchCV(SVC(gamma='scale'), n_iter=n_search_iter,
                                    cv=n_splits, iid=iid,
                                    param_distributions=params,
                                    return_train_score=True)
        search.fit(X, y)
        assert_equal(iid, search.iid)
        cv_results = search.cv_results_
        # Check results structure
        check_cv_results_array_types(search, param_keys, score_keys)
        check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
        # For random_search, all the param array vals should be unmasked
        assert not(any(np.ma.getmaskarray(cv_results['param_C'])) or
                   any(np.ma.getmaskarray(cv_results['param_gamma'])))
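Example 6 passes frozen expon objects as param_distributions, so RandomizedSearchCV can call their rvs method when sampling candidates. A minimal, self-contained sketch of the same idea (illustrative; note that newer scikit-learn releases no longer accept the iid argument used above):

from scipy.stats import expon
from sklearn.datasets import make_classification
from sklearn.model_selection import RandomizedSearchCV
from sklearn.svm import SVC

X, y = make_classification(n_samples=50, n_features=4, random_state=42)
params = {'C': expon(scale=10), 'gamma': expon(scale=0.1)}   # continuous search space
search = RandomizedSearchCV(SVC(), param_distributions=params,
                            n_iter=30, cv=3, random_state=0)
search.fit(X, y)
print(search.best_params_)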
Example 7: testExponentialCDF
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def testExponentialCDF(self):
    with tf.Session():
        batch_size = 6
        lam = tf.constant([2.0] * batch_size)
        lam_v = 2.0
        x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)

        exponential = tf.contrib.distributions.Exponential(lam=lam)
        expected_cdf = stats.expon.cdf(x, scale=1 / lam_v)

        cdf = exponential.cdf(x)
        self.assertEqual(cdf.get_shape(), (6,))
        self.assertAllClose(cdf.eval(), expected_cdf)
Example 8: testExponentialMean
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def testExponentialMean(self):
    with tf.Session():
        lam_v = np.array([1.0, 4.0, 2.5])
        expected_mean = stats.expon.mean(scale=1 / lam_v)
        exponential = tf.contrib.distributions.Exponential(lam=lam_v)
        self.assertEqual(exponential.mean().get_shape(), (3,))
        self.assertAllClose(exponential.mean().eval(), expected_mean)
Example 9: testExponentialVariance
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def testExponentialVariance(self):
    with tf.Session():
        lam_v = np.array([1.0, 4.0, 2.5])
        expected_variance = stats.expon.var(scale=1 / lam_v)
        exponential = tf.contrib.distributions.Exponential(lam=lam_v)
        self.assertEqual(exponential.variance().get_shape(), (3,))
        self.assertAllClose(exponential.variance().eval(), expected_variance)
Example 10: testExponentialEntropy
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def testExponentialEntropy(self):
    with tf.Session():
        lam_v = np.array([1.0, 4.0, 2.5])
        expected_entropy = stats.expon.entropy(scale=1 / lam_v)
        exponential = tf.contrib.distributions.Exponential(lam=lam_v)
        self.assertEqual(exponential.entropy().get_shape(), (3,))
        self.assertAllClose(exponential.entropy().eval(), expected_entropy)
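Examples 8-10 compare mean, variance and entropy against stats.expon. A small sketch (illustrative) checking the same quantities against their closed forms 1/lambda, 1/lambda^2 and 1 - log(lambda):

import numpy as np
from scipy import stats

lam = np.array([1.0, 4.0, 2.5])
np.testing.assert_allclose(stats.expon.mean(scale=1 / lam), 1 / lam)
np.testing.assert_allclose(stats.expon.var(scale=1 / lam), 1 / lam ** 2)
np.testing.assert_allclose(stats.expon.entropy(scale=1 / lam), 1 - np.log(lam))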
Example 11: check_exponential
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def check_exponential(inverse_scale):
    rng = check_random_state(1)

    p_carl = Exponential(inverse_scale=inverse_scale)
    p_scipy = st.expon(scale=1. / inverse_scale)
    X = rng.rand(50, 1)

    assert_array_almost_equal(p_carl.pdf(X),
                              p_scipy.pdf(X.ravel()))
    assert_array_almost_equal(p_carl.cdf(X),
                              p_scipy.cdf(X.ravel()))
    assert_array_almost_equal(-np.log(p_carl.pdf(X)),
                              p_carl.nll(X))
Example 12: check_fit
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def check_fit(inverse_scale):
    p = Exponential()
    X = st.expon(scale=1. / inverse_scale).rvs(5000,
                                               random_state=0).reshape(-1, 1)

    p.fit(X)
    assert np.abs(p.inverse_scale.get_value() - inverse_scale) <= 0.1
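Example 12 fits carl's Exponential by maximum likelihood; the rate can also be recovered with scipy alone via stats.expon.fit. A sketch (illustrative; floc=0 pins the location so only the scale is estimated):

import numpy as np
from scipy import stats

inverse_scale = 2.0                          # true rate
X = stats.expon(scale=1. / inverse_scale).rvs(5000, random_state=0)
loc, scale = stats.expon.fit(X, floc=0)      # MLE of the scale with location fixed at 0
assert np.abs(1. / scale - inverse_scale) <= 0.1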
Example 13: __init__
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def __init__(self, rate=None):
    self.rate = rate

    if (self.rate is not None) and (self.rate > 0.0):
        #self.mean = 1. / self.rate
        #self.variance = 1./(self.rate)**2
        self.skewness = 2.0
        self.kurtosis = 6.0
        self.bounds = np.array([0.0, np.inf])
        self.x_range_for_pdf = np.linspace(0.0, 20*self.rate, RECURRENCE_PDF_SAMPLES)
        self.parent = expon(scale=1.0/rate)
        self.mean = self.parent.mean()
        self.variance = self.parent.var()
Example 14: test_fit
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def test_fit(self):
    """Tests RandomizedSearchCV fit()."""
    x_np, y_np = datasets.load_iris(return_X_y=True)
    p = np.random.permutation(len(x_np))  # Pre-shuffling required for CSVM
    x = ds.array(x_np[p], (30, 4))
    y = ds.array((y_np[p] == 0)[:, np.newaxis], (30, 1))
    param_distributions = {'c': stats.expon(scale=0.5),
                           'gamma': stats.expon(scale=1)}
    csvm = CascadeSVM()
    n_iter = 12
    k = 3
    searcher = RandomizedSearchCV(estimator=csvm,
                                  param_distributions=param_distributions,
                                  n_iter=n_iter, cv=k, random_state=0)
    searcher.fit(x, y)

    expected_keys = {'param_c', 'param_gamma', 'params', 'mean_test_score',
                     'std_test_score', 'rank_test_score'}
    split_keys = {'split%d_test_score' % i for i in range(k)}
    expected_keys.update(split_keys)
    self.assertSetEqual(set(searcher.cv_results_.keys()), expected_keys)

    self.assertEqual(len(searcher.cv_results_['param_c']), n_iter)

    self.assertTrue(hasattr(searcher, 'best_estimator_'))
    self.assertTrue(hasattr(searcher, 'best_score_'))
    self.assertTrue(hasattr(searcher, 'best_params_'))
    self.assertTrue(hasattr(searcher, 'best_index_'))
    self.assertTrue(hasattr(searcher, 'scorer_'))
    self.assertEqual(searcher.n_splits_, k)
Example 15: score
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import expon [as alias]
def score(self, Y):
    E, T = Y["Event"], Y["Time"]
    cens = (1 - E) * np.log(1 - self.dist.cdf(T) + eps)
    uncens = E * self.dist.logpdf(T)
    return -(cens + uncens)
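Example 15 computes a censored negative log-likelihood: censored observations (E == 0) contribute the log-survival term log(1 - F(T)), while uncensored ones contribute the log-density. A standalone sketch (illustrative; eps and self.dist are stand-ins for a small constant and a frozen stats.expon defined elsewhere in the class):

import numpy as np
from scipy import stats

eps = 1e-12
dist = stats.expon(scale=2.0)                   # stand-in for self.dist
T = np.array([0.5, 1.0, 3.0, 4.0])              # observed times
E = np.array([1, 0, 1, 0])                      # 1 = event observed, 0 = censored

cens = (1 - E) * np.log(1 - dist.cdf(T) + eps)  # log-survival for censored rows
uncens = E * dist.logpdf(T)                     # log-density for observed events
neg_log_lik = -(cens + uncens)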