本文整理汇总了Python中scipy.stats.gaussian_kde方法的典型用法代码示例。如果您正苦于以下问题:Python stats.gaussian_kde方法的具体用法?Python stats.gaussian_kde怎么用?Python stats.gaussian_kde使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类scipy.stats
的用法示例。
在下文中一共展示了stats.gaussian_kde方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: mutualinfo_kde
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def mutualinfo_kde(y, x, normed=True):
    """Estimate the mutual information of two random variables with a KDE.

    Fits one-dimensional gaussian KDEs to ``x`` and ``y`` and a
    two-dimensional KDE to the joint sample, then averages the pointwise
    log-density ratio log p(y,x) - log p(x) - log p(y) over the sample.

    Parameters
    ----------
    y, x : array-like
        Paired observations; must have equal length.
    normed : bool, default True
        If true, return ``sqrt(1 - exp(-2*mi))`` (a correlation-like value
        in [0, 1]); otherwise return the raw mutual information estimate.

    Raises
    ------
    ValueError
        If ``y`` and ``x`` differ in length.
    """
    n = len(x)
    if len(y) != n:
        raise ValueError('both data arrays need to have the same size')
    xs = np.asarray(x, float)
    ys = np.asarray(y, float)
    joint = np.vstack((ys, xs))
    # Each density is evaluated at its own sample points.
    dens_x = gaussian_kde(xs)(xs)
    dens_y = gaussian_kde(ys)(ys)
    dens_joint = gaussian_kde(joint)(joint)
    pointwise = np.log(dens_joint) - np.log(dens_x) - np.log(dens_y)
    mi = pointwise.sum() / n
    if not normed:
        return mi
    return np.sqrt(1. - np.exp(-2 * mi))
示例2: _plot
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def _plot(cls, ax, y, style=None, bw_method=None, ind=None,
          column_num=None, stacking_id=None, **kwds):
    """Evaluate a gaussian KDE of ``y`` on the grid ``ind`` and plot it.

    Drops missing values, fits ``scipy.stats.gaussian_kde`` (forwarding
    ``bw_method`` when the installed SciPy supports it), evaluates the
    density at ``ind`` and draws the curve on ``ax`` via ``MPLPlot._plot``.
    Returns the matplotlib line artists.
    """
    from scipy.stats import gaussian_kde
    from scipy import __version__ as spv

    y = remove_na_arraylike(y)
    # ``bw_method`` was introduced in SciPy 0.11.0; warn (and ignore it)
    # on older versions instead of raising.
    if LooseVersion(spv) >= '0.11.0':
        gkde = gaussian_kde(y, bw_method=bw_method)
    else:
        gkde = gaussian_kde(y)
        if bw_method is not None:
            warnings.warn('bw_method was added in Scipy 0.11.0.'
                          ' Scipy version in use is {spv}.'.format(spv=spv))
    density = gkde.evaluate(ind)
    return MPLPlot._plot(ax, ind, density, style=style, **kwds)
示例3: insert_size
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def insert_size(insert_size_distribution):
    """Calculate a cumulative distribution function from the raw insert size
    distribution, using 1D gaussian kernel density estimation.

    Args:
        insert_size_distribution (list): list of insert sizes from aligned
            read pairs
    Returns:
        1darray: a cumulative density function over a 1000-point grid
        spanning the observed insert-size range
    """
    sizes = insert_size_distribution
    # Bandwidth is given relative to the sample standard deviation.
    bandwidth = 0.2 / np.std(sizes, ddof=1)
    density = stats.gaussian_kde(sizes, bw_method=bandwidth)
    grid = np.linspace(min(sizes), max(sizes), 1000)
    pdf = density.evaluate(grid)
    cdf = np.cumsum(pdf)
    # Normalise so the CDF ends at exactly 1.
    return cdf / cdf[-1]
示例4: mutualinfo_kde_2sample
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def mutualinfo_kde_2sample(y, x, normed=True):
    """Estimate the information divergence between two samples with KDEs.

    Fits a gaussian KDE to each sample and averages the log-density ratio
    log p_x - log p_y evaluated at the ``x`` sample points.

    Parameters
    ----------
    y, x : array-like
        The two samples.
    normed : bool, default True
        If true, return ``sqrt(1 - exp(-2*mi))``; otherwise the raw value.

    Raises
    ------
    ValueError
        If the number of evaluated points differs from ``len(x)``.
    """
    nobs = len(x)
    xs = np.asarray(x, float)
    ys = np.asarray(y, float)
    # Both densities are evaluated at the *x* sample points.
    dens_x = gaussian_kde(xs.T)(xs.T)
    dens_y = gaussian_kde(ys.T)(xs.T)
    pointwise = np.log(dens_x) - np.log(dens_y)
    if len(pointwise) != nobs:
        raise ValueError("Wrong number of observations")
    mi = pointwise.mean()
    if not normed:
        return mi
    return np.sqrt(1. - np.exp(-2 * mi))
示例5: work
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def work(self, fig=None, ax=None):
    """Draw a one dimensional kernel density plot.

    You can specify either a figure or an axis to draw on.

    Parameters:
    -----------
    fig: matplotlib figure object
    ax: matplotlib axis object to draw on

    Returns:
    --------
    fig, ax: matplotlib figure and axis objects
    """
    if ax is None:
        if fig is None:
            # Nothing to draw on: hand the (None) handles straight back.
            return fig, ax
        ax = fig.gca()
    from scipy.stats import gaussian_kde
    series = self.data[self.aes['x']]
    estimator = gaussian_kde(series)
    grid = np.linspace(series.min(), series.max(), 200)
    ax.plot(grid, estimator.evaluate(grid))
    return fig, ax
示例6: test_gaussian_kde_monkeypatch
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def test_gaussian_kde_monkeypatch():
    """Ugly, but people may rely on this. See scipy pull request 123,
    specifically the linked ML thread "Width of the Gaussian in stats.kde".
    If it is necessary to break this later on, that is to be discussed on ML.
    """
    # ``np.float`` was an alias for the builtin ``float`` and was removed in
    # NumPy 1.24; using ``float`` directly yields the same float64 array.
    x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
    xs = np.linspace(-10, 10, num=50)
    # The old monkeypatched version to get at Silverman's Rule.
    kde = stats.gaussian_kde(x1)
    kde.covariance_factor = kde.silverman_factor
    kde._compute_covariance()
    y1 = kde(xs)
    # The new saner version.
    kde2 = stats.gaussian_kde(x1, bw_method='silverman')
    y2 = kde2(xs)
    assert_array_almost_equal_nulp(y1, y2, nulp=10)
示例7: _calc_density
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def _calc_density(x: np.ndarray, y: np.ndarray):
"""\
Function to calculate the density of cells in an embedding.
"""
from scipy.stats import gaussian_kde
# Calculate the point density
xy = np.vstack([x, y])
z = gaussian_kde(xy)(xy)
min_z = np.min(z)
max_z = np.max(z)
# Scale between 0 and 1
scaled_z = (z - min_z) / (max_z - min_z)
return scaled_z
示例8: test_gaussian_kde_monkeypatch
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def test_gaussian_kde_monkeypatch():
    """Ugly, but people may rely on this. See scipy pull request 123,
    specifically the linked ML thread "Width of the Gaussian in stats.kde".
    If it is necessary to break this later on, that is to be discussed on ML.
    """
    sample = np.array([-7, -5, 1, 4, 5], dtype=float)
    grid = np.linspace(-10, 10, num=50)
    # Old style: monkeypatch covariance_factor, then recompute covariance.
    patched = stats.gaussian_kde(sample)
    patched.covariance_factor = patched.silverman_factor
    patched._compute_covariance()
    # New style: request Silverman's rule directly.
    direct = stats.gaussian_kde(sample, bw_method='silverman')
    assert_array_almost_equal_nulp(patched(grid), direct(grid), nulp=10)
示例9: test_pdf_logpdf
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def test_pdf_logpdf():
    """Check that pdf/evaluate and logpdf agree, on both sides of the
    points-vs-data size split inside gaussian_kde."""
    np.random.seed(1)
    sample = np.random.randn(50)
    grid = np.linspace(-15, 12, 25)
    # Default bandwidth, fewer evaluation points than data.
    gkde = stats.gaussian_kde(sample)
    pdf_vals = gkde.evaluate(grid)
    assert_almost_equal(pdf_vals, gkde.pdf(grid), decimal=12)
    assert_almost_equal(np.log(pdf_vals), gkde.logpdf(grid), decimal=12)
    # More evaluation points than data exercises the other code path.
    gkde_small = stats.gaussian_kde(grid)
    assert_almost_equal(np.log(gkde_small.evaluate(sample)),
                        gkde_small.logpdf(sample), decimal=12)
示例10: estimate_mode
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def estimate_mode(acc):
    """ Estimate the mode of a set of float values between 0 and 1.

    Fits a gaussian KDE to the data and maximises the density over [0, 1].

    :param acc: Data (sequence of floats in [0, 1]); must be non-empty.
    :returns: The mode of the sample (the single value when only one is
        given, 0 if the optimisation fails).
    :rtype: float
    """
    # Taken from sloika.
    if len(acc) <= 1:
        return acc[0]
    da = gaussian_kde(acc)
    # BUG FIX: 'brent' silently ignores ``bounds`` (and SciPy >= 1.11
    # rejects the combination); 'bounded' is the method that honours them.
    # ``da(x)`` returns a length-1 array, so index it explicitly instead of
    # relying on deprecated size-1-array-to-scalar conversion.
    optimization_result = minimize_scalar(lambda x: -da(x)[0],
                                          bounds=(0, 1), method='bounded')
    if not optimization_result.success:
        sys.stderr.write("Mode computation failed")
        return 0
    x = optimization_result.x
    # Depending on SciPy version the minimiser may be a scalar or an array.
    try:
        mode = x[0]
    except (IndexError, TypeError):
        mode = x
    return mode
示例11: fit
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def fit(self, X, **kwargs):
    """Fit the KDE estimator to data.

    Parameters
    ----------
    * `X` [array-like, shape=(n_samples, n_features)]:
        The samples.

    Returns
    -------
    * `self` [object]:
        `self`.
    """
    # gaussian_kde expects variables in rows, hence the transpose.
    self.kde_ = gaussian_kde(check_array(X).T, bw_method=self.bandwidth)
    return self
示例12: main
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def main():
    """Scatter-plot a 3D multivariate-normal sample coloured by KDE density."""
    mean = np.array([1, 10, 20])
    cov = np.matrix([[20, 10, 10],
                     [10, 25, 1],
                     [10, 1, 50]])
    np.random.seed(100)
    sample = np.random.multivariate_normal(mean, cov, 1000)
    print(sample.shape)
    values = sample.T
    print(values.shape)
    # Evaluate the fitted density at the sample points themselves.
    density = stats.gaussian_kde(values)(values)
    fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))
    xs, ys, zs = values
    ax.scatter(xs, ys, zs, c=density)
    plt.show()
示例13: main
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def main():
    """Visualise a 3D gaussian KDE as isosurfaces on a regular 50^3 grid."""
    mean = np.array([1, 10, 20])
    cov = np.matrix([[20, 10, 10],
                     [10, 25, 1],
                     [10, 1, 50]])
    np.random.seed(100)
    sample = np.random.multivariate_normal(mean, cov, 1000)
    kde = stats.gaussian_kde(sample.T)
    # Create a regular 3D grid with 50 points in each dimension,
    # spanning the data extent.
    mins = sample.min(axis=0)
    maxs = sample.max(axis=0)
    xi, yi, zi = np.mgrid[mins[0]:maxs[0]:50j,
                          mins[1]:maxs[1]:50j,
                          mins[2]:maxs[2]:50j]
    # Evaluate the KDE on the flattened grid, then restore the grid shape.
    coords = np.vstack([axis.ravel() for axis in (xi, yi, zi)])
    density = kde(coords).reshape(xi.shape)
    # Visualize the density estimate as isosurfaces.
    mlab.contour3d(xi, yi, zi, density, opacity=0.5)
    mlab.axes()
    mlab.show()
示例14: kde_scipy
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def kde_scipy(data, grid, **kwargs):
    """
    Kernel Density Estimation with Scipy

    Parameters
    ----------
    data : numpy.array
        Data points used to compute a density estimator. It
        has `n x p` dimensions, representing n points and p
        variables.
    grid : numpy.array
        Data points at which the density will be estimated. It
        has `m x p` dimensions, representing m points and p
        variables.

    Returns
    -------
    out : numpy.array
        Density estimate. Has `m x 1` dimensions
    """
    # gaussian_kde wants variables in rows (p x n), hence the transposes.
    estimator = gaussian_kde(data.T, **kwargs)
    return estimator.evaluate(grid.T)
示例15: add_density_lines
# 需要导入模块: from scipy import stats [as 别名]
# 或者: from scipy.stats import gaussian_kde [as 别名]
def add_density_lines(self, **kwargs):
    """Overlay a dashed gaussian-KDE density curve for each data series.

    For every series in ``self.data``, fits a KDE whose bandwidth is
    ``self.kde_base`` scaled by the series' sample std, evaluates it on a
    grid spanning [self.lowest, self.highest), truncates the curve at the
    ``self.kde_perc`` percentile of the grid, and plots it on ``self.ax``
    without disturbing the current axis limits.

    Note: mutates ``self.kde_bandwidth`` as a side effect (last series wins).
    """
    for i, d in enumerate(self.data):
        # Relative bandwidth: kde_base expressed in units of this series' std.
        self.kde_bandwidth = self.kde_base / d.std(ddof=1)
        density = stats.gaussian_kde(d, bw_method=self.kde_bandwidth)
        # Grid step chosen so the curve has as many points as the i-th
        # histogram's bins — assumes self.lowest/highest bracket the data.
        x = np.arange(self.lowest, self.highest,
                      self.highest / len(self.hist[0][i]))
        y = np.array(density(x))
        # Drop the upper tail beyond the kde_perc percentile of the grid.
        limit = np.percentile(x, self.kde_perc)
        y = y[np.where(x < limit)]
        x = x[np.where(x < limit)]
        #y2 = mpl.mlab.normpdf(x, np.mean(d), np.std(d))
        # Remember the limits so plotting the curve doesn't rescale the axes.
        ylim = self.ax.get_ylim()
        xlim = self.ax.get_xlim()
        self.ax.plot(
            x,
            y,
            ls='--',
            lw=.5,
            c=self.palette[i][0],
            label='%s, density' % self.labels[i])
        #self.ax.plot(x, y2, ls = ':', lw = .5, c = self.palette[i][0])
        self.ax.set_ylim(ylim)
        self.ax.set_xlim(xlim)