This article compiles typical usage examples of the KDTree.valid_metrics method from Python's sklearn.neighbors module. If you are wondering how KDTree.valid_metrics works, how to call it, or what it looks like in real code, the curated examples below may help. You can also read up on the containing class, sklearn.neighbors.KDTree, for more detail.
The following 13 code examples of KDTree.valid_metrics are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
Example 1: app_entropy
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
def app_entropy(x, order=2, metric='chebyshev'):
"""Approximate Entropy
Parameters
----------
x : list or np.array
One-dimensional time series of shape (n_times)
order : int (default: 2)
Embedding dimension.
metric : str (default: chebyshev)
Name of the metric function used with
:class:`~sklearn.neighbors.KDTree`. The list of available
metric functions is given by: ``KDTree.valid_metrics``.
Returns
-------
ae : float
Approximate Entropy.
"""
phi = _app_samp_entropy(x, order=order, metric=metric, approximate=True)
return np.subtract(phi[0], phi[1])
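A quick usage sketch (not part of the original snippet): it assumes numpy is imported as np and that the _app_samp_entropy helper from Example 5 below is in scope.

import numpy as np

rng = np.random.RandomState(42)
x = rng.randn(1000)                               # toy one-dimensional time series
ae = app_entropy(x, order=2, metric='chebyshev')
print(ae)                                         # a single float; larger values mean less regularity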
Example 2: sample_entropy
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
def sample_entropy(x, order=2, metric='chebyshev'):
"""Sample Entropy.
Parameters
----------
x : list or np.array
One-dimensional time series of shape (n_times)
order : int (default: 2)
Embedding dimension.
metric : str (default: chebyshev)
Name of the metric function used with KDTree. The list of available
metric functions is given by: `KDTree.valid_metrics`.
Returns
-------
se : float
Sample Entropy.
"""
x = np.asarray(x, dtype=np.float64)
if metric == 'chebyshev' and x.size < 5000:
return _numba_sampen(x, mm=order, r=0.2)
else:
        phi = _app_samp_entropy(x, order=order, metric=metric,
                                approximate=False)
        return -np.log(np.divide(phi[1], phi[0]))
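A hedged sketch of how the two branches are reached, assuming numpy as np, the Example 5 helper, and the unshown _numba_sampen helper are all in scope:

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(1000)
se_fast = sample_entropy(x, order=2, metric='chebyshev')  # numba fast path (x.size < 5000)
se_tree = sample_entropy(x, order=2, metric='euclidean')  # generic KDTree path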
Example 3: test_kde_algorithm_metric_choice
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
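To see which metric names the kd_tree branch accepts, inspect KDTree.valid_metrics directly. A minimal sketch, hedged because scikit-learn has exposed valid_metrics both as a plain class attribute and, in newer releases, as a callable:

from sklearn.neighbors import KDTree

vm = KDTree.valid_metrics
vm = vm() if callable(vm) else vm  # tolerate attribute vs. method across versions
print(sorted(vm))                  # e.g. 'chebyshev', 'euclidean', 'manhattan', ...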
Example 4: entropy_app_samp_entropy
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
def entropy_app_samp_entropy(x, order, metric="chebyshev", approximate=True):
_all_metrics = KDTree.valid_metrics
if metric not in _all_metrics:
raise ValueError(
"The given metric (%s) is not valid. The valid " "metric names are: %s" % (metric, _all_metrics)
)
phi = np.zeros(2)
r = 0.2 * np.std(x, axis=-1, ddof=1)
# compute phi(order, r)
_emb_data1 = entropy_embed(x, order, 1)
if approximate:
emb_data1 = _emb_data1
else:
emb_data1 = _emb_data1[:-1]
count1 = KDTree(emb_data1, metric=metric).query_radius(emb_data1, r, count_only=True).astype(np.float64)
# compute phi(order + 1, r)
emb_data2 = entropy_embed(x, order + 1, 1)
count2 = KDTree(emb_data2, metric=metric).query_radius(emb_data2, r, count_only=True).astype(np.float64)
if approximate:
phi[0] = np.mean(np.log(count1 / emb_data1.shape[0]))
phi[1] = np.mean(np.log(count2 / emb_data2.shape[0]))
else:
phi[0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))
phi[1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))
return phi
Example 5: _app_samp_entropy
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
def _app_samp_entropy(x, order, metric='chebyshev', approximate=True):
"""Utility function for `app_entropy`` and `sample_entropy`.
"""
_all_metrics = KDTree.valid_metrics
if metric not in _all_metrics:
raise ValueError('The given metric (%s) is not valid. The valid '
'metric names are: %s' % (metric, _all_metrics))
phi = np.zeros(2)
r = 0.2 * np.std(x, axis=-1, ddof=1)
# compute phi(order, r)
_emb_data1 = _embed(x, order, 1)
if approximate:
emb_data1 = _emb_data1
else:
emb_data1 = _emb_data1[:-1]
    count1 = KDTree(emb_data1, metric=metric).query_radius(
        emb_data1, r, count_only=True).astype(np.float64)
# compute phi(order + 1, r)
emb_data2 = _embed(x, order + 1, 1)
    count2 = KDTree(emb_data2, metric=metric).query_radius(
        emb_data2, r, count_only=True).astype(np.float64)
if approximate:
phi[0] = np.mean(np.log(count1 / emb_data1.shape[0]))
phi[1] = np.mean(np.log(count2 / emb_data2.shape[0]))
else:
phi[0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))
phi[1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))
return phi
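The entropy examples on this page call an _embed helper whose source is not shown here. Below is a plausible minimal sketch of such a time-delay embedding for the 1-D case used by Examples 5 and 11 (Example 13 relies on a batched 2-D variant); it is an assumption based on the standard definition, not the original code:

import numpy as np

def _embed(x, order=3, delay=1):
    """Time-delay embedding: map a 1-D series to shape (n_times - (order - 1) * delay, order)."""
    x = np.asarray(x)
    n = len(x) - (order - 1) * delay
    # stack `order` delayed copies of x as columns
    return np.column_stack([x[i * delay:i * delay + n] for i in range(order)])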
Example 6: __init__
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
def __init__(self,
n_neighbors=5,
max_window_size=1000,
leaf_size=30,
metric='euclidean'):
self.n_neighbors = n_neighbors
self.max_window_size = max_window_size
self.leaf_size = leaf_size
if metric not in self.valid_metrics():
raise ValueError("Invalid metric: {}.\n"
"Valid options are: {}".format(metric,
self.valid_metrics()))
self.metric = metric
self.data_window = SlidingWindow(window_size=max_window_size)
Example 7: valid_metrics
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
@staticmethod
def valid_metrics():
    """Get valid distance metrics for the KDTree."""
return KDTree.valid_metrics
Example 8: test_kde_algorithm_metric_choice
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
def test_kde_algorithm_metric_choice(algorithm, metric):
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
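In scikit-learn's own test suite, a test with this signature receives algorithm and metric via pytest parametrization. A sketch of decorators that would drive it; the exact parameter lists here are an assumption mirroring Example 3:

import pytest

@pytest.mark.parametrize('algorithm', ['auto', 'ball_tree', 'kd_tree'])
@pytest.mark.parametrize('metric', ['euclidean', 'minkowski', 'manhattan',
                                    'chebyshev', 'haversine'])
def test_kde_algorithm_metric_choice(algorithm, metric):
    ...  # body as in Example 8 above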
Example 9: compute_app_entropy
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
def compute_app_entropy(data, emb=2, metric='chebyshev'):
"""Approximate Entropy (AppEn, per channel).
Parameters
----------
data : ndarray, shape (n_channels, n_times)
emb : int (default: 2)
Embedding dimension.
metric : str (default: chebyshev)
Name of the metric function used with
:class:`~sklearn.neighbors.KDTree`. The list of available
metric functions is given by: ``KDTree.valid_metrics``.
Returns
-------
output : ndarray, shape (n_channels,)
Notes
-----
Alias of the feature function: **app_entropy**. See [1]_.
References
----------
.. [1] Richman, J. S. et al. (2000). Physiological time-series analysis
using approximate entropy and sample entropy. American Journal of
Physiology-Heart and Circulatory Physiology, 278(6), H2039-H2049.
"""
phi = _app_samp_entropy_helper(data, emb=emb, metric=metric,
approximate=True)
return np.subtract(phi[:, 0], phi[:, 1])
Example 10: compute_samp_entropy
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
def compute_samp_entropy(data, emb=2, metric='chebyshev'):
"""Sample Entropy (SampEn, per channel).
Parameters
----------
data : ndarray, shape (n_channels, n_times)
emb : int (default: 2)
Embedding dimension.
metric : str (default: chebyshev)
Name of the metric function used with KDTree. The list of available
metric functions is given by: `KDTree.valid_metrics`.
Returns
-------
output : ndarray, shape (n_channels,)
Notes
-----
Alias of the feature function: **samp_entropy**. See [1]_.
References
----------
.. [1] Richman, J. S. et al. (2000). Physiological time-series analysis
using approximate entropy and sample entropy. American Journal of
Physiology-Heart and Circulatory Physiology, 278(6), H2039-H2049.
"""
phi = _app_samp_entropy_helper(data, emb=emb, metric=metric,
approximate=False)
if np.allclose(phi[:, 0], 0) or np.allclose(phi[:, 1], 0):
raise ValueError('Sample Entropy is not defined.')
else:
return -np.log(np.divide(phi[:, 1], phi[:, 0]))
Example 11: _app_samp_entropy
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
def _app_samp_entropy(x, order, metric='chebyshev', approximate=True):
"""Utility function for `app_entropy`` and `sample_entropy`.
"""
_all_metrics = KDTree.valid_metrics
if metric not in _all_metrics:
raise ValueError('The given metric (%s) is not valid. The valid '
'metric names are: %s' % (metric, _all_metrics))
phi = np.zeros(2)
r = 0.2 * np.std(x, ddof=0)
# compute phi(order, r)
_emb_data1 = _embed(x, order, 1)
if approximate:
emb_data1 = _emb_data1
else:
emb_data1 = _emb_data1[:-1]
    count1 = KDTree(emb_data1, metric=metric).query_radius(
        emb_data1, r, count_only=True).astype(np.float64)
# compute phi(order + 1, r)
emb_data2 = _embed(x, order + 1, 1)
    count2 = KDTree(emb_data2, metric=metric).query_radius(
        emb_data2, r, count_only=True).astype(np.float64)
if approximate:
phi[0] = np.mean(np.log(count1 / emb_data1.shape[0]))
phi[1] = np.mean(np.log(count2 / emb_data2.shape[0]))
else:
phi[0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))
phi[1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))
return phi
Example 12: test_kde_sample_weights
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
def test_kde_sample_weights():
n_samples = 400
size_test = 20
weights_neutral = np.full(n_samples, 3.)
for d in [1, 2, 10]:
rng = np.random.RandomState(0)
X = rng.rand(n_samples, d)
weights = 1 + (10 * X.sum(axis=1)).astype(np.int8)
X_repetitions = np.repeat(X, weights, axis=0)
n_samples_test = size_test // d
test_points = rng.rand(n_samples_test, d)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev']:
if algorithm != 'kd_tree' or metric in KDTree.valid_metrics:
kde = KernelDensity(algorithm=algorithm, metric=metric)
# Test that adding a constant sample weight has no effect
kde.fit(X, sample_weight=weights_neutral)
scores_const_weight = kde.score_samples(test_points)
sample_const_weight = kde.sample(random_state=1234)
kde.fit(X)
scores_no_weight = kde.score_samples(test_points)
sample_no_weight = kde.sample(random_state=1234)
assert_allclose(scores_const_weight, scores_no_weight)
assert_allclose(sample_const_weight, sample_no_weight)
# Test equivalence between sampling and (integer) weights
kde.fit(X, sample_weight=weights)
scores_weight = kde.score_samples(test_points)
sample_weight = kde.sample(random_state=1234)
kde.fit(X_repetitions)
scores_ref_sampling = kde.score_samples(test_points)
sample_ref_sampling = kde.sample(random_state=1234)
assert_allclose(scores_weight, scores_ref_sampling)
assert_allclose(sample_weight, sample_ref_sampling)
# Test that sample weights has a non-trivial effect
diff = np.max(np.abs(scores_no_weight - scores_weight))
assert diff > 0.001
# Test invariance with respect to arbitrary scaling
scale_factor = rng.rand()
kde.fit(X, sample_weight=(scale_factor * weights))
scores_scaled_weight = kde.score_samples(test_points)
assert_allclose(scores_scaled_weight, scores_weight)
Example 13: _app_samp_entropy_helper
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import valid_metrics [as alias]
def _app_samp_entropy_helper(data, emb, metric='chebyshev',
approximate=True):
"""Utility function for `compute_app_entropy`` and `compute_samp_entropy`.
Parameters
----------
data : ndarray, shape (n_channels, n_times)
emb : int (default: 2)
Embedding dimension.
metric : str (default: chebyshev)
Name of the metric function used with KDTree. The list of available
metric functions is given by: ``KDTree.valid_metrics``.
approximate : bool (default: True)
If True, the returned values will be used to compute the
Approximate Entropy (AppEn). Otherwise, the values are used to compute
the Sample Entropy (SampEn).
Returns
-------
output : ndarray, shape (n_channels, 2)
"""
_all_metrics = KDTree.valid_metrics
if metric not in _all_metrics:
raise ValueError('The given metric (%s) is not valid. The valid '
'metric names are: %s' % (metric, _all_metrics))
n_channels, n_times = data.shape
phi = np.empty((n_channels, 2))
for j in range(n_channels):
r = 0.2 * np.std(data[j, :], axis=-1, ddof=1)
# compute phi(emb, r)
_emb_data1 = _embed(data[j, None], emb, 1)[0, :, :]
if approximate:
emb_data1 = _emb_data1
else:
emb_data1 = _emb_data1[:-1, :]
count1 = KDTree(emb_data1, metric=metric).query_radius(
emb_data1, r, count_only=True).astype(np.float64)
# compute phi(emb + 1, r)
emb_data2 = _embed(data[j, None], emb + 1, 1)[0, :, :]
count2 = KDTree(emb_data2, metric=metric).query_radius(
emb_data2, r, count_only=True).astype(np.float64)
if approximate:
phi[j, 0] = np.mean(np.log(count1 / emb_data1.shape[0]))
phi[j, 1] = np.mean(np.log(count2 / emb_data2.shape[0]))
else:
phi[j, 0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))
phi[j, 1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))
return phi
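A hedged end-to-end sketch tying Examples 9, 10, and 13 together, assuming numpy as np and a 2-D-capable _embed helper are in scope:

import numpy as np

rng = np.random.RandomState(0)
data = rng.randn(4, 512)                  # (n_channels, n_times) toy recording
app = compute_app_entropy(data, emb=2)    # ndarray of shape (4,)
samp = compute_samp_entropy(data, emb=2)  # ndarray of shape (4,)
print(app.shape, samp.shape)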