本文整理汇总了Python中sklearn.neighbors.BallTree方法的典型用法代码示例。如果您正苦于以下问题:Python neighbors.BallTree方法的具体用法?Python neighbors.BallTree怎么用?Python neighbors.BallTree使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.neighbors
的用法示例。
在下文中一共展示了neighbors.BallTree方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: avgdigamma
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def avgdigamma(data, dvec, leaf_size=16):
    """Estimate the expectation value of <psi(nx)> in a marginal space.

    For every sample, counts how many neighbors fall within that sample's
    radius ``dvec`` under the max-norm, then averages the digamma of those
    counts.

    Parameters
    ----------
    data : numpy.ndarray
        Samples of the marginal space, one row per point.
    dvec : array_like (n_points,)
        Per-point search radius.
    leaf_size : int, optional
        Leaf size forwarded to the BallTree (default 16).

    Returns
    -------
    float
        Expectation value of <psi(nx)>.
    """
    # p = inf selects the Chebyshev (max-norm) metric; EPS shrinks the
    # radius slightly so boundary points are excluded from the count.
    ball_tree = BallTree(data, leaf_size=leaf_size, p=float('inf'))
    neighbor_counts = ball_tree.query_radius(data, dvec - EPS, count_only=True)
    return digamma(neighbor_counts).mean()
示例2: test_unsupervised_inputs
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def test_unsupervised_inputs():
    """NearestNeighbors accepts a fitted estimator, a BallTree, or a KDTree
    as training input and produces identical neighbor results for each."""
    X = rng.random_sample((10, 3))
    # Reference result: fit directly on the raw data.
    nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
    nbrs_fid.fit(X)
    dist1, ind1 = nbrs_fid.kneighbors(X)
    nbrs = neighbors.NearestNeighbors(n_neighbors=1)
    # Renamed the loop variable from `input`, which shadowed the builtin.
    for data in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
        nbrs.fit(data)
        dist2, ind2 = nbrs.kneighbors(X)
        assert_array_almost_equal(dist1, dist2)
        assert_array_almost_equal(ind1, ind2)
示例3: test_haversine
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def test_haversine():
    """BallTree haversine query distances agree with brute-force distances."""
    coords = spatial_data[:, :2]
    n = spatial_data.shape[0]
    tree = BallTree(coords, metric="haversine")
    dist_matrix, _ = tree.query(coords, k=n)
    # Brute-force pairwise haversine distances; row-wise sort matches the
    # tree's nearest-first ordering.
    expected = np.array(
        [[dist.haversine(coords[i], coords[j]) for j in range(n)]
         for i in range(n)]
    )
    expected.sort(axis=1)
    assert_array_almost_equal(
        expected,
        dist_matrix,
        err_msg="Distances don't match for metric haversine",
    )
示例4: test_haversine
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def test_haversine(spatial_data):
    """BallTree haversine query distances agree with brute-force distances.

    ``spatial_data`` is supplied by a fixture; only its first two columns
    (lat/lon in radians, presumably — TODO confirm) are used.
    """
    coords = spatial_data[:, :2]
    n = spatial_data.shape[0]
    tree = BallTree(coords, metric="haversine")
    dist_matrix, _ = tree.query(coords, k=n)
    # Brute-force pairwise haversine distances; row-wise sort matches the
    # tree's nearest-first ordering.
    expected = np.array(
        [[dist.haversine(coords[i], coords[j]) for j in range(n)]
         for i in range(n)]
    )
    expected.sort(axis=1)
    assert_array_almost_equal(
        expected,
        dist_matrix,
        err_msg="Distances don't match for metric haversine",
    )
示例5: make_propensity_lists
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def make_propensity_lists(self, train_ids, benchmark):
    """Group training samples by assigned treatment and build one BallTree
    per treatment arm for nearest-neighbour matching.

    Populates ``self.pca``, ``self.original_data``, ``self.ball_trees``
    and ``self.treatment_ids``.
    """
    input_data, ids, pair_data = benchmark.get_data_access().get_rows(train_ids)
    assignments = map(benchmark.get_assignment, ids, input_data)
    treatment_data, batch_y = zip(*assignments)
    treatment_data = np.array(treatment_data)

    # NOTE(review): the `and False` deliberately disables the PCA branch;
    # kept verbatim to preserve behaviour.
    if pair_data.shape[-1] > 200 and False:
        self.pca = PCA(50, svd_solver="randomized")
        pair_data = self.pca.fit_transform(pair_data)
    else:
        self.pca = None

    # Plain Euclidean trees; a Mahalanobis variant existed upstream but
    # was commented out.
    self.original_data = [pair_data[treatment_data == t]
                          for t in range(benchmark.get_num_treatments())]
    self.ball_trees = [BallTree(pair_data[treatment_data == t])
                       for t in range(benchmark.get_num_treatments())]
    self.treatment_ids = [ids[treatment_data == t]
                          for t in range(benchmark.get_num_treatments())]
示例6: test_objectmapper
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def test_objectmapper(self):
    """df.neighbors exposes the sklearn.neighbors estimators unchanged."""
    df = pdml.ModelFrame([])
    # Same identity assertions as before, expressed as a data-driven loop.
    exposed = [
        "NearestNeighbors",
        "KNeighborsClassifier",
        "RadiusNeighborsClassifier",
        "KNeighborsRegressor",
        "RadiusNeighborsRegressor",
        "NearestCentroid",
        "BallTree",
        "KDTree",
        "DistanceMetric",
        "KernelDensity",
    ]
    for name in exposed:
        self.assertIs(getattr(df.neighbors, name), getattr(neighbors, name))
示例7: calc_vert_vals
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def calc_vert_vals(verts, pts, vals, method='max', k_points=100):
    """Map values measured at scattered points onto mesh vertices.

    For each vertex the ``k_points`` nearest measurement points are found;
    their values are combined either by inverse-square-distance weighting
    ('dist') or by taking the value of largest absolute magnitude ('max').

    Parameters
    ----------
    verts : array-like — vertex coordinates queried against the tree
    pts : array-like — measurement point coordinates
    vals : numpy.ndarray — value per measurement point
    method : {'max', 'dist'} — aggregation method (default 'max')
    k_points : int — neighbours per vertex, capped at ``len(pts)``

    Returns
    -------
    verts_vals : numpy.ndarray — one aggregated value per vertex

    Raises
    ------
    ValueError
        If ``method`` is neither 'max' nor 'dist' (previously this fell
        through to a NameError on ``verts_vals``).
    """
    from sklearn.neighbors import BallTree
    ball_tree = BallTree(pts)
    k_points = min([k_points, len(pts)])
    dists, pts_inds = ball_tree.query(verts, k=k_points, return_distance=True)
    near_vals = vals[pts_inds]
    # Report how much of the measurement set is reachable from the mesh.
    cover = len(np.unique(pts_inds.ravel())) / float(len(pts))
    print('{}% of the points are covered'.format(cover * 100))
    if method == 'dist':
        # Inverse-square-distance weights, normalized to sum to 1 per vertex.
        n_dists = 1 / (dists ** 2)
        norm = 1 / np.sum(n_dists, 1)
        norm = np.reshape(norm, (len(norm), 1))
        n_dists = norm * n_dists
        verts_vals = np.sum(near_vals * n_dists, 1)
    elif method == 'max':
        # Neighbour value with the largest absolute magnitude, sign kept.
        verts_vals = near_vals[range(near_vals.shape[0]),
                               np.argmax(abs(near_vals), 1)]
    else:
        raise ValueError(
            "method must be 'max' or 'dist', got {!r}".format(method))
    return verts_vals
示例8: test_barnes_hut_angle
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def test_barnes_hut_angle():
    """Barnes-Hut t-SNE with angle=0 must match the exact method: joint
    probabilities and KL divergence are compared between the two paths."""
    # When Barnes-Hut's angle=0 this corresponds to the exact method.
    angle = 0.0
    perplexity = 10
    n_samples = 100
    for n_components in [2, 3]:
        n_features = 5
        degrees_of_freedom = float(n_components - 1.0)

        # Build a symmetric non-negative "distance" matrix with zero diagonal.
        random_state = check_random_state(0)
        distances = random_state.randn(n_samples, n_features)
        distances = distances.astype(np.float32)
        distances = abs(distances.dot(distances.T))
        np.fill_diagonal(distances, 0.0)
        params = random_state.randn(n_samples, n_components)
        # Exact path: full joint probabilities and exact KL gradient.
        P = _joint_probabilities(distances, perplexity, verbose=0)
        kl_exact, grad_exact = _kl_divergence(params, P, degrees_of_freedom,
                                              n_samples, n_components)

        # Barnes-Hut path: k = n_samples - 1 neighbors, i.e. everyone
        # except the point itself, so both paths see the same information.
        k = n_samples - 1
        bt = BallTree(distances)
        distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
        neighbors_nn = neighbors_nn[:, 1:]  # drop self-neighbor (column 0)
        distances_nn = np.array([distances[i, neighbors_nn[i]]
                                 for i in range(n_samples)])
        # Sanity check that the gathered distances match the tree's output.
        assert np.all(distances[0, neighbors_nn[0]] == distances_nn[0]),\
            abs(distances[0, neighbors_nn[0]] - distances_nn[0])
        # NOTE(review): this calls an older private sklearn API signature
        # (_joint_probabilities_nn taking distances and neighbor indices).
        P_bh = _joint_probabilities_nn(distances_nn, neighbors_nn,
                                       perplexity, verbose=0)
        kl_bh, grad_bh = _kl_divergence_bh(params, P_bh, degrees_of_freedom,
                                           n_samples, n_components,
                                           angle=angle, skip_num_points=0,
                                           verbose=0)
        # Compare dense forms of the joint probabilities and the KL values.
        P = squareform(P)
        P_bh = P_bh.toarray()
        assert_array_almost_equal(P_bh, P, decimal=5)
        assert_almost_equal(kl_exact, kl_bh, decimal=3)
示例9: fit
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def fit(self, X):
    """Validate *X* and index it in a Hamming-metric BallTree.

    Returns
    -------
    self, to allow method chaining.
    """
    validated = self._validate_input(X, return_compact=False)
    self._tree = BallTree(validated, metric='hamming',
                          leaf_size=self.leaf_size)
    return self
示例10: build_tree
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def build_tree(points):
    """Return a Chebyshev-metric spatial index over *points*.

    KDTree is used for low-dimensional data; at 20 or more dimensions a
    BallTree is built instead.
    """
    high_dimensional = points.shape[1] >= 20
    if high_dimensional:
        return BallTree(points, metric='chebyshev')
    return KDTree(points, metric='chebyshev')
# TESTS
示例11: update_tree
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def update_tree(self, time):
    """Rebuild the BallTree over the first ``self.items`` rows of
    ``self.state`` and record *time* as the last rebuild time.

    Fix: the Python 2 `print` statements are converted to parenthesized
    calls, which behave identically on Python 2 and are valid on Python 3.
    """
    print('rebuild tree')
    self.tree = BallTree(self.state[:self.items, :], leaf_size=self.size)
    self.last_tree_built_time = time
    print('rebuild done')
示例12: transform
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def transform(self, documents):
    """Index *documents* in a BallTree and return it as a one-element list."""
    tree = BallTree(documents)
    return [tree]
示例13: fit_transform
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def fit_transform(self, documents):
    """Fit the normalize -> tfidf -> svd pipeline on first use, then index
    the reduced documents in a BallTree and persist everything via
    ``self.save()``.

    ``self.transformer`` is False until the pipeline has been fit.
    """
    # Fix: compare the sentinel with `is False` rather than `== False` —
    # identity is the intended check and avoids invoking __eq__ on a
    # fitted Pipeline object.
    if self.transformer is False:
        self.transformer = Pipeline([
            ('norm', TextNormalizer(minimum=50, maximum=200)),
            ('transform', Pipeline([
                ('tfidf', TfidfVectorizer()),
                ('svd', TruncatedSVD(n_components=200)),
            ])),
        ])

        self.lexicon = self.transformer.fit_transform(documents)
        self.tree = BallTree(self.lexicon)
        self.save()
示例14: knn_interpolation
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def knn_interpolation(cumulated_pc: np.ndarray, full_sized_data: np.ndarray, k=5):
    """
    Using k-nn interpolation to find labels of points of the full sized pointcloud
    :param cumulated_pc: cumulated pointcloud results after running the network
    :param full_sized_data: full sized point cloud
    :param k: k for k nearest neighbor interpolation
    :return: pointcloud with predicted labels in last column and ground truth labels in last but one column
    """
    # Points whose last column is -1 carry no prediction and are excluded
    # from the reference set.
    labeled = cumulated_pc[cumulated_pc[:, -1] != -1]
    target = full_sized_data.copy()

    # Majority vote among each point's k nearest labelled neighbours.
    tree = BallTree(labeled[:, :3], metric='euclidean')
    _, neighbor_idx = tree.query(target[:, :3], k=k)
    neighbor_labels = labeled[neighbor_idx][:, :, -1].astype(int)
    votes = np.array([np.bincount(row).argmax() for row in neighbor_labels])

    # Append the interpolated labels as a new last column.
    result = np.zeros((target.shape[0], target.shape[1] + 1))
    result[:, :-1] = target
    result[:, -1] = votes
    return result
示例15: _calc_ball_trees
# 需要导入模块: from sklearn import neighbors [as 别名]
# 或者: from sklearn.neighbors import BallTree [as 别名]
def _calc_ball_trees(self, metric='euclidean'):
    """Build one BallTree per point cloud in ``self.dataset.data``.

    Only the x/y columns of each cloud are indexed. Returns the list of
    trees, one per cloud, in dataset order.
    """
    progress = tqdm(self.dataset.data,
                    desc='Ball trees have to be calculated from scratch')
    return [BallTree(cloud[:, :2], metric=metric) for cloud in progress]
return ball_trees