This article collects typical usage examples of the sklearn.neighbors.KDTree class in Python. If you have been wondering what neighbors.KDTree is for, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also read further about the enclosing sklearn.neighbors module.
The following shows 15 code examples of neighbors.KDTree, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
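Before diving into the examples, here is a minimal sketch of the core KDTree workflow that all of them build on; the array sizes and parameter values are illustrative assumptions:

import numpy as np
from sklearn.neighbors import KDTree

rng = np.random.RandomState(0)
X = rng.random_sample((100, 3))          # 100 points in 3-D

tree = KDTree(X, leaf_size=40)           # build the tree once
dist, ind = tree.query(X[:5], k=3)       # 3 nearest neighbors of the first 5 points
ind_r = tree.query_radius(X[:5], r=0.3)  # all neighbors within radius 0.3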
Example 1: fit
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def fit(self, X, y):
    """Fit the analog model using a KDTree.

    Parameters
    ----------
    X : pd.Series or pd.DataFrame, shape (n_samples, 1)
        Training data.
    y : pd.Series or pd.DataFrame, shape (n_samples, 1)
        Target values.

    Returns
    -------
    self : returns an instance of self.
    """
    if len(X) < self.n_analogs:
        warnings.warn('length of X is less than n_analogs, setting n_analogs = len(X)')
        self.n_analogs = len(X)

    self.kdtree_ = KDTree(X, **self.kdtree_kwargs)
    self.y_ = y
    return self
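A hypothetical harness so this method can be tried standalone; the class name AnalogModel and its n_analogs / kdtree_kwargs attributes are assumptions, not part of the original source:

import warnings
import numpy as np
import pandas as pd
from sklearn.neighbors import KDTree

class AnalogModel:
    def __init__(self, n_analogs=5, **kdtree_kwargs):
        self.n_analogs = n_analogs
        self.kdtree_kwargs = kdtree_kwargs

    fit = fit  # bind the module-level fit function shown above

X = pd.DataFrame({'x': np.linspace(0.0, 1.0, 20)})
y = pd.Series(np.linspace(0.0, 2.0, 20))
model = AnalogModel(n_analogs=5).fit(X, y)
dist, ind = model.kdtree_.query(X.iloc[:1], k=model.n_analogs)  # 5 nearest analogs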
Example 2: point_cloud_overlap
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def point_cloud_overlap(pc_src, pc_tgt, R_gt_44):
    # Bring the source cloud into the target frame: R * p + t
    pc_src_trans = np.matmul(R_gt_44[:3, :3], pc_src.T) + R_gt_44[:3, 3:4]
    tree = KDTree(pc_tgt)
    nearest_dist, nearest_ind = tree.query(pc_src_trans.T, k=1)
    nns2t = np.min(nearest_dist)
    hasCorres = (nearest_dist < 0.08)
    overlap_val_s2t = hasCorres.sum() / pc_src.shape[0]

    # Bring the target cloud into the source frame with the inverse pose
    pc_tgt_trans = np.matmul(
        np.linalg.inv(R_gt_44),
        np.concatenate((pc_tgt.T, np.ones([1, pc_tgt.shape[0]])))
    )[:3, :]
    tree = KDTree(pc_src)
    nearest_dist, nearest_ind = tree.query(pc_tgt_trans.T, k=1)
    nnt2s = np.min(nearest_dist)
    hasCorres = (nearest_dist < 0.08)
    overlap_val_t2s = hasCorres.sum() / pc_tgt.shape[0]

    overlap_val = max(overlap_val_s2t, overlap_val_t2s)
    cam_dist_this = np.linalg.norm(R_gt_44[:3, 3])
    pc_dist_this = np.linalg.norm(pc_src_trans.mean(1) - pc_tgt.T.mean(1))
    pc_nn = (nns2t + nnt2s) / 2
    return overlap_val, cam_dist_this, pc_dist_this, pc_nn
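A quick way to exercise the function with synthetic inputs; the cloud sizes, noise level, and identity ground-truth pose are assumptions for illustration:

import numpy as np
from sklearn.neighbors import KDTree

rng = np.random.RandomState(0)
pc_src = rng.random_sample((500, 3))             # N x 3 source cloud
pc_tgt = pc_src + rng.normal(0, 0.01, (500, 3))  # noisy copy as target
R_gt_44 = np.eye(4)                              # identity 4x4 pose
overlap, cam_d, pc_d, nn_d = point_cloud_overlap(pc_src, pc_tgt, R_gt_44)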
Example 3: kd_align
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def kd_align(emb1, emb2, normalize=False, distance_metric="euclidean", num_top=50):
    kd_tree = KDTree(emb2, metric=distance_metric)
    dist, ind = kd_tree.query(emb1, k=num_top)
    print("queried alignments")
    row = np.array([])
    for i in range(emb1.shape[0]):
        row = np.concatenate((row, np.ones(num_top) * i))
    col = ind.flatten()
    data = np.exp(-dist).flatten()
    sparse_align_matrix = coo_matrix((data, (row, col)), shape=(emb1.shape[0], emb2.shape[0]))
    return sparse_align_matrix.tocsr()
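A usage sketch with random embeddings; the shapes and num_top value are assumptions (coo_matrix comes from scipy.sparse in the original module):

import numpy as np
from scipy.sparse import coo_matrix
from sklearn.neighbors import KDTree

rng = np.random.RandomState(0)
emb1 = rng.random_sample((100, 16))       # embeddings of graph 1
emb2 = rng.random_sample((120, 16))       # embeddings of graph 2
align = kd_align(emb1, emb2, num_top=10)  # 100 x 120 sparse CSR matrix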
Example 4: test_unsupervised_inputs
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
Example 5: freeze_junction
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def freeze_junction(self, status=True):
self._freeze_junction = status
if status:
clusters = fclusterdata(self._junctions, self._eps_junc, criterion="distance")
junc_groups = {}
for ind_junc, ind_group in enumerate(clusters):
if ind_group not in junc_groups.keys():
junc_groups[ind_group] = []
junc_groups[ind_group].append(self._junctions[ind_junc])
if self.verbose:
print(f"{len(self._junctions) - len(junc_groups)} junctions merged.")
self._junctions = [np.mean(junc_group, axis=0) for junc_group in junc_groups.values()]
self._kdtree = KDTree(self._junctions, leaf_size=30)
dists, inds = self._kdtree.query(self._junctions, k=2)
repl_inds = np.nonzero(dists.sum(axis=1) < self._eps_junc)[0].tolist()
# assert len(repl_inds) == 0
else:
self._kdtree = None
Example 6: test_neighbors
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def test_neighbors(adatas):
adata_ref = adatas[0].copy()
adata_new = adatas[1].copy()
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
ing.neighbors(k=10)
indices = ing._indices
tree = KDTree(adata_ref.obsm['X_pca'])
true_indices = tree.query(ing._obsm['rep'], 10, return_distance=False)
num_correct = 0.0
for i in range(adata_new.n_obs):
num_correct += np.sum(np.in1d(true_indices[i], indices[i]))
percent_correct = num_correct / (adata_new.n_obs * 10)
assert percent_correct > 0.99
Example 7: knnsearch
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def knnsearch(target, source, metrics='euclidean', k_size=1, leaf_sizes=30):
    """Build a KDTree from `target`, then query it with `source`."""
    # both inputs must be [N (size), D (dimension)] arrays with matching D
    if not (target.shape[1] == source.shape[1]):
        raise ValueError('The two inputs must share the same dimension D; expected [N, D] arrays')
    kdt_build = KDTree(target, leaf_size=leaf_sizes, metric=metrics)
    distances, indices = kdt_build.query(source, k=k_size)
    averagedist = np.sum(distances) / source.shape[0]  # mean over the N source points
    return (averagedist, distances, indices)
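A usage sketch; the array sizes here are arbitrary assumptions:

import numpy as np
from sklearn.neighbors import KDTree

rng = np.random.RandomState(0)
target = rng.random_sample((1000, 3))
source = rng.random_sample((200, 3))
avg_dist, dists, idx = knnsearch(target, source, k_size=1)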
Example 8: test_nn_descent_neighbor_accuracy
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def test_nn_descent_neighbor_accuracy():
knn_indices, _ = NNDescent(
nn_data, "euclidean", {}, 10, random_state=np.random
)._neighbor_graph
tree = KDTree(nn_data)
true_indices = tree.query(nn_data, 10, return_distance=False)
num_correct = 0.0
for i in range(nn_data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (nn_data.shape[0] * 10)
    assert_greater_equal(
        percent_correct,
        0.98,
        "NN-descent did not get 98% accuracy on nearest neighbors",
    )
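Examples 8 through 13 all measure accuracy with the same recall pattern: count how many of the exact k nearest neighbors (from a KDTree) the approximate index recovered. A small standalone helper capturing that pattern (the function name is my own):

import numpy as np
from sklearn.neighbors import KDTree

def neighbor_recall(data, approx_indices, k=10):
    # fraction of the true k-NN recovered, averaged over all points
    true_indices = KDTree(data).query(data, k, return_distance=False)
    hits = sum(np.sum(np.in1d(true_indices[i], approx_indices[i]))
               for i in range(data.shape[0]))
    return hits / (data.shape[0] * k)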
Example 9: test_angular_nn_descent_neighbor_accuracy
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def test_angular_nn_descent_neighbor_accuracy():
knn_indices, _ = NNDescent(
nn_data, "cosine", {}, 10, random_state=np.random
)._neighbor_graph
angular_data = normalize(nn_data, norm="l2")
tree = KDTree(angular_data)
true_indices = tree.query(angular_data, 10, return_distance=False)
num_correct = 0.0
for i in range(nn_data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (nn_data.shape[0] * 10)
    assert_greater_equal(
        percent_correct,
        0.98,
        "NN-descent did not get 98% accuracy on nearest neighbors",
    )
Example 10: test_sparse_nn_descent_neighbor_accuracy
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def test_sparse_nn_descent_neighbor_accuracy():
knn_indices, _ = NNDescent(
sparse_nn_data, "euclidean", n_neighbors=20, random_state=None
)._neighbor_graph
tree = KDTree(sparse_nn_data.toarray())
true_indices = tree.query(sparse_nn_data.toarray(), 10, return_distance=False)
num_correct = 0.0
for i in range(sparse_nn_data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (sparse_nn_data.shape[0] * 10)
    assert_greater_equal(
        percent_correct,
        0.85,
        "Sparse NN-descent did not get 85% accuracy on nearest neighbors",
    )
Example 11: test_sparse_angular_nn_descent_neighbor_accuracy
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def test_sparse_angular_nn_descent_neighbor_accuracy():
knn_indices, _ = NNDescent(
sparse_nn_data, "cosine", {}, 20, random_state=None
)._neighbor_graph
angular_data = normalize(sparse_nn_data, norm="l2").toarray()
tree = KDTree(angular_data)
true_indices = tree.query(angular_data, 10, return_distance=False)
num_correct = 0.0
for i in range(sparse_nn_data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (sparse_nn_data.shape[0] * 10)
    assert_greater_equal(
        percent_correct,
        0.85,
        "Sparse angular NN-descent did not get 85% accuracy on nearest neighbors",
    )
Example 12: test_nn_descent_query_accuracy
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def test_nn_descent_query_accuracy():
nnd = NNDescent(nn_data[200:], "euclidean", n_neighbors=10, random_state=None)
knn_indices, _ = nnd.query(nn_data[:200], k=10, epsilon=0.2)
tree = KDTree(nn_data[200:])
true_indices = tree.query(nn_data[:200], 10, return_distance=False)
num_correct = 0.0
for i in range(true_indices.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (true_indices.shape[0] * 10)
    assert_greater_equal(
        percent_correct,
        0.95,
        "NN-descent query did not get 95% accuracy on nearest neighbors",
    )
Example 13: test_random_state_none
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def test_random_state_none():
knn_indices, _ = NNDescent(
nn_data, "euclidean", {}, 10, random_state=None
)._neighbor_graph
tree = KDTree(nn_data)
true_indices = tree.query(nn_data, 10, return_distance=False)
num_correct = 0.0
for i in range(nn_data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
    percent_correct = num_correct / (nn_data.shape[0] * 10)
    assert_greater_equal(
        percent_correct,
        0.99,
        "NN-descent did not get 99% accuracy on nearest neighbors",
    )
Example 14: neighbours
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def neighbours(self, word, size=10):
    """
    Get nearest words with KDTree, ranking by cosine distance
    """
    word = word.strip()
    v = self.word_vec(word)
    [distances], [points] = self.kdt.query(array([v]), k=size, return_distance=True)
    assert len(distances) == len(points), "distances and points should be in same shape."
    words, scores = [], {}
    for (x, y) in zip(points, distances):
        w = self.index2word[x]
        if w == word:
            s = 1.0
        else:
            s = cosine(v, self.syn0[x])
        if s < 0:
            s = abs(s)
        words.append(w)
        scores[w] = min(s, 1.0)
    for x in sorted(words, key=scores.get, reverse=True):
        yield x, scores[x]
Example 15: construct_query_dict
# Required imports: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KDTree [as alias]
def construct_query_dict(df_centroids, filename):
    tree = KDTree(df_centroids[['northing', 'easting']])
    # neighbors within r=10 are positives; anything within r=50 is excluded from negatives
    ind_nn = tree.query_radius(df_centroids[['northing', 'easting']], r=10)
    ind_r = tree.query_radius(df_centroids[['northing', 'easting']], r=50)
    queries = {}
    for i in range(len(ind_nn)):
        query = df_centroids.iloc[i]["file"]
        positives = np.setdiff1d(ind_nn[i], [i]).tolist()
        negatives = np.setdiff1d(df_centroids.index.values.tolist(), ind_r[i]).tolist()
        random.shuffle(negatives)
        queries[i] = {"query": query, "positives": positives, "negatives": negatives}
    with open(filename, 'wb') as handle:
        pickle.dump(queries, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print("Done ", filename)