This article collects typical usage examples of the KDTree.query method from Python's sklearn.neighbors module. If you have been wondering what exactly KDTree.query does, or how to use it, the curated code examples below may help. You can also read further about the containing class, sklearn.neighbors.KDTree.
The 15 code examples of KDTree.query below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
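Before the examples, a minimal sketch of the method itself (the arrays here are made up purely for illustration). KDTree.query takes a 2-D array of query points and returns a (distances, indices) pair, each of shape (n_queries, k):

import numpy as np
from sklearn.neighbors import KDTree

rng = np.random.RandomState(0)
X = rng.rand(10, 2)                   # 10 toy points in 2-D
tree = KDTree(X, leaf_size=2)
dist, ind = tree.query(X[:1], k=3)    # 3 nearest neighbours of the first point
print(dist.shape, ind.shape)          # (1, 3) (1, 3)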
Example 1: KDBasedKNearestNeighbor
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]
import numpy
from collections import Counter

class KDBasedKNearestNeighbor(object):
    """
    KDTree-based KNN classifier with L2 distance
    """
    def __init__(self, k=1):
        self.k = k

    def fit(self, X_train, y_train):
        """
        Store the training data; the KD-tree itself is built in calc_dist, see
        http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html
        """
        self.X_train = X_train
        self.y_train = y_train
        return self

    def calc_dist(self, X_test, metric, k=None):
        if k is None:
            k = self.k
        # Build the KD-tree on the training data (X_test is not used here)
        self.kd_tree = KDTree(self.X_train, metric=metric, leaf_size=k)
        return self

    def get_neighbors(self, X_test, k=None):
        if k is None:
            k = self.k
        # query returns (distances, indices); keep only the neighbour indices
        neighbors = self.kd_tree.query(X_test, k)
        return neighbors[1]

    def predict_labels(self, X_test, k=None):
        """
        Make predictions using the KD-tree.
        Return an array of predicted labels.
        """
        if k is None:
            k = self.k
        neighbors = self.kd_tree.query(X_test, k)
        num_test = X_test.shape[0]
        y_pred = numpy.zeros(num_test)
        for i in range(num_test):
            # Majority vote among the labels of the k nearest neighbours
            closest_y = self.y_train[neighbors[1][i]]
            count = Counter(closest_y)
            y_pred[i] = count.most_common(1)[0][0]
        return y_pred
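A short usage sketch of the class above; the toy arrays are assumptions for illustration only:

import numpy
X_train = numpy.random.rand(100, 4)
y_train = numpy.random.randint(0, 3, size=100)
X_test = numpy.random.rand(10, 4)

clf = KDBasedKNearestNeighbor(k=5).fit(X_train, y_train)
clf.calc_dist(X_test, metric='euclidean')    # builds the KD-tree
print(clf.predict_labels(X_test))            # majority vote over 5 neighbours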
Example 2: patch_classify
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]
import cPickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

def patch_classify():
    """
    Patch visualization: observe how training-data patches and actual-data
    patches relate to each other in PCA space.
    Builds a KD-tree for the nearest-neighbour lookups.
    """
    with open('training_data_full.pickle', 'rb') as f:
        # Load the corresponding raw patches
        kk = open("raw_data_full.pickle", 'rb')
        raw_lib = cPickle.load(kk)
        raw_lib = np.asarray(raw_lib, dtype='float32')
        # Load the transformed feature data
        training_data = cPickle.load(f)
        patch_lib, feature_lib = training_data
        feature_lib, patch_lib = (np.asarray(feature_lib, dtype='float32'), np.asarray(patch_lib, dtype='float32'))
        feature_lib = feature_lib.reshape((-1, 4 * 9 * 9))
        # Build the KD-tree
        tree = KDTree(feature_lib, leaf_size=len(feature_lib) / 100)
        # Search the KD-tree for the 100 nearest neighbours of three sample points
        # (query expects a 2-D array, hence the [[...]] indexing)
        dist, ind1 = tree.query(feature_lib[[5678]], k=100)
        nn1 = feature_lib[ind1][0]
        dist, ind2 = tree.query(feature_lib[[10000]], k=100)
        nn2 = feature_lib[ind2][0]
        dist, ind3 = tree.query(feature_lib[[1233]], k=100)
        nn3 = feature_lib[ind3][0]
        # Fit PCA and project all data into the 2-D PCA space
        pca = PCA(n_components=2)
        d2_data = pca.fit_transform(feature_lib).T
        # Project the neighbours' high-dimensional coordinates into the PCA space
        r1 = pca.transform(nn1).T
        r2 = pca.transform(nn2).T
        r3 = pca.transform(nn3).T
        # Set up the plot axes
        ax = plt.axes([0.1, 0.1, 0.8, 0.8])
        # Scatter plot of all data
        ax.scatter(d2_data[0], d2_data[1], c='g')
        # Scatter plots of the three neighbourhoods
        ax.scatter(r1[0], r1[1], c='r')
        ax.scatter(r2[0], r2[1], c='b')
        ax.scatter(r3[0], r3[1], c='y')
        # patch_lib / raw_lib hold the residual patches and the raw patches respectively
        patch_show(raw_lib[ind1][0], [0.05, 0.05, 0.4, 0.4], 'red')
        patch_show(raw_lib[ind2][0], [0.05, 0.55, 0.4, 0.4], 'blue')
        patch_show(raw_lib[ind3][0], [0.55, 0.05, 0.4, 0.4], 'yellow')
        plt.show()
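Since the three lookups above all hit the same tree, they could equally be batched into a single call; a minimal sketch of that variant:

# One query call for all three sample points; ind has shape (3, 100)
dist, ind = tree.query(feature_lib[[5678, 10000, 1233]], k=100)
nn1, nn2, nn3 = feature_lib[ind[0]], feature_lib[ind[1]], feature_lib[ind[2]]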
Example 3: neighbour3dpoints
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]
import numpy as np

def neighbour3dpoints(seqno, f1, f2, no_sets, pointsperset):
    pcl1name = 'seq' + seqno + 'frame' + str(f1)
    pcl2name = 'seq' + seqno + 'frame' + str(f2)
    path1 = '/home/manish/Awesomestuff/Subjects/IVP/Project_stereo/gen_data/coordinates/' + str(pcl1name) + '.npy'
    path2 = '/home/manish/Awesomestuff/Subjects/IVP/Project_stereo/gen_data/coordinates/' + str(pcl2name) + '.npy'
    cords1 = np.load(path1)
    cords2 = np.load(path2)
    i1 = hp.loadimage_kitti(seqno, 'l', f1, 0)
    i2 = hp.loadimage_kitti(seqno, 'l', f2, 0)
    (h, l) = i1.shape
    # Match feature points between the two frames
    (pts_1, pts_2) = getfeatures(i1, i2, no_sets, 0)
    pts3d_1 = featurepoint_toworldtransform(pts_1, (h, l), cords1)
    pts3d_2 = featurepoint_toworldtransform(pts_2, (h, l), cords2)
    # Keep only points with 0 < z < 50 in both frames
    mask1_1 = np.abs(pts3d_1[:, 2]) < 50
    mask1_2 = pts3d_1[:, 2] > 0
    mask1 = np.logical_and(mask1_1, mask1_2)
    mask2_1 = np.abs(pts3d_2[:, 2]) < 50
    mask2_2 = pts3d_2[:, 2] > 0
    mask2 = np.logical_and(mask2_1, mask2_2)
    mask = np.logical_and(mask1, mask2)
    pts3d_1 = pts3d_1[mask]
    pts3d_2 = pts3d_2[mask]
    n_keypoints = len(pts3d_1)
    print('Total of ' + str(n_keypoints) + ' keypoints are found')
    kdt1 = KDTree(cords1, leaf_size=30, metric='euclidean')
    dist1, idx1 = kdt1.query(pts3d_1, k=pointsperset, return_distance=True)  # Gives results in sorted order.
    pset1 = []
    n_sets = min(n_keypoints, no_sets)  # Check whether we have as many keypoint matches as requested sets.
    print('Total of ' + str(n_sets) + ' sets are found')
    for i in range(n_sets):
        pset1.append(pts3d_1[i])
        for j in range(pointsperset):
            pset1.append(cords1[idx1[i][j]])
    pset1 = np.array(pset1)
    kdt2 = KDTree(cords2, leaf_size=30, metric='euclidean')
    dist2, idx2 = kdt2.query(pts3d_2, k=pointsperset, return_distance=True)
    pset2 = []
    for i in range(n_sets):
        pset2.append(pts3d_2[i])
        for j in range(pointsperset):
            pset2.append(cords2[idx2[i][j]])
    pset2 = np.array(pset2)
    return (pset1, pset2)
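The two append loops can also be written with fancy indexing; a sketch of the same construction for the first set, assuming pts3d_1 has shape (n, 3) and idx1 has shape (n, pointsperset):

# Interleave each keypoint with its pointsperset nearest cloud points
pset1 = np.concatenate([pts3d_1[:n_sets, None, :], cords1[idx1[:n_sets]]], axis=1)
pset1 = pset1.reshape(-1, 3)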
Example 4: _hdbscan_prims_kdtree
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]
import numpy as np

def _hdbscan_prims_kdtree(X, min_samples=5, alpha=1.0,
                          metric='minkowski', p=2, leaf_size=40, gen_min_span_tree=False):
    if metric == 'minkowski':
        if p is None:
            raise TypeError('Minkowski metric given but no p value supplied!')
        if p < 0:
            raise ValueError('Minkowski metric with negative p value is not defined!')
    elif p is None:
        p = 2  # Unused, but needs to be an integer; assume euclidean
    size = X.shape[0]
    min_samples = min(size - 1, min_samples)
    tree = KDTree(X, metric=metric, leaf_size=leaf_size)
    dist_metric = DistanceMetric.get_metric(metric)
    # Core distance: distance to the min_samples-th nearest neighbour
    core_distances = tree.query(X, k=min_samples,
                                dualtree=True,
                                breadth_first=True)[0][:, -1]
    min_spanning_tree = mst_linkage_core_cdist(X, core_distances, dist_metric, alpha)
    min_spanning_tree = min_spanning_tree[np.argsort(min_spanning_tree.T[2]), :]
    single_linkage_tree = label(min_spanning_tree)
    return single_linkage_tree, None
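The slicing at the end keeps, for each point, the distance to its min_samples-th nearest neighbour (the "core distance" in HDBSCAN terms); the same idea in isolation, with toy data:

import numpy as np
from sklearn.neighbors import KDTree

X = np.random.rand(50, 3)    # toy data, for illustration only
tree = KDTree(X)
# query returns (distances, indices); [0] keeps the distances and
# [:, -1] keeps the furthest of the k neighbours for each point
core_distances = tree.query(X, k=5)[0][:, -1]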
Example 5: __init__
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]
import numpy as np
from sklearn.preprocessing import normalize

class Document:
    def __init__(self, embeddings=None, doc_file_name=None, word_index=None, model=None, use_lemma=False):
        # Build the embeddings from a word index if they were not passed in directly:
        if embeddings is None and word_index is not None and model is not None:
            (idx, embeddings) = Word2VecExecuter.Word2VecLoadWordsHashTable(model, word_index)
            embeddings = np.array(embeddings)
        elif doc_file_name is not None and model is not None:
            Features.USE_LEMMA = use_lemma
            Features.REMOVE_FEATURES_ONLY_APPEARING_ONE_TIME = False
            Features.REMOVE_FEATURES_APPEARING_IN_ONLY_ONE_DOCUMENT = False
            words = Features.ReadDependencyParseFile(doc_file_name, funit=Features.FeatureUnits.WORD, remove=False)
            (word_index, embeddings) = Word2VecExecuter.Word2VecLoadWordsHashTable(model, words)
            embeddings = np.array(embeddings)
            del word_index
        self.kd_tree = KDTree(normalize(embeddings), leaf_size=30, metric='euclidean')

    def distance(self, other, theta=0.5):
        if other.__class__ == Document:
            # Average nearest-neighbour distance in both directions, weighted by theta
            (d_self_to_other, i_self_to_other) = self.kd_tree.query(other.kd_tree.data, k=1, return_distance=True)
            del i_self_to_other
            (d_other_to_self, i_other_to_self) = other.kd_tree.query(self.kd_tree.data, k=1, return_distance=True)
            del i_other_to_self
            return np.mean(d_self_to_other) * theta + np.mean(d_other_to_self) * (1 - theta)
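The distance method is a symmetric, theta-weighted average of nearest-neighbour distances in both directions; a self-contained sketch of the same idea on two raw embedding matrices (toy data, normalized as above):

import numpy as np
from sklearn.neighbors import KDTree
from sklearn.preprocessing import normalize

A = normalize(np.random.rand(20, 50))    # toy embeddings for document A
B = normalize(np.random.rand(30, 50))    # toy embeddings for document B
tree_a, tree_b = KDTree(A), KDTree(B)
d_ab = tree_a.query(B, k=1)[0]   # for each row of B, distance to nearest row of A
d_ba = tree_b.query(A, k=1)[0]
theta = 0.5
print(np.mean(d_ab) * theta + np.mean(d_ba) * (1 - theta))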
Example 6: match
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]
import numpy as np

def match(x, y, mytab):
    """Routine that matches the truth catalog
    with the input table.
    Args:
    ----
    x: `float` RA of the truth objects to match (in degrees)
    y: `float` dec of the truth objects to match (in degrees)
    mytab: `astropy.table.Table` table containing the L2
        input catalog.
    Returns:
    -------
    ind: `int` array of indices to select the truth objects
        that match the detected objects
    """
    X = np.zeros((len(x), 2))
    X[:, 0] = x
    X[:, 1] = y
    tree = KDTree(X, leaf_size=40)
    Y = np.zeros((len(mytab), 2))
    Y[:, 0] = mytab['coord_ra'] * 180 / np.pi
    Y[:, 1] = mytab['coord_dec'] * 180 / np.pi
    # Nearest truth object for every detected object
    dist, ind = tree.query(Y, k=1)
    print 'Matches with distance > 1 px, ', np.count_nonzero(dist > 1)
    return ind
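A hypothetical usage sketch; the toy catalog below is an assumption for illustration, and only the two coordinate columns matter:

import numpy as np
from astropy.table import Table

ra = np.random.uniform(0, 1, 100)     # degrees
dec = np.random.uniform(0, 1, 100)
mytab = Table({'coord_ra': np.radians(ra), 'coord_dec': np.radians(dec)})
ind = match(ra, dec, mytab)           # indices of matching truth objects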
Example 7: compute_centroids
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]

def compute_centroids(X, C):
    """For each center in C, return the index of its nearest point in X.
    Note: the centers C may not belong to X.
    """
    tree = KDTree(X)
    centroids = tree.query(C, k=1, return_distance=False).squeeze()
    return centroids
Example 8: compute_labels
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]

def compute_labels(X, C):
    """Compute the cluster labels for dataset X given centers C.
    """
    # labels = np.argmin(pairwise_distances(C, X), axis=0)  # requires too much memory for large X
    tree = KDTree(C)
    labels = tree.query(X, k=1, return_distance=False).squeeze()
    return labels
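Together with compute_centroids from Example 7, this gives both halves of a Lloyd-style assignment step; a toy sketch, assuming both functions are defined as above:

import numpy as np

X = np.random.rand(200, 2)                          # toy data
C = X[np.random.choice(len(X), 5, replace=False)]   # 5 initial centers
labels = compute_labels(X, C)        # index of the nearest center per point
medoids = compute_centroids(X, C)    # index of the nearest data point per center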
Example 9: buildDistanceMap
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]
import numpy as np

def buildDistanceMap(self, X, Y):
    classes = np.unique(Y)
    nClasses = len(classes)
    tree = KDTree(X)
    nRows = X.shape[0]
    TSOri = np.array([]).reshape(0, self.k)
    distanceMap = np.array([]).reshape(0, self.k)
    labels = np.array([]).reshape(0, self.k)
    for row in range(nRows):
        # Ask for k+1 neighbours and drop the first one (the point itself)
        distances, indicesOfNeighbors = tree.query(X[row].reshape(1, -1), k=self.k + 1)
        distances = distances[0][1:]
        indicesOfNeighbors = indicesOfNeighbors[0][1:]
        distanceMap = np.append(distanceMap, np.array(distances).reshape(1, self.k), axis=0)
        labels = np.append(labels, np.array(Y[indicesOfNeighbors]).reshape(1, self.k), axis=0)
    for c in classes:
        nTraining = np.sum(Y == c)
        labelTmp = labels[Y.ravel() == c, :]
        tmpKNNClass = labelTmp.ravel()
        TSOri = np.append(TSOri, len(tmpKNNClass[tmpKNNClass == c]) / (nTraining * float(self.k)))
    return distanceMap, labels, TSOri
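The k+1-and-drop pattern, and the fact that the per-row loop can be replaced by one batched call, in a standalone sketch with toy data:

import numpy as np
from sklearn.neighbors import KDTree

X = np.random.rand(30, 2)    # toy data
k = 3
tree = KDTree(X)
# Each point's nearest neighbour in its own tree is itself (distance 0),
# so ask for k+1 columns and drop the first.
dist, idx = tree.query(X, k=k + 1)
dist, idx = dist[:, 1:], idx[:, 1:]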
Example 10: kdtree
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]
import numpy as np

def kdtree(data, lake_matrix, k_neighbors=10, leaf_size=20):
    # Build the tree on the reference data
    kdtree = KDTree(data, leaf_size=leaf_size, metric='euclidean')
    # Query the k nearest neighbours for every row of lake_matrix
    distances, indices = kdtree.query(lake_matrix, k=k_neighbors)
    return np.array(indices), distances
Example 11: constructLMap
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]
import math as m
import numpy as np

def constructLMap(self):
    self.obstacleArray = []
    self.allPositions = []
    # Build the obstacle array
    for i in range(len(self.map.grid)):
        for j in range(len(self.map.grid[0])):
            [x, y] = self.map.cell_position(i, j)
            if self.map.get_cell(x, y) == 1.0:
                self.obstacleArray.append(np.array(self.map.cell_position(i, j)))
            self.allPositions.append(np.array(self.map.cell_position(i, j)))
    # Pass the obstacles into a KD-tree: distance from every cell to its nearest obstacle
    kdt = KDTree(self.obstacleArray)
    dists = kdt.query(self.allPositions, k=1)[0]
    self.laserStdDev = self.config["laser_sigma_hit"]
    # Unnormalized Gaussian of the distance to the nearest obstacle
    # (the normalization constant is computed but never applied)
    constant = 1.0 / (m.sqrt(2 * m.pi) * self.laserStdDev)
    eExp = np.exp(-0.5 * (dists ** 2) / (self.laserStdDev ** 2))
    probObsGivenLaser = eExp
    self.lMap.grid = probObsGivenLaser.reshape(self.lMap.grid.shape)
    self.occupancyGridMsg = self.lMap.to_message()
    self.lMapPublisher.publish(self.occupancyGridMsg)
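A standalone sketch of the same likelihood-field idea, with made-up obstacle and cell positions (sigma stands in for the laser_sigma_hit config value):

import numpy as np
from sklearn.neighbors import KDTree

obstacles = np.random.rand(50, 2)    # toy obstacle positions
cells = np.random.rand(200, 2)       # toy grid-cell positions
sigma = 0.2

dists = KDTree(obstacles).query(cells, k=1)[0].ravel()
prob = np.exp(-0.5 * dists ** 2 / sigma ** 2)    # unnormalized Gaussian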
Example 12: margin_new
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]

def margin_new(indices, k, X, y):
    margins = []
    kd_tree = KDTree(X)
    for img_index in indices:
        margin = 0
        dist_to_others = 0
        current_class = y[img_index]
        dists, neighbour_indices = kd_tree.query(X[img_index].reshape((1, X[img_index].shape[0])),
                                                 k)
        # Sum the neighbour distances per class
        classes = {}
        for i in xrange(neighbour_indices[0].shape[0]):
            index = neighbour_indices[0][i]
            if y[index] in classes:
                classes[y[index]] += dists[0][i]
            else:
                classes[y[index]] = dists[0][i]
        dist_to_class = classes.get(current_class, 0)
        classes.pop(current_class, None)
        if classes:
            dist_to_others = min(classes.items(), key=lambda x: x[1])[1]
            margin = dist_to_class - dist_to_others
        margins.append(margin)
    return margins
Example 13: match
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]
import numpy as np

def match(x1, y1, x2=None, y2=None, k=5, kdt=None):
    X1 = np.vstack([x1, y1]).T
    # Build the tree only when one was not passed in (x2/y2 may be None otherwise)
    if kdt is None:
        X2 = np.vstack([x2, y2]).T
        kdt = KDTree(X2, leaf_size=30, metric='euclidean')
    dists, inds = kdt.query(X1, k=k, return_distance=True)
    return dists, inds, kdt
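Returning kdt lets callers reuse the tree across repeated matches against the same reference set; a usage sketch with toy coordinates:

import numpy as np

x2, y2 = np.random.rand(1000), np.random.rand(1000)    # reference set
x1, y1 = np.random.rand(10), np.random.rand(10)        # first query set

dists, inds, kdt = match(x1, y1, x2, y2, k=5)    # builds the tree
x3, y3 = np.random.rand(10), np.random.rand(10)  # later query set
dists, inds, _ = match(x3, y3, k=5, kdt=kdt)     # reuses the cached tree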
Example 14: test_kdtree_projection
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]
import random
import numpy as np

def test_kdtree_projection(datas):
    from sklearn.neighbors import KDTree
    from sklearn import random_projection
    # datas = parse()
    Fs = fingerprints(datas)
    # The random projection
    transformer = random_projection.GaussianRandomProjection(n_components=128)
    Fs_new = transformer.fit_transform(Fs)
    print Fs_new.shape
    tree = KDTree(Fs_new, leaf_size=20)
    # Select a random target
    target_i = random.choice(range(len(datas)))
    target = datas[target_i]
    Tf = np.vstack([fingerprint(target)])
    Tf_new = transformer.transform(Tf)
    # Match it
    with timer(10):
        for _ in xrange(10):
            dist, ind = tree.query(Tf_new, k=3)
    assert datas[ind[0][0]] == datas[target_i]
Example 15: _hdbscan_prims_kdtree
# Required import: from sklearn.neighbors import KDTree [as alias]
# Or: from sklearn.neighbors.KDTree import query [as alias]
import numpy as np

def _hdbscan_prims_kdtree(X, min_samples=5, alpha=1.0,
                          metric='minkowski', p=2, leaf_size=40, gen_min_span_tree=False):
    if metric == 'minkowski':
        if p is None:
            raise TypeError('Minkowski metric given but no p value supplied!')
        if p < 0:
            raise ValueError('Minkowski metric with negative p value is not defined!')
    elif p is None:
        p = 2  # Unused, but needs to be an integer; assume euclidean
    size = X.shape[0]
    min_samples = min(size - 1, min_samples)
    tree = KDTree(X, metric=metric, leaf_size=leaf_size)
    # TODO: deal with p for minkowski appropriately
    dist_metric = DistanceMetric.get_metric(metric)
    # Get the distance to the k-th nearest neighbour
    core_distances = tree.query(X, k=min_samples,
                                dualtree=True,
                                breadth_first=True)[0][:, -1]
    # Mutual reachability distance is implicit in mst_linkage_core_cdist
    min_spanning_tree = mst_linkage_core_cdist(X, core_distances, dist_metric, alpha)
    # Sort the edges of the minimum spanning tree by weight
    min_spanning_tree = min_spanning_tree[np.argsort(min_spanning_tree.T[2]), :]
    # Convert the edge list into the standard hierarchical clustering format
    single_linkage_tree = label(min_spanning_tree)
    return single_linkage_tree, None