This article collects typical usage examples of the Python method sklearn.neighbors.kneighbors_graph. If you are wondering what neighbors.kneighbors_graph does, or how to call it in practice, the curated code examples below may help. You can also explore further usage examples from the containing module, sklearn.neighbors.
Below are 15 code examples of neighbors.kneighbors_graph, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
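Before diving into the examples, here is a minimal sketch of the basic call (the three points below are made up for illustration; kneighbors_graph returns a sparse CSR adjacency matrix):

import numpy as np
from sklearn.neighbors import kneighbors_graph

X = np.array([[0., 1.], [1., 1.], [4., 0.]])
A = kneighbors_graph(X, n_neighbors=1, mode='connectivity', include_self=False)
print(A.toarray())
# [[0. 1. 0.]
#  [1. 0. 0.]
#  [0. 1. 0.]]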
Example 1: text_to_graph

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def text_to_graph(text):
    import networkx as nx
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.neighbors import kneighbors_graph

    # use tf-idf to transform the texts into feature vectors
    vectorizer = TfidfVectorizer()
    vectors = vectorizer.fit_transform(text)

    # build a fully connected graph: each point is linked to all N points
    N = vectors.shape[0]
    mat = kneighbors_graph(vectors, N, metric='cosine', mode='distance', include_self=True)
    mat.data = 1 - mat.data  # convert cosine distance to similarity
    g = nx.from_scipy_sparse_matrix(mat, create_using=nx.Graph())
    return g
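A usage sketch for the function above (the toy corpus is made up; any list of strings works):

docs = [
    "the cat sat on the mat",
    "the dog chased the cat",
    "stock prices rose sharply today",
]
g = text_to_graph(docs)
print(g.number_of_nodes())   # 3, one node per document
print(g.edges(data=True))    # edge weights carry the pairwise cosine similarities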
Example 2: test_isomap_simple_grid

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def test_isomap_simple_grid():
    # Isomap should preserve distances when all neighbors are used
    N_per_side = 5
    Npts = N_per_side ** 2
    n_neighbors = Npts - 1

    # grid of equidistant points in 2D, n_components = n_dim
    X = np.array(list(product(range(N_per_side), repeat=2)))

    # distances from each point to all others
    G = neighbors.kneighbors_graph(X, n_neighbors,
                                   mode='distance').toarray()

    for eigen_solver in eigen_solvers:
        for path_method in path_methods:
            clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
                                  eigen_solver=eigen_solver,
                                  path_method=path_method)
            clf.fit(X)

            G_iso = neighbors.kneighbors_graph(clf.embedding_,
                                               n_neighbors,
                                               mode='distance').toarray()
            assert_array_almost_equal(G, G_iso)
Example 3: test_kneighbors_graph_sparse

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def test_kneighbors_graph_sparse(seed=36):
    # Test kneighbors_graph to build the k-Nearest Neighbor graph
    # for sparse input.
    rng = np.random.RandomState(seed)
    X = rng.randn(10, 10)
    Xcsr = csr_matrix(X)

    for n_neighbors in [1, 2, 3]:
        for mode in ["connectivity", "distance"]:
            assert_array_almost_equal(
                neighbors.kneighbors_graph(X,
                                           n_neighbors,
                                           mode=mode).toarray(),
                neighbors.kneighbors_graph(Xcsr,
                                           n_neighbors,
                                           mode=mode).toarray())
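As the test above implies, kneighbors_graph accepts SciPy sparse input directly; a minimal sketch (random data for illustration):

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.neighbors import kneighbors_graph

X = np.random.RandomState(0).randn(10, 10)
A_dense_input = kneighbors_graph(X, 3, mode='connectivity')
A_sparse_input = kneighbors_graph(csr_matrix(X), 3, mode='connectivity')
assert (A_dense_input != A_sparse_input).nnz == 0   # identical adjacency matrices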
Example 4: fit

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def fit(self, X):
    """Fit the clustering model

    Parameters
    ----------
    X : array_like
        the data to be clustered: shape = [n_samples, n_features]
    """
    X = np.asarray(X, dtype=float)
    self.X_train_ = X

    # generate a sparse graph using the k nearest neighbors of each point
    G = kneighbors_graph(X, n_neighbors=self.n_neighbors, mode='distance')

    # Compute the minimum spanning tree of this graph
    self.full_tree_ = minimum_spanning_tree(G, overwrite=True)

    # Find the cluster labels
    self.n_components_, self.labels_, self.cluster_graph_ = \
        self.compute_clusters()

    return self
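The surrounding class (which defines n_neighbors and compute_clusters) is not shown here; a self-contained sketch of the same idea, a k-NN graph followed by a minimum spanning tree, on made-up data:

import numpy as np
from scipy.sparse.csgraph import minimum_spanning_tree
from sklearn.neighbors import kneighbors_graph

X = np.random.RandomState(0).rand(20, 2)
G = kneighbors_graph(X, n_neighbors=5, mode='distance')
mst = minimum_spanning_tree(G, overwrite=True)
# cutting the longest edges of this tree would split the data into clusters
print(mst.nnz)   # number of edges kept in the spanning tree (or forest)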
Example 5: _get_adj_from_data

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def _get_adj_from_data(X, k, **kwargs):
    """
    Computes the adjacency matrix of a k-NN graph from the given data.
    :param X: np.array, the 2D coordinates of pixels on the grid.
    :param k: int, the number of nearest neighbours to use.
    :param kwargs: kwargs for sklearn.neighbors.kneighbors_graph (see docs
    [here](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.kneighbors_graph.html)).
    :return: scipy sparse matrix.
    """
    # build the k-NN graph, then symmetrize it with an element-wise maximum
    A = kneighbors_graph(X, k, **kwargs).toarray()
    A = sp.csr_matrix(np.maximum(A, A.T))
    return A
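A usage sketch, assuming the module-level imports numpy as np, scipy.sparse as sp, and kneighbors_graph that the function relies on (the 3x3 pixel grid is made up):

import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import kneighbors_graph

coords = np.array([[i, j] for i in range(3) for j in range(3)], dtype=float)
A = _get_adj_from_data(coords, k=4, mode='connectivity', include_self=False)
print(A.shape)           # (9, 9)
print((A != A.T).nnz)    # 0 -> the returned adjacency matrix is symmetric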
Example 6: test_isomap_reconstruction_error

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def test_isomap_reconstruction_error():
    # Same setup as in test_isomap_simple_grid, with an added dimension
    N_per_side = 5
    Npts = N_per_side ** 2
    n_neighbors = Npts - 1

    # grid of equidistant points in 2D, n_components = n_dim
    X = np.array(list(product(range(N_per_side), repeat=2)))

    # add noise in a third dimension
    rng = np.random.RandomState(0)
    noise = 0.1 * rng.randn(Npts, 1)
    X = np.concatenate((X, noise), 1)

    # compute input kernel
    G = neighbors.kneighbors_graph(X, n_neighbors,
                                   mode='distance').toarray()
    centerer = preprocessing.KernelCenterer()
    K = centerer.fit_transform(-0.5 * G ** 2)

    for eigen_solver in eigen_solvers:
        for path_method in path_methods:
            clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
                                  eigen_solver=eigen_solver,
                                  path_method=path_method)
            clf.fit(X)

            # compute output kernel
            G_iso = neighbors.kneighbors_graph(clf.embedding_,
                                               n_neighbors,
                                               mode='distance').toarray()
            K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)

            # make sure error agrees
            reconstruction_error = np.linalg.norm(K - K_iso) / Npts
            assert_almost_equal(reconstruction_error,
                                clf.reconstruction_error())
Example 7: test_not_fitted_error_gets_raised

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def test_not_fitted_error_gets_raised():
    X = [[1]]
    neighbors_ = neighbors.NearestNeighbors()
    assert_raises(NotFittedError, neighbors_.kneighbors_graph, X)
    assert_raises(NotFittedError, neighbors_.radius_neighbors_graph, X)
Example 8: test_kneighbors_graph

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def test_kneighbors_graph():
    # Test kneighbors_graph to build the k-Nearest Neighbor graph.
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])

    # n_neighbors = 1
    A = neighbors.kneighbors_graph(X, 1, mode='connectivity',
                                   include_self=True)
    assert_array_equal(A.toarray(), np.eye(A.shape[0]))

    A = neighbors.kneighbors_graph(X, 1, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0.00, 1.01, 0.],
         [1.01, 0., 0.],
         [0.00, 1.40716026, 0.]])

    # n_neighbors = 2
    A = neighbors.kneighbors_graph(X, 2, mode='connectivity',
                                   include_self=True)
    assert_array_equal(
        A.toarray(),
        [[1., 1., 0.],
         [1., 1., 0.],
         [0., 1., 1.]])

    A = neighbors.kneighbors_graph(X, 2, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1.01, 2.23606798],
         [1.01, 0., 1.40716026],
         [2.23606798, 1.40716026, 0.]])

    # n_neighbors = 3
    A = neighbors.kneighbors_graph(X, 3, mode='connectivity',
                                   include_self=True)
    assert_array_almost_equal(
        A.toarray(),
        [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
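The expected distances in this test follow directly from the Euclidean metric; a quick arithmetic check of the two non-trivial values (not part of the original test):

import numpy as np

print(np.hypot(2 - 1.01, 0 - 1.))   # 1.40716026..., distance between [1.01, 1.] and [2, 0]
print(np.hypot(2 - 0., 0 - 1.))     # 2.23606797... == sqrt(5), distance between [0, 1] and [2, 0]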
Example 9: test_non_euclidean_kneighbors

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def test_non_euclidean_kneighbors():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)

    # Find a reasonable radius.
    dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)  # np.sort returns a copy, so keep the sorted result
    radius = dist_array[15]

    # Test kneighbors_graph
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.kneighbors_graph(
            X, 3, metric=metric, mode='connectivity',
            include_self=True).toarray()
        nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
        assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())

    # Test radius_neighbors_graph
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.radius_neighbors_graph(
            X, radius, metric=metric, mode='connectivity',
            include_self=True).toarray()
        nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
        assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)

    # Raise an error when the wrong parameters are supplied.
    X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
                  metric='euclidean')
    X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
                  radius, metric='euclidean')
Example 10: test_k_and_radius_neighbors_train_is_not_query

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when the query data is not the training data
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        X = [[0], [1]]
        nn.fit(X)
        test_data = [[2], [1]]

        # Test neighbors.
        dist, ind = nn.kneighbors(test_data)
        assert_array_equal(dist, [[1], [0]])
        assert_array_equal(ind, [[1], [1]])
        dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
        check_object_arrays(dist, [[1], [1, 0]])
        check_object_arrays(ind, [[1], [0, 1]])

        # Test the graph variants.
        assert_array_equal(
            nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
        assert_array_equal(
            nn.kneighbors_graph([[2], [1]], mode='distance').A,
            np.array([[0., 1.], [0., 0.]]))
        rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
        assert_array_equal(rng.A, [[0, 1], [1, 1]])
Example 11: test_k_and_radius_neighbors_X_None

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when the query is None
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        X = [[0], [1]]
        nn.fit(X)

        dist, ind = nn.kneighbors()
        assert_array_equal(dist, [[1], [1]])
        assert_array_equal(ind, [[1], [0]])
        dist, ind = nn.radius_neighbors(None, radius=1.5)
        check_object_arrays(dist, [[1], [1]])
        check_object_arrays(ind, [[1], [0]])

        # Test the graph variants.
        rng = nn.radius_neighbors_graph(None, radius=1.5)
        kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])

        X = [[0, 1], [0, 1], [1, 1]]
        nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
        nn.fit(X)
        assert_array_equal(
            nn.kneighbors_graph().A,
            np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0.]]))
Example 12: test_include_self_neighbors_graph

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def test_include_self_neighbors_graph():
    # Test the include_self parameter in neighbors_graph
    X = [[2, 3], [4, 5]]
    kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
    kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
    assert_array_equal(kng, [[1., 0.], [0., 1.]])
    assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])

    rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
    rng_not_self = neighbors.radius_neighbors_graph(
        X, 5.0, include_self=False).A
    assert_array_equal(rng, [[1., 1.], [1., 1.]])
    assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
Example 13: test_knn_forcing_backend

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def test_knn_forcing_backend(backend, algorithm):
    # Non-regression test which ensures the knn methods work properly
    # even when forcing the global joblib backend.
    with parallel_backend(backend):
        X, y = datasets.make_classification(n_samples=30, n_features=5,
                                            n_redundant=0, random_state=0)
        X_train, X_test, y_train, y_test = train_test_split(X, y)
        clf = neighbors.KNeighborsClassifier(n_neighbors=3,
                                             algorithm=algorithm,
                                             n_jobs=3)
        clf.fit(X_train, y_train)
        clf.predict(X_test)
        clf.kneighbors(X_test)
        clf.kneighbors_graph(X_test, mode='distance').toarray()
Example 14: hierarchy

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def hierarchy(feat, n_clusters=2, knn=30):
    from sklearn import cluster
    from sklearn.neighbors import kneighbors_graph

    # connectivity constraint: only merge samples that are k-NN neighbours
    knn_graph = kneighbors_graph(feat, knn, include_self=False)
    hierarchy = cluster.AgglomerativeClustering(n_clusters=n_clusters,
                                                connectivity=knn_graph,
                                                linkage='ward').fit(feat)
    return hierarchy.labels_
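A usage sketch on synthetic data (the blob dataset and parameter values are made up for illustration):

import numpy as np
from sklearn.datasets import make_blobs

feat, _ = make_blobs(n_samples=200, centers=3, random_state=0)
labels = hierarchy(feat, n_clusters=3, knn=30)
print(np.bincount(labels))   # three groups of roughly equal size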
Example 15: _calc_scores

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import kneighbors_graph [as alias]
def _calc_scores(self, x):
    # Build the k-NN connectivity graph (self.p neighbours per sample)
    graph = kneighbors_graph(
        x,
        n_neighbors=self.p,
    )

    # Construct the heat (RBF) weight matrix over the graph edges
    w = np.zeros([x.shape[0], x.shape[0]])
    rows, cols = graph.nonzero()
    for i, j in zip(rows, cols):
        w[i, j] = math.exp(-np.linalg.norm(x[i] - x[j])**2 / self.sigma)

    # Compute the degree and Laplacian matrices
    degree_vector = np.sum(w, 1)
    degree = np.diag(degree_vector)
    laplacian = degree - w

    # Solve the generalized eigenproblem L v = lambda D v
    values, vectors = eigh(laplacian, degree)
    smallest = vectors[:, 0:self.clusters].T

    # Find regression coefficients for each cluster
    coefs = []
    for i in range(self.clusters):
        this_coefs = self._create_regressor().fit(x, smallest[i]).coef_
        coefs.append(this_coefs)
    coefs = np.array(coefs)

    # Compute the MCFS scores as the maximum coefficient across clusters
    scores = np.max(coefs, 0)
    return scores
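The method above belongs to a class that supplies self.p, self.sigma, self.clusters and self._create_regressor; a self-contained sketch of the same MCFS-style pipeline, using Lasso as a stand-in regressor (all parameter values are illustrative assumptions):

import math
import numpy as np
from scipy.linalg import eigh
from sklearn.linear_model import Lasso
from sklearn.neighbors import kneighbors_graph

def mcfs_scores(x, n_neighbors=5, sigma=1.0, n_clusters=2, alpha=0.01):
    # heat-kernel weights on the edges of the k-NN graph
    graph = kneighbors_graph(x, n_neighbors=n_neighbors)
    w = np.zeros((x.shape[0], x.shape[0]))
    for i, j in zip(*graph.nonzero()):
        w[i, j] = math.exp(-np.linalg.norm(x[i] - x[j]) ** 2 / sigma)
    degree = np.diag(w.sum(axis=1))
    laplacian = degree - w
    # the smallest generalized eigenvectors of (L, D) act as soft cluster indicators
    _, vectors = eigh(laplacian, degree)
    smallest = vectors[:, :n_clusters].T
    # sparse regression from the features to each eigenvector
    coefs = np.array([Lasso(alpha=alpha).fit(x, y).coef_ for y in smallest])
    return np.max(coefs, axis=0)

x = np.random.RandomState(0).rand(30, 8)
print(mcfs_scores(x))   # one score per feature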