This article collects typical usage examples of the Python method sklearn.cluster.SpectralClustering.fit_predict. If you are unsure exactly how SpectralClustering.fit_predict is used, or what it looks like in real code, the hand-picked examples below may help. You can also read further about the containing class, sklearn.cluster.SpectralClustering.
The following presents 15 code examples of SpectralClustering.fit_predict, sorted by popularity by default.
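Before the examples, here is a minimal, self-contained sketch (not taken from the projects below) of the basic fit_predict call with a precomputed affinity matrix; the toy data and gamma value are placeholders:
import numpy as np
from sklearn.cluster import SpectralClustering
from sklearn.metrics.pairwise import rbf_kernel

X = np.random.RandomState(0).rand(20, 2)    # toy 2-D data
A = rbf_kernel(X, gamma=1.0)                # square, non-negative affinity matrix
labels = SpectralClustering(n_clusters=2, affinity='precomputed').fit_predict(A)
print(labels)                               # one cluster label per row of X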
Example 1: spectral_clustering2
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def spectral_clustering2(similarity, concepts=2, euclid=False):
    if euclid:
        # Treat the input as feature vectors and build a nearest-neighbors graph.
        model = SpectralClustering(n_clusters=concepts, affinity='nearest_neighbors')
        return model.fit_predict(similarity)
    else:
        # Use the matrix directly as a precomputed affinity; it must be non-negative.
        model = SpectralClustering(n_clusters=concepts, affinity='precomputed')
        similarity[similarity < 0] = 0
        return model.fit_predict(similarity)
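A hypothetical call of the function above, assuming the input is a cosine-similarity matrix over document vectors (the data below is a synthetic placeholder, not from the original project):
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

docs = np.random.RandomState(0).rand(40, 8)     # stand-in document vectors
sim = cosine_similarity(docs)                   # square similarity matrix
labels = spectral_clustering2(sim, concepts=3)  # precomputed-affinity branch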
Example 2: run
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def run(self, k):
    if self.data_is_kernel:
        # self.X is already a kernel/affinity matrix.
        clf = SpectralClustering(n_clusters=k, gamma=self.gammav, affinity='precomputed')
        self.allocation = clf.fit_predict(self.X)
        self.kernel = self.X
    else:
        # self.X holds raw features; the default RBF affinity is built internally.
        clf = SpectralClustering(n_clusters=k, gamma=self.gammav)
        self.allocation = clf.fit_predict(self.X)
        self.kernel = clf.affinity_matrix_
    return self.allocation
Example 3: compute_centroid_set
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def compute_centroid_set(self, **kwargs):
    INPUT_ITR = subset_iterator(X=self.docv, m=self.subcluster_m, repeats=self.subcluster_repeats)
    kn = self.subcluster_kn
    clf = SpectralClustering(n_clusters=kn, affinity="precomputed")
    C = []
    for X in INPUT_ITR:
        # Remove any rows that are zero vectors
        bad_row_idx = (X ** 2).sum(axis=1) == 0
        X = X[~bad_row_idx]
        A = cosine_affinity(X)
        labels = clf.fit_predict(A)
        # Compute a unit-normalized centroid for each cluster
        (N, dim) = X.shape
        centroids = np.zeros((kn, dim))
        for i in range(kn):
            idx = labels == i
            mu = X[idx].mean(axis=0)
            mu /= np.linalg.norm(mu)
            centroids[i] = mu
        C.append(centroids)
    return np.vstack(C)
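The snippet relies on a cosine_affinity helper (and a subset_iterator) that are not shown here. A minimal sketch of what the helper might look like, assuming it should return a non-negative pairwise cosine-similarity matrix suitable for affinity="precomputed":
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

def cosine_affinity(X):
    # Pairwise cosine similarity, clipped to [0, 1] so every entry is a
    # valid non-negative affinity for SpectralClustering(affinity="precomputed").
    return np.clip(cosine_similarity(X), 0.0, 1.0)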
Example 4: create_word2vec_cluster
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def create_word2vec_cluster(word2vec_model):
    word_vectors = word2vec_model.syn0
    # Integer division: roughly one cluster per 1000 word vectors (n_clusters must be an int).
    num_clusters = word_vectors.shape[0] // 1000
    spectral_cluster_model = SpectralClustering(n_clusters=num_clusters)
    idx = spectral_cluster_model.fit_predict(word_vectors)
    pickle.dump(spectral_cluster_model, open(r"C:\Ofir\Tau\Machine Learning\Project\project\k_means_model.pkl", "wb"))
    return spectral_cluster_model
Example 5: spectral_clustering
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def spectral_clustering(matrix, N):
    spectral = SpectralClustering(n_clusters=N)
    clusters = spectral.fit_predict(matrix)
    # Group the row indices of `matrix` by their assigned cluster label.
    res = [[] for _ in range(N)]
    for i, c in enumerate(clusters):
        res[c].append(i)
    return res
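A quick, hypothetical call with synthetic data, showing that the return value groups row indices by cluster:
import numpy as np

X = np.random.RandomState(0).rand(30, 4)      # 30 toy samples, 4 features
groups = spectral_clustering(X, N=3)
print([len(g) for g in groups])               # sizes of the three index groups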
Example 6: scikit_pca
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def scikit_pca(model, rel_wds, plot_lims, title, cluster="kmeans"):
    """
    Given a word2vec model and a clustering method ("kmeans" or "spectral"),
    plot all word vectors in the model.
    """
    X, keys = make_data_matrix(model)
    for i, key in enumerate(keys):
        X[i, :] = model[key]
    if cluster == "kmeans":
        k_means = KMeans(n_clusters=8)
        labels = k_means.fit_predict(X)
    elif cluster == "spectral":
        sp_clust = SpectralClustering()
        labels = sp_clust.fit_predict(X)
    # Project to two principal components for the scatter plot.
    X_std = StandardScaler().fit_transform(X)
    sklearn_pca = PCA(n_components=2)
    X_transf = sklearn_pca.fit_transform(X_std)
    scatter_plot(X_transf[:, 0], X_transf[:, 1], rel_wds, labels, title, keys, plot_lims)
    return sklearn_pca.explained_variance_ratio_
Example 7: spectral_clustering
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def spectral_clustering(k, X, G, W=None, run_times=5):
    if W is None:
        W = np.eye(len(X))
    # Symmetrically weight the Gram matrix: Gtilde = W^(1/2) G W^(1/2).
    W2 = np.sqrt(W)
    Gtilde = W2.dot(G.dot(W2))
    sc = SpectralClustering(k, affinity='precomputed', n_init=run_times)
    zh = sc.fit_predict(Gtilde)
    return zh
Example 8: get_coregulatory_states
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def get_coregulatory_states(corr_matrices, similarity_matrix, n_clusters):
    spectral = SpectralClustering(n_clusters=n_clusters, affinity='precomputed')
    labels = spectral.fit_predict(similarity_matrix)
    coreg_states = {}
    for ci in np.unique(labels):
        # Average the correlation matrices assigned to cluster ci.
        coreg_states[ci] = corr_matrices[labels == ci, :, :].mean(axis=0)
    return coreg_states, labels
Example 9: dist_spectral
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def dist_spectral(x, y):
    # `dataset` is assumed to be defined in the enclosing scope.
    plot = []
    for s in range(dataset.shape[0]):
        plot.append(np.array([x[s], y[s]]))
    plot = np.array(plot)
    spectral = SpectralClustering(n_clusters=3, eigen_solver='arpack', affinity="nearest_neighbors")
    clusters = spectral.fit_predict(plot)
    return clusters
Example 10: spectral
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def spectral(k, X, G, run_times=10):
    """Spectral clustering from the sklearn library.

    run_times is the number of times the algorithm will be run with different
    initializations.
    """
    sc = SpectralClustering(k, affinity='precomputed', n_init=run_times)
    zh = sc.fit_predict(G)
    return zh
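Since the affinity is precomputed here, the caller has to supply G itself. One plausible construction, assuming an RBF kernel over the rows of X (toy data and a hypothetical gamma):
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

X = np.random.RandomState(0).rand(50, 3)      # toy data
G = rbf_kernel(X, gamma=1.0)                  # dense, non-negative similarity matrix
labels = spectral(k=4, X=X, G=G)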
Example 11: spectral_clustering
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def spectral_clustering(S, X, config):
    '''
    Computes spectral clustering from an input similarity matrix.
    Returns the labels associated with the clustering.
    '''
    from sklearn.cluster import SpectralClustering
    nk = int(config["n_clusters"])
    # Note: S is not used below; a cosine affinity is computed from the raw data X instead.
    clf = SpectralClustering(affinity='cosine', n_clusters=nk)
    return clf.fit_predict(X)
Example 12: spectral
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def spectral(k, X, G, z, run_times=10):
    """Spectral clustering from the sklearn library.

    run_times is the number of times the algorithm will be run with different
    initializations.
    """
    sc = SpectralClustering(k, affinity='precomputed', n_init=run_times)
    zh = sc.fit_predict(G)
    # Score the predicted labels zh against the ground-truth labels z.
    a = metric.accuracy(z, zh)
    v = metric.variation_information(z, zh)
    return a, v
Example 13: cluster_faces_CNN
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def cluster_faces_CNN(name='[email protected]', img_list='faces_list.txt'):
    # Load the precomputed face-similarity matrix (model_name is assumed to be a module-level global).
    root = '/Users/wangyufei/Documents/Study/intern_adobe/face_recognition_CNN/' + name + '/'
    f = open(root + model_name + 'similarity_matrix.cPickle', 'r')
    affinity_matrix = cPickle.load(f)
    f.close()
    f = SpectralClustering(affinity='precomputed',
                           n_clusters=min(8, affinity_matrix.shape[0] - 1),
                           eigen_solver='arpack',
                           n_neighbors=min(5, affinity_matrix.shape[0]))
    a = f.fit_predict(affinity_matrix)
    # Group face indices by cluster label.
    groups = {}
    temp = zip(a, xrange(len(a)))
    for i in temp:
        if i[0] not in groups:
            groups[i[0]] = [i[1]]
        else:
            groups[i[0]].append(i[1])
    # Keep only clusters whose mean pairwise similarity is above 0.5.
    unique_person_id = []
    for kk in groups:
        min_similarity = np.Inf
        max_similarity = -np.Inf
        mean_similarity = 0
        this_group_ids = groups[kk]
        for j in xrange(len(this_group_ids)):
            for i in xrange(j + 1, len(this_group_ids)):
                temp = affinity_matrix[this_group_ids[i], this_group_ids[j]]
                if temp < min_similarity:
                    min_similarity = temp
                if temp > max_similarity:
                    max_similarity = temp
                mean_similarity += temp
        mean_similarity /= max(1, len(this_group_ids) * (len(this_group_ids) - 1) / 2)
        print len(this_group_ids), mean_similarity, max_similarity, min_similarity
        if mean_similarity > 0.5:
            unique_person_id.append(kk)
    # Rank the retained clusters by size.
    important_person = []
    for i in unique_person_id:
        important_person.append([i, len(groups[i])])
    important_person.sort(key=lambda x: x[1], reverse=True)
    # Map cluster labels back to image file names and build the report.
    in_path = root + img_list
    imgs_list = []
    with open(in_path, 'r') as data:
        for line in data:
            line = line[:-1]
            imgs_list.append(line.split('/')[-1])
    temp = zip(a, imgs_list)
    face_groups = {}
    for i in temp:
        if i[0] not in face_groups:
            face_groups[i[0]] = [i[1]]
        else:
            face_groups[i[0]].append(i[1])
    create_face_group_html_CNN(name, face_groups, important_person)
Example 14: spectral_clustering
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def spectral_clustering(crime_rows, column_names, num_clusters, affinity='rbf', n_neighbors=0,
                        assign_labels='kmeans'):
    """
    n_clusters : integer, optional
        The dimension of the projection subspace.
    affinity : string, array-like or callable, default 'rbf'
        If a string, this may be one of 'nearest_neighbors', 'precomputed', 'rbf',
        or one of the kernels supported by sklearn.metrics.pairwise_kernels.
        Only kernels that produce similarity scores
        (non-negative values that increase with similarity) should be used.
        This property is not checked by the clustering algorithm.
    gamma : float
        Scaling factor of the RBF, polynomial, exponential chi^2 and sigmoid affinity kernels.
        Ignored for affinity='nearest_neighbors'.
    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.
    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels. Ignored by other kernels.
    n_neighbors : integer
        Number of neighbors to use when constructing the affinity matrix
        using the nearest neighbors method. Ignored for affinity='rbf'.
    n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
        centroid seeds. The final result will be the best output of n_init
        consecutive runs in terms of inertia.
    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding space.
        There are two ways to assign labels after the laplacian embedding:
        k-means can be applied and is a popular choice,
        but it can also be sensitive to initialization.
        Discretization is another approach which is less sensitive to
        random initialization.
    kernel_params : dictionary of string to any, optional
        Parameters (keyword arguments) and values for a kernel passed
        as a callable object. Ignored by other kernels.
    """
    # Split each row into (x, y) coordinates and the remaining attributes.
    crime_xy = [crime[0:2] for crime in crime_rows]
    crime_info = [crime[2:] for crime in crime_rows]
    #crime_xy = [crime[1:] for crime in crime_rows]
    spectral_clustering = SpectralClustering(
        n_clusters=num_clusters,
        affinity=affinity,
        n_neighbors=n_neighbors,
        assign_labels=assign_labels)
    print("Running spectral clustering....")
    print("length crimexy")
    print(len(crime_xy))
    # Fit on a random sample of at most 3000 points to keep the affinity matrix manageable.
    spectral_clustering_labels = spectral_clustering.fit_predict(
        random_sampling(crime_xy, num_samples=3000))
    print("Formatting......")
    return _format_clustering(spectral_clustering_labels, crime_xy, crime_info,
                              column_names, num_clusters=num_clusters)
Example 15: predictSpectralClustering
# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit_predict [as alias]
def predictSpectralClustering(X, y, n=2, val='rbf'):
    # Note: the shuffled copies ranX/ranY are not used below; the first 600 rows of X and y are.
    ranX, ranY = shuffle(X, y, random_state=0)
    X = X[:600, ]
    y = y[:600, ]
    sc = SpectralClustering(n_clusters=n)
    results = sc.fit_predict(X)
    gini = compute_gini(results)
    if n == 2:
        # With two clusters, score against y under both possible label orderings.
        same = calculate_score(results, y)
        opp = calculate_score(results, y, True)
        return (results, max(same, opp), gini)
    else:
        return (results, 0, gini)