This article collects typical usage examples of the Python class sklearn.cluster.SpectralClustering. If you have been wondering what SpectralClustering is for and how to use it, the curated class examples below may help.
The following shows 15 code examples of the SpectralClustering class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.

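Before the numbered examples, here is a minimal, self-contained sketch of the typical SpectralClustering workflow; the two-moons toy data and all parameter values are illustrative assumptions, not taken from any example below.

# Minimal sketch of SpectralClustering usage; data and parameters are illustrative assumptions.
from sklearn.cluster import SpectralClustering
from sklearn.datasets import make_moons

X, _ = make_moons(n_samples=300, noise=0.05, random_state=0)
model = SpectralClustering(n_clusters=2, affinity="rbf", gamma=10.0, random_state=0)
labels = model.fit_predict(X)  # one integer cluster label per sample
print(labels[:10])
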
Example 1: spectral_clustering

def spectral_clustering(matrix, N):
    spectral = SpectralClustering(n_clusters=N)
    clusters = spectral.fit_predict(matrix)
    res = [[] for _ in range(N)]
    for i, c in enumerate(clusters):
        res[c].append(i)
    return res

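A possible way to call the helper in Example 1, assuming blob data generated with sklearn's make_blobs; the dataset and cluster count are illustrative, not from the original repository.

# Hypothetical usage of the spectral_clustering helper defined in Example 1.
from sklearn.cluster import SpectralClustering
from sklearn.datasets import make_blobs

data, _ = make_blobs(n_samples=60, centers=3, random_state=0)
groups = spectral_clustering(data, 3)
# groups is a list of 3 lists, each holding the row indices assigned to that cluster
print([len(g) for g in groups])
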
Example 2: fast_app_spe_cluster

def fast_app_spe_cluster(data, label, k, n_cluster):
    # k-means to get the representative (center) points
    start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
    k_means = KMeans(n_clusters=k)
    k_means.fit(data)
    y_centers = k_means.cluster_centers_
    # build the correspondence table: each sample -> index of its nearest center
    x_to_centers_table = list()
    m = len(data)
    for i in range(m):
        min_distance = np.inf
        min_index = None
        for j in range(k):
            i_j_dis = np.sum((data[i, :] - y_centers[j, :]) ** 2)
            if min_distance > i_j_dis:
                min_index = j
                min_distance = i_j_dis
        x_to_centers_table.append(min_index)
    # spectral clustering on the k center points only
    spe_cluster = SpectralClustering(n_clusters=n_cluster)
    spe_cluster.fit(y_centers)
    spe_label = spe_cluster.labels_
    # each original sample inherits the label of its nearest center
    x_label = list()
    for i in range(m):
        x_label.append(spe_label[x_to_centers_table[i]])
    spend_time = time.perf_counter() - start_time
    print("spend time is %f seconds" % spend_time)
    return x_label

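A sketch of how this approximate routine might be invoked, assuming the names used inside the function (time, np, KMeans, SpectralClustering) are already imported at module level; the toy data and the choice of 50 representatives are assumptions.

# Hypothetical usage: cluster 1,000 points via 50 k-means representative centers.
from sklearn.datasets import make_blobs

X, y = make_blobs(n_samples=1000, centers=4, random_state=0)
labels = fast_app_spe_cluster(X, y, k=50, n_cluster=4)
print(len(labels), set(labels))
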
Example 3: create_word2vec_cluster

def create_word2vec_cluster(word2vec_model):
    word_vectors = word2vec_model.syn0
    # integer division so n_clusters is an int (roughly one cluster per 1000 word vectors)
    num_clusters = word_vectors.shape[0] // 1000
    spectral_cluster_model = SpectralClustering(n_clusters=num_clusters)
    idx = spectral_cluster_model.fit_predict(word_vectors)
    pickle.dump(spectral_cluster_model, open(r"C:\Ofir\Tau\Machine Learning\Project\project\k_means_model.pkl", "wb"))
    return spectral_cluster_model

Example 4: compute_centroid_set

def compute_centroid_set(self, **kwargs):
    INPUT_ITR = subset_iterator(X=self.docv, m=self.subcluster_m, repeats=self.subcluster_repeats)
    kn = self.subcluster_kn
    clf = SpectralClustering(n_clusters=kn, affinity="precomputed")
    C = []
    for X in INPUT_ITR:
        # Remove any rows that have zero vectors
        bad_row_idx = (X ** 2).sum(axis=1) == 0
        X = X[~bad_row_idx]
        A = cosine_affinity(X)
        labels = clf.fit_predict(A)
        # Compute the centroids
        (N, dim) = X.shape
        centroids = np.zeros((kn, dim))
        for i in range(kn):
            idx = labels == i
            mu = X[idx].mean(axis=0)
            mu /= np.linalg.norm(mu)
            centroids[i] = mu
        C.append(centroids)
    return np.vstack(C)

Example 5: call_spectral

def call_spectral(num_cluster, mode_, data, update_flag):
    X = StandardScaler().fit_transform(data)
    spectral = SpectralClustering(n_clusters=num_cluster, eigen_solver='arpack',
                                  affinity='precomputed')
    # symmetrized k-nearest-neighbors graph used as the precomputed affinity
    connectivity = kneighbors_graph(X, n_neighbors=10)
    connectivity = 0.5 * (connectivity + connectivity.T)
    spectral.fit(connectivity)
    labels = spectral.labels_
    if update_flag:
        return labels
    label_dict = {}
    label_dict_count = 0
    for label in labels:
        label_dict[str(label_dict_count)] = float(label)
        label_dict_count = label_dict_count + 1
    print(label_dict)
    unique_dict = {}
    unique_dict_count = 0
    for uniq in np.unique(labels):
        print(uniq)
        unique_dict[str(unique_dict_count)] = float(uniq)
        unique_dict_count = unique_dict_count + 1
    print(unique_dict)
    return label_dict, unique_dict

Example 6: scikit_pca

def scikit_pca(model, rel_wds, plot_lims, title, cluster="kmeans"):
    """
    Given a word2vec model and a clustering method ("kmeans" or "spectral"),
    make a plot of all word vectors in the model.
    """
    X, keys = make_data_matrix(model)
    for i, key in enumerate(keys):
        X[i,] = model[key]
    if cluster == "kmeans":
        k_means = KMeans(n_clusters=8)
        labels = k_means.fit_predict(X)
    elif cluster == "spectral":
        sp_clust = SpectralClustering()
        labels = sp_clust.fit_predict(X)
    # PCA down to two components for plotting
    X_std = StandardScaler().fit_transform(X)
    sklearn_pca = PCA(n_components=2)
    X_transf = sklearn_pca.fit_transform(X_std)
    scatter_plot(X_transf[:, 0], X_transf[:, 1], rel_wds, labels, title, keys, plot_lims)
    return sklearn_pca.explained_variance_ratio_

Example 7: spectral_clustering

def spectral_clustering(G, graph_name, num_clusters):
    # TODO: find a way to determine the number of clusters automatically
    subgraphs = []
    write_directory = os.path.join(Constants.SPECTRAL_PATH, graph_name)
    if not os.path.exists(write_directory):
        os.makedirs(write_directory)
    nodeList = list(G.nodes())  # materialize so the node order can be indexed below
    matrix_data = nx.to_numpy_matrix(G, nodelist=nodeList)
    spectral = SpectralClustering(n_clusters=2,
                                  eigen_solver='arpack',
                                  affinity="rbf")
    spectral.fit(matrix_data)
    label = spectral.labels_
    clusters = {}
    for nodeIndex, nodeLabel in enumerate(label):
        if nodeLabel not in clusters:
            clusters[nodeLabel] = []
        clusters[nodeLabel].append(nodeList[nodeIndex])
    # countNodes could be used to check that every node ends up in some cluster
    for clusterIndex, subGraphNodes in enumerate(clusters.keys()):
        subgraph = G.subgraph(clusters[subGraphNodes])
        subgraphs.append(subgraph)
        nx.write_gexf(subgraph, os.path.join(write_directory, graph_name + str(clusterIndex) + "_I" + Constants.GEXF_FORMAT))
        # countNodes = countNodes + len(clusters[subGraphNodes])
    return subgraphs

Example 8: main

def main(cm_file, perm_file, steps, labels_file, limit_classes=None):
    """Run optimization and generate output."""
    # Load confusion matrix
    with open(cm_file) as f:
        cm = json.load(f)
    cm = np.array(cm)
    # Load labels
    if os.path.isfile(labels_file):
        with open(labels_file, "r") as f:
            labels = json.load(f)
    else:
        labels = list(range(len(cm)))
    n_clusters = 14  # hyperparameter
    spectral = SpectralClustering(n_clusters=n_clusters,
                                  eigen_solver='arpack',
                                  affinity="nearest_neighbors")
    spectral.fit(cm)
    if hasattr(spectral, 'labels_'):
        y_pred = spectral.labels_.astype(int)  # np.int was removed from recent NumPy
    else:
        y_pred = spectral.predict(cm)
    sscore = silhouette_score(cm, y_pred)
    print("silhouette_score={} with {} clusters"
          .format(sscore, n_clusters))
    grouping = [[] for _ in range(n_clusters)]
    for label, y in zip(labels, y_pred):
        grouping[y].append(label)
    for group in grouping:
        print("  {}: {}".format(len(group), group))

Example 9: spectral_clustering

def spectral_clustering(k, X, G, W=None, run_times=5):
    if W is None:
        W = np.eye(len(X))
    W2 = np.sqrt(W)
    Gtilde = W2.dot(G.dot(W2))
    sc = SpectralClustering(k, affinity='precomputed', n_init=run_times)
    zh = sc.fit_predict(Gtilde)
    return zh

Example 10: spectral_clustering2

def spectral_clustering2(similarity, concepts=2, euclid=False):
    if euclid:
        model = SpectralClustering(n_clusters=concepts, affinity='nearest_neighbors')
        return model.fit_predict(similarity)
    else:
        model = SpectralClustering(n_clusters=concepts, affinity='precomputed')
        # a precomputed affinity matrix must be non-negative
        similarity[similarity < 0] = 0
        return model.fit_predict(similarity)

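One way to supply this function with a precomputed similarity is a cosine-similarity matrix; the toy data below is an illustrative assumption.

# Hypothetical usage with a precomputed cosine-similarity matrix (euclid=False branch).
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.metrics.pairwise import cosine_similarity

X, _ = make_blobs(n_samples=100, centers=2, random_state=0)
S = cosine_similarity(X)  # square, symmetric similarity matrix; negatives get clipped inside
labels = spectral_clustering2(S, concepts=2)
print(np.bincount(labels))
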
Example 11: run

def run(self, features, number_of_clusters=2, restarts=10, delta=3.0):
    if number_of_clusters == 1:
        result = numpy.zeros(len(features), dtype=numpy.int32)
        return [result]
    # 'k' was the parameter name in very old scikit-learn releases; current versions use 'n_clusters'
    classifier = SpectralClustering(n_clusters=number_of_clusters, n_init=restarts)
    similarity = get_similarity(features, delta)
    classifier.fit(similarity)
    return [classifier.labels_]

Example 12: get_coregulatory_states

def get_coregulatory_states(corr_matrices, similarity_matrix, n_clusters):
    spectral = SpectralClustering(n_clusters=n_clusters, affinity='precomputed')
    labels = spectral.fit_predict(similarity_matrix)
    coreg_states = {}
    for ci in np.unique(labels):
        coreg_states[ci] = corr_matrices[labels == ci, :, :].mean(axis=0)
    return coreg_states, labels

Example 13: dist_spectral

def dist_spectral(x, y):
    plot = []
    for s in range(dataset.shape[0]):
        plot.append(np.array([x[s], y[s]]))
    plot = np.array(plot)
    spectral = SpectralClustering(n_clusters=3, eigen_solver='arpack', affinity="nearest_neighbors")
    clusters = spectral.fit_predict(plot)
    return clusters

Example 14: spectral

def spectral(k, X, G, run_times=10):
    """Spectral clustering from the sklearn library.

    run_times is the number of times the algorithm is run with different
    initializations (passed to SpectralClustering as n_init).
    """
    sc = SpectralClustering(k, affinity='precomputed', n_init=run_times)
    zh = sc.fit_predict(G)
    return zh

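A possible call, precomputing the affinity matrix G with an RBF kernel; the kernel choice and toy data are assumptions.

# Hypothetical usage: precompute a dense RBF affinity matrix and cluster it.
from sklearn.datasets import make_blobs
from sklearn.metrics.pairwise import rbf_kernel

X, _ = make_blobs(n_samples=150, centers=3, random_state=0)
G = rbf_kernel(X, gamma=0.5)  # affinity matrix of shape (150, 150)
zh = spectral(3, X, G, run_times=10)
print(zh[:10])
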
Example 15: spectral_clustering

def spectral_clustering(S, X, config):
    '''
    Computes spectral clustering with a cosine affinity built from the data
    matrix X (the similarity matrix S is not used here).
    Returns the labels associated with the clustering.
    '''
    from sklearn.cluster import SpectralClustering
    nk = int(config["n_clusters"])
    clf = SpectralClustering(affinity='cosine', n_clusters=nk)
    return clf.fit_predict(X)

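A possible invocation of the function in Example 15; since S is never read inside the body it can be passed as None, and the config dict with non-negative toy features is an assumption.

# Hypothetical usage: config supplies n_clusters; S is unused by the function body.
import numpy as np

rng = np.random.RandomState(0)
X = np.vstack([rng.rand(40, 5) + offset for offset in (0.0, 2.0, 4.0)])  # non-negative features
labels = spectral_clustering(None, X, {"n_clusters": 3})
print(np.bincount(labels))
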