

Python cluster.SpectralClustering Method Code Examples

This article collects typical usage examples of sklearn.cluster.SpectralClustering in Python. If you are unsure what cluster.SpectralClustering does, how to call it, or want to see it used in real code, the curated examples below may help. You can also browse further usage examples for the containing module, sklearn.cluster.


The following presents 15 code examples of cluster.SpectralClustering, sorted by popularity by default.
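
Before the examples, here is a minimal, self-contained usage sketch. It is not taken from any of the projects below; the toy blob data and the parameter values (n_clusters=2, affinity='rbf', assign_labels='kmeans', random_state=0) are illustrative assumptions chosen to show the basic fit_predict workflow on raw feature vectors.

from sklearn.cluster import SpectralClustering
from sklearn.datasets import make_blobs

# Two well-separated toy blobs (illustrative data, not from the examples below).
X, _ = make_blobs(n_samples=100, centers=[[0, 0], [5, 5]],
                  cluster_std=0.5, random_state=0)

# Build an RBF affinity graph over the samples (the default affinity), compute
# its spectral embedding, and assign the points to 2 clusters with k-means.
model = SpectralClustering(n_clusters=2, affinity='rbf',
                           assign_labels='kmeans', random_state=0)
labels = model.fit_predict(X)
print(labels)

Note that many of the examples below instead pass affinity='precomputed' and hand the estimator a precomputed similarity matrix rather than raw features.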

Example 1: post_proC

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
# This example also uses: numpy as np, scipy.sparse.linalg.svds, sklearn.preprocessing.normalize
def post_proC(C, K, d, alpha):
    # C: coefficient matrix, K: number of clusters, d: dimension of each subspace
    C = 0.5 * (C + C.T)                 # symmetrize the coefficient matrix
    r = min(d * K + 1, C.shape[0] - 1)  # rank of the truncated SVD
    U, S, _ = svds(C, r, v0=np.ones(C.shape[0]))
    U = U[:, ::-1]                      # reorder singular vectors/values to descending
    S = np.sqrt(S[::-1])
    S = np.diag(S)
    U = U.dot(S)
    U = normalize(U, norm='l2', axis=1)
    Z = U.dot(U.T)
    Z = Z * (Z > 0)                     # keep only non-negative similarities
    L = np.abs(Z ** alpha)
    L = L / L.max()
    L = 0.5 * (L + L.T)                 # symmetrize the affinity matrix
    spectral = cluster.SpectralClustering(n_clusters=K, eigen_solver='arpack',
                                          affinity='precomputed',
                                          assign_labels='discretize', random_state=66)
    grp = spectral.fit_predict(L) + 1   # fit_predict refits, so a separate fit() call is not needed
    return grp, L
Developer ID: huybery, Project: MvDSCN, Lines of code: 21, Source file: metric.py

Example 2: test_spectral_clustering

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def test_spectral_clustering(eigen_solver, assign_labels):
    S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])

    for mat in (S, sparse.csr_matrix(S)):
        model = SpectralClustering(random_state=0, n_clusters=2,
                                   affinity='precomputed',
                                   eigen_solver=eigen_solver,
                                   assign_labels=assign_labels
                                   ).fit(mat)
        labels = model.labels_
        if labels[0] == 0:
            labels = 1 - labels

        assert adjusted_rand_score(labels, [1, 1, 1, 0, 0, 0, 0]) == 1

        model_copy = pickle.loads(pickle.dumps(model))
        assert model_copy.n_clusters == model.n_clusters
        assert model_copy.eigen_solver == model.eigen_solver
        assert_array_equal(model_copy.labels_, model.labels_) 
Developer ID: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 27, Source file: test_spectral.py

Example 3: spectral_clustering

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def spectral_clustering(n_clusters, samples, size=False):

    """
    Run spectral clustering on an adjacency (affinity) matrix.

    Parameters:
    - - - - -
    n_clusters : int
        number of clusters to generate
    samples : array
        adjacency matrix of surface or region
    """

    # Run Spectral Clustering
    spectral = cluster.SpectralClustering(
        n_clusters=n_clusters, affinity='precomputed')
    spectral.fit(samples)

    labels = spectral.labels_.copy()
    labels = labels.astype(np.int32)+1

    return labels 
Developer ID: miykael, Project: parcellation_fragmenter, Lines of code: 24, Source file: clusterings.py

Example 4: test_objectmapper

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def test_objectmapper(self):
    df = pdml.ModelFrame([])
    self.assertIs(df.cluster.AffinityPropagation, cluster.AffinityPropagation)
    self.assertIs(df.cluster.AgglomerativeClustering, cluster.AgglomerativeClustering)
    self.assertIs(df.cluster.Birch, cluster.Birch)
    self.assertIs(df.cluster.DBSCAN, cluster.DBSCAN)
    self.assertIs(df.cluster.FeatureAgglomeration, cluster.FeatureAgglomeration)
    self.assertIs(df.cluster.KMeans, cluster.KMeans)
    self.assertIs(df.cluster.MiniBatchKMeans, cluster.MiniBatchKMeans)
    self.assertIs(df.cluster.MeanShift, cluster.MeanShift)
    self.assertIs(df.cluster.SpectralClustering, cluster.SpectralClustering)

    self.assertIs(df.cluster.bicluster.SpectralBiclustering,
                  cluster.bicluster.SpectralBiclustering)
    self.assertIs(df.cluster.bicluster.SpectralCoclustering,
                  cluster.bicluster.SpectralCoclustering)
Developer ID: pandas-ml, Project: pandas-ml, Lines of code: 18, Source file: test_cluster.py

Example 5: computeSourceNodes

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def computeSourceNodes(A, C):
    """
    computeSourceNodes: compute source nodes for the source localization problem
    
    Input:
        A (np.array): adjacency matrix of shape N x N
        C (int): number of classes
        
    Output:
        sourceNodes (list): contains the indices of the C source nodes
        
    Uses the adjacency matrix to compute C communities by means of spectral 
    clustering, and then selects the node with largest degree within each 
    community
    """
    sourceNodes = []
    degree = np.sum(A, axis=0)  # degree of each node
    # Compute communities
    communityClusters = SpectralClustering(n_clusters = C,
                                           affinity = 'precomputed',
                                           assign_labels = 'discretize')
    communityClusters = communityClusters.fit(A)
    communityLabels = communityClusters.labels_
    # For each community
    for c in range(C):
        communityNodes = np.nonzero(communityLabels == c)[0]
        degreeSorted = np.argsort(degree[communityNodes])
        sourceNodes = sourceNodes + [communityNodes[degreeSorted[-1]]]
    
    return sourceNodes 
Developer ID: alelab-upenn, Project: graph-neural-networks, Lines of code: 32, Source file: graphTools.py

Example 6: test_spectral_unknown_mode

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def test_spectral_unknown_mode():
    # Test that SpectralClustering fails with an unknown mode set.
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>") 
Developer ID: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 16, Source file: test_spectral.py

Example 7: test_spectral_unknown_assign_labels

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def test_spectral_unknown_assign_labels():
    # Test that SpectralClustering fails with an unknown assign_labels set.
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
                  random_state=0, assign_labels="<unknown>") 
Developer ID: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 16, Source file: test_spectral.py

Example 8: test_spectral_clustering_sparse

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def test_spectral_clustering_sparse():
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)

    S = rbf_kernel(X, gamma=1)
    S = np.maximum(S - 1e-4, 0)
    S = sparse.coo_matrix(S)

    labels = SpectralClustering(random_state=0, n_clusters=2,
                                affinity='precomputed').fit(S).labels_
    assert adjusted_rand_score(y, labels) == 1 
Developer ID: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 13, Source file: test_spectral.py

Example 9: spectral

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def spectral(feat, n_clusters=2):
    spectral = cluster.SpectralClustering(n_clusters=n_clusters,
                                          assign_labels="discretize",
                                          affinity="nearest_neighbors",
                                          random_state=0).fit(feat)
    return spectral.labels_ 
Developer ID: XiaohangZhan, Project: cdp, Lines of code: 8, Source file: baseline_clustering.py

Example 10: performClustering

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def performClustering(self):
    print('start clustering...')
    KET = self.KET
    # default clustering model
    dCK, dBS = self.getClusteringPars()
    # pdb.set_trace()
    AC = []
    gc.collect()
    for i in range(len(KET)):
        print("clustering for time: " + str(KET[i]))
        ti = KET[i]
        CT = self.dET[ti]
        CKT = dCK[ti]
        BST = dBS[ti]

        if CKT > 1:
            if self.largeType == '1' or self.largeType == 'True':
                X = copy.deepcopy(self.affMatrix[ti])
                SC = KMeans(n_clusters=CKT, random_state=BST)
            else:
                X = copy.deepcopy(self.affMatrix[ti])
                SC = SpectralClustering(n_clusters=CKT, random_state=BST)

            SC.fit(X)
            Y = SC.labels_

            for j in range(len(CT)):
                CT[j].Label = Y[j]
            CC = [Cluster([item for item in CT if item.Label == j], ti, str(ti) + '_' + str(j)) for j in range(CKT)]
            AC += CC
        else:
            for j in range(len(CT)):
                CT[j].Label = 0
            CC = [Cluster([item for item in CT if item.Label == 0], ti, str(ti) + '_' + str(0))]
            AC += CC
    return AC

# cluster 
Developer ID: phoenixding, Project: scdiff, Lines of code: 41, Source file: scdiff.py

Example 11: transform_rays_model_cdf_spectral

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def transform_rays_model_cdf_spectral(list_rays, nb_components=5):
    """ compute the mixture model and transform it into cumulative distribution

    :param list(list(int)) list_rays: list ray features (distances)
    :param int nb_components: number components in mixture model
    :return tuple(any,list(list(int))):  mixture model, list of stat/param of models

    >>> np.random.seed(0)
    >>> list_rays = [[9, 4, 9], [4, 9, 7], [9, 7, 11], [10, 8, 10],
    ...              [9, 11, 8], [4, 8, 5], [8, 10, 6], [9, 7, 11]]
    >>> mm, cdist = transform_rays_model_cdf_spectral(list_rays)
    >>> np.round(cdist, 1).tolist()  # doctest: +NORMALIZE_WHITESPACE
    [[1.0, 1.0, 1.0, 1.0, 1.0, 0.9, 0.8, 0.6, 0.5, 0.2, 0.0],
     [1.0, 1.0, 1.0, 1.0, 1.0, 0.9, 0.9, 0.7, 0.5, 0.2, 0.0],
     [1.0, 1.0, 1.0, 1.0, 1.0, 0.9, 0.8, 0.7, 0.5, 0.3, 0.0]]
    """
    rays = np.array(list_rays)
    sc = cluster.SpectralClustering(nb_components)
    sc.fit(rays)
    logging.debug('SpectralClustering found %i components with counts: %r',
                  len(np.unique(sc.labels_)), np.bincount(sc.labels_))

    labels = sc.labels_
    means = np.zeros((len(np.unique(labels)), rays.shape[1]))
    stds = np.zeros((len(means), rays.shape[1]))
    for i, lb in enumerate(np.unique(labels)):
        means[i, :] = np.mean(np.asarray(list_rays)[labels == lb], axis=0)
        means[i, :] = ndimage.filters.gaussian_filter1d(means[i, :], 1)
        stds[i, :] = np.std(np.asarray(list_rays)[labels == lb], axis=0)
    stds += 1
    weights = np.bincount(sc.labels_) / float(len(sc.labels_))

    # compute the fairest mean + sigma over all components and ray angles
    max_dist = np.max([[m[i] + c[i] for i in range(len(m))]
                       for m, c in zip(means, stds)])

    cdist = compute_cumulative_distrib(means, stds, weights, max_dist)
    return sc, cdist.tolist() 
Developer ID: Borda, Project: pyImSegm, Lines of code: 40, Source file: region_growing.py

Example 12: __init__

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def __init__(self, options):
    self.handle_options(options)

    out_params = convert_params(
        options.get('params', {}),
        floats=['gamma'],
        strs=['affinity'],
        ints=['k', 'random_state'],
        aliases={'k': 'n_clusters'},
    )

    self.estimator = _SpectralClustering(**out_params)
    self.scaler = StandardScaler()
Developer ID: nccgroup, Project: Splunking-Crime, Lines of code: 15, Source file: SpectralClustering.py

Example 13: cluster

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def cluster(points, algorithm="DBSCAN"):  # pass the algorithm name as a string so the checks below match
    print("Running {}...".format(algorithm))
    if algorithm == "KMeans":
        # not good at finding clusters if close together
        labels = KMeans(n_clusters=2, random_state=0, n_jobs=-1).fit_predict(points)
    elif algorithm == "DBSCAN":
        # no fixed number of labels; slow with high eps
        labels = DBSCAN(eps=3.0, n_jobs=-1).fit_predict(points)
    # labels = SpectralClustering(n_clusters=2, n_jobs=-1).fit_predict(points)  # slow (> 1min)
    # labels = AgglomerativeClustering(n_clusters=2).fit_predict(points)  # fast
    points_start, points_end = select_two_biggest_clusters(labels, points)
    return points_start, points_end 
Developer ID: MIC-DKFZ, Project: TractSeg, Lines of code: 14, Source file: create_endpoints_mask_with_clustering.py

Example 14: compare

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def compare(data, n_groups, output_fol):
    # plot_clusters(data.astype(np.float), scipy.cluster.vq.kmeans, 'scipy.cluster.vq.kmeans', output_fol, (n_groups,), {})
    plot_clusters(data, cluster.KMeans, 'KMeans', output_fol, (), {'n_clusters': n_groups})
    for ct in ['spherical', 'tied', 'diag', 'full']:
        plot_clusters(data, mixture.GaussianMixture, 'GMM_{}'.format(ct), output_fol, (),
                      {'n_components': n_groups, 'covariance_type': ct})
    plot_clusters(data, cluster.AffinityPropagation, 'AffinityPropagation', output_fol, (), {'preference': -5.0, 'damping': 0.95})
    plot_clusters(data, cluster.MeanShift, 'MeanShift', output_fol, (0.175,), {'cluster_all': False})
    plot_clusters(data, cluster.SpectralClustering, 'SpectralClustering', output_fol, (), {'n_clusters': n_groups})
    plot_clusters(data, cluster.AgglomerativeClustering, 'AgglomerativeClustering', output_fol, (), {'n_clusters': n_groups, 'linkage': 'ward'})
    plot_clusters(data, cluster.DBSCAN, 'DBSCAN', output_fol, (), {'eps': 0.025})
    # plot_clusters(data, hdbscan.HDBSCAN, 'HDBSCAN', output_fol, (), {'min_cluster_size': 15}) 
Developer ID: pelednoam, Project: mmvt, Lines of code: 14, Source file: compare_clustering_algs.py

Example 15: spectral

# Required import: from sklearn import cluster [as alias]
# Or: from sklearn.cluster import SpectralClustering [as alias]
def spectral(feat, n_clusters, **kwargs):
    spectral = cluster.SpectralClustering(n_clusters=n_clusters,
                                          assign_labels="discretize",
                                          affinity="nearest_neighbors",
                                          random_state=0).fit(feat)
    return spectral.labels_ 
Developer ID: yl-1993, Project: learn-to-cluster, Lines of code: 8, Source file: sklearn_cluster.py


Note: The sklearn.cluster.SpectralClustering method examples in this article were compiled by 純淨天空 from open-source code and documentation hosting platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce this compilation without permission.