

Python SpectralClustering.fit Method Code Examples

This article collects typical usage examples of the Python method sklearn.cluster.SpectralClustering.fit. If you are wondering what SpectralClustering.fit does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse further usage examples for the containing class, sklearn.cluster.SpectralClustering.


The following presents 15 code examples of the SpectralClustering.fit method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
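
Before the collected examples, here is a minimal, self-contained sketch of the basic call pattern. The toy blob data and the parameter values are illustrative assumptions for this page and are not taken from any of the projects below.

# Minimal sketch: fit SpectralClustering on toy two-blob data (illustrative parameters only).
from sklearn.cluster import SpectralClustering
from sklearn.datasets import make_blobs

X, y = make_blobs(n_samples=100, centers=2, cluster_std=0.5, random_state=0)

# fit() builds the affinity matrix (RBF kernel by default) and assigns one cluster label per sample.
sc = SpectralClustering(n_clusters=2, affinity="rbf", random_state=0)
sc.fit(X)
print(sc.labels_)  # one integer cluster index per row of X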

Example 1: spectral_clustering

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def spectral_clustering(G, graph_name, num_clusters):
    # TODO: find a way to determine the number of clusters automatically
    subgraphs = []
    write_directory = os.path.join(Constants.SPECTRAL_PATH,graph_name)
    if not os.path.exists(write_directory):
        os.makedirs(write_directory)
    nodeList = G.nodes()
    matrix_data = nx.to_numpy_matrix(G, nodelist = nodeList)
    spectral = SpectralClustering(n_clusters=2,
                                          eigen_solver='arpack',
                                          affinity="rbf")   
    spectral.fit(matrix_data)
    label = spectral.labels_
    clusters = {}
    
    for nodeIndex, nodeLabel in enumerate(label):
        if nodeLabel not in clusters:
            clusters[nodeLabel] = []
        clusters[nodeLabel].append(nodeList[nodeIndex])
        
    # countNodes (commented out below) can be used to check that every node is assigned to a cluster
   
    for clusterIndex, subGraphNodes in enumerate(clusters.keys()):
        subgraph = G.subgraph(clusters[subGraphNodes])
        subgraphs.append(subgraph)
        nx.write_gexf(subgraph, os.path.join(write_directory,graph_name+str(clusterIndex)+"_I"+Constants.GEXF_FORMAT))
        #countNodes = countNodes + len(clusters[subGraphNodes])
    return subgraphs
Developer: subincm, Project: hierarchical_nw_align, Lines: 30, Source: spectral_clustering.py

Example 2: call_spectral

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def call_spectral(num_cluster, mode_, data, update_flag):
    X = StandardScaler().fit_transform(data)
    spectral = SpectralClustering(n_clusters=num_cluster, eigen_solver='arpack', 
                                                        affinity='precomputed')
    connectivity = kneighbors_graph(X, n_neighbors=10)
    connectivity = 0.5 * (connectivity + connectivity.T)
    spectral.fit(connectivity)
    labels = spectral.labels_

    if update_flag:
        return labels


    label_dict = {}
    label_dict_count = 0
    for label in labels:
        label_dict[str(label_dict_count)] = float(label)
        label_dict_count += 1
    print(label_dict)

    unique_dict = {}
    unique_dict_count = 0
    for uniq in np.unique(labels):
        print(uniq)
        unique_dict[str(unique_dict_count)] = float(uniq)
        unique_dict_count += 1
    print(unique_dict)

    return label_dict, unique_dict
Developer: benaneesh, Project: cluster, Lines: 31, Source: algorithm_manager.py

Example 3: main

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def main(cm_file, perm_file, steps, labels_file, limit_classes=None):
    """Run optimization and generate output."""
    # Load confusion matrix
    with open(cm_file) as f:
        cm = json.load(f)
        cm = np.array(cm)

    # Load labels
    if os.path.isfile(labels_file):
        with open(labels_file, "r") as f:
            labels = json.load(f)
    else:
        labels = list(range(len(cm)))

    n_clusters = 14  # hyperparameter
    spectral = SpectralClustering(n_clusters=n_clusters,
                                  eigen_solver='arpack',
                                  affinity="nearest_neighbors")
    spectral.fit(cm)
    if hasattr(spectral, 'labels_'):
        y_pred = spectral.labels_.astype(np.int)
    else:
        y_pred = spectral.predict(cm)
    sscore = silhouette_score(cm, y_pred)
    print("silhouette_score={} with {} clusters"
          .format(sscore, n_clusters))
    grouping = [[] for _ in range(n_clusters)]
    for label, y in zip(labels, y_pred):
        grouping[y].append(label)
    for group in grouping:
        print("  {}: {}".format(len(group), group))
Developer: directorscut82, Project: msthesis-experiments, Lines: 33, Source: spektral_clust.py

Example 4: fast_app_spe_cluster

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def fast_app_spe_cluster(data, label, k, n_cluster):
    # k-means picks the representative points (cluster centers)
    start_time = time.clock()
    k_means = KMeans(n_clusters=k)
    k_means.fit(data)
    y_centers = k_means.cluster_centers_
    # get the correspondence table
    x_to_centers_table = list()
    m = len(data)
    for i in range(m):
        min_distance = np.inf
        min_index = None
        for j in range(k):
            i_j_dis = np.sum((data[i, :] - y_centers[j, :]) ** 2)
            if min_distance > i_j_dis:
                min_index = j
                min_distance = i_j_dis
        x_to_centers_table.append(min_index)
    # spectral cluster
    spe_cluster = SpectralClustering(n_clusters=n_cluster)
    spe_cluster.fit(y_centers)
    spe_label = spe_cluster.labels_
    # get m-way cluster membership
    x_label = list()
    for i in range(m):
        x_label.append(spe_label[x_to_centers_table[i]])
    spend_time = time.clock() - start_time
    print("spend time is %f seconds" % spend_time)
    return x_label
Developer: Yayong-guan, Project: mlcode, Lines: 31, Source: fast_appromate_spe_cluster.py

Example 5: run

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
 def run(self, features, number_of_clusters=2, restarts=10, delta=3.0):
     if number_of_clusters == 1:
         result = numpy.zeros(len(features), dtype=numpy.int32)
         return [result]
     classifier = SpectralClustering(k=number_of_clusters, n_init=restarts)
     similarity = get_similarity(features, delta)
     classifier.fit(similarity)
     return [classifier.labels_]
Developer: jspobst, Project: spikepy, Lines: 10, Source: clustering_spectral_sklearn.py

Example 6: test_affinities

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def test_affinities():
    X, y = make_blobs(n_samples=40, random_state=1, centers=[[1, 1], [-1, -1]], cluster_std=0.4)
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity="nearest_neighbors", random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)

    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
Developer: GbalsaC, Project: bitnamiP, Lines: 12, Source: test_spectral.py

Example 7: run_clustering

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def run_clustering(methods, cases):
    true_method_groups = [m[1] for m in methods]
    edge_model = GraphLassoCV(alphas=4, n_refinements=5, n_jobs=3, max_iter=100)
    edge_model.fit(cases)
    CV = edge_model.covariance_
    
    num_clusters=3
    spectral = SpectralClustering(n_clusters=num_clusters,affinity='precomputed') 
    spectral.fit(np.asarray(CV))
    spec_sort=np.argsort(spectral.labels_)
    
    for i, m in enumerate(methods):
        print("%s:%d\t%s" % (m[1], spectral.labels_[i], m[0]))
    print("Adj. Rand Score: %f" % adjusted_rand_score(spectral.labels_, true_method_groups))
Developer: IDEALLab, Project: design_method_recommendation_JMD_2014, Lines: 16, Source: paper_experiments.py

Example 8: spectral

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def spectral(X, num_clusters):
    """
    Spectral Clustering on X for response y
    Returns array of cluster groups
    """
    model = SpectralClustering(
        n_clusters=num_clusters,
        eigen_solver="arpack",
        affinity="nearest_neighbors",
        n_neighbors=4,
        assign_labels="discretize",
    )
    cleanX = preprocessing.scale(X.as_matrix())
    model.fit(cleanX)
    return model.labels_
Developer: matt-leach, Project: flask-cluster, Lines: 17, Source: __init__.py

Example 9: eval_k

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def eval_k(max_k):
    a_score, idx = [], []
    for k in range(2, max_k + 1):
        print('k={}'.format(k))
        est = SpectralClustering(n_clusters=k, affinity='nearest_neighbors')
#         est = SpectralClustering(n_clusters=k, affinity='rbf', gamma=0.00001)
        est.fit(x)
        ari = metrics.adjusted_rand_score(y, est.labels_)
        print(ari)
        a_score.append(ari)
        idx.append(k)
    pl.plot(idx, a_score)
    pl.xlabel('# of clusters')
    pl.ylabel('ARI')
    pl.show()
Developer: harrylclc, Project: ist557, Lines: 17, Source: spectral.py

Example 10: test_affinities

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def test_affinities():
    X, y = make_blobs(n_samples=40, random_state=1, centers=[[1, 1], [-1, -1]],
                      cluster_std=0.4)
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)

    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)

    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
Developer: coreylynch, Project: scikit-learn, Lines: 18, Source: test_spectral.py

Example 11: spectral

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def spectral(x, num_clusters):
  spec = SpectralClustering(
    affinity='rbf', # 'rbf'
    n_clusters=num_clusters,
    n_init=10,
    assign_labels='kmeans', 
    gamma=1.0, 
    degree=3, 
    coef0=1
  )
  spec.fit(x)

  c = spec.labels_
  k = len(np.unique(c))

  return spec, (None, c, k)
Developer: jakobjoachim, Project: text-mining-haw-bachelor, Lines: 18, Source: spectral.py

Example 12: test_affinities

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def test_affinities():
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=40, random_state=2, centers=[[1, 1], [-1, -1]], cluster_std=0.4)
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity="nearest_neighbors", random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)

    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)

    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity="<unknown>")
    assert_raises(ValueError, sp.fit, X)
Developer: vd4mmind, Project: scikit-learn, Lines: 19, Source: test_spectral.py

Example 13: test_affinities

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def test_affinities():
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
    assert_equal(adjusted_rand_score(y, sp.labels_), 1)

    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)

    X = check_random_state(10).rand(10, 5) * 10

    kernels_available = kernel_metrics()
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != 'additive_chi2':
            sp = SpectralClustering(n_clusters=2, affinity=kern,
                                    random_state=0)
            labels = sp.fit(X).labels_
            assert_equal((X.shape[0],), labels.shape)

    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    def histogram(x, y, **kwargs):
        """Histogram kernel implemented as a callable."""
        assert_equal(kwargs, {})    # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()

    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
Developer: 0x0all, Project: scikit-learn, Lines: 48, Source: test_spectral.py

Example 14: doClustering

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
    def doClustering(self):
        photos = self.getClusteringData()
        
        features = []

        for p in photos:
            features.append( list(self.getCoordinates(p)))
        #km = KMeans(n_clusters = 10, init='k-means++', max_iter=100)
        #km.fit(features) 
        
        #algo = MeanShift()
        algo = SpectralClustering(4)
        algo.fit(np.asarray(features))

        f = open(self.file_name_prefix+'evening_msp_meanshift.csv', 'w')

        for idx in range(len(photos)):
            p = photos[idx]
            f.write(str(p['location']['latitude']) + ',' + str(p['location']['longitude']) + ',' + str(algo.labels_[idx]) + ',' + p['images']['standard_resolution']['url'] + '\n')
Developer: oeddyo, Project: plaza, Lines: 21, Source: plaza_data.py

Example 15: initializeW_clustering

# Required import: from sklearn.cluster import SpectralClustering [as alias]
# Or: from sklearn.cluster.SpectralClustering import fit [as alias]
def initializeW_clustering(n,relationFileName, nClusters):
    W = np.identity(n+1)
    with open(relationFileName) as f:
        f.readline()
        for line in f:
            line = line.split('\t')            
            if int(line[0])<=n and int(line[1]) <=n:
                W[int(line[0])][int(line[1])] +=1   
    #KMeans
    '''
    kmeans = KMeans(n_clusters=nClusters)
    kmeans.fit(W)
    label = kmeans.labels_
    '''
    
    #SpectralClustering
    #spc = SpectralClustering(n_clusters=nClusters, affinity = "precomputed")
    spc = SpectralClustering(n_clusters=nClusters)
    spc.fit(W)   # fit on the adjacency matrix W; rows are treated as feature vectors under the default RBF affinity
    label = spc.labels_
    

    with open(relationFileName+'.cluster','w') as f:
        for i in range(n):
            f.write(str(label[i])+'\n')
        
    NeighborW = np.zeros(shape=(nClusters, nClusters))
    for i in range(n):
        for j in range(n):
            if label[i]==label[j]:
                NeighborW[label[i]][label[j]] = 0
            else:
                NeighborW[label[i]][label[j]] += W[i][j]
    NormalizedNeighborW = normalizeByRow(NeighborW)

    newW = np.identity(nClusters) + NormalizedNeighborW
    print('newW', newW)

    NormalizednewW = normalizeByRow(newW)
    print('NormalizednewW', NormalizednewW.T)

    return NormalizednewW.T, newW, label
Developer: huazhengwang, Project: CF_Bandit, Lines: 44, Source: LastFM_util_functions_2.py


Note: The sklearn.cluster.SpectralClustering.fit examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. Please consult each project's license before distributing or reusing the code, and do not reproduce this compilation without permission.