This page collects typical usage examples of the Python class sklearn.cluster.AgglomerativeClustering. If you have been wondering what AgglomerativeClustering is for and how to use it in practice, the curated class examples below should help.
A total of 15 code examples of the AgglomerativeClustering class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
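Before the examples, here is a minimal, self-contained sketch of typical AgglomerativeClustering usage (synthetic data and illustrative parameter values, not taken from any of the projects below):

import numpy as np
from sklearn.cluster import AgglomerativeClustering

# Two well-separated blobs of 2-D points (toy data for illustration only).
rng = np.random.RandomState(0)
X = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 5.0])

# Default euclidean metric with Ward linkage; fit_predict returns one
# cluster id per sample, here an array of shape (40,).
model = AgglomerativeClustering(n_clusters=2, linkage="ward")
labels = model.fit_predict(X)
print(labels)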
Example 1: Word2VecReduction
def Word2VecReduction(senlist, w2vec, ratio):
    slen = len(senlist)
    word_matrix = []
    word2label = {}
    idx2word = {}
    useword = set([])
    cnt = 0
    for i in range(0, slen):
        for word in senlist[i].word_used:
            if word not in useword:  # and word in w2vec:
                idx2word[cnt] = word
                cnt += 1
                useword.add(word)
                word_matrix.append(w2vec[word])
    wlen = len(useword)
    print("use words:", wlen)
    nclusters = max(int(0.9 * wlen), 100)
    print(nclusters)
    AgloCluster = AgglomerativeClustering(n_clusters=nclusters, linkage="average", affinity='cosine')
    AgloCluster.fit(word_matrix)
    AgloCluster_labels = AgloCluster.labels_
    for i in range(0, wlen):
        word2label[idx2word[i]] = AgloCluster_labels[i]
    for i in range(0, slen):
        senlist[i].sen_words = [str(word2label[w]) for w in senlist[i].word_used]
        senlist[i].word_dict = {}
        # print(senlist[i].sen_words)
    return
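A hedged sketch of the inputs this function assumes: each item of senlist exposes a word_used list (and gets sen_words / word_dict written back), and w2vec maps every used word to a fixed-length vector. The Sentence class and the toy vocabulary below are hypothetical, purely to make the call runnable; the vocabulary is made large enough that the computed n_clusters does not exceed the number of words.

import numpy as np

class Sentence:                                    # hypothetical stand-in for the real sentence class
    def __init__(self, words):
        self.word_used = words

rng = np.random.RandomState(0)
vocab = ["w%d" % i for i in range(120)]            # toy vocabulary
w2vec = {w: rng.randn(50) for w in vocab}          # toy 50-d word vectors
senlist = [Sentence(vocab[:60]), Sentence(vocab[60:])]

Word2VecReduction(senlist, w2vec, ratio=0.9)
print(senlist[0].sen_words[:10])                   # words replaced by cluster-id strings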
Example 2: cluster_agg
def cluster_agg(cluster_data):
    clstr = AgglomerativeClustering(n_clusters=11, linkage='ward')
    clstr.fit(cluster_data)
    df['tier'] = clstr.labels_
    results = df[['Player', 'tier']]
    return results
Example 3: programmer_3
def programmer_3():
    standardizedfile = "data/standardized.xls"
    k = 3
    data = pd.read_excel(standardizedfile, index_col=u"基站编号")

    # Hierarchical (agglomerative) clustering
    model = AgglomerativeClustering(n_clusters=k, linkage="ward")
    model.fit(data)

    # Attach each sample's cluster label to the original data
    r = pd.concat([data, pd.Series(model.labels_, index=data.index)], axis=1)
    r.columns = list(data.columns) + [u"聚类类别"]

    # Plot each cluster, using a different line style per cluster
    style = ["ro-", "go-", "bo-"]
    xlabels = [u"工作日人均停留时间", u"凌晨人均停留时间", u"周末人均停留时间", u"日均人流量"]
    pic_output = "tmp/type_"
    for i in range(k):
        plt.figure()
        tmp = r[r[u"聚类类别"] == i].iloc[:, :4]
        for j in range(len(tmp)):
            plt.plot(range(1, 5), tmp.iloc[j], style[i])
        plt.xticks(range(1, 5), xlabels, rotation=20)
        plt.title(u"商圈类别%s" % (i + 1))
        # Leave room at the bottom for the rotated tick labels
        plt.subplots_adjust(bottom=0.15)
        plt.savefig(u"%s%s.png" % (pic_output, i + 1))
Example 4: train_agglomerative
def train_agglomerative():
    print("starting agglomerative clustering...")
    model = AgglomerativeClustering(n_clusters=num_clusters, affinity=aggl_affinity,
                                    linkage=aggl_linkage)
    model.fit(X)
    labels = model.labels_
    print(labels)
Example 5: agglomerative_clusters
def agglomerative_clusters(self, word_vectors):
    # Pre-calculate BallTree object
    starting = time.time()
    Ball_Tree = BallTree(word_vectors, leaf_size=200, metric="minkowski")
    print("BallTree object in " + str(time.time() - starting))

    # Pre-calculate k_neighbors graph
    starting = time.time()
    connectivity_graph = kneighbors_graph(Ball_Tree,
                                          n_neighbors=1,
                                          mode="connectivity",
                                          metric="minkowski",
                                          p=2,
                                          include_self=False,
                                          n_jobs=workers
                                          )
    print("Pre-compute connectivity graph in " + str(time.time() - starting))

    # Agglomerative clustering
    starting = time.time()
    Agl = AgglomerativeClustering(n_clusters=100,
                                  affinity="minkowski",
                                  connectivity=connectivity_graph,
                                  compute_full_tree=True,
                                  linkage="average"
                                  )
    Agl.fit(word_vectors)
    print("Agglomerative clustering in " + str(time.time() - starting))

    clusters = Agl.labels_
    return clusters
Example 6: test_connectivity_propagation
def test_connectivity_propagation():
    # Check that connectivity in the ward tree is propagated correctly during
    # merging.
    X = np.array(
        [
            (0.014, 0.120),
            (0.014, 0.099),
            (0.014, 0.097),
            (0.017, 0.153),
            (0.017, 0.153),
            (0.018, 0.153),
            (0.018, 0.153),
            (0.018, 0.153),
            (0.018, 0.153),
            (0.018, 0.153),
            (0.018, 0.153),
            (0.018, 0.153),
            (0.018, 0.152),
            (0.018, 0.149),
            (0.018, 0.144),
        ]
    )
    connectivity = kneighbors_graph(X, 10, include_self=False)
    ward = AgglomerativeClustering(n_clusters=4, connectivity=connectivity, linkage="ward")
    # If changes are not propagated correctly, fit crashes with an
    # IndexError
    ward.fit(X)
Example 7: hierarchical
def hierarchical(similarity, concepts=2, euclid=False):
    if euclid:
        model = AgglomerativeClustering(n_clusters=concepts)
        return model.fit_predict(similarity)
    else:
        model = AgglomerativeClustering(n_clusters=concepts, affinity='precomputed', linkage='complete')
        return model.fit_predict(1 - similarity)
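The 'precomputed' branch expects a distance matrix, which is why the caller passes 1 - similarity. A hedged sketch of how such a similarity matrix might be produced, assuming cosine similarity (the original project may compute it differently):

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

# Toy feature matrix; in practice this is whatever produced `similarity`.
X = np.random.RandomState(0).rand(10, 5)
similarity = cosine_similarity(X)          # shape (10, 10), values in [-1, 1]

# hierarchical() above then turns similarities into distances internally.
labels = hierarchical(similarity, concepts=2, euclid=False)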
Example 8: classify_core
def classify_core(self, N_CLUSTERS, clusterType, data_for_trial_type, begin_time, end_time):
    BEGIN_TIME_FRAME = begin_time * self.griddy.TIME_GRID_SPACING
    END_TIME_FRAME = end_time * self.griddy.TIME_GRID_SPACING
    data = data_for_trial_type[:, BEGIN_TIME_FRAME:END_TIME_FRAME, self.griddy.VEL_X]

    labels = None
    if clusterType == 'kmeans':
        kmeans = KMeans(n_clusters=N_CLUSTERS)
        kmeans.fit(data)
        labels = kmeans.labels_
    elif clusterType == 'affinity_propagation':
        ap = AffinityPropagation(damping=0.75)
        ap.fit(data)
        labels = ap.labels_
        N_CLUSTERS = np.max(labels) + 1
    elif clusterType == 'DBSCAN':
        dbscan = DBSCAN()
        dbscan.fit(data)
        labels = dbscan.labels_
        N_CLUSTERS = np.max(labels) + 1
        print('N_CLUSTERS=' + str(N_CLUSTERS))
    elif clusterType == 'AgglomerativeClustering':
        ac = AgglomerativeClustering(n_clusters=N_CLUSTERS)
        ac.fit(data)
        labels = ac.labels_
    else:
        print('ERROR: clusterType: ' + clusterType + ' is not recognized')
    return (labels, N_CLUSTERS)
Author: SashaRayshubskiy · Project: osmotropotaxis_analysis_python · Lines: 31 · Source file: fly_trajectory_classifier.py
Example 9: wardHierarchical
def wardHierarchical(img):
    # Downsample the image first so the structured clustering stays tractable
    face = sp.misc.imresize(img, 0.10) / 255.
    connectivity = grid_to_graph(*face.shape)

    print("Compute structured hierarchical clustering...")
    st = time.time()
    n_clusters = 15  # number of regions
    ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward',
                                   connectivity=connectivity)
    X = np.reshape(face, (-1, 1))
    ward.fit(X)
    label = np.reshape(ward.labels_, face.shape)
    print("Elapsed time: ", time.time() - st)
    print("Number of pixels: ", label.size)
    print("Number of clusters: ", np.unique(label).size)

    plt.figure(figsize=(5, 5))
    plt.imshow(face, cmap=plt.cm.gray)
    for l in range(n_clusters):
        plt.contour(label == l, contours=1,
                    colors=[plt.cm.spectral(l / float(n_clusters)), ])
    plt.xticks(())
    plt.yticks(())
    plt.show()
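Note that sp.misc.imresize was removed in SciPy 1.3, so the function above only runs on older SciPy versions. A hedged, assumed replacement (not part of the original project) using skimage.transform.resize, assuming img is the same 2-D grayscale array:

from skimage.transform import resize

# resize() returns floats already scaled to [0, 1], so no division by 255 is needed.
face = resize(img, (img.shape[0] // 10, img.shape[1] // 10), anti_aliasing=True)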
Example 10: buckshot
def buckshot(k, mat):
    size = int((k * mat.shape[0]) ** .5)
    print(size)
    samp = np.zeros((size, mat.shape[1]))
    inds = np.random.randint(0, mat.shape[0], size)
    print(inds)
    for i in range(size):
        samp[i] = mat[inds[i]]

    # agglomerative clustering on sample
    hier = AgglomerativeClustering(n_clusters=k, linkage='average', affinity='euclidean', compute_full_tree=True)
    flat = hier.fit_predict(samp)

    centroids = []
    # find centroids
    for j in range(k):
        i_s = [i for i, l in enumerate(flat) if l == j]
        print(len(i_s))
        points = [samp[m] for m in i_s]
        points = np.array(points)
        cent = np.mean(points, axis=0)
        centroids.append(cent)
    return centroids
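Buckshot is normally a seeding step: the centroids returned here are typically handed to k-means as its initial centers. A minimal sketch of that hand-off (an assumption about how the caller uses the return value, with toy data; this is not shown in the original code):

import numpy as np
from sklearn.cluster import KMeans

mat = np.random.RandomState(0).rand(500, 8)        # toy data matrix
k = 5
centroids = buckshot(k, mat)                        # seed centers from the sample
km = KMeans(n_clusters=k, init=np.array(centroids), n_init=1)
final_labels = km.fit_predict(mat)                  # cluster the full matrix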
Example 11: clustering_tweets_hc
def clustering_tweets_hc(labeled_tweets, num_cluster):
    vectorizer = cst_vectorizer.StemmedTfidfVectorizer(**param)
    tweet_vec = vectorizer.fit_transform(labeled_tweets).toarray()
    # print(tweet_vec)
    n_clusters = num_cluster

    from sklearn.neighbors import kneighbors_graph
    knn_graph = kneighbors_graph(tweet_vec, 1, include_self=False)
    # print(knn_graph)
    connectivity = knn_graph

    from sklearn.cluster import AgglomerativeClustering
    model = AgglomerativeClustering(linkage='ward', connectivity=connectivity, n_clusters=n_clusters)
    model.fit(tweet_vec)
    c = model.labels_
    # print(c, len(c))

    clustered_tweets = []
    for i in range(0, num_cluster):
        similar_indices = (c == i).nonzero()[0]
        sent = ''
        for sid in similar_indices:
            sent = labeled_tweets[sid] + ' ' + sent
        clustered_tweets.append(sent)
    return clustered_tweets
Example 12: clustering_approach
def clustering_approach(self):
    '''
    Cluster user data using various clustering algos
    IN: self.df_full and self.labels
    OUT: results to stdout
    '''
    print('Fitting clustering model')
    X = self.df_full.values
    y = self.labels

    # scale data
    scaler = StandardScaler()
    X = scaler.fit_transform(X)

    # KMeans
    km_clf = KMeans(n_clusters=2, n_jobs=6)
    km_clf.fit(X)
    # swap labels as super-users are in cluster 0 (messy!!)
    temp = y.apply(lambda x: 0 if x == 1 else 1)
    print('\nKMeans clustering: ')
    self.analyse_preds(temp, km_clf.labels_)

    # Agglomerative clustering
    print('\nAgglomerative clustering approach: ')
    ac_clf = AgglomerativeClustering()
    ac_labels = ac_clf.fit_predict(X)
    self.analyse_preds(y, ac_labels)
    return None
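The manual label swap before scoring KMeans only works because there happen to be two clusters. A permutation-invariant metric such as adjusted_rand_score sidesteps it entirely; a minimal sketch, assuming the names y, km_clf and ac_labels from the method above:

from sklearn.metrics import adjusted_rand_score

# 1.0 means perfect agreement, regardless of which cluster id maps to which class.
print(adjusted_rand_score(y, km_clf.labels_))
print(adjusted_rand_score(y, ac_labels))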
Example 13: plot_mfi
def plot_mfi(self, outputfile='embeddings.pdf', nb_clusters=8, weights='NA'):
    # collect embeddings for mfi:
    X = np.asarray([self.w2v_model[w] for w in self.mfi
                    if w in self.w2v_model], dtype='float32')
    # dimension reduction:
    tsne = TSNE(n_components=2)
    coor = tsne.fit_transform(X)  # unsparsify

    plt.clf()
    sns.set_style('dark')
    sns.plt.rcParams['axes.linewidth'] = 0.4
    fig, ax1 = sns.plt.subplots()

    labels = self.mfi
    # first plot slices:
    x1, x2 = coor[:, 0], coor[:, 1]
    ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none')

    # clustering on top (add some colouring):
    clustering = AgglomerativeClustering(linkage='ward',
                                         affinity='euclidean', n_clusters=nb_clusters)
    clustering.fit(coor)

    # add names:
    for x, y, name, cluster_label in zip(x1, x2, labels, clustering.labels_):
        ax1.text(x, y, name, ha='center', va="center",
                 color=plt.cm.spectral(cluster_label / 10.),
                 fontdict={'family': 'Arial', 'size': 8})

    # control aesthetics:
    ax1.set_xlabel('')
    ax1.set_ylabel('')
    ax1.set_xticklabels([])
    ax1.set_xticks([])
    ax1.set_yticklabels([])
    ax1.set_yticks([])
    sns.plt.savefig(outputfile, bbox_inches=0)
Example 14: sp_connectivity
def sp_connectivity(self, X, connectivity, n_clusters, n):
    # plt.figure(figsize=(10, 4))
    # plt.subplot(1, 3, index + 1)
    model = AgglomerativeClustering(linkage="ward",
                                    connectivity=connectivity,
                                    n_clusters=n_clusters)
    # t0 = time.time()
    y = np.zeros(shape=(n))
    y = model.fit_predict(X, None)
    # elapsed_time = time.time() - t0
    return y
    # plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
    #             cmap=plt.cm.spectral)
    # plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
    #           fontdict=dict(verticalalignment='top'))
    # plt.axis('equal')
    # plt.axis('off')
    # plt.subplots_adjust(bottom=0, top=.89, wspace=0,
    #                     left=0, right=1)
    # plt.suptitle('n_cluster=%i, connectivity=%r' %
    #              (n_clusters, connectivity is not None), size=17)
    # plt.show()
Example 15: test_agglomerative_clustering_with_distance_threshold
def test_agglomerative_clustering_with_distance_threshold(linkage):
    # Check that we obtain the correct number of clusters with
    # agglomerative clustering with distance_threshold.
    rng = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    # test when distance threshold is set to 10
    distance_threshold = 10
    for conn in [None, connectivity]:
        clustering = AgglomerativeClustering(
            n_clusters=None,
            distance_threshold=distance_threshold,
            connectivity=conn, linkage=linkage)
        clustering.fit(X)
        clusters_produced = clustering.labels_
        num_clusters_produced = len(np.unique(clustering.labels_))

        # test if the clusters produced match the point in the linkage tree
        # where the distance exceeds the threshold
        tree_builder = _TREE_BUILDERS[linkage]
        children, n_components, n_leaves, parent, distances = \
            tree_builder(X, connectivity=conn, n_clusters=None,
                         return_distance=True)
        num_clusters_at_threshold = np.count_nonzero(
            distances >= distance_threshold) + 1
        # test number of clusters produced
        assert num_clusters_at_threshold == num_clusters_produced
        # test clusters produced
        clusters_at_threshold = _hc_cut(n_clusters=num_clusters_produced,
                                        children=children,
                                        n_leaves=n_leaves)
        assert np.array_equiv(clusters_produced,
                              clusters_at_threshold)
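_TREE_BUILDERS and _hc_cut are private scikit-learn helpers used here only to verify the result; the user-facing way to cluster by distance threshold is much shorter. A minimal sketch of that public API on toy data:

import numpy as np
from sklearn.cluster import AgglomerativeClustering

X = np.random.RandomState(0).randn(100, 50)

# With n_clusters=None, the distance threshold alone decides how many clusters emerge.
clustering = AgglomerativeClustering(n_clusters=None,
                                     distance_threshold=10,
                                     linkage="ward").fit(X)
print(clustering.n_clusters_)   # number of clusters actually found
print(clustering.labels_)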