本文整理汇总了Python中sklearn.cluster方法的典型用法代码示例。如果您正苦于以下问题:Python sklearn.cluster方法的具体用法?Python sklearn.cluster怎么用?Python sklearn.cluster使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn
的用法示例。
在下文中一共展示了sklearn.cluster方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: process_options
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def process_options(args):
    """Parse command-line args, load word vectors, and apply optional preprocessing.

    Returns a (words, vectors, options) triple; raises ValueError on
    out-of-range numeric options.
    """
    options = argparser().parse_args(args)
    # Validate cheap numeric options before doing any expensive loading.
    if options.max_rank is not None and options.max_rank < 1:
        raise ValueError('max-rank must be >= 1')
    if options.eps <= 0.0:
        raise ValueError('eps must be > 0')
    word_vecs = wvlib.load(options.vectors[0], max_rank=options.max_rank)
    if options.normalize:
        logging.info('normalize vectors to unit length')
        word_vecs.normalize()
    words = word_vecs.words()
    vectors = word_vecs.vectors()
    if options.whiten:
        logging.info('normalize features to unit variance')
        vectors = scipy.cluster.vq.whiten(vectors)
    return words, vectors, options
示例2: run_kmeans
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def run_kmeans(features, n_cluster):
    """
    Run kmeans on a set of features to find <n_cluster> cluster.
    Args:
        features: np.ndarrary [n_samples x embed_dim], embedding training/testing samples for which kmeans should be performed.
        n_cluster: int, number of cluster.
    Returns:
        cluster_assignments: np.ndarray [n_samples x 1], per sample provide the respective cluster label it belongs to.
    """
    _, dim = features.shape
    kmeans = faiss.Kmeans(dim, n_cluster)
    # Loosen faiss's per-centroid point limits and fix the iteration budget.
    kmeans.n_iter = 20
    kmeans.min_points_per_centroid = 5
    kmeans.max_points_per_centroid = 1000000000
    kmeans.train(features)
    # Nearest-centroid search (top-1) yields the assignment per sample.
    _, cluster_assignments = kmeans.index.search(features, 1)
    return cluster_assignments
示例3: error
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def error(cluster, target_cluster, k):
    """Compute clustering accuracy between a proposed and a target clustering.

    Labels are first matched one-to-one via the Hungarian algorithm, so the
    result is invariant to permutations of cluster label ids.

    :param cluster: proposed cluster labels, array-like of ints in [0, k)
    :param target_cluster: target cluster labels, same length as ``cluster``
    :param k: number of clusters
    :return: accuracy in [0, 1] (note: despite the function name, higher is better)
    """
    # scipy's linear_sum_assignment replaces the third-party Munkres
    # dependency; both solve the same minimum-cost assignment problem.
    from scipy.optimize import linear_sum_assignment

    cluster = np.asarray(cluster)
    target_cluster = np.asarray(target_cluster)
    # Contingency matrix: M[i][j] = #samples proposed as i but labeled j.
    M = np.zeros((k, k))
    for i in range(k):
        for j in range(k):
            M[i][j] = np.sum(np.logical_and(cluster == i, target_cluster == j))
    # Maximize total agreement by minimizing -M; rows come back sorted,
    # so col_ind[i] is the target label optimally matched to proposed label i.
    _, col_ind = linear_sum_assignment(-M)
    pred_corresp = [col_ind[int(predicted)] for predicted in cluster]
    acc = np.sum(pred_corresp == target_cluster) / float(len(target_cluster))
    return acc
示例4: cluster
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def cluster(train_latents, train_labels, test_latents, test_labels):
    """Fit k-means on training latents and report label-matched accuracy.

    The number of clusters is taken from the one-hot label width; latents
    are flattened to 2-D before fitting. Returns the test-set score from
    the sibling ``error`` helper.
    """
    num_classes = np.shape(train_labels)[-1]
    labels_hot = np.argmax(test_labels, axis=-1)
    # Flatten everything beyond the batch dimension for sklearn.
    flat_train = np.reshape(train_latents,
                            newshape=[train_latents.shape[0], -1])
    flat_test = np.reshape(test_latents,
                           newshape=[test_latents.shape[0], -1])
    kmeans = KMeans(init='random', n_clusters=num_classes,
                    random_state=0, max_iter=1000, n_init=FLAGS.n_init,
                    n_jobs=FLAGS.n_jobs)
    kmeans.fit(flat_train)
    print(kmeans.cluster_centers_)
    # KMeans.score is negative inertia, hence the sign flips below.
    print('Train/Test k-means objective = %.4f / %.4f' %
          (-kmeans.score(flat_train), -kmeans.score(flat_test)))
    print('Train/Test accuracy %.4f / %.3f' %
          (error(np.argmax(train_labels, axis=-1), kmeans.predict(flat_train), k=num_classes),
           error(np.argmax(test_labels, axis=-1), kmeans.predict(flat_test), k=num_classes)))
    return error(labels_hot, kmeans.predict(flat_test), k=num_classes)
示例5: __call__
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def __call__(self, features: np.array, term_index: list, use_tfidf: bool = True, **options):
    """
    Just call activated class instance to cluster data.
    :param features: np.array - term frequency matrix
    :param term_index: list - list of term frequency matrix indexes
    :param use_tfidf: bool - whether to use TF IDF Transformer
    :param options: **dict - unpacked cluster algorithm options
    :return: ClusterEngine instance with attributes listed in __init__
    """
    # Stash the inputs on the instance so get_model()/cluster() can use them.
    self.features = features
    self.num_records = features.shape[0]
    self.term_index = term_index
    self.use_tfidf = use_tfidf
    self.user_options = options
    self.n_clusters = options.get('n_clusters')
    # Build the configured model, then run the clustering itself.
    self.cluster_model = self.get_model()
    return self.cluster()
示例6: cluster
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def cluster(evecs, Cnorm, k, in_bound_idxs=None):
    """Cluster normalized eigenvectors into k segments and derive boundaries.

    Returns (bound_idxs, bound_segs): boundary frame indices (including the
    end-time) and the segment label attached to each boundary.
    """
    # Normalize the first k eigenvector columns; epsilon avoids divide-by-zero.
    X = evecs[:, :k] / (Cnorm[:, k - 1:k] + 1e-5)
    seg_ids = sklearn.cluster.KMeans(n_clusters=k, n_init=50,
                                     max_iter=500).fit_predict(X)

    if in_bound_idxs is None:
        # Boundaries sit wherever the label sequence changes;
        # beat 0 always counts as a boundary.
        bound_beats = 1 + np.flatnonzero(seg_ids[:-1] != seg_ids[1:])
        bound_idxs = librosa.util.fix_frames(bound_beats, x_min=0)
    else:
        bound_idxs = in_bound_idxs

    # Segment label for each boundary, then tack on the end-time.
    bound_segs = list(seg_ids[bound_idxs])
    bound_idxs = list(np.append(bound_idxs, len(Cnorm) - 1))
    return bound_idxs, bound_segs
示例7: do_segmentation
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def do_segmentation(C, M, config, in_bound_idxs=None):
    """Segment beat-synchronous features into labeled sections.

    :param C: feature matrix handed to embed_beats
    :param M: second input to embed_beats (recurrence/affinity data)
    :param config: dict with keys "hier", "num_layers", "scluster_k"
    :param in_bound_idxs: optional fixed boundary indices (flat mode only)
    :return: (est_idxs, est_labels, Cnorm)
    """
    embedding = embed_beats(C, M, config)
    # Cumulative L2 norm of the embedding rows, used downstream to
    # normalize the eigenvector columns in cluster().
    Cnorm = np.cumsum(embedding ** 2, axis=1) ** 0.5

    if config["hier"]:
        # Hierarchical mode: one segmentation per layer, k = 1..num_layers.
        est_idxs = []
        est_labels = []
        for k in range(1, config["num_layers"] + 1):
            est_idx, est_label = cluster(embedding, Cnorm, k)
            est_idxs.append(est_idx)
            # np.int was removed in NumPy 1.24; the builtin int is equivalent.
            est_labels.append(np.asarray(est_label, dtype=int))
    else:
        est_idxs, est_labels = cluster(embedding, Cnorm, config["scluster_k"], in_bound_idxs)
        est_labels = np.asarray(est_labels, dtype=int)

    return est_idxs, est_labels, Cnorm
示例8: save
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def save(self, name, resolution, gain, equalize=True, cluster='agglomerative', statistics='db', max_K=5):
    """
    Generates a topological representation using the Mapper algorithm with resolution and gain specified by the
    parameters 'resolution' and 'gain'. When equalize is set to True, patches are chosen such that they
    contain the same number of points. The parameter 'cluster' specifies the clustering method ('agglomerative' or
    'kmeans'). The parameter 'statistics' specifies the criterion for choosing the optimal number of clusters
    ('db' for Davies-Bouldin index, or 'gap' for the gap statistic). The parameter 'max_K' specifies the maximum
    number of clusters to be considered within each patch. The topological representation is stored in the files
    'name.gexf' and 'name.json'. It returns a dictionary with the patches.
    """
    G, all_clusters, patches = sakmapper.mapper_graph(self.df, lens_data=self.lens_data_mds,
                                                      resolution=resolution,
                                                      gain=gain, equalize=equalize, clust=cluster,
                                                      stat=statistics, max_K=max_K)
    # Materialize members as plain ints so they are JSON-serializable
    # (on Python 3, map() returns a lazy iterator that json cannot dump).
    dic = {}
    for n, rs in enumerate(all_clusters):
        dic[str(n)] = [int(x) for x in rs]
    # json.dump writes str, so the file must be opened in text mode ('w');
    # the original Python 2 'wb' mode raises TypeError on Python 3.
    with open(name + '.json', 'w') as handle3:
        json.dump(dic, handle3)
    networkx.write_gexf(G, name + '.gexf')
    return patches
示例9: cellular_subpopulations
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def cellular_subpopulations(self, threshold=0.05, min_cells=5, clus_thres=0.65):
    """
    Identifies potential transient cellular subpopulations. The parameter
    'threshold' sets an upper bound of the q-value of the genes that are considered in the analysis.
    The parameter 'min_cells' sets the minimum number of cells on which each of the genes considered in the
    analysis is expressed. Cellular subpopulations are determined by clustering the Jensen-Shannon distance
    matrix of the genes that pass all the constraints. The number of clusters is controlled in this case by
    the parameter 'clus_thres'. In both cases a list with the genes associated to each cluster is returned.
    It requires the presence of the file 'name.genes.tsv', produced by the method RotedGraph.save().
    """
    nam = []
    # Read the per-gene statistics table, skipping the header row.
    # 'with' guarantees the handle is closed even if parsing raises.
    with open(self.name + '.genes.tsv', 'r') as f:
        for n, line in enumerate(f):
            if n > 0:
                sp = line[:-1].split('\t')
                # NOTE(review): column 7 appears to be the q-value and
                # column 1 the cell count — confirm against the .genes.tsv writer.
                if float(sp[7]) < threshold and float(sp[1]) > min_cells:
                    nam.append(sp[0])
    mat2 = self.JSD_matrix(nam)
    # Return concrete lists of gene names; the original map() objects are
    # lazy iterators on Python 3 and not useful to callers.
    return [[nam[xx] for xx in m]
            for m in find_clusters(hierarchical_clustering(mat2, labels=nam,
                                                           cluster_distance=True, thres=clus_thres)).values()]
示例10: computeF1_macro
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def computeF1_macro(confusion_matrix, matching, num_clusters):
    """
    Computes the macro F1 score.

    :param confusion_matrix: (num_clusters x num_clusters) array; requires permutation
    :param matching: matching according to which the matrix columns must be permuted
    :param num_clusters: number of clusters
    :return: macro-averaged F1 score over all clusters
    """
    ## Permute the matrix columns so predicted clusters line up with true ones.
    # (xrange is Python 2 only; range is the Python 3 equivalent.)
    permuted_confusion_matrix = np.zeros([num_clusters, num_clusters])
    for cluster in range(num_clusters):
        matched_cluster = matching[cluster]
        permuted_confusion_matrix[:, cluster] = confusion_matrix[:, matched_cluster]
    ## Compute the F1 score for every cluster, then average.
    F1_score = 0
    for cluster in range(num_clusters):
        TP = permuted_confusion_matrix[cluster, cluster]
        FP = np.sum(permuted_confusion_matrix[:, cluster]) - TP
        FN = np.sum(permuted_confusion_matrix[cluster, :]) - TP
        precision = TP / (TP + FP)
        recall = TP / (TP + FN)
        # Harmonic mean of precision and recall is exactly the F1 score.
        f1 = stats.hmean([precision, recall])
        F1_score += f1
    F1_score /= num_clusters
    return F1_score
示例11: test_monkey_patching
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def test_monkey_patching(self):
    """Check that daal4py can unpatch and re-patch the listed sklearn estimators."""
    _tokens = daal4py.sklearn.sklearn_patch_names()
    self.assertTrue(isinstance(_tokens, list) and len(_tokens) > 0)
    # Exercise both directions of the patching machinery.
    for t in _tokens:
        daal4py.sklearn.unpatch_sklearn(t)
    for t in _tokens:
        daal4py.sklearn.patch_sklearn(t)

    import sklearn
    patched = [(sklearn.decomposition, 'PCA'),
               (sklearn.linear_model, 'Ridge'),
               (sklearn.linear_model, 'LinearRegression'),
               (sklearn.cluster, 'KMeans'),
               (sklearn.svm, 'SVC')]
    for module, attr in patched:
        # After re-patching, each class should resolve to a daal4py module.
        class_module = getattr(module, attr).__module__
        self.assertTrue(class_module.startswith('daal4py'))
示例12: kmeans
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def kmeans(X_train, y_train, X_val, y_val):
    """Cluster training data with k-means and fit a linear model per cluster.

    Prints per-cluster target mean/std for the training and validation sets,
    skips clusters with no validation members, and trains a LinearModel on
    each remaining cluster's members.

    :return: (c_pred, centroids) - validation cluster assignments and cluster centers
    """
    n_clusters = 10
    kmeans = KMeans(n_clusters=n_clusters, random_state=0, verbose=0, n_jobs=int(0.8*n_cores)).fit(X_train)
    c_train = kmeans.predict(X_train)
    c_pred = kmeans.predict(X_val)
    centroids = kmeans.cluster_centers_
    for i in range(n_clusters):
        print('--------analyzing cluster %d--------' %i)
        train_mask = c_train==i
        std_train = np.std(y_train[train_mask])
        mean_train = np.mean(y_train[train_mask])
        # np.float was removed in NumPy 1.24; the builtin float is identical here.
        print("# examples & price mean & std for training set within cluster %d is:(%d, %.2f, %.2f)" %(i, train_mask.sum(), float(mean_train), float(std_train)))
        pred_mask = c_pred==i
        std_pred = np.std(y_val[pred_mask])
        mean_pred = np.mean(y_val[pred_mask])
        print("# examples & price mean & std for validation set within cluster %d is:(%d, %.2f, %.2f)" %(i, pred_mask.sum(), float(mean_pred), float(std_pred)))
        if pred_mask.sum() == 0:
            print('Zero membered test set! Skipping the test and training validation.')
            continue
        LinearModel(X_train[train_mask], y_train[train_mask], X_val[pred_mask], y_val[pred_mask])
        print('--------Finished analyzing cluster %d--------' %i)
    return c_pred, centroids
示例13: predict
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.
    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.
    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        New data to predict.
    Returns
    -------
    labels : array, shape [n_samples,]
        Index of the cluster each sample belongs to.
    """
    check_is_fitted(self, "cluster_centers_")
    X = self._check_array(X)
    # pairwise_distances_argmin_min returns (argmin, min); only the
    # indices of the nearest centers are needed here.
    nearest_idx, _ = pairwise_distances_argmin_min(X, self.cluster_centers_)
    return nearest_idx.astype(np.int32)
示例14: process_options
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def process_options(args):
    """Parse clustering options, load word vectors, and preprocess them.

    Falls back from minibatch k-means to plain k-means when scikit-learn is
    unavailable, derives a default cluster count k from the vocabulary size
    when none is given, and returns (words, vectors, options).
    """
    options = argparser().parse_args(args)
    # Reject out-of-range numeric options before any expensive work.
    if options.max_rank is not None and options.max_rank < 1:
        raise ValueError('max-rank must be >= 1')
    if options.k is not None and options.k < 2:
        raise ValueError('cluster number must be >= 2')
    if options.method == MINIBATCH_KMEANS and not with_sklearn:
        logging.warning('minibatch kmeans not available, using kmeans (slow)')
        options.method = KMEANS
    if options.jobs != 1 and (options.method != KMEANS or not with_sklearn):
        logging.warning('jobs > 1 only supported scikit-learn %s' % KMEANS)
        options.jobs = 1
    word_vecs = wvlib.load(options.vectors[0], max_rank=options.max_rank)
    if options.k is None:
        # Heuristic default: k ~ sqrt(vocabulary size / 2).
        vocab_size = len(word_vecs.words())
        options.k = int(math.ceil((vocab_size / 2) ** 0.5))
        logging.info('set k=%d (%d words)' % (options.k, vocab_size))
    if options.normalize:
        logging.info('normalize vectors to unit length')
        word_vecs.normalize()
    words = word_vecs.words()
    vectors = word_vecs.vectors()
    if options.whiten:
        logging.info('normalize features to unit variance')
        vectors = scipy.cluster.vq.whiten(vectors)
    return words, vectors, options
示例15: minibatch_kmeans
# 需要导入模块: import sklearn [as 别名]
# 或者: from sklearn import cluster [as 别名]
def minibatch_kmeans(vectors, k):
    """Cluster vectors into k groups with scikit-learn's MiniBatchKMeans.

    Raises NotImplementedError when scikit-learn is not available.
    """
    if not with_sklearn:
        raise NotImplementedError
    # Sculley (http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf)
    # uses batch size 1000. sklearn KMeans defaults to n_init 10
    model = sklearn.cluster.MiniBatchKMeans(k, batch_size=1000, n_init=10)
    model.fit(vectors)
    return model.labels_