This post collects typical usage examples of the Python KMeans.train method. If you have been wondering what KMeans.train does, how to call it, or where to find working code, the hand-picked examples below may help. Note that scikit-learn's sklearn.cluster.KMeans estimator actually exposes fit() rather than train(); the train() name belongs to PySpark's pyspark.mllib.clustering.KMeans, as Example 2 illustrates. You can also read further about the sklearn.cluster.KMeans class itself.
Two code examples are shown below.
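As a quick orientation before the examples, here is a minimal sketch of the two APIs side by side; the data and parameter values are illustrative assumptions, not taken from the examples below.

import numpy as np
from sklearn.cluster import KMeans

# scikit-learn: KMeans is an estimator object; clustering happens via fit()
points = np.random.RandomState(0).rand(100, 2)  # illustrative data
model = KMeans(n_clusters=3, n_init=10, random_state=0).fit(points)
print(model.inertia_)  # within-cluster sum of squared distances

# PySpark MLlib, by contrast, exposes a classmethod named train():
#   from pyspark.mllib.clustering import KMeans as MLlibKMeans
#   model = MLlibKMeans.train(rdd, k=3, maxIterations=20)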
Example 1: _fit
# Required imports: import time
#                   from sklearn.cluster import KMeans as SklearnKMeans
def _fit(self, num_iters=10):
    scores = []
    start = time.time()
    for i in range(num_iters):
        print('Starting sklearn KMeans: %d' % i)
        sklearn_kmeans = SklearnKMeans(
            n_clusters=self.num_clusters,
            init='k-means++',
            max_iter=50,
            n_init=1,
            tol=1e-4,
            random_state=i * 42)  # a distinct seed for each benchmark run
        # scikit-learn's KMeans is fitted with fit(); it has no train() method
        sklearn_kmeans.fit(self.points)
        scores.append(sklearn_kmeans.inertia_)  # within-cluster sum of squares
    self._report(num_iters, start, time.time(), scores)
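For context, the same benchmark loop can be sketched in self-contained form, with the host object's _report inlined as a simple print; the data shape and cluster count here are illustrative assumptions:

import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans

points = np.random.RandomState(0).rand(1000, 8)  # illustrative data
num_clusters = 5                                 # illustrative k

scores = []
start = time.time()
for i in range(3):
    model = SklearnKMeans(n_clusters=num_clusters, init='k-means++',
                          max_iter=50, n_init=1, tol=1e-4, random_state=i * 42)
    model.fit(points)
    scores.append(model.inertia_)  # within-cluster sum of squares per run
print('best inertia %.2f in %.3fs' % (min(scores), time.time() - start))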
Example 2: standard_spark_kmeans
# Required imports: from time import time
#                   from pyspark.mllib.clustering import KMeans
def standard_spark_kmeans(data, k, max_iter, random_state):
    from time import time
    from math import sqrt
    from pyspark import SparkContext, SparkConf
    from pyspark.mllib.clustering import KMeans

    t1 = time()
    conf = SparkConf().setAppName('K-Means_Spark').setMaster('local[%d]' % 10)
    sc = SparkContext(conf=conf)
    data = sc.parallelize(data)

    # Build the model (cluster the data); note that the runs argument
    # was removed in Spark 2.0, so drop it on newer versions
    clusters = KMeans.train(data, k, maxIterations=max_iter, runs=10,
                            initializationMode="random",
                            seed=random_state, epsilon=1e-4)

    # Evaluate clustering by computing the Within Set Sum of Squared Errors
    def error(point):
        center = clusters.centers[clusters.predict(point)]
        return sqrt(sum([x ** 2 for x in (point - center)]))

    WSSSE = data.map(lambda point: error(point)).reduce(lambda x, y: x + y)
    print(time() - t1)
    print(WSSSE)
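A minimal sketch of how this function might be invoked, assuming PySpark is installed and a local master is acceptable; the input array and parameter values are illustrative assumptions:

import numpy as np

# Any array-like of numeric vectors works here, since sc.parallelize()
# turns it into an RDD inside the function.
points = np.random.RandomState(0).rand(500, 2)

standard_spark_kmeans(points, k=4, max_iter=20, random_state=42)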