This article collects typical usage examples of the Python class pyspark.ml.clustering.KMeans. If you are wondering what the KMeans class does and how to use it, the curated class examples below may help.
The following 15 code examples of the KMeans class are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python examples.
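Before the individual examples, here is a minimal sketch of the fit/transform pattern nearly all of them share. It is not taken from any of the projects below; the toy data, app name, and column name are illustrative only.

from pyspark.ml.clustering import KMeans
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("KMeansMinimal").getOrCreate()
data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
        (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
df = spark.createDataFrame(data, ["features"])

kmeans = KMeans(k=2, seed=1)   # estimator: choose k and fix the seed for reproducibility
model = kmeans.fit(df)         # fit() returns a KMeansModel
print(model.clusterCenters())  # list of centroids as numpy arrays
model.transform(df).show()     # appends a "prediction" column with cluster ids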
Example 1: kmeans
def kmeans(df):
    kmeans = KMeans(k=2, seed=1)
    model = kmeans.fit(df)
    centers = model.clusterCenters()
    print(len(centers))  # Python 3 print; the original used the Python 2 statement form
    kmFeatures = model.transform(df).select("features", "prediction")
    dfwrite(kmFeatures, 'kmFeatures')  # dfwrite: project-specific helper that persists the DataFrame
Example 2: test_kmeans_cosine_distance
def test_kmeans_cosine_distance(self):
    data = [(Vectors.dense([1.0, 1.0]),), (Vectors.dense([10.0, 10.0]),),
            (Vectors.dense([1.0, 0.5]),), (Vectors.dense([10.0, 4.4]),),
            (Vectors.dense([-1.0, 1.0]),), (Vectors.dense([-100.0, 90.0]),)]
    df = self.spark.createDataFrame(data, ["features"])
    kmeans = KMeans(k=3, seed=1, distanceMeasure="cosine")
    model = kmeans.fit(df)
    result = model.transform(df).collect()
    self.assertTrue(result[0].prediction == result[1].prediction)
    self.assertTrue(result[2].prediction == result[3].prediction)
    self.assertTrue(result[4].prediction == result[5].prediction)
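A model trained with distanceMeasure="cosine" can be evaluated consistently with a cosine Silhouette. This is a sketch, not part of the original test; it assumes the df and model from the snippet above and Spark 2.4+ for ClusteringEvaluator's distanceMeasure parameter.

from pyspark.ml.evaluation import ClusteringEvaluator

evaluator = ClusteringEvaluator(distanceMeasure="cosine")  # metricName defaults to "silhouette"
silhouette = evaluator.evaluate(model.transform(df))
print("Silhouette with cosine distance = " + str(silhouette))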
Example 3: clustering
def clustering(input_df, input_col_name, n):
    """KMeans clustering after L1 normalization, plus a 2-D PCA projection."""
    input_df = input_df.select('state', 'categories', 'stars', input_col_name)
    norm = Normalizer(inputCol=input_col_name, outputCol="features", p=1.0)
    df = norm.transform(input_df)
    kmeans = KMeans(k=n, seed=2)
    KMmodel = kmeans.fit(df)
    predicted = KMmodel.transform(df).cache()
    pca = PCA(k=2, inputCol='features', outputCol="pc")
    df = pca.fit(predicted).transform(predicted).cache()  # fixed: the original referenced an undefined `dfsample`
    return df
Example 4: test_kmeans_param
def test_kmeans_param(self):
    algo = KMeans()
    self.assertEqual(algo.getInitMode(), "k-means||")
    algo.setK(10)
    self.assertEqual(algo.getK(), 10)
    algo.setInitSteps(10)
    self.assertEqual(algo.getInitSteps(), 10)
Example 5: test_kmeans_summary
def test_kmeans_summary(self):
    data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
            (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
    df = self.spark.createDataFrame(data, ["features"])
    kmeans = KMeans(k=2, seed=1)
    model = kmeans.fit(df)
    self.assertTrue(model.hasSummary)
    s = model.summary
    self.assertTrue(isinstance(s.predictions, DataFrame))
    self.assertEqual(s.featuresCol, "features")
    self.assertEqual(s.predictionCol, "prediction")
    self.assertTrue(isinstance(s.cluster, DataFrame))
    self.assertEqual(len(s.clusterSizes), 2)
    self.assertEqual(s.k, 2)
    self.assertEqual(s.numIter, 1)
Example 6: test_kmean_pmml_basic
def test_kmean_pmml_basic(self):
    # Most of the validation is done in the Scala side, here we just check
    # that we output text rather than parquet (e.g. that the format flag
    # was respected).
    data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
            (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
    df = self.spark.createDataFrame(data, ["features"])
    kmeans = KMeans(k=2, seed=1)
    model = kmeans.fit(df)
    path = tempfile.mkdtemp()
    km_path = path + "/km-pmml"
    model.write().format("pmml").save(km_path)
    pmml_text_list = self.sc.textFile(km_path).collect()
    pmml_text = "\n".join(pmml_text_list)
    self.assertIn("Apache Spark", pmml_text)
    self.assertIn("PMML", pmml_text)
Example 7: kmeans
def kmeans(inputdir, df, alg, k):
    from pyspark.ml.clustering import KMeans
    from math import sqrt
    kmeans = KMeans(k=int(k), seed=1, initSteps=5, tol=1e-4, maxIter=20,
                    initMode="k-means||", featuresCol="features")
    model = kmeans.fit(df)
    kmFeatures = model.transform(df).select("labels", "prediction")
    erFeatures = model.transform(df).select("features", "prediction")

    # Evaluation: sum, over all points, of the Euclidean distance to the assigned center
    rows = erFeatures.collect()
    WSSSE = 0
    for i in rows:
        WSSSE += sqrt(sum([x**2 for x in (model.clusterCenters()[i[1]] - i[0])]))
    print("Within Set Sum of Squared Error = " + str(WSSSE))
    output_data = writeOutClu(inputdir, kmFeatures, alg, k, WSSSE)  # writeOutClu: project-specific output helper
    return output_data
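On Spark 2.4+ the fitted model's summary already exposes this cost, so the manual loop can usually be dropped. A one-line sketch assuming the same fitted model; note that summary.trainingCost sums squared distances, while the loop above sums plain distances, so the two numbers differ.

# Within-set sum of squared distances, as computed by Spark itself (Spark 2.4+)
print("Training cost = " + str(model.summary.trainingCost))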
Example 8: test_kmeans
def test_kmeans(self):
    kmeans = KMeans(k=2, seed=1)
    path = tempfile.mkdtemp()
    km_path = path + "/km"
    kmeans.save(km_path)
    kmeans2 = KMeans.load(km_path)
    self.assertEqual(kmeans.uid, kmeans2.uid)
    self.assertEqual(type(kmeans.uid), type(kmeans2.uid))
    self.assertEqual(kmeans2.uid, kmeans2.k.parent,
                     "Loaded KMeans instance uid (%s) did not match Param's uid (%s)"
                     % (kmeans2.uid, kmeans2.k.parent))
    self.assertEqual(kmeans._defaultParamMap[kmeans.k], kmeans2._defaultParamMap[kmeans2.k],
                     "Loaded KMeans instance default params did not match " +
                     "original defaults")
    try:
        rmtree(path)
    except OSError:
        pass
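The test round-trips the unfitted estimator; a fitted KMeansModel can be persisted the same way. A small sketch, assuming a fitted model and a writable path as in the test above; the model_path location is hypothetical.

from pyspark.ml.clustering import KMeansModel

model_path = path + "/km_model"  # hypothetical location
model.save(model_path)
loaded = KMeansModel.load(model_path)
print(loaded.clusterCenters())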
Example 9: cluster
def cluster():
    # olangdict: project data file; DATAP and `load` (from the json module) come from the surrounding project
    ld = load(open(DATAP + '\\temp\\olangdict.json', 'r', encoding='UTF-8'))

    spark = SparkSession.builder\
        .master("local")\
        .appName("Word Count")\
        .config("spark.some.config.option", "some-value")\
        .getOrCreate()

    # The assembler below needs numeric "feat1"/"feat2" columns; the original
    # built an "id"-only DataFrame, which would fail here. Illustrative values added.
    df = spark.createDataFrame([["0", 0.0, 0.0],
                                ["1", 1.0, 1.0],
                                ["2", 9.0, 8.0],
                                ["3", 8.0, 9.0],
                                ["4", 5.0, 5.0]],
                               ["id", "feat1", "feat2"])
    df.show()

    vecAssembler = VectorAssembler(inputCols=["feat1", "feat2"], outputCol="features")
    new_df = vecAssembler.transform(df)
    kmeans = KMeans(k=2, seed=1)  # 2 clusters here
    model = kmeans.fit(new_df.select('features'))
    transformed = model.transform(new_df)
    transformed.show()  # show() prints and returns None, so wrapping it in print() was redundant
Example 10: SparkContext
from pyspark.ml.linalg import Vectors  # ml (not mllib) vectors are required by the DataFrame-based KMeans in Spark 2+
from pyspark.ml.clustering import KMeans
from pyspark import SparkContext
from pyspark.sql import SQLContext

sc = SparkContext(appName="test")  # uncommented so the snippet runs standalone
sqlContext = SQLContext(sc)
data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
        (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
df = sqlContext.createDataFrame(data, ["features"])
kmeans = KMeans(k=2, seed=1)
model = kmeans.fit(df)
centers = model.clusterCenters()
model.transform(df).select("features", "prediction").collect()
Example 11: KMeans
sales = va.transform(spark.read.format("csv")  # va: the VectorAssembler built earlier in this chapter
    .option("header", "true")
    .option("inferSchema", "true")
    .load("/data/retail-data/by-day/*.csv")
    .limit(50)
    .coalesce(1)
    .where("Description IS NOT NULL"))
sales.cache()

# COMMAND ----------

from pyspark.ml.clustering import KMeans
km = KMeans().setK(5)
print(km.explainParams())  # Python 3 print; the original used the Python 2 statement form
kmModel = km.fit(sales)

# COMMAND ----------

summary = kmModel.summary
print(summary.clusterSizes)  # number of points per cluster
kmModel.computeCost(sales)  # within-set sum of squared errors (deprecated since Spark 3.0)
centers = kmModel.clusterCenters()
print("Cluster Centers: ")
for center in centers:
    print(center)

Developer ID: yehonatc, Project: Spark-The-Definitive-Guide, Lines: 28, Source: Advanced_Analytics_and_Machine_Learning-Chapter_29_Unsupervised_Learning.py
Example 12: assign_cluster
def assign_cluster(data):
    """Train kmeans on rescaled data and then label the rescaled data."""
    kmeans = KMeans(k=2, seed=1, featuresCol="features_scaled", predictionCol="label")
    model = kmeans.fit(data)
    label_df = model.transform(data)
    return label_df
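assign_cluster expects a features_scaled column, so a caller would typically run a scaler first. A hypothetical usage sketch; raw_df and its "features" column are assumptions, not part of the original snippet.

from pyspark.ml.feature import StandardScaler

scaler = StandardScaler(inputCol="features", outputCol="features_scaled")
scaled = scaler.fit(raw_df).transform(raw_df)  # raw_df must already carry a vector "features" column
labeled = assign_cluster(scaled)
labeled.select("features_scaled", "label").show()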
Example 13: KMeans
# $example off$
from pyspark.ml.evaluation import ClusteringEvaluator  # needed for the Silhouette evaluation below
from pyspark.sql import SparkSession

if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("KMeansExample")\
        .getOrCreate()

    # $example on$
    # Loads data.
    dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")

    # Trains a k-means model.
    kmeans = KMeans().setK(2).setSeed(1)
    model = kmeans.fit(dataset)

    # Make predictions
    predictions = model.transform(dataset)

    # Evaluate clustering by computing Silhouette score
    evaluator = ClusteringEvaluator()
    silhouette = evaluator.evaluate(predictions)
    print("Silhouette with squared euclidean distance = " + str(silhouette))

    # Shows the result.
    centers = model.clusterCenters()
    print("Cluster Centers: ")
    for center in centers:
        print(center)  # loop body restored; the excerpt was truncated here
示例14: print
print(colStdDev)
#Place the means and std.dev values in a broadcast variable
bcMeans = sc.broadcast(colMeans)
bcStdDev = sc.broadcast(colStdDev)
csAuto = autoVector.map(centerAndScale)
#csAuto.collect()
#csAuto.foreach(println)
print(csAuto)
#Create Spark Data Frame
autoRows = csAuto.map(lambda f:Row(features=f))
autoDf = SQLContext.createDataFrame(autoRows)
autoDf.select("features").show(10)
kmeans = KMeans(k=3, seed=1)
model = kmeans.fit(autoDf)
predictions = model.transform(autoDf)
predictions.collect()
predictions.foreach(println)
#Plot the results in a scatter plot
unstripped = predictions.map(unstripData)
predList=unstripped.collect()
predPd = pd.DataFrame(predList)
# preparing to save the clustered data
list_current_gni_final_maped = current_gni_final_maped.collect()
list_current_gni_rdd = current_gni_rdd.collect()
list_predictions_pandas=predictions.toPandas()
list_predictions_temp=list_predictions_pandas.as_matrix()
Example 15: VectorAssembler
trainingData = VectorAssembler(inputCols=["duration", "tempo", "loudness"], outputCol="features").transform(
    table("songsTable")  # Databricks notebook helper, equivalent to spark.table("songsTable")
)

# COMMAND ----------

# MAGIC %md We can now pass this new DataFrame to the `KMeans` model and ask it to categorize the rows in our data into two different classes (`setK(2)`). We place the model in a variable named `model`.
# MAGIC
# MAGIC **Note:** This command triggers multiple Spark jobs (one job per iteration of the KMeans algorithm). You will see the progress bar starting over and over again.

# COMMAND ----------

from pyspark.ml.clustering import KMeans
model = KMeans().setK(2).fit(trainingData)

# COMMAND ----------

# MAGIC %md To see the result of our clustering, we produce a scatter plot matrix that shows the interaction between input variables and learned clusters. To get that, we apply the model to the original data and pick four columns: `prediction` and the original features (`duration`, `tempo`, and `loudness`).

# COMMAND ----------

transformed = model.transform(trainingData).select("duration", "tempo", "loudness", "prediction")

# COMMAND ----------

# MAGIC %md To comfortably visualize the data we produce a random sample.
# MAGIC Remember the `display()` function? We can use it to produce a nicely rendered table of the transformed DataFrame.

# COMMAND ----------