This article collects typical usage examples of the Scala class org.apache.spark.mllib.regression.LinearRegressionWithSGD. If you are wondering what the LinearRegressionWithSGD class does, how to use it, or where to find examples of it, the hand-picked class examples below may help.
The following shows 2 code examples of the LinearRegressionWithSGD class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Scala code examples.
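Before the two full examples, here is a minimal sketch of the class's typical use. The input path, app name, and hyperparameter values are placeholders chosen for illustration, not taken from the examples below:
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.regression.LinearRegressionWithSGD
import org.apache.spark.mllib.util.MLUtils

object LinearRegressionSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[*]").setAppName("LinearRegressionSketch"))
    // LibSVM-formatted input; this path is a placeholder.
    val data = MLUtils.loadLibSVMFile(sc, "data/sample_libsvm_data.txt")
    // train(input, numIterations, stepSize) runs mini-batch SGD and returns a LinearRegressionModel.
    val model = LinearRegressionWithSGD.train(data, 100, 0.0001)
    // Score the training set, pairing each prediction with its true label.
    val predsAndLabels = data.map(p => (model.predict(p.features), p.label))
    val mse = predsAndLabels.map { case (pred, label) => math.pow(pred - label, 2) }.mean()
    println("Training MSE = " + mse)
    sc.stop()
  }
}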
Example 1: MllibSGD
// Set up the package name and import the classes the example depends on
package optimizers

import breeze.linalg.{DenseVector, Vector}
import org.apache.spark.mllib.classification.{LogisticRegressionWithSGD, SVMWithSGD}
import org.apache.spark.mllib.optimization.{L1Updater, SimpleUpdater, SquaredL2Updater, Updater}
import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionWithSGD}
import org.apache.spark.rdd.RDD
import utils.Functions._
class MllibSGD(val data: RDD[LabeledPoint],
               loss: LossFunction,
               regularizer: Regularizer,
               params: SGDParameters,
               ctype: String
              ) extends Optimizer(loss, regularizer) {

  // Select the MLlib algorithm that matches the requested problem type.
  val opt = ctype match {
    case "SVM" => new SVMWithSGD()
    case "LR" => new LogisticRegressionWithSGD()
    case "Regression" => new LinearRegressionWithSGD()
  }

  // Map the generic regularizer onto the corresponding MLlib Updater.
  val reg: Updater = (regularizer: Regularizer) match {
    case _: L1Regularizer => new L1Updater
    case _: L2Regularizer => new SquaredL2Updater
    case _: Unregularized => new SimpleUpdater
  }

  // Configure the underlying gradient-descent optimizer. The casts are needed
  // because `opt` is typed as the common superclass, whose `optimizer` member
  // does not expose the SGD setters used here.
  ctype match {
    case "SVM" => opt.asInstanceOf[SVMWithSGD].optimizer.
      setNumIterations(params.iterations).
      setMiniBatchFraction(params.miniBatchFraction).
      setStepSize(params.stepSize).
      setRegParam(regularizer.lambda).
      setUpdater(reg)
    case "LR" => opt.asInstanceOf[LogisticRegressionWithSGD].optimizer.
      setNumIterations(params.iterations).
      setMiniBatchFraction(params.miniBatchFraction).
      setStepSize(params.stepSize).
      setRegParam(regularizer.lambda).
      setUpdater(reg)
    case "Regression" => opt.asInstanceOf[LinearRegressionWithSGD].optimizer.
      setNumIterations(params.iterations).
      setMiniBatchFraction(params.miniBatchFraction).
      setStepSize(params.stepSize).
      setRegParam(regularizer.lambda).
      setUpdater(reg)
  }

  // Run the configured algorithm and return the learned weights as a Breeze vector.
  override def optimize(): Vector[Double] = {
    val model = opt.run(data)
    val w = model.weights.toArray
    DenseVector(w)
  }
}
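The loss, regularizer, and parameter types come from utils.Functions, which is not shown on this page. Assuming SGDParameters carries the fields the code reads (iterations, miniBatchFraction, stepSize) and L2Regularizer takes a lambda, a hypothetical invocation could look like this (the constructor shapes and the HingeLoss name are assumptions, not part of the original project):
// Hypothetical usage; the shapes of SGDParameters, HingeLoss, and L2Regularizer are assumed.
val params = SGDParameters(iterations = 100, miniBatchFraction = 1.0, stepSize = 0.1)
val sgd = new MllibSGD(trainingData, new HingeLoss, new L2Regularizer(0.01), params, "SVM")
val weights: Vector[Double] = sgd.optimize()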
Example 2: PCAExample2
// Set up the package name and import the classes the example depends on
package com.chapter11.SparkMachineLearning
import org.apache.spark.sql.SparkSession
import org.apache.spark.mllib.feature.PCA
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.mllib.regression.LinearRegressionWithSGD
object PCAExample2 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .config("spark.sql.warehouse.dir", "E:/Exp/")
      .appName("PCAExample2")
      .getOrCreate()

    // Load the MNIST data in LibSVM format as an RDD[LabeledPoint] for training.
    val data = MLUtils.loadLibSVMFile(spark.sparkContext, "data/mnist.bz2")
    // The same dataset is also read through the DataFrame API, but only to display it.
    val df = spark.read.format("libsvm").load("C:/Exp/mnist.bz2")
    df.show(20)

    val featureSize = data.first().features.size
    println("Feature Size: " + featureSize)

    val splits = data.randomSplit(Array(0.75, 0.25), seed = 12345L)
    val (training, test) = (splits(0), splits(1))

    // Fit PCA on the full dataset and project the features down to half their dimensionality.
    val pca = new PCA(featureSize / 2).fit(data.map(_.features))
    val training_pca = training.map(p => p.copy(features = pca.transform(p.features)))
    val test_pca = test.map(p => p.copy(features = pca.transform(p.features)))

    // Train one model on the raw features and one on the PCA-reduced features.
    val numIterations = 20
    val stepSize = 0.0001
    val model = LinearRegressionWithSGD.train(training, numIterations, stepSize)
    val model_pca = LinearRegressionWithSGD.train(training_pca, numIterations, stepSize)

    // Pair each prediction with its true label on the held-out test sets.
    val valuesAndPreds = test.map { point =>
      val score = model.predict(point.features)
      (score, point.label)
    }
    val valuesAndPreds_pca = test_pca.map { point =>
      val score = model_pca.predict(point.features)
      (score, point.label)
    }

    // Compare test MSE with and without PCA.
    val MSE = valuesAndPreds.map { case (v, p) => math.pow(v - p, 2) }.mean()
    val MSE_pca = valuesAndPreds_pca.map { case (v, p) => math.pow(v - p, 2) }.mean()
    println("Mean Squared Error = " + MSE)
    println("PCA Mean Squared Error = " + MSE_pca)
    println("Model coefficients: " + model.toString())
    println("Model with PCA coefficients: " + model_pca.toString())

    spark.stop()
  }
}
Developer: PacktPublishing, Project: Scala-and-Spark-for-Big-Data-Analytics, Lines of code: 61, Source: PCA_LinearRegression_Demo.scala
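A closing note on Example 2: instead of computing MSE by hand, MLlib's RegressionMetrics evaluator yields the same number. A minimal sketch, reusing the valuesAndPreds RDD of (prediction, label) pairs built in the example:
import org.apache.spark.mllib.evaluation.RegressionMetrics

// RegressionMetrics takes an RDD of (prediction, observation) pairs,
// exactly the shape of valuesAndPreds above.
val metrics = new RegressionMetrics(valuesAndPreds)
println("Mean Squared Error = " + metrics.meanSquaredError)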