This article collects typical usage examples of the Scala class org.apache.spark.mllib.optimization.SquaredL2Updater. If you have been wondering what SquaredL2Updater does, how to use it, or what working examples look like, the curated class examples below should help.
Three code examples of the SquaredL2Updater class are shown below, sorted by popularity.
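Before the examples, a quick look at what the class actually computes. SquaredL2Updater is the Updater that MLlib's optimizers use for L2-regularized objectives: each step scales the weights by (1 - γλ) and then takes a gradient step, with γ = stepSize / sqrt(iter). A minimal sketch calling it directly through the public Updater.compute API (the concrete numbers here are arbitrary):

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.optimization.SquaredL2Updater

object SquaredL2UpdaterDemo {
  def main(args: Array[String]): Unit = {
    val updater = new SquaredL2Updater
    // One step: w' = w * (1 - step * regParam) - step * gradient,
    // where step = stepSize / sqrt(iter). The second return value is the
    // regularization term 0.5 * regParam * ||w'||^2 to add to the loss.
    val (newWeights, regVal) = updater.compute(
      Vectors.dense(1.0, -2.0), // current weights
      Vectors.dense(0.5, 0.5),  // gradient of the loss part
      0.1,                      // stepSize
      1,                        // iter (step size decays as 1/sqrt(iter))
      0.01)                     // regParam (lambda)
    println(s"w' = $newWeights, regVal = $regVal")
  }
}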
Example 1: MllibLBFGS
// Package declaration and imported dependencies
package optimizers
import breeze.linalg.{DenseVector, Vector}
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
import org.apache.spark.mllib.optimization.{L1Updater, SimpleUpdater, SquaredL2Updater, Updater}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import utils.Functions._
class MllibLBFGS(val data: RDD[LabeledPoint],
                 loss: LossFunction,
                 regularizer: Regularizer,
                 params: LBFGSParameters
                ) extends Optimizer(loss, regularizer) {

  val opt = new LogisticRegressionWithLBFGS

  // Map the project's regularizer types onto MLlib's Updater implementations.
  val reg: Updater = regularizer match {
    case _: L1Regularizer => new L1Updater
    case _: L2Regularizer => new SquaredL2Updater
    case _: Unregularized => new SimpleUpdater
  }

  opt.optimizer
    .setNumIterations(params.iterations)
    .setConvergenceTol(params.convergenceTol)
    .setNumCorrections(params.numCorrections)
    .setRegParam(regularizer.lambda)
    .setUpdater(reg)

  override def optimize(): Vector[Double] = {
    val model = opt.run(data)
    DenseVector(model.weights.toArray)
  }
}
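The LossFunction, Regularizer, and LBFGSParameters types come from the project's utils.Functions, which is not shown on this page, so the constructors in the following usage sketch are assumptions rather than the project's real API:

// Hypothetical usage; every constructor signature below is assumed,
// since utils.Functions is not shown above.
// trainingData: RDD[LabeledPoint] and logisticLoss: LossFunction
// are likewise assumed to be in scope.
val regularizer = new L2Regularizer(lambda = 0.01)      // assumed signature
val params = new LBFGSParameters(iterations = 100,
                                 numCorrections = 10,
                                 convergenceTol = 1e-6) // assumed signature
val lbfgs = new MllibLBFGS(trainingData, logisticLoss, regularizer, params)
val w = lbfgs.optimize()  // dense Breeze vector of fitted weights

Because reg maps L2Regularizer to SquaredL2Updater, such a run amounts to L2-regularized logistic regression with lambda = 0.01.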
Example 2: SparkSGD
// Package declaration and imported dependencies
package linalg.sgd
import scala.util.Random
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.optimization.GradientDescent
import org.apache.spark.mllib.optimization.SquaredL2Updater
import org.apache.spark.mllib.optimization.LogisticGradient
import org.apache.spark.SparkContext
object SparkSGD {
  def main(args: Array[String]): Unit = {
    val m = 4        // number of examples
    val n = 200000   // number of features
    val sc = new SparkContext("local[2]", "SparkSGD")
    // Generate m random points of dimension n (all labeled 1.0), in 2 partitions.
    val points = sc.parallelize(0 until m, 2).mapPartitionsWithIndex { (idx, iter) =>
      val random = new Random(idx)
      iter.map(_ => (1.0, Vectors.dense(Array.fill(n)(random.nextDouble()))))
    }.cache()
    // runMiniBatchSGD returns the final weights and the per-iteration loss history.
    val (weights, loss) = GradientDescent.runMiniBatchSGD(
      points,
      new LogisticGradient,
      new SquaredL2Updater,
      0.1,  // stepSize
      2,    // numIterations
      1.0,  // regParam
      1.0,  // miniBatchFraction
      Vectors.dense(new Array[Double](n)))  // initial weights (all zeros)
    println("w: " + weights(0))
    println("loss: " + loss(0))
    sc.stop()
  }
}
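The same (gradient, updater) pair also plugs into MLlib's L-BFGS optimizer. A sketch of swapping the call inside SparkSGD.main above for LBFGS.runLBFGS, reusing points and n from that method (the parameter values are arbitrary):

// Drop-in replacement for the runMiniBatchSGD call above; assumes
// `points` and `n` as defined in SparkSGD.main, plus this extra import:
// import org.apache.spark.mllib.optimization.LBFGS
val (weightsL, lossHistory) = LBFGS.runLBFGS(
  points,
  new LogisticGradient,
  new SquaredL2Updater,
  10,    // numCorrections
  1e-4,  // convergenceTol
  50,    // maxNumIterations
  1.0,   // regParam
  Vectors.dense(new Array[Double](n)))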
Example 3: MllibSGD
// Package declaration and imported dependencies
package optimizers
import breeze.linalg.{DenseVector, Vector}
import org.apache.spark.mllib.classification.{LogisticRegressionWithSGD, SVMWithSGD}
import org.apache.spark.mllib.optimization.{L1Updater, SimpleUpdater, SquaredL2Updater, Updater}
import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionWithSGD}
import org.apache.spark.rdd.RDD
import utils.Functions._
class MllibSGD(val data: RDD[LabeledPoint],
               loss: LossFunction,
               regularizer: Regularizer,
               params: SGDParameters,
               ctype: String
              ) extends Optimizer(loss, regularizer) {

  val opt = ctype match {
    case "SVM" => new SVMWithSGD()
    case "LR" => new LogisticRegressionWithSGD()
    case "Regression" => new LinearRegressionWithSGD()
  }

  // Map the project's regularizer types onto MLlib's Updater implementations.
  val reg: Updater = regularizer match {
    case _: L1Regularizer => new L1Updater
    case _: L2Regularizer => new SquaredL2Updater
    case _: Unregularized => new SimpleUpdater
  }

  // All three algorithms expose the same GradientDescent optimizer,
  // so cast once and configure it in a single place.
  val sgd = ctype match {
    case "SVM" => opt.asInstanceOf[SVMWithSGD].optimizer
    case "LR" => opt.asInstanceOf[LogisticRegressionWithSGD].optimizer
    case "Regression" => opt.asInstanceOf[LinearRegressionWithSGD].optimizer
  }
  sgd
    .setNumIterations(params.iterations)
    .setMiniBatchFraction(params.miniBatchFraction)
    .setStepSize(params.stepSize)
    .setRegParam(regularizer.lambda)
    .setUpdater(reg)

  override def optimize(): Vector[Double] = {
    val model = opt.run(data)
    DenseVector(model.weights.toArray)
  }
}
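MllibSGD reuses the regularizer-to-Updater mapping from Example 1, so the choice of regularizer is the only thing that switches between SimpleUpdater, SquaredL2Updater, and L1Updater. A minimal local sketch (no SparkContext needed, inputs arbitrary) contrasting one step of each:

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.optimization.{L1Updater, SimpleUpdater, SquaredL2Updater, Updater}

object UpdaterComparison {
  def main(args: Array[String]): Unit = {
    val w = Vectors.dense(1.0, -2.0)  // current weights
    val g = Vectors.dense(0.5, 0.5)   // loss gradient
    val updaters: Seq[Updater] = Seq(new SimpleUpdater, new SquaredL2Updater, new L1Updater)
    for (u <- updaters) {
      // compute returns (new weights, regularization value): SimpleUpdater adds
      // no penalty, SquaredL2Updater shrinks the weights toward zero, and
      // L1Updater applies soft-thresholding.
      val (wNew, regVal) = u.compute(w, g, 0.1, 1, 0.5)  // stepSize, iter, regParam
      println(s"${u.getClass.getSimpleName}: w = $wNew, regVal = $regVal")
    }
  }
}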