

Scala Vector Class Code Examples

This article collects typical usage examples of the breeze.linalg.Vector class in Scala. If you have been wondering what the Vector class does, how to use it, or what real-world usage looks like, the curated code examples below may help.


The sections below show 11 code examples of the Vector class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Scala code examples.

Example 1: trainDecay

// Package declaration and imported dependencies
package com.esri

import breeze.linalg.Vector
import org.apache.commons.math3.util.FastMath

import scala.annotation.tailrec
import scala.math._


  // Note: this snippet is an excerpt from class SOM; the enclosing class
  // declaration and its train(vec, alpha, rad) method are not shown here.
  def trainDecay(trainVec: Seq[Vector[Double]],
                 epochMax: Int,
                 alphaDecay: Decay,
                 radiusDecay: Decay
                )(implicit pb: ProgressBar = NoopProgressBar()): Unit = {

    val trainLen = trainVec.length
    val rnd = new java.security.SecureRandom()

    @tailrec
    def _train(epoch: Int): Unit = {
      if (epoch < epochMax) {
        val alpha = alphaDecay(epoch)
        val rad = radiusDecay(epoch)
        val vec = trainVec(rnd.nextInt(trainLen))
        train(vec, alpha, rad)
        pb.progress()
        _train(epoch + 1)
      }
    }

    _train(0)

    pb.finish()
  }
} 
Developer: mraad, Project: spark-som-path, Lines of code: 36, Source file: SOM.scala
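
The Decay type used by trainDecay comes from the spark-som-path project and is only ever applied to an epoch index. As a rough illustration, a linear decay might look like the sketch below; the exact Decay trait signature is an assumption here, not the project's actual definition.

// Hypothetical sketch: assumes Decay exposes apply(epoch: Int): Double,
// which is all that the trainDecay code above relies on.
case class LinearDecay(startValue: Double, endValue: Double, epochMax: Int) extends Decay {
  // Interpolates from startValue at epoch 0 towards endValue at epochMax.
  override def apply(epoch: Int): Double =
    startValue + (endValue - startValue) * epoch / epochMax.toDouble
}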

Example 2: MllibLBFGS

// Package declaration and imported dependencies
package optimizers

import breeze.linalg.{DenseVector, Vector}
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
import org.apache.spark.mllib.optimization.{L1Updater, SimpleUpdater, SquaredL2Updater, Updater}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import utils.Functions._


class MllibLBFGS(val data: RDD[LabeledPoint],
                 loss: LossFunction,
                 regularizer: Regularizer,
                 params: LBFGSParameters
                ) extends Optimizer(loss, regularizer) {

  val opt = new LogisticRegressionWithLBFGS

  val reg: Updater = (regularizer: Regularizer) match {
    case _: L1Regularizer => new L1Updater
    case _: L2Regularizer => new SquaredL2Updater
    case _: Unregularized => new SimpleUpdater
  }

  opt.optimizer.
    setNumIterations(params.iterations).
    setConvergenceTol(params.convergenceTol).
    setNumCorrections(params.numCorrections).
    setRegParam(regularizer.lambda).
    setUpdater(reg)

  override def optimize(): Vector[Double] = {
    val model = opt.run(data)
    val w = model.weights.toArray
    DenseVector(w)
  }
} 
Developer: mlbench, Project: mlbench, Lines of code: 38, Source file: MllibLBFGS.scala
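
A hedged usage sketch (trainingData, loss, l2reg and params are assumed to be constructed elsewhere with the shapes MllibLBFGS expects; none of these values appear in the snippet above):

// Illustrative only -- the named values are assumptions.
val optimizer = new MllibLBFGS(trainingData, loss, l2reg, params)
val weights: Vector[Double] = optimizer.optimize()  // dense weights of the fitted model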

Example 3: MllibSGD

// Package declaration and imported dependencies
package optimizers

import breeze.linalg.{DenseVector, Vector}
import org.apache.spark.mllib.classification.{LogisticRegressionWithSGD, SVMWithSGD}
import org.apache.spark.mllib.optimization.{L1Updater, SimpleUpdater, SquaredL2Updater, Updater}
import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionWithSGD}
import org.apache.spark.rdd.RDD
import utils.Functions._


class MllibSGD(val data: RDD[LabeledPoint],
               loss: LossFunction,
               regularizer: Regularizer,
               params: SGDParameters,
               ctype: String
              ) extends Optimizer(loss, regularizer) {
  val opt = ctype match {
    case "SVM" => new SVMWithSGD()
    case "LR" => new LogisticRegressionWithSGD()
    case "Regression" => new LinearRegressionWithSGD()
  }

  val reg: Updater = (regularizer: Regularizer) match {
    case _: L1Regularizer => new L1Updater
    case _: L2Regularizer => new SquaredL2Updater
    case _: Unregularized => new SimpleUpdater
  }

  ctype match {
    case "SVM" => opt.asInstanceOf[SVMWithSGD].optimizer.
      setNumIterations(params.iterations).
      setMiniBatchFraction(params.miniBatchFraction).
      setStepSize(params.stepSize).
      setRegParam(regularizer.lambda).
      setUpdater(reg)
    case "LR" => opt.asInstanceOf[LogisticRegressionWithSGD].optimizer.
      setNumIterations(params.iterations).
      setMiniBatchFraction(params.miniBatchFraction).
      setStepSize(params.stepSize).
      setRegParam(regularizer.lambda).
      setUpdater(reg)
    case "Regression" => opt.asInstanceOf[LinearRegressionWithSGD].optimizer.
      setNumIterations(params.iterations).
      setMiniBatchFraction(params.miniBatchFraction).
      setStepSize(params.stepSize).
      setRegParam(regularizer.lambda).
      setUpdater(reg)
  }

  override def optimize(): Vector[Double] = {
    val model = opt.run(data)
    val w = model.weights.toArray
    DenseVector(w)
  }
} 
Developer: mlbench, Project: mlbench, Lines of code: 60, Source file: MllibSGD.scala
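
One caveat in the two pattern matches above: a ctype other than "SVM", "LR" or "Regression" fails with a scala.MatchError at construction time. A defensive variant could add an explicit failure case, for example:

// Sketch: the same dispatch with an explicit error for unknown ctype values.
val opt = ctype match {
  case "SVM" => new SVMWithSGD()
  case "LR" => new LogisticRegressionWithSGD()
  case "Regression" => new LinearRegressionWithSGD()
  case other => throw new IllegalArgumentException(s"Unknown ctype: $other")
}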

Example 4: Cocoa

// Package declaration and imported dependencies
package optimizers

import breeze.linalg.Vector
import distopt.solvers.CoCoA
import distopt.utils.DebugParams
import utils.Functions.{CocoaLabeledPoint, LossFunction, Regularizer}


class Cocoa(val data: CocoaLabeledPoint,
            loss: LossFunction,
            regularizer: Regularizer,
            params: CocoaParameters,
            debug: DebugParams,
            plus: Boolean) extends Optimizer(loss, regularizer){
  override def optimize(): Vector[Double] = {
    val (finalwCoCoAPlus, finalalphaCoCoAPlus) = CoCoA.runCoCoA(data, params.getDistOptPar(), debug, plus)
    finalwCoCoAPlus
  }
} 
Developer: mlbench, Project: mlbench, Lines of code: 20, Source file: Cocoa.scala

Example 5: ProxCocoa

// Package declaration and imported dependencies
package optimizers

import l1distopt.solvers.ProxCoCoAp
import l1distopt.utils.DebugParams
import utils.Functions.{LossFunction, ProxCocoaDataMatrix, Regularizer}
import breeze.linalg.Vector




class ProxCocoa(val data: ProxCocoaDataMatrix,
                loss: LossFunction,
                regularizer: Regularizer,
                params: ProxCocoaParameters,
                debug: DebugParams) extends Optimizer (loss, regularizer){
  override def optimize(): Vector[Double] = {
    val finalAlphaCoCoA = ProxCoCoAp.runProxCoCoAp(data._1, data._2, params.getL1DistOptPar(), debug)
    finalAlphaCoCoA
  }
} 
Developer: mlbench, Project: mlbench, Lines of code: 21, Source file: ProxCocoa.scala

Example 6: sampleFeature

// Package declaration and imported dependencies
package glintlda.naive

import breeze.linalg.{DenseVector, Vector}
import breeze.stats.distributions.Multinomial
import glintlda.LDAConfig
import glintlda.util.FastRNG


  // Note: excerpt from glintlda's naive Sampler; config, documentCounts,
  // wordCounts, globalCounts and the hyperparameters α, β and βSum are
  // fields of the enclosing class, which is not shown here.
  def sampleFeature(feature: Int, oldTopic: Int): Int = {
    var i = 0
    val p = DenseVector.zeros[Double](config.topics)
    var sum = 0.0
    while (i < config.topics) {
      p(i) = (documentCounts(i) + α) * ((wordCounts(i) + β) / (globalCounts(i) + βSum))
      sum += p(i)
      i += 1
    }
    p /= sum
    Multinomial(p).draw()
  }

} 
Developer: rjagerman, Project: glintlda, Lines of code: 23, Source file: Sampler.scala
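
For context, the weight computed in the loop above is the collapsed Gibbs sampling probability for topic i: p(topic = i) ∝ (documentCounts(i) + α) * (wordCounts(i) + β) / (globalCounts(i) + βSum), where α and β are the LDA Dirichlet hyperparameters and βSum is β times the vocabulary size. The vector is normalized and then sampled with breeze's Multinomial distribution.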

Example 7: process_bayes

// Package declaration and imported dependencies
package twc.predict

import breeze.linalg.Vector
import com.kunyandata.nlpsuit.util.{KunyanConf, TextPreprocessing}
import com.kunyandata.nlpsuit.wordExtraction.TextRank
import org.apache.spark.mllib.feature.Word2VecModel


  // Note: excerpt from Process.scala; these methods belong to a larger enclosing object.
  // Converts a document into a weighted feature vector for a Bayes classifier:
  // segment the text, extract weighted keywords with TextRank, then build a
  // keyword-weighted document vector from the word2vec model.
  def process_bayes(doc: String, w2vModel: Word2VecModel, modelSize: Int, kunyan: KunyanConf, stopwords: Array[String]) = {

    val docSeg = TextPreprocessing.process(doc, stopwords, kunyan)

    //textRank
    val keywords = TextRank.run("k", 10, docSeg.toList, 20, 50, 0.85f)
    val keywordsFilter = keywords.toArray.filter(word => word._1.length >= 2)
    val result = doc2vecWithModel_weight_beyes(keywordsFilter, w2vModel, modelSize)

    result

  }

  // Builds the document vector from the word2vec model, weighting each keyword's
  // word vector by its TextRank score; words missing from the model fall back to
  // the zero vector.
  private def doc2vecWithModel_weight_beyes(doc: Array[(String, Float)], model:Word2VecModel, modelSize: Int): Array[Double] = {

    var resultTemp = new Array[Double](modelSize)
    var wordTemp = new Array[Double](modelSize)

    doc.foreach(word => {
      try {
        wordTemp = model.transform(word._1).toArray
      }
      catch {
        case e: Exception => wordTemp = Vector.zeros[Double](modelSize).toArray
      }

      for (i <- resultTemp.indices){
        resultTemp(i) += wordTemp(i) * word._2
      }
    })

    // Shift every component by a constant offset, presumably to keep the
    // features non-negative for the downstream Bayes classifier.
    val docVec = resultTemp.map(_ + 70)

    docVec
  }
} 
Developer: STHSF, Project: Word2Vec, Lines of code: 47, Source file: Process.scala

Example 8: SVM

// Package declaration and imported dependencies
package edu.washington.cs.dericp.text.classification

import breeze.linalg.{DenseVector, Vector}

// class with util functions of an SVM
object SVM {

  case class DataPoint(x: Vector[Double], y:Double)

  // Takes theta, a data point with (feature vector, y classification), lambda, step
  // Returns the updated theta value
  def updateStep(theta: DenseVector[Double], p: DataPoint, lambda: Double, step: Int) = {
    val thetaShrink = theta * (1 - 1.0/step.toDouble)
    val margin = 1.0 - (p.y * theta.dot(p.x))
    if (margin <= 0)
      thetaShrink
    else
      thetaShrink + (p.x * (1.0 / (lambda * step)) * p.y)
  }

  // Trains and returns theta for one code
  def getTheta(code: String, codesToFeatureVectors: Seq[((Set[String], Int), DenseVector[Double])], numUniqueTerms: Int): DenseVector[Double] = {
    var theta = DenseVector.zeros[Double](numUniqueTerms)
    var timeStep = 1
    val lambda = 0.001

    for ((t, featureVector) <- codesToFeatureVectors) {
      val codes = t._1
      theta = updateStep(theta, new DataPoint(featureVector, if (codes.contains(code)) 1.0 else 0.0), lambda, timeStep)
      timeStep += 1
    }

    theta
  }
} 
Developer: dericp, Project: text-classification, Lines of code: 36, Source file: SVM.scala
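
A small worked example of a single update step (a sketch; the vector and constants are made up for illustration):

// One update on a toy 3-dimensional point with label +1.
import breeze.linalg.DenseVector
val theta0 = DenseVector.zeros[Double](3)
val point = SVM.DataPoint(DenseVector(1.0, 0.0, 2.0), 1.0)
// At step 1 the shrunk theta is the zero vector, so the margin is 1.0 > 0
// and the update adds point.x * (1.0 / (lambda * step)) * point.y.
val theta1 = SVM.updateStep(theta0, point, lambda = 0.001, step = 1)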

Example 9: apply

// Package declaration and imported dependencies
package neuralnet
import breeze.linalg.Vector
import breeze.numerics.{sech, sigmoid, tanh}


trait ActivationFunction {
  def apply(value : Vector[Double]) : Vector[Double] = call(value)
  def call(value : Vector[Double]): Vector[Double]
  def derivative(value : Vector[Double]) : Vector[Double]
}

object ActivationFunction {
  object Sigmoid extends ActivationFunction {
    override def call(value: Vector[Double]): Vector[Double] = {
      //1/(1+Math.exp(-value))
      sigmoid(value)
    }
    override def derivative(value: Vector[Double]): Vector[Double] = {
      val sig = call(value)
      sig :* (sig * -1.0 + 1.0)
    }
  }

  object TanH extends ActivationFunction {
    override def call(value: Vector[Double]): Vector[Double] = tanh(value)

    override def derivative(value: Vector[Double]): Vector[Double] = {
      val temp = sech(value)
      temp :*= temp
    }
  }

  object ReLu extends ActivationFunction {
    override def call(value: Vector[Double]): Vector[Double] = {
      value.map(x => Math.max(x,0.0))
    }
    override def derivative(value: Vector[Double]): Vector[Double] = {
      // Step function: 1 where the input is positive, 0 elsewhere.
      value.map(x => if (x > 0.0) 1.0 else 0.0)
    }
  }
} 
Developer: gewoonrik, Project: ScalaRNN, Lines of code: 44, Source file: ActivationFunction.scala
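
Because the trait defines apply in terms of call, the activation objects can be used like functions; a quick usage sketch:

import breeze.linalg.DenseVector
val x = DenseVector(-2.0, 0.0, 2.0)
val activated = ActivationFunction.Sigmoid(x)            // element-wise 1 / (1 + e^-x)
val gradient = ActivationFunction.Sigmoid.derivative(x)  // element-wise sigmoid(x) * (1 - sigmoid(x))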

Example 10: KMeans

// Package declaration and imported dependencies
package hu.sztaki.tutorial.spark

import breeze.linalg.{Vector, DenseVector, squaredDistance}

import org.apache.spark.{SparkConf, SparkContext}

object KMeans {
  def parseVector(line: String): Vector[Double] = {
    DenseVector(line.split(' ').map(_.toDouble))
  }

  def generateData(): List[String] = {
    (1 to 1000).map( i =>
      (1 to 5).map(j => Math.random()).mkString(" ")
    ).toList
  }

  def closestPoint(p: Vector[Double], centers: Array[Vector[Double]]): Int = {
    var bestIndex = 0
    var closest = Double.PositiveInfinity

    for (i <- 0 until centers.length) {
      val tempDist = squaredDistance(p, centers(i))
      if (tempDist < closest) {
        closest = tempDist
        bestIndex = i
      }
    }

    bestIndex
  }

  def main(args: Array[String]) {

    if (args.length < 2) {
      System.err.println("Usage: KMeans <k> <convergeDist>")
      System.exit(1)
    }

    val sparkConf = new SparkConf().setAppName("KMeans")
    val sc = new SparkContext(sparkConf)
    val lines = sc.parallelize(generateData())
    val K = args(0).toInt
    val convergeDist = args(1).toDouble
    var tempDist = 1.0

    // Iteration loop as in the canonical Spark KMeans example this snippet follows.
    val data = lines.map(parseVector).cache()
    val kPoints = data.takeSample(withReplacement = false, K)

    while (tempDist > convergeDist) {
      // Assign each point to its closest center, carrying a count of 1.
      val closest = data.map(p => (closestPoint(p, kPoints), (p, 1)))
      val pointStats = closest.reduceByKey { case ((p1, c1), (p2, c2)) => (p1 + p2, c1 + c2) }
      // New center = mean of the points assigned to it.
      val newPoints = pointStats.map { case (i, (sum, count)) => (i, sum * (1.0 / count)) }.collectAsMap()

      tempDist = 0.0
      for (i <- 0 until K) {
        tempDist += squaredDistance(kPoints(i), newPoints(i))
      }
      for (newP <- newPoints) {
        kPoints(newP._1) = newP._2
      }

      println("Finished iteration (delta = " + tempDist + ")")
    }

    println("Final centers:")
    kPoints.foreach(println)
    sc.stop()
  }
} 
Developer: zzvara, Project: sztaki-spark-workshop, Lines of code: 56, Source file: KMeans.scala
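
Assuming the class is packaged into an application jar (the jar name below is hypothetical), it can be launched with spark-submit, passing k and the convergence threshold as the two positional arguments:

spark-submit --class KMeans --master local[4] kmeans-example.jar 5 0.01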

Example 11: LinearMethod

// Package declaration and imported dependencies
import java.io.Serializable

import breeze.linalg.{DenseVector, Vector}
import optimizers.Optimizer
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import utils.Functions.{LossFunction, Regularizer}


abstract class LinearMethod(val loss: LossFunction,
                            val regularizer: Regularizer) extends Serializable {
  val optimizer: Optimizer
  var elapsed: Option[Long] = None

  def optimize(): Vector[Double] = {
    val start = System.nanoTime()
    val w: Vector[Double] = optimizer.optimize()
    val elap = System.nanoTime() - start

    elapsed = Some(elap)
    w
  }

  def predict(w: Vector[Double], test: RDD[org.apache.spark.mllib.linalg.Vector]): RDD[Double]

  def error(trueLabels: RDD[Double], predictions: RDD[Double]): Double

  def testError(w: Vector[Double], test: RDD[org.apache.spark.mllib.linalg.Vector], trueLabels: RDD[Double]): Double = {
    val predictions = predict(w, test)
    error(trueLabels, predictions)
  }

  def getObjective(w: DenseVector[Double], x: RDD[LabeledPoint]): Double = {
    val n: Double = x.count()
    val sum = x.map(p => loss.loss(w, DenseVector(p.features.toArray), p.label)).reduce(_ + _)
    regularizer.lambda * regularizer.value(w) + (sum / n)
  }
} 
Developer: evelinad, Project: distributed-ML-benchmark, Lines of code: 40, Source file: LinearMethod.scala
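
LinearMethod leaves predict and error abstract. A hypothetical minimal subclass (all names here are illustrative, not from the benchmark project) might look like:

// Sketch of a least-squares style subclass -- purely illustrative.
class MyLinearRegression(loss: LossFunction,
                         regularizer: Regularizer,
                         val optimizer: Optimizer) extends LinearMethod(loss, regularizer) {

  // Predict with a dot product between the weights and each feature vector.
  override def predict(w: Vector[Double], test: RDD[org.apache.spark.mllib.linalg.Vector]): RDD[Double] =
    test.map(x => w.dot(DenseVector(x.toArray)))

  // Mean squared error between the true labels and the predictions.
  override def error(trueLabels: RDD[Double], predictions: RDD[Double]): Double =
    trueLabels.zip(predictions).map { case (y, p) => (y - p) * (y - p) }.mean()
}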


Note: the breeze.linalg.Vector class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not repost without permission.