

Scala GenSeq Class Code Examples

This article collects typical usage examples of scala.collection.GenSeq in Scala. If you are wondering what the Scala GenSeq class is for, how to use it, or what GenSeq looks like in practice, the hand-picked class examples below may help.


Four code examples of the GenSeq class are shown below, ordered by popularity.

Example 1: sumElements

// Set up the package name and import the required classes
import scala.annotation.tailrec
import scala.collection.GenSeq
import scala.math.sqrt

package object PageRank {

  type LinkMat = GenSeq[(Int,Int)]

  def sumElements(R: GenSeq[Float], A: LinkMat, j: Int): Float = {
    // contribution of page j: its rank divided by the number of links in column j of A
    val totalLinks = A.filter(_._2==j)
    if (totalLinks.isEmpty) sys.error("No link in the page " + j + " at sumElements")
    else R(j)/totalLinks.size
  }

  // find all pages j connected to page i, i.e. those for which a pair A(i, j) exists
  def findConnected(i: Int, A: LinkMat): GenSeq[Int] = A.filter(_._1==i).map(_._2).toSeq

  def converged(r1: GenSeq[Float], r2: GenSeq[Float], eps: Float): Boolean = {
    val totSquare: Float = r1.zip(r2).map(p=>(p._1-p._2)*(p._1-p._2)).sum
    sqrt(totSquare/r1.size)<=eps
  }

  @tailrec def compRank(R: GenSeq[Float], A: LinkMat, damp: Float, eps: Float, niter: Int = 0,niterMax: Int = 10000): GenSeq[Float] = {
    val rankIndex: GenSeq[Int] = 0 until R.size
    val rightRank: GenSeq[Float] = rankIndex map{i:Int =>
      val connected = findConnected(i,A)
      connected.map{j:Int => sumElements(R, A, j)}.sum
    }
    val newRank = rightRank map {damp*_ + (1-damp)/R.size}
    if(converged(newRank,R,eps)) newRank
    else if(niter>=niterMax) {
      println("Max iteration reached")
      newRank
    } else compRank(newRank,A,damp,eps,niter+1,niterMax)
  }
} 
Author: mbesancon, Project: PageRank, Lines of code: 38, Source file: package.scala
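
A minimal usage sketch (not part of the original project; the demo object name, link matrix, damping factor, and tolerance are illustrative assumptions): compRank is driven with a small graph, reading each pair as (target, source) as the code's filters suggest, and a uniform initial rank.

import PageRank._

object PageRankDemo extends App {
  // (target, source) pairs: pages 1 and 2 link to page 0, page 0 links to pages 1 and 2, etc.
  val links: LinkMat = Seq((0, 1), (0, 2), (1, 0), (2, 0), (2, 3), (3, 2))

  // start from a uniform rank over the four pages
  val initial = Seq.fill(4)(0.25f)

  // hypothetical parameters: damping factor 0.85, convergence tolerance 1e-6
  val ranks = compRank(initial, links, damp = 0.85f, eps = 1e-6f)
  println(ranks)
}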

Example 2: MeanAndSD

// Set up the package name and import the required classes
object MeanAndSD {

  import scala.collection.GenSeq

  def meanAndSD(x: GenSeq[Double]): (Double, Double) = {
    val n = x.length
    val sx = x.sum
    val ssx = (x map (xi=>(xi*xi))).sum
    val v = (ssx - sx*sx/n)/(n-1)
    (sx/n, math.sqrt(v))
  }

  def meanAndSD1pass(x: GenSeq[Double]): (Double, Double) = {
    val n = x.length
    val (sx,ssx) = x.foldLeft((0.0,0.0): (Double,Double))(
      (p,xi) => (p._1+xi,p._2+xi*xi)
    )
    val v = (ssx - sx*sx/n)/(n-1)
    (sx/n, math.sqrt(v))
  }

  def meanAndSD1passPar(x: GenSeq[Double]): (Double, Double) = {
    val n = x.length
    val (sx,ssx) = x.aggregate((0.0,0.0): (Double,Double))(
      (p,xi) => (p._1+xi,p._2+xi*xi),
      (p1,p2) => (p1._1+p2._1, p1._2+p2._2)
    )
    val v = (ssx - sx*sx/n)/(n-1)
    (sx/n, math.sqrt(v))
  }

  def time[A](f: => A) = {
    val s = System.nanoTime
    val ret = f
    println("time: " + (System.nanoTime - s) / 1e6 + "ms")
    ret
  }


  def main(args: Array[String]): Unit = {
    println("hello")
    println(meanAndSD(Vector(1,4,7)))
    println(meanAndSD1pass(Vector(1,4,7)))
    println(meanAndSD1passPar(Vector(1,4,7)))
    val x = Vector.fill(10000000)(math.random)
    val xp = x.par
    time { println(meanAndSD(x)) }
    time { println(meanAndSD(xp)) }
    time { println(meanAndSD1pass(x)) }
    time { println(meanAndSD1pass(xp)) }
    time { println(meanAndSD1passPar(x)) }
    time { println(meanAndSD1passPar(xp)) }
    println("goodbye")
  }

}


// eof 
Author: darrenjw, Project: scala-course-exsol, Lines of code: 60, Source file: meanandsd.scala
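
A note on the design choice in the example above: foldLeft threads a single accumulator from left to right, so meanAndSD1pass runs sequentially even when handed the parallel collection xp, whereas aggregate additionally takes a combine operation ((p1, p2) => ...) that merges partial (sum, sum-of-squares) pairs, which lets meanAndSD1passPar actually exploit the parallelism of a ParSeq.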

Example 3: KeyValueGroupedDataSetAPIScalaParallelLazyImpl

// Set up the package name and import the required classes
package com.datawizards.sparklocal.impl.scala.parallellazy.dataset

import com.datawizards.sparklocal.dataset.KeyValueGroupedDataSetAPI
import com.datawizards.sparklocal.impl.scala.dataset.{DataSetAPIScalaBase, KeyValueGroupedDataSetAPIScalaBase}
import org.apache.spark.sql.Encoder

import scala.collection.{GenIterable, GenSeq}
import scala.reflect.ClassTag

class KeyValueGroupedDataSetAPIScalaParallelLazyImpl[K: ClassTag, T: ClassTag](private[sparklocal] val data: Map[K, GenSeq[T]]) extends KeyValueGroupedDataSetAPIScalaBase[K, T] {
  override type InternalCollection = Map[K, GenSeq[T]]

  override private[sparklocal] def create[U: ClassTag](it: GenIterable[U])(implicit enc: Encoder[U]=null): DataSetAPIScalaBase[U] =
    DataSetAPIScalaParallelLazyImpl.create(it)

  override def mapValues[W: ClassTag](func: (T) => W)
                                     (implicit enc: Encoder[W]=null): KeyValueGroupedDataSetAPI[K, W] = {
    val mapped = data.mapValues(_.map(func))
    new KeyValueGroupedDataSetAPIScalaParallelLazyImpl(mapped)
  }

} 
Author: piotr-kalanski, Project: spark-local, Lines of code: 23, Source file: KeyValueGroupedDataSetAPIScalaParallelLazyImpl.scala
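
A minimal usage sketch (hypothetical; it assumes the spark-local library, of which this class is a part, is on the classpath, and that the grouped data is supplied as parallel sequences; the object name and sample data are made up for illustration):

import com.datawizards.sparklocal.impl.scala.parallellazy.dataset.KeyValueGroupedDataSetAPIScalaParallelLazyImpl

object GroupedDemo extends App {
  // hypothetical data: integers grouped by parity, each group backed by a parallel sequence
  val grouped = new KeyValueGroupedDataSetAPIScalaParallelLazyImpl[String, Int](Map(
    "even" -> Seq(2, 4, 6).par,
    "odd"  -> Seq(1, 3, 5).par
  ))

  // mapValues transforms every element of every group;
  // the implicit Encoder parameter defaults to null, so none is supplied here
  val doubled = grouped.mapValues(_ * 2)
}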

Example 4: companion

// Set up the package name and import the required classes
package scala
package collection.parallel

import scala.collection.generic.GenericCompanion
import scala.collection.generic.GenericParCompanion
import scala.collection.generic.GenericParTemplate
import scala.collection.generic.ParFactory
import scala.collection.generic.CanCombineFrom
import scala.collection.GenSeq
import scala.collection.parallel.mutable.ParArrayCombiner


trait ParSeq[+T] extends GenSeq[T]
                    with ParIterable[T]
                    with GenericParTemplate[T, ParSeq]
                    with ParSeqLike[T, ParSeq[T], Seq[T]]
{
  override def companion: GenericCompanion[ParSeq] with GenericParCompanion[ParSeq] = ParSeq
  //protected[this] override def newBuilder = ParSeq.newBuilder[T]

  def apply(i: Int): T

  override def toString = super[ParIterable].toString

  override def stringPrefix = getClass.getSimpleName
}

object ParSeq extends ParFactory[ParSeq] {
  implicit def canBuildFrom[T]: CanCombineFrom[Coll, T, ParSeq[T]] = new GenericCanCombineFrom[T]

  def newBuilder[T]: Combiner[T, ParSeq[T]] = ParArrayCombiner[T]
  def newCombiner[T]: Combiner[T, ParSeq[T]] = ParArrayCombiner[T]
} 
Author: scala, Project: scala-parallel-collections, Lines of code: 34, Source file: ParSeq.scala
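
A minimal usage sketch (assuming a Scala version, such as 2.12, in which both scala.collection.GenSeq and these parallel collections are available; the demo object name is made up): because ParSeq mixes in GenSeq and its combiner is backed by ParArrayCombiner, the factory call and the transformation below both stay parallel.

import scala.collection.GenSeq
import scala.collection.parallel.ParSeq

object ParSeqDemo extends App {
  val ps: ParSeq[Int] = ParSeq(1, 2, 3, 4)   // built through newCombiner / ParArrayCombiner
  val doubled = ps.map(_ * 2)                // CanCombineFrom keeps the result a ParSeq

  // a ParSeq can be passed anywhere a GenSeq is expected
  def total(xs: GenSeq[Int]): Int = xs.sum
  println(total(doubled))                    // prints 20
}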


Note: the scala.collection.GenSeq class examples in this article were collected and organized by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; the copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.