

Scala max Code Examples

This article collects typical usage examples of max from scala.math. If you are unsure what max does, how to call it, or want to see it used in real code, the curated examples below should help.


Five code examples of max are shown below, ordered by popularity.

Example 1: connectionScore

// Package declaration and imports
package com.dataintuitive.luciuscore

import com.dataintuitive.luciuscore.Model._

import scala.math.{abs, max}

// The functions below are members of an object; the original snippet dropped
// the enclosing declaration, so it is restored here, named after the source file.
object ZhangScoreFunctions {

  // Connection strength normalized by its theoretical maximum
  def connectionScore(rv1: RankVector, rv2: RankVector): Double = {
    connectionStrength(rv1, rv2) / maxConnectionStrength(rv1, rv2)
  }

  def connectionStrength(rv1: RankVector, rv2: RankVector): Double =
    rv1
      .zip(rv2)
      .map { case (i, j) => i * j }
      .sum

  def maxConnectionStrength(rv1: RankVector, rv2: RankVector): Double = {
    // Largest absolute rank present in each vector
    val maxr = rv1.map(abs(_)).foldLeft(0.0)(max(_, _))
    val maxq = rv2.map(abs(_)).foldLeft(0.0)(max(_, _))
    // Pair the top ranks of both vectors in descending order, as if they were
    // perfectly correlated, and sum the products: an upper bound on connectionStrength
    (maxr to (maxr - maxq) by -1)
      .zip(maxq to 0 by -1)
      .map { case (i, j) => i * j }
      .sum
  }

} 
Author: data-intuitive, Project: LuciusCore, Lines: 28, Source: ZhangScoreFunctions.scala
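A minimal usage sketch of the scoring functions above. It assumes RankVector (defined in com.dataintuitive.luciuscore.Model) is a numeric sequence such as Array[Double]; the values below are illustrative rank vectors, not real data.

// Two hypothetical rank vectors over three features
val query: Array[Double] = Array(3.0, -2.0, 1.0)
val reference: Array[Double] = Array(3.0, -1.0, 2.0)

// connectionStrength: 3*3 + (-2)*(-1) + 1*2 = 13
// maxConnectionStrength: both max ranks are 3, so (3,2,1,0) zip (3,2,1,0)
// gives 9 + 4 + 1 + 0 = 14
// connectionScore: 13 / 14, roughly 0.93
ZhangScoreFunctions.connectionScore(query, reference)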

Example 2: Application

// Package declaration and imports
package controllers

import akka.actor.ActorSystem
import com.github.luqmansahaf.playlivy.LivyManager
import javax.inject.Inject
import org.apache.spark.sql.SparkSession
import play.api.mvc._
import play.api.Configuration
import play.Logger
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}


class Application @Inject() (val system: ActorSystem, config: Configuration, livyManager: LivyManager)
  extends Controller {

  
  def runPiExample(num: String) = Action {
    try {
      // Parse inside the try so a malformed num parameter also yields the 500 below
      val number = num.toInt
      val handle = livyManager.submit(context => {
        import scala.math.{random,max,min}
        val spark: SparkSession = context.sparkSession
        // Pi calculation code
        // Adapted from: https://github.com/apache/spark/blob/master/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala
        val slices = max(number,2)
        val n = min(100000L * slices, Int.MaxValue).toInt // avoid overflow
        val count = spark.sparkContext.parallelize(1 until n, slices).map { i =>
            val x = random * 2 - 1
            val y = random * 2 - 1
            if (x*x + y*y <= 1) 1 else 0
          }.reduce(_ + _)
        println("Pi is roughly " + 4.0 * count / (n - 1))
        4.0 * count / (n - 1)
      })
      Await.ready(handle, 60 second)
      handle.value match {
        case Some(Success(pi)) =>
          Logger.info(s"pi = $pi")
          Ok(pi.toString)
        case Some(Failure(e)) =>
          Logger.error(s"error occurred while computing pi: ${e.getMessage}")
          e.printStackTrace()
          Status(500)
        case None => Status(500)
      }
    } catch {
      case e: Exception =>
        Logger.error("error occurred")
        e.printStackTrace()
        Status(500)
    }
  }
} 
Author: LuqmanSahaf, Project: Play-Livy-Module, Lines: 57, Source: Application.scala
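Stripped of the Play and Livy plumbing, the submitted job is a plain Monte Carlo estimate of Pi: sample points uniformly in the square [-1, 1] x [-1, 1] and count how many land inside the unit circle. A minimal, Spark-free sketch of the same estimator (estimatePi is an illustrative name, not part of the module):

import scala.math.random

// The unit circle covers Pi/4 of the enclosing square, so the hit
// ratio times 4 converges to Pi as the sample count grows.
def estimatePi(samples: Int): Double = {
  val hits = (1 to samples).count { _ =>
    val x = random * 2 - 1
    val y = random * 2 - 1
    x * x + y * y <= 1
  }
  4.0 * hits / samples
}

println(estimatePi(1000000)) // prints a value close to 3.14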

Example 3: Entropy

// Package declaration and imports
package ru.ispras.modis.tm.chinesetm

import ru.ispras.modis.tm.documents.Document
import ru.ispras.modis.tm.matrix.{AttributedPhi, Theta}
import scala.math.{log, max}


class Entropy(private val values: Array[Map[Int, Float]]) {
    def apply(wordIndex: Int, documentIndex: Int) = values(documentIndex)(wordIndex)
}

object Entropy {
    def apply(documents: Seq[Document], phi: AttributedPhi, theta: Theta) = {
        val values = documents.map(document => processOneDocument(document, phi, theta)).toArray
        new Entropy(values)
    }


    private def processOneDocument(document: Document, phi: AttributedPhi, theta: Theta): Map[Int, Float] = {
        document.getAttributes(phi.attribute).map { case (word, num) =>
            word -> updateOneCell(phi, theta, document.serialNumber, word)
        }.toMap
    }

    private def updateOneCell(phi: AttributedPhi, theta: Theta, documentIndex: Int, wordIndex: Int): Float = {
        val topicIndexes = (0 until phi.numberOfRows).toArray
        // Normalizing constant: total weight of this (word, document) cell over all topics
        val denominator = topicIndexes.foldLeft(0d) { (sum, topicIndex) =>
            calculateC(phi, theta, documentIndex, wordIndex, topicIndex) + sum
        }

        // Sum of p * log(p) over the per-topic distribution p = c / denominator
        val result = topicIndexes.foldLeft(0d) { (sum, topicIndex) =>
            val c = calculateC(phi, theta, documentIndex, wordIndex, topicIndex)
            sum + c / denominator * log(c / denominator)
        }

        // Negate and divide by log(number of topics) to normalize the entropy into [0, 1]
        -(result / log(phi.numberOfRows)).toFloat
    }


    private def calculateC(phi: AttributedPhi, theta: Theta, documentIndex: Int, wordIndex: Int, topicIndex: Int) = {
        // Clamp at the smallest positive Float so the log in updateOneCell never sees zero
        max(Float.MinPositiveValue, phi.probability(topicIndex, wordIndex) * theta.probability(documentIndex, topicIndex))
    }
} 
Author: ispras, Project: atr4s, Lines: 44, Source: Entropy.scala
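The quantity updateOneCell produces is the normalized Shannon entropy of the topic distribution behind one (word, document) cell: 0 when a single topic dominates, 1 when all topics contribute equally. A standalone sketch of the same computation over an arbitrary weight vector (normalizedEntropy is an illustrative helper, not part of the library):

import scala.math.log

// Normalized Shannon entropy of a positive weight vector. As in calculateC
// above, callers should clamp weights away from zero to avoid log(0).
def normalizedEntropy(weights: Array[Double]): Double = {
  val total = weights.sum
  val sumPLogP = weights.map(_ / total).map(p => p * log(p)).sum
  -(sumPLogP / log(weights.length))
}

normalizedEntropy(Array(0.25, 0.25, 0.25, 0.25))   // 1.0: uniform over topics
normalizedEntropy(Array(1.0, 1e-30, 1e-30, 1e-30)) // near 0.0: one topic dominates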

Example 4: SortedBlocks

// Package declaration and imports
package de.fuberlin.wiwiss.silk.execution.methods

import de.fuberlin.wiwiss.silk.entity.{Path, Index, Entity}
import de.fuberlin.wiwiss.silk.linkagerule.LinkageRule
import de.fuberlin.wiwiss.silk.execution.ExecutionMethod
import scala.math.{min,max,pow}

case class SortedBlocks(sourceKey: Path, targetKey: Path, overlap: Double = 0.5) extends ExecutionMethod {

  private val minChar = 'a'
  private val maxChar = 'z'
  private val numChars = 3 //Maximum number of chars that will be indexed

  private val blockCount = pow((maxChar - minChar + 1), 2).toInt

  override def indexEntity(entity: Entity, rule: LinkageRule): Index = {
    val key = if(sourceKey.variable == entity.desc.variable) sourceKey else targetKey
    val values = entity.evaluate(key)

    values.map(indexValue).reduce(_ merge _)
  }

  private def indexValue(value: String): Index = {
    //Given a string, generate a value in the interval [0, 1)
    var index = 0.0
    for(i <- 0 until min(numChars, value.length)) {
      //Make sure the character is inside the interval [minChar,maxChar]
      val croppedChar = min(max(value(i).toLower, minChar), maxChar)
      //Update index
      index += (croppedChar - minChar) / pow(maxChar - minChar + 1, i + 1)
    }

    //Generate index
    Index.continuous(
      value = index,
      minValue = 0.0,
      maxValue = 1.0,
      blockCount = blockCount,
      overlap = overlap
    )
  }
} 
Author: petrovskip, Project: silk.2.6-GenLinkSA, Lines: 43, Source: SortedBlocks.scala
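The indexing trick in indexValue is to read the first numChars lowercase letters as digits of a base-26 fraction, so lexicographically close strings land on nearby points of [0, 1). A standalone sketch of that mapping (sortKey is an illustrative name, not part of Silk):

import scala.math.{min, max, pow}

// Map a string to [0, 1) by treating its first few characters as
// base-26 digits; characters outside 'a'..'z' are cropped to the range.
def sortKey(value: String, numChars: Int = 3): Double = {
  val (minChar, maxChar) = ('a', 'z')
  val base = maxChar - minChar + 1
  (0 until min(numChars, value.length)).map { i =>
    val cropped = min(max(value(i).toLower, minChar), maxChar)
    (cropped - minChar) / pow(base, i + 1)
  }.sum
}

// Keys preserve alphabetical order, so sorting by key sorts the strings:
sortKey("apple") < sortKey("banana") // true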

Example 5: SemanticVersion

// Package declaration and imports
package com.mchange.sc.v1.sbtethereum.compile

import scala.math.max

object SemanticVersion {

  // see https://docs.npmjs.com/misc/semver, sort of
  //
  // omitting the possibility of missing or non-numeric version-parts

  private val Regex = """^(\d+)\.(\d+)\.(\d+)(?:\W*)?$""".r

  implicit val DefaultOrdering = Ordering.by( (sv : SemanticVersion) => ( sv.major, sv.minor, sv.patch ) )

  def apply( versionString : String ) : SemanticVersion = {
    versionString match {
      case Regex( major, minor, patch ) => SemanticVersion( major.toInt, minor.toInt, patch.toInt )
      case _                            => throw new BadSolidityVersionException( versionString )
    }
  }

  def canBeCaretCompatible( a : SemanticVersion, b : SemanticVersion ) : Boolean = {
    if ( a.major == 0 ) {
      if ( a.minor == 0 ) {
        a.patch == b.patch
      } else {
        a.minor == b.minor
      }
    } else {
      a.major == b.major
    }
  }

  def restrictiveCaretCompatible( a : SemanticVersion, b : SemanticVersion ) : Option[SemanticVersion] = {
    if ( ! canBeCaretCompatible( a, b ) ) {
      None
    } else {
      Some( SemanticVersion( max( a.major, b.major ), max( a.minor, b.minor ), max( a.patch, b.patch ) ) )
    }
  }

  def restrictiveCaretCompatible( a : Option[SemanticVersion], b : Option[SemanticVersion] ) : Option[SemanticVersion] = {
    if ( a == None ) None else if ( b == None ) None else restrictiveCaretCompatible( a.get, b.get )
  }
}
final case class SemanticVersion( major : Int, minor : Int, patch : Int ) {
  def versionString = s"${major}.${minor}.${patch}"
} 
Author: swaldman, Project: sbt-ethereum, Lines: 49, Source: SemanticVersion.scala
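A short usage sketch of the caret-compatibility rules above, following the npm-style semantics in which a leading zero narrows the compatible range (the version strings are illustrative):

val a = SemanticVersion( "0.4.18" )
val b = SemanticVersion( "0.4.24" )

// Both majors are 0, so compatibility is decided by the minor component
SemanticVersion.canBeCaretCompatible( a, b )       // true
// The restrictive combination keeps the componentwise max: 0.4.24
SemanticVersion.restrictiveCaretCompatible( a, b ) // Some(SemanticVersion(0,4,24))

// With a nonzero major, only the major has to match
SemanticVersion.canBeCaretCompatible( SemanticVersion( "1.2.3" ), SemanticVersion( "2.0.0" ) ) // false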


Note: the scala.math.max examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright remains with the original authors, and any use or redistribution should follow each project's license. Do not republish without permission.