This article collects typical usage examples of Scala's scala.collection.Map class. If you have been wondering what the Scala Map class is for, or how to use it, the hand-picked class examples below should help.
15 code examples of the Map class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Scala code examples.
Example 1: LRPmmlEvaluator
// Package declaration and imported dependencies
package com.inneractive.cerberus
import java.io.{File, FileInputStream}
import java.util
import javax.xml.transform.stream.StreamSource
import org.dmg.pmml.{FieldName, Model, PMML}
import org.jpmml.evaluator.{EvaluatorUtil, FieldValue, ModelEvaluator, ModelEvaluatorFactory}
import org.jpmml.model.JAXBUtil
import scala.collection.JavaConversions._ // lets the java.util.List fields below be used with Scala's map/head/toMap
import scala.collection.Map
import scala.util.Try
class LRPmmlEvaluator(modelLocation: String, threshold: Option[Double]) {
val inputStream = new FileInputStream(new File(modelLocation, "/model/PMML/PMML.xml"))
val pmmModel: PMML = JAXBUtil.unmarshalPMML(new StreamSource(inputStream))
val evaluatorFactory: ModelEvaluatorFactory = ModelEvaluatorFactory.newInstance()
val evaluator: ModelEvaluator[_ <: Model] = evaluatorFactory.newModelManager(pmmModel)
val activeFields: util.List[FieldName] = evaluator.getActiveFields
val targetFields: util.List[FieldName] = EvaluatorUtil.getTargetFields(evaluator)
val outputFields: util.List[FieldName] = EvaluatorUtil.getOutputFields(evaluator)
def arguments(eval: FieldName => (FieldName, FieldValue)) = (activeFields map eval).toMap
def getResults(result: java.util.Map[FieldName, _]) = {
val targets = targetFields map (f => EvaluatorUtil.decode(result.get(f)))
val outputs = outputFields map (f => result.get(f))
Tuple3(targets.head.asInstanceOf[Double], outputs.head.asInstanceOf[Double], outputs(1).asInstanceOf[Double])
}
def callEvaluator(params : Map[FieldName, _]) = getResults(evaluator.evaluate(params))
def predict(features: Features) = Try {
val r = callEvaluator(arguments((f: FieldName) => {
val value = features.vector(f.getValue)
val activeValue = evaluator.prepare(f, value)
(f, activeValue)
}))
new Prediction("", r._1 == 0.0, if (r._1 == 0.0) r._2 else r._3)
}
}
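The arguments helper above builds a scala.collection.Map by pairing each active field with a prepared value and calling toMap. Below is a minimal, framework-free sketch of the same idiom; the field names and the feature lookup are invented stand-ins for FieldName/FieldValue.

import scala.collection.Map

object ArgumentsSketch extends App {
  // Hypothetical stand-ins for the active fields and raw feature values of the example above.
  val activeFields: List[String] = List("age", "income", "clicks")
  val rawFeatures: Map[String, Double] = Map("age" -> 34.0, "income" -> 52000.0, "clicks" -> 7.0)

  // Same shape as `arguments`: map each field to a (field, value) pair, then materialize a Map.
  def arguments(eval: String => (String, Double)): Map[String, Double] =
    (activeFields map eval).toMap

  val params = arguments(f => f -> rawFeatures.getOrElse(f, 0.0))
  println(params) // Map(age -> 34.0, income -> 52000.0, clicks -> 7.0)
}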
Example 2: RDDJoinExamples
// Package declaration and imported dependencies
package com.highperformancespark.examples.goldilocks
import org.apache.spark.HashPartitioner
import org.apache.spark.rdd.RDD
import scala.collection.Map
import scala.reflect.ClassTag
object RDDJoinExamples {
//tag::coreBroadCast[]
def manualBroadCastHashJoin[K : Ordering : ClassTag, V1 : ClassTag,
V2 : ClassTag](bigRDD : RDD[(K, V1)],
smallRDD : RDD[(K, V2)])= {
val smallRDDLocal: Map[K, V2] = smallRDD.collectAsMap()
val smallRDDLocalBcast = bigRDD.sparkContext.broadcast(smallRDDLocal) // broadcast once instead of capturing the map in every task closure
bigRDD.mapPartitions(iter => {
iter.flatMap{
case (k,v1 ) =>
smallRDDLocalBcast.value.get(k) match {
case None => Seq.empty[(K, (V1, V2))]
case Some(v2) => Seq((k, (v1, v2)))
}
}
}, preservesPartitioning = true)
}
//end::coreBroadCast[]
}
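The join itself is just a keyed lookup in the collected Map: Map#get returns Some(v2) for a match (emit the joined pair) or None (drop the row). A local sketch of that lookup without Spark, assuming as in the example that the small side fits in memory:

import scala.collection.Map

object LocalHashJoinSketch extends App {
  val big: Seq[(Int, String)] = Seq(1 -> "a", 2 -> "b", 3 -> "c")
  val small: Map[Int, Double] = Map(1 -> 0.5, 3 -> 1.5) // plays the role of smallRDD.collectAsMap()

  // Inner join via Map#get, mirroring the flatMap/match in manualBroadCastHashJoin.
  val joined: Seq[(Int, (String, Double))] = big.flatMap { case (k, v1) =>
    small.get(k) match {
      case None     => Seq.empty[(Int, (String, Double))]
      case Some(v2) => Seq((k, (v1, v2)))
    }
  }
  println(joined) // List((1,(a,0.5)), (3,(c,1.5)))
}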
Example 3: ParallelCollectionLRDD
// Package declaration and imported dependencies
package org.apache.spark.lineage.rdd
import org.apache.spark.OneToOneDependency
import org.apache.spark.lineage.LineageContext
import org.apache.spark.rdd.ParallelCollectionRDD
import scala.collection.Map
import scala.reflect._
private[spark] class ParallelCollectionLRDD[T: ClassTag](
@transient lc: LineageContext,
@transient data: Seq[T],
numSlices: Int,
locationPrefs: Map[Int, Seq[String]])
extends ParallelCollectionRDD[T](lc.sparkContext, data, numSlices, locationPrefs)
with Lineage[T] {
override def lineageContext = lc
override def ttag: ClassTag[T] = classTag[T]
override def tapRight(): TapLRDD[T] = {
val tap = new TapParallelCollectionLRDD[T](lineageContext, Seq(new OneToOneDependency(this)))
setTap(tap)
setCaptureLineage(true)
tap
}
}
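locationPrefs maps a partition index to its preferred hosts. A small sketch of reading such a Map with a safe default for partitions that have no preference (the hosts and indices are invented):

import scala.collection.Map

object LocationPrefsSketch extends App {
  // Hypothetical preferred hosts per partition index, same shape as `locationPrefs` above.
  val locationPrefs: Map[Int, Seq[String]] = Map(
    0 -> Seq("host-a", "host-b"),
    1 -> Seq("host-c")
  )

  // Partitions without an entry simply get no preference.
  def prefsFor(partition: Int): Seq[String] = locationPrefs.getOrElse(partition, Seq.empty)

  println(prefsFor(0)) // List(host-a, host-b)
  println(prefsFor(5)) // List()
}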
Example 4: findNearestStreets
// Package declaration and imported dependencies
package mapdomain.street
import base.{ LazyLoggerSupport, MeterSupport }
import mapdomain.graph._
import mapdomain.repository.street.{ StreetRepositorySupport, StreetVertexRepository }
import mapdomain.utils.GraphUtils
import scala.collection.Map
import scala.collection.concurrent.TrieMap
trait StreetGraphContainer extends GeoGraphContainer[StreetEdge, StreetVertex[StreetEdge]] {
def findNearestStreets(coordinate: Coordinate, radius: Double): List[StreetEdge]
def vertices: List[StreetVertex[StreetEdge]]
}
case class LazyStreetGraphContainer() extends StreetGraphContainer with StreetRepositorySupport {
protected val vertexById = new TrieMap[Long, StreetVertex[StreetEdge]]()
protected val totalVertices: Long = streetVertexRepository.totalVertices
override def vertices: List[StreetVertex[StreetEdge]] = {
if (totalVertices != vertexById.size) {
vertexById.keys // note: this value is discarded; the method always returns StreetVertexRepository.findAll below
}
StreetVertexRepository.findAll
}
override def findNearestStreets(coordinate: Coordinate, radius: Double): List[StreetEdge] = streetEdgeRepository.findNearestStreets(coordinate, radius)
override def findNearest(coordinate: Coordinate): Option[StreetVertex[StreetEdge]] = streetVertexRepository.findNearest(coordinate)
def purgeStreets: UnsavedStreetGraphContainer = withTimeLogging({
logger.info(s"Purge the unsaved street graph in order to get a connected graph")
GraphUtils.getConnectedComponent[StreetEdgeUnsaved, UnsavedStreetVertex, UnsavedStreetGraphContainer](this, UnsavedStreetGraphContainer.apply)
}, (time: Long) => logger.info(s"Street graph was purged in $time ms."))
}
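vertexById is a scala.collection.concurrent.TrieMap, a thread-safe Map that suits the lazy-cache role it plays here. A sketch of that cache pattern with getOrElseUpdate; the loader function is a made-up stand-in for the repository lookup:

import scala.collection.concurrent.TrieMap

object VertexCacheSketch extends App {
  case class Vertex(id: Long)

  val vertexById = new TrieMap[Long, Vertex]()

  // Hypothetical loader standing in for a repository call such as streetVertexRepository.find(id).
  def loadVertex(id: Long): Vertex = { println(s"loading $id"); Vertex(id) }

  def vertex(id: Long): Vertex = vertexById.getOrElseUpdate(id, loadVertex(id))

  vertex(42L)              // prints "loading 42"
  vertex(42L)              // served from the TrieMap, no second load
  println(vertexById.size) // 1
}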
Example 5: Prediction
// Package declaration and imported dependencies
package dagger.ml
import scala.util.Random
import scala.collection.Map
/**
* Created by narad on 8/8/14.
*/
case class Prediction[T](label2score: Map[T, Float], entropy: Double = 0.0) {
def randomMaxLabel(random: Random) = {
assert(maxLabels.nonEmpty)
val r = random.nextInt(maxLabels.size)
maxLabels(r)
}
lazy val maxScore: Double = label2score.maxBy(_._2)._2
lazy val maxLabels: Seq[T] = label2score.toSeq.filter(_._2 == maxScore).map(_._1)
def maxLabelsWithinThreshold(threshold: Double, optionLimit: Int): Seq[(T, Float)] = {
val withinThreshold = label2score.toSeq.filter(_._2 >= maxScore - threshold).map {
x => ((x._1, (x._2 - maxScore).toFloat))
}
withinThreshold.sortBy(-_._2).take(optionLimit)
}
lazy val maxLabel = maxLabels.head
override def toString = {
"Prediction:\n" +
label2score.keys.map { k =>
" %s: %f".format(k, label2score(k))
}.mkString("\n")
}
}
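Assuming the Prediction case class above, a short usage sketch: maxScore comes from maxBy over the Map's values, tied labels are kept in maxLabels, and randomMaxLabel breaks ties with the supplied Random (the labels and scores are invented):

import scala.util.Random

// Usage sketch for the Prediction case class defined above.
val p = Prediction[String](Map("A" -> 0.9f, "B" -> 0.9f, "C" -> 0.1f))
println(p.maxScore)                         // 0.9
println(p.maxLabels)                        // the tied labels A and B
println(p.randomMaxLabel(new Random(42)))   // a reproducible pick among the tied labels
println(p.maxLabelsWithinThreshold(0.5, optionLimit = 2)) // labels within 0.5 of the max, scored relative to the max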
Example 6: PassiveAggressiveUpdate
// Package declaration and imported dependencies
package dagger.ml
import scala.reflect.ClassTag
import scala.collection.Map
import scala.collection.mutable.HashMap
class PassiveAggressiveUpdate[T: ClassTag] extends PerceptronUpdateRule {
override def update(instance: Instance[T], classifier: AROWClassifier[T], options: AROWOptions): Unit = {
val prediction = classifier.predict(instance)
val temp = instance.correctLabels.map(l => (l, prediction.label2score(l))).toArray.sortBy(_._2)
if (temp.isEmpty) println("No Correct Labels found for: \n" + instance)
val weightVectors = classifier.weights
val cachedWeightVectors = classifier.cachedWeights
val maxLabel = prediction.maxLabel
val maxScore = prediction.maxScore
val (minCorrectLabel, minCorrectScore) = temp.head
val labelList = instance.labels
val iMaxLabel = labelList.indexOf(maxLabel)
val icost = instance.costOf(maxLabel)
val maxWeightLabel = instance.weightLabels(iMaxLabel)
val iMinCorrectLabel = labelList.indexOf(minCorrectLabel)
val minCorrectWeightLabel = instance.weightLabels(iMinCorrectLabel)
val loss = (maxScore - minCorrectScore + math.sqrt(instance.costOf(maxLabel))).toFloat
val norm = 2 * (AROW.dotMap(instance.feats(iMinCorrectLabel), instance.feats(iMinCorrectLabel)))
val factor = loss / (norm + (1.0f / (2 * options.SMOOTHING.toFloat)))
AROW.add(weightVectors(maxWeightLabel), instance.feats(iMaxLabel), -1.0f * factor)
AROW.add(weightVectors(minCorrectWeightLabel), instance.feats(iMinCorrectLabel), factor)
if (options.AVERAGING) {
AROW.add(cachedWeightVectors(maxWeightLabel), instance.feats(iMaxLabel), -1.0f * factor * classifier.averagingCounter)
AROW.add(cachedWeightVectors(minCorrectWeightLabel), instance.feats(iMinCorrectLabel), factor * classifier.averagingCounter)
}
}
}
Example 7: DecisionTreeUtil
// Package declaration and imported dependencies
package org.sparksamples.decisiontree
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.DecisionTree
import org.apache.spark.rdd.RDD
import org.sparksamples.Util
import scala.collection.Map
import scala.collection.mutable.ListBuffer
object DecisionTreeUtil {
def getTrainTestData(): (RDD[LabeledPoint], RDD[LabeledPoint]) = {
val recordsArray = Util.getRecords()
val records = recordsArray._1
val first = records.first()
val numData = recordsArray._2
println(numData.toString())
records.cache()
print("Mapping of first categorical feature column: " + Util.get_mapping(records, 2))
var list = new ListBuffer[Map[String, Long]]()
for( i <- 2 to 9){
val m = Util.get_mapping(records, i)
list += m
}
val mappings = list.toList
var catLen = 0
mappings.foreach( m => (catLen +=m.size))
val numLen = records.first().slice(11, 15).size
val totalLen = catLen + numLen
val data = {
records.map(r => LabeledPoint(Util.extractLabel(r), Util.extractFeatures(r, catLen, mappings)))
}
val data_dt = {
records.map(r => LabeledPoint(Util.extractLabel(r), Util.extract_features_dt(r)))
}
val splits = data_dt.randomSplit(Array(0.8, 0.2), seed = 11L)
val training = splits(0).cache()
val test = splits(1)
return (training, test)
}
def evaluate(train: RDD[LabeledPoint],test: RDD[LabeledPoint],
categoricalFeaturesInfo: scala.Predef.Map[Int, Int],
maxDepth :Int, maxBins: Int): Double = {
val impurity = "variance"
val decisionTreeModel = DecisionTree.trainRegressor(train, categoricalFeaturesInfo,
impurity,maxDepth, maxBins )
val true_vs_predicted = test.map(p => (p.label, decisionTreeModel.predict(p.features)))
val rmsle = Math.sqrt(true_vs_predicted.map{ case(t, p) => Util.squaredLogError(t, p)}.mean())
return rmsle
}
}
Author: PacktPublishing, Project: Machine-Learning-with-Spark-Second-Edition, Lines of code: 61, Source file: DecisionTreeUtil.scala
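Util.get_mapping is not shown in this excerpt; presumably it maps each distinct categorical value in a column to an index, which is why summing the mapping sizes gives catLen. A self-contained sketch of a mapping of that shape, over an invented column:

import scala.collection.Map

object CategoryMappingSketch extends App {
  // Hypothetical categorical column, e.g. a weather field from the bike-sharing records.
  val column: Seq[String] = Seq("clear", "mist", "clear", "rain", "mist")

  // Distinct value -> index, the shape assumed for Util.get_mapping(records, i).
  val mapping: Map[String, Long] =
    column.distinct.zipWithIndex.map { case (v, i) => v -> i.toLong }.toMap

  println(mapping)      // Map(clear -> 0, mist -> 1, rain -> 2)
  println(mapping.size) // contributes this many slots to the one-hot feature length (catLen)
}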
Example 8: encode
// Package declaration and imported dependencies
package com.ox.bigdata.util.kafka
import kafka.common.KafkaException
import kafka.utils.Logging
import scala.collection.{Iterable, Map}
import scala.util.parsing.json.JSON
// Enclosing object restored; the original listing omitted it (the name Json is an assumption)
object Json extends Logging {
def encode(obj: Any): String = {
obj match {
case null => "null"
case b: Boolean => b.toString
case s: String => "\"" + s + "\""
case n: Number => n.toString
case m: Map[_, _] =>
"{" +
m.map(elem =>
elem match {
case t: Tuple2[_, _] => encode(t._1) + ":" + encode(t._2)
case _ => throw new IllegalArgumentException("Invalid map element (" + elem + ") in " + obj)
}).mkString(",") +
"}"
case a: Array[_] => encode(a.toSeq)
case i: Iterable[_] => "[" + i.map(encode).mkString(",") + "]"
case other: AnyRef => throw new IllegalArgumentException("Unknown argument of type " + other.getClass + ": " + other)
}
}
}
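Assuming the restored object wrapper above (named Json here), a usage sketch: nested Maps and collections are rendered recursively, and null becomes the JSON literal null (the payload is invented):

// Usage sketch for the encode method above.
val payload = Map("topic" -> "events", "partitions" -> List(0, 1, 2), "leader" -> null)
println(Json.encode(payload))
// {"topic":"events","partitions":[0,1,2],"leader":null}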
Example 9: CompareColors
// Package declaration and imported dependencies
package org.kneelawk.imagegenerator2.comparecolors
import java.awt.image.BufferedImage
import scala.collection.Map
import org.kneelawk.imagegenerator2.ImageGenerator
import org.kneelawk.imagegenerator2.util.MathUtil
import javax.imageio.ImageIO
import java.io.File
object CompareColors extends ImageGenerator {
def name: String = "CompareColors"
def options: Seq[(String, String)] = Array(
("path", "Path to starting image"))
def apply(i: BufferedImage, options: Map[String, String], width: Int, height: Int): Unit = {
import MathUtil._
val input = ImageIO.read(new File(options("path")))
if (input.getWidth != width || input.getHeight != height) throw new IllegalArgumentException("Input and output image sizes must match")
for (y <- 0 until height; x <- 0 until width) {
val r = input.getRGB(mod2(x + 1, 0, width), y)
val l = input.getRGB(mod2(x - 1, 0, width), y)
val d = input.getRGB(x, mod2(y + 1, 0, height))
val u = input.getRGB(x, mod2(y - 1, 0, height))
val c = input.getRGB(x, y)
if (r != c || l != c || d != c || u != c) {
i.setRGB(x, y, 0xFFFFFFFF)
} else {
i.setRGB(x, y, 0x00000000)
}
}
}
}
Example 10: MultiMap
// Package declaration and imported dependencies
package com.github.tarao.namedcap
import scala.collection.immutable.{Map => ImmutableMap}
import scala.collection.Map
object MultiMap {
def apply(): MultiMap = new MultiMap(Map.empty)
def apply[S <: Seq[String]](m: Map[String, S]): MultiMap = new MultiMap(m)
val empty = MultiMap()
}
class MultiMap(m: Map[String, Seq[String]])
extends ImmutableMap[String, String] {
def get(key: String): Option[String] = m.get(key).flatMap(_.headOption)
def getAll(key: String): Seq[String] = m.getOrElse(key, Seq.empty)
def +[V >: String](kv: (String, V)): MultiMap = {
val (key, value) = kv
val list = m.getOrElse(key, Vector.empty) :+ value.asInstanceOf[String]
new MultiMap(m + (key -> list))
}
def -(key: String): MultiMap = new MultiMap(m - key)
def iterator: Iterator[(String, String)] =
m.iterator.flatMap(kv => kv._2.headOption.map(kv._1 -> _))
override def default(key: String): String =
m.default(key).headOption.getOrElse {
throw new NoSuchElementException("key not found: " + key)
}
}
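A usage sketch for the MultiMap above: + appends to the per-key sequence, get returns the first value for a key, and getAll returns all of them (the keys and values are invented):

val m = MultiMap.empty + ("tag" -> "scala") + ("tag" -> "map") + ("lang" -> "en")
println(m.get("tag"))     // Some(scala)
println(m.getAll("tag"))  // Vector(scala, map)
println(m.getAll("none")) // List() - missing keys fall back to Seq.empty
println(m.toList)         // one (key, first value) pair per key, via iterator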
Example 11: GenMapLikePartialTest
// Package declaration and imported dependencies
package org.danielnixon.extrawarts
import org.scalatest.FunSuite
import org.wartremover.test.WartTestTraverser
import scala.collection.{ GenMapLike, Map }
class GenMapLikePartialTest extends FunSuite {
val map: GenMapLike[String, Int, Map[String, Int]] = Map[String, Int]()
test("can't use GenMapLike#apply") {
val result = WartTestTraverser(GenMapLikePartial) {
val foo = map("foo")
}
assertResult(List("[wartremover:GenMapLikePartial] GenMapLike#apply is disabled - use GenMapLike#get instead"), "result.errors")(result.errors)
assertResult(List.empty, "result.warnings")(result.warnings)
}
test("doesn't detect other `apply` methods") {
val result = WartTestTraverser(GenMapLikePartial) {
case class A(apply: Int)
println(A(1).apply)
}
assertResult(List.empty, "result.errors")(result.errors)
assertResult(List.empty, "result.warnings")(result.warnings)
}
test("GenMapLikePartial wart obeys SuppressWarnings") {
val result = WartTestTraverser(GenMapLikePartial) {
@SuppressWarnings(Array("org.danielnixon.extrawarts.GenMapLikePartial"))
val foo = map("foo")
}
assertResult(List.empty, "result.errors")(result.errors)
assertResult(List.empty, "result.warnings")(result.warnings)
}
}
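The wart exists because Map#apply throws a NoSuchElementException for a missing key, while Map#get returns an Option. The difference in a self-contained sketch:

import scala.collection.Map

object MapApplyVsGet extends App {
  val m: Map[String, Int] = Map("foo" -> 1)

  println(m.get("foo"))          // Some(1)
  println(m.get("bar"))          // None - total, no exception
  println(m.getOrElse("bar", 0)) // 0
  // m("bar") would throw NoSuchElementException, which is what the wart flags.
}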
Example 12: parallelize
// Package declaration and imported dependencies
package com.datawizards.sparklocal.rdd
import org.apache.spark.Partitioner
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import scala.collection.{GenIterable, GenMap, Map}
import scala.reflect.ClassTag
trait PairRDDFunctionsAPI[K, V] {
protected lazy val spark: SparkSession = SparkSession.builder().getOrCreate()
protected def parallelize[That: ClassTag](d: Seq[That]): RDD[That] = spark.sparkContext.parallelize(d)
protected def parallelize[That: ClassTag](d: GenIterable[That]): RDD[That] = parallelize(d.toList)
def mapValues[U: ClassTag](f: (V) => U): RDDAPI[(K, U)]
def keys: RDDAPI[K]
def values: RDDAPI[V]
def flatMapValues[U: ClassTag](f: (V) => TraversableOnce[U]): RDDAPI[(K, U)]
def countByKey(): GenMap[K, Long]
def reduceByKey(func: (V, V) => V): RDDAPI[(K, V)]
def reduceByKey(func: (V, V) => V, numPartitions: Int): RDDAPI[(K, V)]
def reduceByKey(partitioner: Partitioner, func: (V, V) => V): RDDAPI[(K, V)]
def reduceByKeyLocally(func: (V, V) => V): Map[K, V]
def groupByKey(): RDDAPI[(K, GenIterable[V])]
def groupByKey(numPartitions: Int): RDDAPI[(K, GenIterable[V])]
def groupByKey(partitioner: Partitioner): RDDAPI[(K, GenIterable[V])]
def foldByKey(zeroValue: V)(func: (V, V) => V): RDDAPI[(K, V)]
def foldByKey(zeroValue: V, numPartitions: Int)(func: (V, V) => V): RDDAPI[(K, V)]
def foldByKey(zeroValue: V, partitioner: Partitioner)(func: (V, V) => V): RDDAPI[(K, V)]
def join[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (V, W))]
def join[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (V, W))]
def join[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (V, W))]
def leftOuterJoin[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (V, Option[W]))]
def leftOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (V, Option[W]))]
def leftOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (V, Option[W]))]
def rightOuterJoin[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (Option[V], W))]
def rightOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (Option[V], W))]
def rightOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (Option[V], W))]
def fullOuterJoin[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (Option[V], Option[W]))]
def fullOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (Option[V], Option[W]))]
def fullOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (Option[V], Option[W]))]
def cogroup[W1: ClassTag, W2: ClassTag, W3: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], other3: RDDAPI[(K, W3)], partitioner: Partitioner): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2], GenIterable[W3]))]
def cogroup[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (GenIterable[V], GenIterable[W]))]
def cogroup[W1: ClassTag, W2: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], partitioner: Partitioner): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2]))]
def cogroup[W1: ClassTag, W2: ClassTag, W3: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], other3: RDDAPI[(K, W3)]): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2], GenIterable[W3]))]
def cogroup[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (GenIterable[V], GenIterable[W]))]
def cogroup[W1: ClassTag, W2: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)]): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2]))]
def cogroup[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (GenIterable[V], GenIterable[W]))]
def cogroup[W1: ClassTag, W2: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], numPartitions: Int): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2]))]
def cogroup[W1: ClassTag, W2: ClassTag, W3: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], other3: RDDAPI[(K, W3)], numPartitions: Int): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2], GenIterable[W3]))]
def collectAsMap(): GenMap[K, V]
def subtractByKey[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, V)]
def subtractByKey[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, V)]
def subtractByKey[W: ClassTag](other: RDDAPI[(K, W)], p: Partitioner): RDDAPI[(K, V)]
def aggregateByKey[U: ClassTag](zeroValue: U)(seqOp: (U, V) => U, combOp: (U, U) => U): RDDAPI[(K, U)]
def aggregateByKey[U: ClassTag](zeroValue: U, partitioner: Partitioner)(seqOp: (U, V) => U, combOp: (U, U) => U): RDDAPI[(K, U)]
def aggregateByKey[U: ClassTag](zeroValue: U, numPartitions: Int)(seqOp: (U, V) => U, combOp: (U, U) => U): RDDAPI[(K, U)]
def partitionBy(partitioner: Partitioner): RDDAPI[(K, V)]
}
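Note that countByKey, reduceByKeyLocally and collectAsMap return a scala.collection Map/GenMap rather than another RDDAPI, because their results are materialized locally. A plain-collections sketch of the same semantics (the data is invented):

import scala.collection.Map

object LocalPairOpsSketch extends App {
  val pairs: Seq[(String, Int)] = Seq("a" -> 1, "b" -> 2, "a" -> 3)

  // countByKey: number of records per key.
  val counts: Map[String, Long] = pairs.groupBy(_._1).map { case (k, vs) => k -> vs.size.toLong }

  // reduceByKeyLocally: fold the values per key into a single value.
  val sums: Map[String, Int] = pairs.groupBy(_._1).map { case (k, vs) => k -> vs.map(_._2).sum }

  println(counts) // e.g. Map(a -> 2, b -> 1)
  println(sums)   // e.g. Map(a -> 4, b -> 2)
}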
Example 13: values
// Package declaration and imported dependencies
package com.danielwestheide.kontextfrei
import scala.collection.Map
import scala.reflect.ClassTag
private[kontextfrei] trait DCollectionPairFunctions[DCollection[_]] {
def values[A: ClassTag, B: ClassTag](x: DCollection[(A, B)]): DCollection[B]
def keys[A: ClassTag, B: ClassTag](x: DCollection[(A, B)]): DCollection[A]
def cogroup[A: ClassTag, B: ClassTag, C: ClassTag](x: DCollection[(A, B)])(
y: DCollection[(A, C)]): DCollection[(A, (Iterable[B], Iterable[C]))]
def leftOuterJoin[A: ClassTag, B: ClassTag, C: ClassTag](
x: DCollection[(A, B)])(
y: DCollection[(A, C)]): DCollection[(A, (B, Option[C]))]
def rightOuterJoin[A: ClassTag, B: ClassTag, C: ClassTag](
x: DCollection[(A, B)])(
y: DCollection[(A, C)]): DCollection[(A, (Option[B], C))]
def fullOuterJoin[A: ClassTag, B: ClassTag, C: ClassTag](
x: DCollection[(A, B)])(
y: DCollection[(A, C)]): DCollection[(A, (Option[B], Option[C]))]
def mapValues[A: ClassTag, B: ClassTag, C: ClassTag](x: DCollection[(A, B)])(
f: B => C): DCollection[(A, C)]
def flatMapValues[A: ClassTag, B: ClassTag, C: ClassTag](
xs: DCollection[(A, B)])(f: B => TraversableOnce[C]): DCollection[(A, C)]
def foldByKey[A: ClassTag, B: ClassTag](xs: DCollection[(A, B)])(
zeroValue: B,
func: (B, B) => B): DCollection[(A, B)]
def reduceByKey[A: ClassTag, B: ClassTag](xs: DCollection[(A, B)])(
f: (B, B) => B): DCollection[(A, B)]
def aggregateByKey[A: ClassTag, B: ClassTag, C: ClassTag](
xs: DCollection[(A, B)])(zeroValue: C)(
seqOp: (C, B) => C,
combOp: (C, C) => C): DCollection[(A, C)]
def combineByKey[A: ClassTag, B: ClassTag, C: ClassTag](
xs: DCollection[(A, B)])(createCombiner: B => C)(
mergeValue: (C, B) => C,
mergeCombiners: (C, C) => C): DCollection[(A, C)]
def countByKey[A: ClassTag, B: ClassTag](
xs: DCollection[(A, B)]): Map[A, Long]
def collectAsMap[A: ClassTag, B: ClassTag](
xs: DCollection[(A, B)]): Map[A, B]
}
Example 14: SparkWordCountJob
// Package declaration and imported dependencies
package com.spark.scala.core
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import scala.collection.Map
import org.apache.commons.io.FileUtils
import java.io.File
import java.util.Properties
class SparkWordCountJob (sc: SparkContext) {
//reads data and computes the results
def run(t: String , outputFile: String) = {
val readsdata = sc.textFile(t)
// Transform into word and count.
val words = readsdata.flatMap(word => word.split(" "))
val counts = words.map(word => (word, 1)).reduceByKey(_ + _)
// Save the word count back out to a text file
counts.saveAsTextFile(outputFile)
}
//Delete results folder if exists
def deleteFile(proprietes: Properties) = {
if (new File((proprietes.getProperty("Output").toString())).exists ()) {
FileUtils.deleteDirectory (new File((proprietes.getProperty("Output").toString())));
}
}
}
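The same counting logic, reduced to plain collections: grouping words and taking group sizes yields a scala.collection.Map of counts, which is what the reduceByKey step computes in distributed form (the input line is invented):

import scala.collection.Map

object LocalWordCountSketch extends App {
  val text = "to be or not to be"

  val counts: Map[String, Int] = text.split(" ")
    .groupBy(identity)
    .map { case (word, occurrences) => word -> occurrences.length }

  println(counts) // e.g. Map(not -> 1, to -> 2, be -> 2, or -> 1)
}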
Example 15: ShipMap
// Package declaration and imported dependencies
package com.harry0000.kancolle
import scala.collection.{Map, Seq, mutable}
import scala.collection.immutable.TreeMap
package object ac {
type ShipName = String
type ShipMap = TreeMap[ShipCategory, Seq[ShipName]]
object ShipMap {
def empty: ShipMap = TreeMap.empty[ShipCategory, Seq[ShipName]]
def apply(value: (ShipCategory, Seq[ShipName]) *): ShipMap = TreeMap(value: _*)
}
sealed abstract class ShipCategory(val name: String)
object ShipCategory {
case object Destroyer extends ShipCategory("駆逐艦")
case object LightCruiser extends ShipCategory("軽巡")
case object HeavyCruiser extends ShipCategory("重巡")
case object SeaplaneTender extends ShipCategory("水上機母艦")
case object AircraftCarrier extends ShipCategory("空母")
case object Submarine extends ShipCategory("潜水艦")
case object Battleship extends ShipCategory("戦艦")
private val order: Map[ShipCategory, Int] = mutable.LinkedHashMap.empty ++ Seq(
Destroyer,
LightCruiser,
HeavyCruiser,
SeaplaneTender,
AircraftCarrier,
Submarine,
Battleship
).zipWithIndex
implicit val ordering: Ordering[ShipCategory] = Ordering.by(order.getOrElse(_, Int.MaxValue))
// The matched type strings below were corrupted to "??" in this excerpt; the original Japanese literals could not be recovered.
def apply(shipType: String): ShipCategory = shipType match {
case "??" => Destroyer
case "??" => LightCruiser
case "??" => HeavyCruiser
case "??" => SeaplaneTender
case "??" => AircraftCarrier
case "??" => AircraftCarrier
case "??" => Submarine
case "??" => Battleship
}
def get(index: Int): Option[ShipCategory] = order.find(_._2 == index).map(_._1)
def values: Seq[ShipCategory] = order.keys.toSeq
}
}
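The ordering trick above, a LinkedHashMap zipped with indices plus Ordering.by over the index, keeps declaration order when the categories are used as TreeMap keys. A standalone sketch of the same idiom with plain strings:

import scala.collection.{Map, Seq, mutable}
import scala.collection.immutable.TreeMap

object DeclarationOrderSketch extends App {
  // Hypothetical categories; declaration order is the order we want to sort by.
  val declared = Seq("small", "medium", "large")

  val order: Map[String, Int] = mutable.LinkedHashMap.empty[String, Int] ++ declared.zipWithIndex
  implicit val ordering: Ordering[String] = Ordering.by(order.getOrElse(_, Int.MaxValue))

  val byCategory = TreeMap("large" -> 3, "small" -> 1, "medium" -> 2)
  println(byCategory.keys.toList) // List(small, medium, large) - declaration order, not alphabetical
}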