This article collects typical usage examples of the Scala class scala.collection.immutable.HashMap. If you have been wondering what exactly HashMap is for, or how to use it, the curated class examples here may help.
Below are 15 code examples of the HashMap class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Scala examples.
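Before the examples, here is a minimal, self-contained refresher on the immutable HashMap API that the snippets below lean on: construction, Option-based lookup, and copy-on-write updates.
import scala.collection.immutable.HashMap
val scores = HashMap("alice" -> 10, "bob" -> 7)
val updated = scores + ("carol" -> 3)        // returns a new map; scores is unchanged
val merged = scores ++ HashMap("bob" -> 9)   // right-hand entries win on duplicate keys
val missing = scores.get("dave")             // None: get returns an Option
val orZero = updated.getOrElse("dave", 0)    // 0: lookup with a default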
Example 1: SimpleKafkaProducerTest
// Package declaration and imported dependencies
package com.example
import java.nio.charset.StandardCharsets
import kafka.consumer.ConsumerConfig
import kafka.utils.TestUtils
import org.scalatest.{FunSpec, Matchers}
import scala.collection.immutable.HashMap
class SimpleKafkaProducerTest extends FunSpec with Matchers {
private val topic = "test"
private val groupId = "group0"
private val kafkaHelpers = new KafkaHelpers()
case class MessageData(a: String, b: String)
describe("The SimpleKafka Api") {
it("Should send data using a producer") {
//Send data to Kafka
val kafkaApi = new SimpleKafkaProducer(kafkaHelpers.kafkaSocket(), topic)
kafkaApi.send[MessageData](new MessageData("Hello", "World"))
//Create consumer
val consumerProperties = TestUtils.createConsumerProperties(kafkaHelpers.zookeeperSocket().toString(), groupId, "consumer0", -1)
val consumer = kafka.consumer.Consumer.create(new ConsumerConfig(consumerProperties))
val topicCountMap = HashMap(topic -> 1)
val consumerMap = consumer.createMessageStreams(topicCountMap)
val stream = consumerMap.get(topic).get(0)
val iterator = stream.iterator()
val msg = new String(iterator.next().message(), StandardCharsets.UTF_8)
assert("{\"a\":\"Hello\",\"b\":\"World\"}" == msg)
// cleanup
consumer.shutdown()
}
}
}
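SimpleKafkaProducer itself is project code that the listing does not include. A plausible minimal sketch, assuming json4s for serialization (which the asserted JSON output suggests) and the org.apache.kafka.clients producer API; the class name and constructor shape come from the test above, everything else is an assumption. Socket is a project-local host/port helper, sketched after Example 2.
import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.json4s.DefaultFormats
import org.json4s.jackson.Serialization
// Hypothetical reconstruction: serialize a value to JSON and send it to the topic.
class SimpleKafkaProducer(kafkaSocket: Socket, topic: String) {
  private implicit val formats: DefaultFormats.type = DefaultFormats
  private val props = new Properties()
  props.put("bootstrap.servers", kafkaSocket.toString)
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  private val producer = new KafkaProducer[String, String](props)
  def send[T <: AnyRef](message: T): Unit =
    producer.send(new ProducerRecord[String, String](topic, Serialization.write(message)))
}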
Example 2: SimpleKafkaConsumer
// Package declaration and imported dependencies
package com.example
import java.nio.charset.StandardCharsets
import java.util.Properties
import kafka.consumer.ConsumerConfig
import org.json4s.{DefaultFormats, jackson}
import scala.collection.immutable.HashMap
// NOTE: Socket is a project-local host/port helper (not java.net.Socket); it is not
// part of this listing. A plausible sketch follows after this example.
class SimpleKafkaConsumer(kafkaSocket: Socket, zooKeeperSocket: Socket, groupId: String, topic: String) {
private def configuration = {
val deserializer = "org.apache.kafka.common.serialization.StringDeserializer"
val props = new Properties()
props.put("bootstrap.servers", kafkaSocket.toString())
props.put("key.deserializer", deserializer)
props.put("value.deserializer", deserializer)
props.put("group.id", groupId)
props.put("consumer.id", "consumer0")
props.put("consumer.timeout", "-1")
props.put("auto.offset.reset", "smallest")
props.put("zookeeper.sync.time.ms", "200")
props.put("zookeeper.session.timeout.ms", "6000")
props.put("zookeeper.connect", zooKeeperSocket.toString())
props.put("num.consumer.fetchers", "2")
props.put("rebalance.max.retries", "4")
props.put("auto.commit.interval.ms", "1000")
props
}
private val consumer = kafka.consumer.Consumer.create(new ConsumerConfig(configuration))
def read[T <: AnyRef]()(implicit m: Manifest[T]): Iterable[T] = {
implicit val serialization = jackson.Serialization
implicit val formats = DefaultFormats
val topicCountMap = HashMap(topic -> 1)
val consumerMap = consumer.createMessageStreams(topicCountMap)
val stream = consumerMap.get(topic).get(0)
val iterator = stream.iterator()
iterator.map(x => serialization.read[T](new String(x.message(), StandardCharsets.UTF_8))).toStream
}
def shutdown() = {
consumer.shutdown()
}
}
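The Socket type used by both Kafka examples is project-local, not java.net.Socket. A minimal sketch, assuming toString renders the usual host:port form expected by bootstrap.servers and zookeeper.connect:
case class Socket(host: String, port: Int) {
  override def toString: String = s"$host:$port"
}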
Example 3: RomanNumeralsSpec
// Package declaration and imported dependencies
package com.jgibbons.romannumerals
import org.scalatest.FlatSpec
import scala.collection.immutable.HashMap
class RomanNumeralsSpec extends FlatSpec {
behavior of "RomanNumerals"
it should "Convert Properly" in {
val mapped = HashMap[Int, String](
1 -> "I", 2 -> "II", 3 -> "III", 4 -> "IV", 5 -> "V", 6 -> "VI", 7 -> "VII", 8 -> "VIII", 9 -> "IX", 10 -> "X",
11 -> "XI", 12 -> "XII", 13 -> "XIII", 14 -> "XIV", 15 -> "XV", 16 -> "XVI", 17 -> "XVII", 18 -> "XVIII", 19 -> "XIX", 20 -> "XX",
41 -> "XLI", 42 -> "XLII", 43 -> "XLIII", 44 -> "XLIV", 45 -> "XLV", 46 -> "XLVI", 47 -> "XLVII", 48 -> "XLVIII", 49 -> "XLIX",
50 -> "L", 51 -> "LI", 60 -> "LX", 64 -> "LXIV", 68 -> "LXVIII", 69 -> "LXIX",
90 -> "XC", 91 -> "XCI", 94 -> "XCIV", 96 -> "XCVI", 99 -> "XCIX",
100 -> "C", 140 -> "CXL", 144 -> "CXLIV", 149 -> "CXLIX",
189 -> "CLXXXIX", 190 -> "CXC", 194 -> "CXCIV", 199 -> "CXCIX",
989 -> "CMLXXXIX", 990 -> "CMXC", 994 -> "CMXCIV", 999 -> "CMXCIX",
1194 -> "MCXCIV", 1199 -> "MCXCIX",
1989 -> "MCMLXXXIX", 1990 -> "MCMXC",
5989 -> "MMMMMCMLXXXIX", 5990 -> "MMMMMCMXC", 5994 -> "MMMMMCMXCIV", 5999 -> "MMMMMCMXCIX"
)
mapped.foreach { case (k: Int, v: String) =>
assert(RomanNumerals.convertLongImperative(k) == v, s"convertImperative failed for $k")
assert(RomanNumerals.convertFoldLeft(k) == v, s"convertFoldLeft failed for $k")
assert(RomanNumerals.convertStrs(k) == v, s"convertStrs failed for $k")
assert(RomanNumerals.convertMixed(k) == v, s"convertMixed failed for $k")
assert(RomanNumerals.convertTailRecursive(k) == v, s"convertTailRecursive failed for $k")
assert(RomanNumerals.convertFoldAgain(k) == v, s"convertFoldAgain failed for $k")
assert(RomanNumerals.convertFoldTerse(k) == v, s"convertFoldTerse failed for $k")
assert(RomanNumerals.convert(k) == v, s"convert failed for $k")
assert(JpgRomanNumerals.convert(k) == v, s"JpgRomanNumerals.convert failed for $k")
}
}
}
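The converters under test are not part of the listing. For orientation, here is a self-contained converter in the spirit of convertFoldLeft: fold over the value/symbol table in descending order, emitting each symbol as many times as it fits. The function name is illustrative, not the project's.
// Illustrative sketch: repeatedly subtract the largest value that still fits.
def toRoman(n: Int): String = {
  val symbols = List(
    1000 -> "M", 900 -> "CM", 500 -> "D", 400 -> "CD",
    100 -> "C", 90 -> "XC", 50 -> "L", 40 -> "XL",
    10 -> "X", 9 -> "IX", 5 -> "V", 4 -> "IV", 1 -> "I")
  symbols.foldLeft((n, new StringBuilder)) { case ((rest, sb), (value, sym)) =>
    (rest % value, sb.append(sym * (rest / value)))
  }._2.toString
}
// toRoman(1994) == "MCMXCIV"; toRoman(5989) == "MMMMMCMLXXXIX", matching the spec's convention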
Example 4: SearchEngine
// Package declaration and imported dependencies
package com.ferran.searchEngine
import java.io.File
import com.ferran.searchEngine.tools.{FileLoader, Tools}
import scala.collection.immutable.HashMap
import scala.io.Source
object SearchEngine {
type DocId = Int
type DocName = String
type Dictionary = HashMap[String, Map[DocId, DocName]]
type RankingList = List[(DocId, DocName, Int)]
def loadWords(dictionary: Dictionary, file: File, index: Int):Dictionary = {
val fileName = file.getName
Tools.wordSplitter(Source.fromFile(file).mkString)
.foldLeft(dictionary){
case (dict, word) =>
dict.get(word.toLowerCase) match {
case Some(docs) =>
dict.updated(word.toLowerCase,
{
if(docs.contains(index)) docs
else docs ++ Map(index -> fileName)
}
)
case None =>
dict.updated(word.toLowerCase, Map(index -> fileName))
}
}
}
final def main(args:Array[String]) = {
val pathFolder = args(0)
val filesOpt = FileLoader.getFilesList(pathFolder)
// Create our Dictionary
val dictionary =
filesOpt.map{
files =>
files.zipWithIndex.foldLeft(HashMap.empty[String, Map[DocId, DocName]]){
case (dict, (file, index)) => loadWords(dict, file, index)
}
}
.getOrElse(HashMap.empty)
println(s"Files loaded")
print(s"search> ")
var line = scala.io.StdIn.readLine()
while(line != ":quit"){
Ranking.printRanking(Ranking.getRanking(Dictionary.searchWords(dictionary,line)))
print(s"search> ")
line = scala.io.StdIn.readLine()
}
}
}
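Dictionary.searchWords and Ranking live elsewhere in the project and are not shown. A plausible reconstruction of the lookup step, assuming the query is split into words and each word's posting map is fetched from the HashMap-backed index; only the types come from the example, the body is an assumption.
import com.ferran.searchEngine.SearchEngine.{Dictionary, DocId, DocName}
// Hypothetical sketch: one posting map (docId -> docName) per query word.
def searchWords(dictionary: Dictionary, query: String): Seq[Map[DocId, DocName]] =
  query.toLowerCase.split("\\s+").toSeq
    .map(word => dictionary.getOrElse(word, Map.empty[DocId, DocName]))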
Example 5: UserGroupInformationLoginUtil
// Package declaration and imported dependencies
package im.yanchen.krb5.auth.passwd
import javax.security.auth.Subject
import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag
import javax.security.auth.login.{AppConfigurationEntry, LoginContext}
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod
import org.apache.hadoop.security.authentication.util.KerberosUtil
import scala.collection.JavaConverters._
import scala.collection.immutable.HashMap
object UserGroupInformationLoginUtil {
def loginUserFromPasswordAndReturnUGI(user: String, password: String): UserGroupInformation = {
this.synchronized {
if (!UserGroupInformation.isSecurityEnabled) {
UserGroupInformation.getCurrentUser
} else {
val krbOptions = HashMap[String, String](
"doNotPrompt" -> "false",
"useTicketCache" -> "false",
"useKeyTab" -> "false",
"renewTGT" -> "false"
).asJava
val ace = new AppConfigurationEntry(
KerberosUtil.getKrb5LoginModuleName,
LoginModuleControlFlag.REQUIRED,
krbOptions)
val dynConf = new DynamicConfiguration(Array[AppConfigurationEntry](ace))
val loginContext = new LoginContext(
"hadoop-password-kerberos",
null,
new LoginHandler(user, password),
dynConf)
loginContext.login()
val loginSubject = loginContext.getSubject
val loginPrincipals = loginSubject.getPrincipals
val subject = new Subject()
subject.getPrincipals().addAll(loginPrincipals)
val newLoginUser = UserGroupInformation.getUGIFromSubject(subject)
setUGILogin(newLoginUser, loginContext)
newLoginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS)
newLoginUser
}
}
}
private def setUGILogin(loginUser: UserGroupInformation, loginContext: LoginContext): Unit = {
val cls = classOf[UserGroupInformation]
val mtd = cls.getDeclaredMethod("setLogin", classOf[LoginContext])
mtd.setAccessible(true)
mtd.invoke(loginUser, loginContext)
}
}
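DynamicConfiguration and LoginHandler are referenced above but not included in the listing. The JAAS side is small enough to sketch: a javax.security.auth.login.Configuration that always hands back the fixed entries it was constructed with. This is a plausible reconstruction, not the project's actual code.
import javax.security.auth.login.{AppConfigurationEntry, Configuration}
class DynamicConfiguration(entries: Array[AppConfigurationEntry]) extends Configuration {
  // The application name is ignored: the same entries apply to every login context.
  override def getAppConfigurationEntry(name: String): Array[AppConfigurationEntry] = entries
}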
Example 6: TestNovelLaTeX
// Package declaration and imported dependencies
package com.lyrx.latex.documents
import com.lyrx.latex.{LaTeXArticle, LaTeXNovel, LaTeXReplacements}
import com.lyrx.structures.{TestNovel, Whitepaper}
import com.lyrx.text._
import com.lyrx.text.bib.{BibItem, Online}
import scala.collection.immutable.HashMap
object TestNovelLaTeX extends LaTeXReplacements with FileAssertions {
def main(args: Array[String]): Unit = {
implicit val ctx = Context()
.withInputDir("src/test/resources/dummynovel")
.withOutputDir("target/generated/latex")
.inEnglish
implicit val coll = new SimpleStringCollector();
assertFile(TestNovel.collect[String](TestNovelLaTeX()).writeOut())
}
def apply(): TestNovelLaTeX =
new TestNovelLaTeX(new ParData[String](
Nil,
"testnovel.tex",
sections = Seq(
"Section"
),
replacements = HashMap[String, String]("llb" -> "lyrx Books"),
subSections = Seq( "Subsection" )
).withReplacer(
"""\(\d+,\d+\)""".r,(s:String)=>{
val AR ="""\((\d+),(\d+)\)""".r
s match {
case AR(s1:String,s2:String) => s"\\begin{tiny}[${s1}]\\end{tiny}"
case _ => ""
}
}
))
}
class TestNovelLaTeX(override val parData: ParData[String])
extends LaTeXNovel
with BookData {
override val texTemplate: String = "../../../main/resources/templates/novel.tex"
override def withParData(aParData: ParData[String]): TestNovelLaTeX = new TestNovelLaTeX(aParData)
override val title: String = "How to write a novel"
override val publisher: String = "lyrx GmbH"
override val author: String = "Alexander Weinmann"
override val subtitle: String = "This is the subtitle"
override val motto: String = "Motto"
override val widmung: String = "Widmung"
}
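The withReplacer call above pairs a regex with a rewrite function. Stripped of the LaTeX context, the mechanism underneath is scala.util.matching.Regex.replaceAllIn, as in this self-contained sketch:
import scala.util.matching.Regex
val AR: Regex = """\((\d+),(\d+)\)""".r
// Rewrite every "(n,m)" occurrence, keeping only the first captured group.
val out = AR.replaceAllIn("see (12,34) and (5,6)", m => s"[${m.group(1)}]")
// out == "see [12] and [5]"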
Example 7: RapierConfJson
// Package declaration and imported dependencies
package com.frankandrobot.rapier.cli
import scala.collection.immutable.HashMap
sealed trait JsonConf
case class RapierConfJson(val params : RapierParamsJson,
val masterTemplateUri : String,
val trainingDataUri : String,
val learedRulesUri: String,
val dataUri: String)
extends JsonConf
case class RapierParamsJson(val maxOuterLoopFails: Option[Int],
val compressionRandomPairs: Option[Int],
val compressionPriorityQueueSize: Option[Int],
val compressionFails: Option[Int],
val metricMinPositiveMatches: Option[Int],
val maxElementsToSpecialize: Option[Int],
val ruleSizeWeight: Option[Double],
val maxPatternLength: Option[Int],
val maxUnequalPatternLength: Option[Int],
val maxDifferenceInPatternLength: Option[Int])
extends JsonConf
case class MasterTemplateJson(val name : String, val slots : List[String])
extends JsonConf
case class TrainingExampleJson(val document : String,
val filledTemplate: HashMap[String, List[String]])
extends JsonConf
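These case classes are plain JSON carriers; note the HashMap-typed filledTemplate slot. A minimal serialization sketch, assuming json4s/jackson as used elsewhere in this article (the library choice here is an assumption):
import org.json4s.DefaultFormats
import org.json4s.jackson.Serialization
import scala.collection.immutable.HashMap
implicit val formats: DefaultFormats.type = DefaultFormats
val example = TrainingExampleJson("doc-1", HashMap("city" -> List("Austin", "Dallas")))
val json = Serialization.write(example)
// json == {"document":"doc-1","filledTemplate":{"city":["Austin","Dallas"]}}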
Example 8: transform
// Package declaration and imported dependencies
package ftse.transformations.laresflat
import scala.collection.immutable.HashMap
import ftse.simulation.laresflat.LFReachTypes
import ftse.simulation.laresflat.LFReachTypes._
import ftse.formalism.lares.LARES_metamodel._
import ftse.formalism.lares.LaresAlgo
import ftse.formalism.tra.Tra
import ftse.transformations.AbstrTransformer
/**
* This trait transforms the result of the reachability analysis in LFSim.reach() into a Tra object.
*/
trait Reach2TraTransformer extends
AbstrTransformer[ReachResult, (Tra, Map[ComposedState, Int])]
with LFReachTypes with LaresAlgo {
/**
* Builds a Tra object from the reachability structure produced by the reach() method.
* @return the Tra object and a map from each composed state to its assigned state number
*/
override def transform(obj: ReachResult) : (Tra, Map[ComposedState,Int]) = {
val statemap = HashMap(
  // move the initial state to the front so that numbering starts at 1
  obj._2.toList.sortBy(_ != obj._3).zipWithIndex.map(a => (a._1, a._2 + 1)): _*
)
val groupedT = obj._1.map(t => {
  t._3 match {
    case (name, Dirac(_)) =>
      val evalArith = eval(t._3._2)
      val weight: Double = evalArith.right.getOrElse(evalArith.left.get)
      ftse.formalism.tra.ImmediateTransition(statemap(t._1), name, statemap(t._2), weight): ftse.formalism.tra.Transition
    //case e : ExponentialTransitionType => ftse.formalism.tra.MarkovianTransition(statemap(t._1),"delay" + e.delay, statemap(t._2),e.delay.toDouble) : ftse.formalism.tra.Transition
    case (name, Exponential(_)) =>
      val evalArith = eval(t._3._2)
      val rate: Double = evalArith.right.getOrElse(evalArith.left.get)
      ftse.formalism.tra.MarkovianTransition(statemap(t._1), name, statemap(t._2), rate): ftse.formalism.tra.Transition
  }
}).groupBy(_.source)
return (ftse.formalism.tra.Tra(groupedT ,Set[Long](statemap.values.toList.map(a => a : Long):_ *)),statemap)
}
}
Example 9: transform
// Package declaration and imported dependencies
package ftse.formalism.lares.flat
import ftse.formalism.lares._
import ftse.formalism.lares.LARES_metamodel._
import ftse.simulation.laresflat.LFReachTypes
import scala.collection.immutable.HashMap
import ftse.simulation.laresflat.LFReachTypes._
import ftse.transformations.AbstrTransformer
// NOTE: the original listing omits the enclosing type; a plausible wrapper,
// mirroring Example 8, is assumed here.
trait Reach2DotTransformer extends AbstrTransformer[ReachResult, String] with LFReachTypes with LaresAlgo {
override def transform(obj: ReachResult): String = {
val statemap = HashMap(
  // move the initial state to the front so that numbering starts at 1
  obj._2.toList.sortBy(_ != obj._3).zipWithIndex.map(a => (a._1, a._2 + 1)): _*
)
val dotTrans = obj._1.map(e => statemap(e._1) + "->" + statemap(e._2) + "["+(e._3 match {
case (name,Exponential(rate)) => "label=\"("+name+",rate="+eval(e._3._2)+")\"" + " fontcolor=black color=black"
case (name,Dirac(weight)) => "label=\"(*"+name+",rate="+eval(e._3._2)+"*)\"" + " fontcolor=blue color=blue"
case _ => ""
})+"]")
val statesDot = statemap.map(a=> a._2 + " "+a._1.values.map(_.identifier.toText).mkString("[style=filled shape=\"record\" label=\"{","|","}\"]") )
val resstring = dotTrans.foldLeft(
"digraph g {ratio=1.33;node [height=\"2\" width=\"3\" shape = \"ellipse\"]; edge []; graph [nodesep=0.5];"
)((a,b)=>a+"\n "+b)+statesDot.mkString("\n ","\n ","\n")+"}"
//println(resstring)
resstring
}
}
Example 10: MongoAndInternal
// Package declaration and imported dependencies
package nsmc.conversion.types
import java.util.Date
import com.mongodb.casbah.Imports._
import nsmc.conversion.SchemaAccumulator
import org.apache.spark.sql.types._
import scala.collection.immutable.HashMap
class MongoAndInternal {
}
object MongoAndInternal {
def toInternal(o: MongoDBObject) : StructureType = {
val convertedPairs = o.toSeq.map(kv => toInternal(kv))
val hm = HashMap[String, ConversionType](convertedPairs:_*)
new StructureType(hm)
}
def toInternal(o: BasicDBObject) : StructureType = {
val convertedPairs = o.toSeq.map(kv => toInternal(kv))
val hm = HashMap[String, ConversionType](convertedPairs:_*)
new StructureType(hm)
}
def toInternal(a: AnyRef) : ConversionType = {
a match {
case d: Date => AtomicType(TimestampType)
case bts: BSONTimestamp => {
val s = Seq("inc" -> AtomicType(IntegerType), "time" -> AtomicType(IntegerType))
StructureType(HashMap[String, ConversionType](s: _*))
}
case ba: Array[Byte] => AtomicType(BinaryType)
case bt: org.bson.types.ObjectId => AtomicType(StringType)
case _: java.lang.Long => AtomicType(LongType)
case _: java.lang.Byte => AtomicType(ByteType)
case _: java.lang.Double => AtomicType(DoubleType)
case _: java.lang.Boolean => AtomicType(BooleanType)
case _:String => AtomicType(StringType)
case _:Integer => AtomicType(IntegerType)
case o:BasicDBObject => toInternal(o)
case l:BasicDBList => {
val sa = new SchemaAccumulator
l.foreach(dbo => sa.considerDatum(dbo.asInstanceOf[AnyRef]))
SequenceType(sa.getInternal)
}
}
}
def toInternal(kv: (String, AnyRef)): (String, ConversionType) = {
kv match {
case (k: String, a: AnyRef) => {
val vt = toInternal(a)
(k, vt)
}
}
}
}
Example 11: Merger
// Package declaration and imported dependencies
package nsmc.conversion.types
import scala.collection.immutable.HashMap
object Merger {
def merge(l: ConversionType, r: ConversionType) : ConversionType = {
(l, r) match {
case (StructureType(lMap), StructureType(rMap)) => {
val keys = lMap.keySet ++ rMap.keySet
val pairs = keys.map(k => {
val inLeft = lMap.isDefinedAt(k)
val inRight = rMap.isDefinedAt(k)
(inLeft, inRight) match {
case (true, true) => (k, merge(lMap.getOrElse(k, null), rMap.getOrElse(k, null)))
case (true, false) => (k, lMap.getOrElse(k, null))
case (false, true) => (k, rMap.getOrElse(k, null))
case (false, false) => (k, null) // can't happen
}
})
val ct = new StructureType(HashMap[String, ConversionType](pairs.toSeq:_*))
ct
}
case (_, _) => l // TODO: assume for now they're equal
}
}
}
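A short usage sketch of Merger.merge, reusing the ConversionType family from Examples 10 and 12; the Spark SQL atomic types stand in for values derived from real Mongo documents.
import org.apache.spark.sql.types.{IntegerType, StringType}
import scala.collection.immutable.HashMap
val left = StructureType(HashMap("name" -> AtomicType(StringType)))
val right = StructureType(HashMap("age" -> AtomicType(IntegerType)))
val merged = Merger.merge(left, right)
// merged is a StructureType whose map contains both "name" and "age"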
Example 12: InternalAndSchema
// Package declaration and imported dependencies
package nsmc.conversion.types
import org.apache.spark.sql.types._
import scala.collection.immutable.HashMap
class InternalAndSchema {
}
object InternalAndSchema {
def toSchema(it: ConversionType) : DataType = {
it match {
case AtomicType(dt: DataType) => dt
case SequenceType(et) => ArrayType(toSchema(et))
case StructureType(fields) => {
val converted = fields.map(kv => makeField(kv._1, toSchema(kv._2)))
val sorted = converted.toSeq.sortBy(sf => sf.name)
StructType(sorted)
}
}
}
private def makeField(k:String, t: DataType) : StructField = {
StructField(k, t, nullable = true)
}
def toInternal(schema: Seq[StructField]) : ConversionType = {
val convertedPairs = schema.toSeq.map(toInternal)
val hm = HashMap[String, ConversionType](convertedPairs:_*)
new StructureType(hm)
}
private def toInternal(sf: StructField) : (String, ConversionType) = {
sf.dataType match {
case DateType => (sf.name, AtomicType(DateType))
case DoubleType => (sf.name, AtomicType(DoubleType))
case StringType => (sf.name, AtomicType(StringType))
case IntegerType => (sf.name, AtomicType(IntegerType))
case LongType => (sf.name, AtomicType(LongType))
case TimestampType => (sf.name, AtomicType(TimestampType))
case BooleanType => (sf.name, AtomicType(BooleanType))
case StructType(s) => (sf.name, toInternal(s))
case _ : DecimalType => (sf.name, AtomicType(DoubleType))
}
}
}
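And the other direction, a sketch of turning an internal description into a Spark schema via InternalAndSchema.toSchema (same assumed setup as the previous sketch):
import org.apache.spark.sql.types.StringType
import scala.collection.immutable.HashMap
val ct = StructureType(HashMap(
  "name" -> AtomicType(StringType),
  "tags" -> SequenceType(AtomicType(StringType))))
val schema = InternalAndSchema.toSchema(ct)
// a StructType with "name": StringType and "tags": ArrayType(StringType), fields sorted by name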
Example 13: SchemaAccumulator
// Package declaration and imported dependencies
package nsmc.conversion
import nsmc.conversion.types._
import org.apache.spark.sql.types._
import scala.collection.immutable.HashMap
class SchemaAccumulator {
private var currentInternal: Option[ConversionType] = None
private def maybeMerge(l: ConversionType, ro: Option[ConversionType]) : ConversionType = {
ro match {
case Some(r) => Merger.merge(l, r)
case None => l
}
}
def considerDatum(datum: AnyRef) : Unit = {
val recInternalType = MongoAndInternal.toInternal(datum)
currentInternal = Some(maybeMerge(recInternalType, currentInternal))
}
def accumulate(types: Iterator[StructureType]) : Unit = {
types.foreach(ty => {
currentInternal = Some(maybeMerge(ty, currentInternal))
})
}
def getInternal : ConversionType = {
currentInternal match {
case Some(i) => i
case None => new StructureType(new HashMap[String, ConversionType]())
}
}
// should only be called for a top level (record) schema
def getSchema : Seq[StructField] = {
currentInternal match {
case Some(i) => InternalAndSchema.toSchema(i).asInstanceOf[StructType].fields
case None => Seq()
}
}
}
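A usage sketch for SchemaAccumulator: feed it raw BasicDBObject documents (Example 10's match handles BasicDBObject, not the MongoDBObject wrapper) and read off the merged schema.
import com.mongodb.BasicDBObject
val acc = new SchemaAccumulator
acc.considerDatum(new BasicDBObject("name", "Ada").append("age", 36))
acc.considerDatum(new BasicDBObject("name", "Alan").append("email", "alan@example.org"))
// Fields present in either document appear in the result: "age", "email", "name".
val fields = acc.getSchema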
Example 14: main
// Package declaration and imported dependencies
package io.radicalbit.flink.closure
import org.apache.flink.api.scala._
import scala.collection.immutable.HashMap
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.core.fs.FileSystem.WriteMode
// NOTE: the original listing omits the object declaration; a hypothetical name is used here.
object TfIdfJob {
def main(args: Array[String]): Unit = {
val params = ParameterTool.fromArgs(args)
val env = ExecutionEnvironment.getExecutionEnvironment
val tokenize: String => Seq[String] = { s =>
val stopWords = Set("the", "i", "a", "an", "at", "are", "am", "for", "and",
  "or", "is", "there", "it", "this", "that", "on", "was", "by", "of",
  "to", "in", "not", "be", "with", "you", "have", "as", "can")
s.toLowerCase
  .split("\\W+")
  // keep only purely alphabetic tokens that are not stop words
  .filter(w => !stopWords.contains(w) && w.matches("(\\p{Alpha})+"))
}
val uniqueWords: String => Set[(String, Int)] = s => tokenize(s).toSet[String].map(w => (w, 1))
val mails = env.readCsvFile[(String, String)](
params.getRequired("input"),
lineDelimiter = params.get("line-delimiter", "##//##"),
fieldDelimiter = params.get("field-delimiter", "#|#"),
includedFields = Array(0, 4))
val numberOfDocs = mails.count
val tf = mails.flatMap { x =>
tokenize(x._2).foldLeft(HashMap.empty[String, Int]) { (h, w) =>
val c = h.getOrElse(w, 0)
h + (w -> (c + 1))
} map {
case (k, v) =>
(x._1, k, v)
}
}
val df = mails.flatMap(x => uniqueWords(x._2)).groupBy(0).reduce { (l, r) => (l._1, l._2 + r._2) }
val tfidf = tf.join(df).where(1).equalTo(0) { (l, r) => (l._1, l._2, l._3 * (numberOfDocs.toDouble / r._2)) }
tfidf.writeAsCsv(filePath = params.get("output", "~/Desktop/tf-idf.csv"), writeMode = WriteMode.OVERWRITE)
env.execute("TF-IDF Scala")
}
}
Example 15: Molecules
// Package declaration and imported dependencies
package scaffvis.client.store.model
import scaffvis.shared.model._
import diode.data.Pot
import scala.collection.immutable.{HashMap, HashSet}
case class Molecules(im: IndexedMolecules,
svg: Map[MoleculeId, Pot[String]] = Map.empty,
selected: Set[MoleculeId] = Set.empty, active: Option[MoleculeId] = None) {
def molecules: Iterable[Molecule] = im.molecules
def scaffoldMolecules(scaffoldId: ScaffoldId) = im.scaffoldIndex.getOrElse(scaffoldId, Set.empty)
def get(moleculeId: MoleculeId): Option[Molecule] = im.moleculeMap.get(moleculeId)
def get_!(moleculeId: MoleculeId): Molecule = im.moleculeMap.apply(moleculeId)
}
case class IndexedMolecules(molecules: Vector[Molecule]) {
val moleculeMap: Map[MoleculeId, Molecule] = HashMap(molecules.map(m => m.id -> m).toSeq:_*)
val scaffoldIndex: Map[ScaffoldId,Set[MoleculeId]] = {
val pairs: Iterable[(ScaffoldId, MoleculeId)] = for {
m <- molecules
s <- m.scaffolds
} yield s -> m.id
val groups: Map[ScaffoldId, Iterable[(ScaffoldId, MoleculeId)]] = pairs.groupBy(_._1)
val index: Map[ScaffoldId, Set[MoleculeId]] = groups.mapValues(i => i.map(_._2).to[HashSet])
HashMap(index.toSeq:_*)
}
}
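The scaffoldIndex construction above is the classic inverted-index pattern: emit (key, value) pairs, group by key, and collect each group's values into a set. A self-contained sketch of the same pattern:
import scala.collection.immutable.{HashMap, HashSet}
val pairs = Seq(1 -> "m1", 1 -> "m2", 2 -> "m1")
val index: Map[Int, Set[String]] =
  HashMap(pairs.groupBy(_._1).map { case (k, kvs) => k -> kvs.map(_._2).to[HashSet] }.toSeq: _*)
// index == Map(1 -> Set("m1", "m2"), 2 -> Set("m1"))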