This article collects typical usage examples of the java.util.concurrent.ConcurrentHashMap class in Scala. If you are wondering what the ConcurrentHashMap class is for, how to use it from Scala, or simply want to see it in real code, the curated class examples below should help.
The following presents 15 code examples of the ConcurrentHashMap class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Scala code examples.
Example 1:
// Package declaration and imported dependencies
package com.shashank.akkahttp.project

import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import com.shashank.akkahttp.project.Models.{LoadRequest, ServiceJsonProtoocol}
import spray.json.{DefaultJsonProtocol, JsArray, pimpAny}
import spray.json.DefaultJsonProtocol._
import org.apache.spark.sql.{Dataset, Row, SparkSession}

import scala.collection.JavaConverters._

trait RestService {
  implicit val system: ActorSystem
  implicit val materializer: ActorMaterializer
  implicit val sparkSession: SparkSession

  // in-memory registry of loaded datasets, keyed by an id handed back to the client
  val datasetMap = new ConcurrentHashMap[String, Dataset[Row]]()

  import ServiceJsonProtoocol._

  val route =
    pathSingleSlash {
      get {
        complete {
          "welcome to rest service"
        }
      }
    } ~
    path("load") {
      post {
        entity(as[LoadRequest]) {
          loadRequest => complete {
            val id = System.nanoTime().toString
            val dataset = sparkSession.read.format("csv")
              .option("header", "true")
              .load(loadRequest.path)
            datasetMap.put(id, dataset)
            id
          }
        }
      }
    } ~
    path("view" / """[\w[0-9]-_]+""".r) { id =>
      get {
        complete {
          // note: get returns null when the id is unknown
          val dataset = datasetMap.get(id)
          dataset.take(10).map(row => row.toString())
        }
      }
    }
}
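The example stops at the route definition. A minimal sketch of how the trait could be wired into a runnable server, assuming the Models definitions from the same project; the object name, host, and port below are made up:

import akka.http.scaladsl.Http

object RestServiceApp extends App with RestService {
  implicit val system: ActorSystem = ActorSystem("rest-service")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val sparkSession: SparkSession = SparkSession.builder()
    .master("local[*]")
    .appName("rest-service")
    .getOrCreate()

  // bindAndHandle is the classic (pre-10.2) akka-http entry point,
  // matching the ActorMaterializer-era APIs used above
  Http().bindAndHandle(route, "localhost", 8080)
}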
Example 2: RemoteMetricsOn
// Package declaration and imported dependencies
package akka.remote

import java.util.concurrent.ConcurrentHashMap

import scala.annotation.tailrec

import akka.actor.ActorSelectionMessage
import akka.actor.ActorSystem
import akka.actor.ExtendedActorSystem
import akka.actor.Extension
import akka.actor.ExtensionId
import akka.actor.ExtensionIdProvider
import akka.event.Logging
import akka.routing.RouterEnvelope

private[akka] class RemoteMetricsOn(system: ExtendedActorSystem) extends RemoteMetrics {
  private val logFrameSizeExceeding: Int = system.settings.config.getBytes(
    "akka.remote.log-frame-size-exceeding").toInt
  private val log = Logging(system, this.getClass)
  private val maxPayloadBytes: ConcurrentHashMap[Class[_], Integer] = new ConcurrentHashMap

  override def logPayloadBytes(msg: Any, payloadBytes: Int): Unit =
    if (payloadBytes >= logFrameSizeExceeding) {
      val clazz = msg match {
        case x: ActorSelectionMessage => x.msg.getClass
        case x: RouterEnvelope        => x.message.getClass
        case _                        => msg.getClass
      }
      // 10% threshold until next log
      def newMax = (payloadBytes * 1.1).toInt

      @tailrec def check(): Unit = {
        val max = maxPayloadBytes.get(clazz)
        if (max eq null) {
          if (maxPayloadBytes.putIfAbsent(clazz, newMax) eq null)
            log.info("Payload size for [{}] is [{}] bytes", clazz.getName, payloadBytes)
          else check()
        } else if (payloadBytes > max) {
          if (maxPayloadBytes.replace(clazz, max, newMax))
            log.info("New maximum payload size for [{}] is [{}] bytes", clazz.getName, payloadBytes)
          else check()
        }
      }
      check()
    }
}
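The check() loop above is the standard lock-free read-modify-write recipe for ConcurrentHashMap: read the current value, race to install a new one with putIfAbsent or replace, and retry on failure. A condensed, generic sketch of the same pattern (the class and method names are illustrative, not part of Akka):

import java.util.concurrent.ConcurrentHashMap
import scala.annotation.tailrec

class MaxTracker[K] {
  private val maxima = new ConcurrentHashMap[K, Integer]

  // returns true if this call established a new maximum for the key
  @tailrec final def record(key: K, value: Int): Boolean = {
    val current = maxima.get(key)
    if (current eq null) {
      // first observation: putIfAbsent wins only if nobody raced us
      if (maxima.putIfAbsent(key, value) eq null) true else record(key, value)
    } else if (value > current) {
      // replace succeeds only if the value is still the one we read
      if (maxima.replace(key, current, value)) true else record(key, value)
    } else false
  }
}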
Example 3: State
// Package declaration and imported dependencies
package com.github.btesila.weather.monitor.state.mgmt

import java.util.concurrent.ConcurrentHashMap

import com.github.btesila.weather.monitor.model.{ActiveLocationRecord, CurrentWeatherConditions, Location}
import org.joda.time._

import scala.collection._
import scala.collection.convert.decorateAsScala._

object State {
  val activeLocationRecords: concurrent.Map[String, ActiveLocationRecord] =
    new ConcurrentHashMap[String, ActiveLocationRecord]().asScala

  def recordIdFor(location: Location): String =
    recordIdFor(location.city.toLowerCase, location.country.toLowerCase)

  def recordIdFor(city: String, country: String): String =
    s"${city.toLowerCase}-${country.toLowerCase}"

  def findActiveLocation(city: String, country: String): Option[ActiveLocationRecord] = {
    val id = recordIdFor(city, country)
    activeLocationRecords.get(id) match {
      case Some(record) =>
        // refresh the last-triggered timestamp on every lookup
        activeLocationRecords.put(id, record.copy(lastTriggered = DateTime.now))
        Some(record)
      case _ => None
    }
  }

  def addLocationRecord(location: Location, crtWeatherConditions: CurrentWeatherConditions): Unit = {
    val id = recordIdFor(location)
    activeLocationRecords.put(id, ActiveLocationRecord(DateTime.now, location, crtWeatherConditions))
  }

  def updateLocationRecord(id: String, crtConditions: CurrentWeatherConditions): Unit =
    activeLocationRecords.get(id).foreach { record =>
      activeLocationRecords.put(id, record.copy(crtConditions = crtConditions))
    }

  def removeLocationRecord(entry: (String, ActiveLocationRecord)): Unit = {
    val (id, activeLocation) = entry
    activeLocationRecords.remove(id, activeLocation)
  }
}
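The decorateAsScala import used above has since been deprecated. On Scala 2.13 the same ConcurrentHashMap-backed concurrent.Map view comes from scala.jdk.CollectionConverters; a minimal sketch, assuming Scala 2.13:

import java.util.concurrent.ConcurrentHashMap
import scala.collection.concurrent
import scala.jdk.CollectionConverters._

// asScala yields a scala.collection.concurrent.Map view backed by the
// original ConcurrentHashMap, exactly as in the State object above
val records: concurrent.Map[String, ActiveLocationRecord] =
  new ConcurrentHashMap[String, ActiveLocationRecord]().asScala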
Example 4: Session
// Package declaration and imported dependencies
package com.spooky.bittorrent.l.session

import com.spooky.bittorrent.l.file.TorrentFileManager
import com.spooky.bittorrent.model.PeerId
import com.spooky.bittorrent.InfoHash
import java.util.concurrent.ConcurrentHashMap
import com.spooky.bittorrent.l.session.client.ClientSession
import java.util.function.Function
import com.spooky.cipher.MSEKeyPair
import com.spooky.bittorrent.model.TorrentStatistics
import spooky.actor.ActorRef

class Session(val fileManager: TorrentFileManager, val peerId: PeerId, val announce: ActorRef) extends ViewableSession {
  private val torrent = fileManager.torrent
  private val infoHash: InfoHash = fileManager.torrent.infoHash
  private val clients = new ConcurrentHashMap[PeerId, ClientSession]
  @volatile private var activeListeners: Int = 0
  @volatile private var activeClients: Int = 0

  def statistics: TorrentStatistics = {
    TorrentStatistics(torrent.infoHash, torrent.info.length, torrent.info.length, 0, 0)
  }

  def init(peerId: PeerId, keyPair: MSEKeyPair): ClientSession = { // not used
    // put returns the previous mapping (or null), so return the new session explicitly
    val session = new ClientSession(peerId, keyPair)
    clients.put(peerId, session)
    session
  }
  // announce peers
}
Example 5: Sessions
// Package declaration and imported dependencies
package com.spooky.bittorrent.l.session

import scala.collection.JavaConversions._

import com.spooky.bittorrent.model.PeerId
import com.spooky.bittorrent.l.file.TorrentFileManager
import java.util.concurrent.ConcurrentHashMap
import com.spooky.bittorrent.model.TorrentSetup
import com.spooky.bittorrent.Checksum
import com.spooky.bittorrent.InfoHash
import com.spooky.bittorrent.model.EnrichedTorrentSetup
import spooky.actor.ActorSystem
import com.spooky.bittorrent.actors.AnnounceActor

class Sessions(actorSystem: ActorSystem) {
  private val sessions = new ConcurrentHashMap[InfoHash, Session]

  def get(infoHash: InfoHash): Option[Session] = Option(sessions.get(infoHash))

  def register(setup: EnrichedTorrentSetup): Session = setup match {
    case EnrichedTorrentSetup(torrent, root, state) =>
      val session = get(torrent.infoHash)
      session.getOrElse {
        val fileManager = TorrentFileManager(torrent, root, state)
        val peerId = PeerId.create
        val session = new Session(fileManager, peerId, actorSystem.actorOf(AnnounceActor.props))
        sessions.put(torrent.infoHash, session)
        session
      }
  }

  def infoHashes: List[InfoHash] = sessions.keys().toList
}
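Note that register performs a get followed by a put, so two threads registering the same torrent at once could each build a Session, with one silently overwritten. A hedged sketch of an atomic variant as a method on the same class, assuming Scala 2.12+ where lambdas convert to Java's functional interfaces:

def registerAtomic(setup: EnrichedTorrentSetup): Session = setup match {
  case EnrichedTorrentSetup(torrent, root, state) =>
    // computeIfAbsent runs the factory at most once per key
    sessions.computeIfAbsent(torrent.infoHash, _ => {
      val fileManager = TorrentFileManager(torrent, root, state)
      new Session(fileManager, PeerId.create, actorSystem.actorOf(AnnounceActor.props))
    })
}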
Example 6: TcpWriteActor
// Package declaration and imported dependencies
package spooky.io

import java.nio.ByteBuffer
import java.nio.channels.SocketChannel
import java.util.concurrent.ConcurrentHashMap

import spooky.actor.{Actor, ActorRef, Props}
import spooky.io.TcpThread._

object TcpWriteActor {
  def props(channel: SocketChannel, actors: ConcurrentHashMap[Tcp.Address, (MessageActorRef, WriteActorRef)]): Props =
    Props(classOf[TcpWriteActor], channel, actors)
}

private[io] class TcpWriteActor(private val channel: SocketChannel, actors: ConcurrentHashMap[Tcp.Address, (MessageActorRef, WriteActorRef)]) extends Actor {
  def receive: PartialFunction[Any, Unit] = {
    case Tcp.Register(messageActor, address, _, _) =>
      val previous = actors.put(address, (messageActor, self))
      assert(previous == null)
      context.become(traffic())
  }

  private def traffic(): PartialFunction[Any, Unit] = {
    // case Tcp.Close => {
    //   sender() ! Tcp.Closed
    // }
    case Tcp.Write(data, Tcp.NoAck) =>
      write(data.toByteBuffer)
    case Tcp.Write(data, ack) =>
      if (write(data.toByteBuffer)) {
        sender() ! ack
      }
  }

  private def write(bb: ByteBuffer): Boolean = {
    if (!channel.isConnected) {
      Thread.currentThread.interrupt()
      false
    } else {
      // a single channel.write may not drain the buffer; loop until it does
      while (bb.hasRemaining()) {
        channel.write(bb)
      }
      true
    }
  }
}
Example 7: ExampleForeachHeader
// Package declaration and imported dependencies
package ppl.delite.runtime.codegen.kernels.scala.examples

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.locks.ReentrantLock

object ExampleForeachHeader {
  def apply(in0: Array[Int], in1: Array[Double], in2: Double) = new ExampleForeachHeader(in0, in1, in2)
}

final class ExampleForeachHeader(in0: Array[Int], in1: Array[Double], in2: Double) {
  // this is the apply method of another (kernel) object: shouldn't be generated
  def kernel_apply(in0: Array[Int], in1: Array[Double], in2: Double): Foreach = {
    new Foreach {
      def in = in0
      def foreach(elem: Int) { in1(elem) = (in1(elem - 1) + in1(elem) + in1(elem + 1)) / 3 }
      def sync(idx: Int) = List(in0(idx - 1), in0(idx), in0(idx + 1))
    }
  }

  abstract class Foreach {
    def in: Array[Int]
    def foreach(elem: Int)
    def sync(idx: Int): List[Any]
  }

  val closure = kernel_apply(in0, in1, in2)
  val lockMap = new ConcurrentHashMap[Any, ReentrantLock]
}

object ExampleForeach {
  def apply(foreach: ExampleForeachHeader) {
    val in = foreach.closure.in
    val size = in.size
    var i = size * 2 / 4   // size*chunkIdx/numChunks
    val end = size * 3 / 4 // size*(chunkIdx+1)/numChunks
    while (i < end) {
      // acquire all locks for this element's neighborhood in a global order
      val sync = foreach.closure.sync(i).sortBy(System.identityHashCode(_)) // TODO: optimize locking mechanism
      for (e <- sync) {
        foreach.lockMap.putIfAbsent(e, new ReentrantLock)
        foreach.lockMap.get(e).lock
      }
      foreach.closure.foreach(in(i))
      // release in reverse order
      for (e <- sync.reverse) {
        foreach.lockMap.get(e).unlock
      }
      i += 1
    }
  }
}
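Sorting by identityHashCode before locking is the classic deadlock-avoidance trick: every chunk acquires the locks for an element's neighborhood in one global order, so no two chunks can each hold a lock the other is waiting for. The pattern in isolation (the helper name is illustrative):

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.locks.ReentrantLock

def withOrderedLocks(lockMap: ConcurrentHashMap[Any, ReentrantLock], keys: List[Any])(body: => Unit): Unit = {
  // lock in ascending identity-hash order, unlock in reverse
  val ordered = keys.sortBy(System.identityHashCode(_))
  ordered.foreach { k =>
    lockMap.putIfAbsent(k, new ReentrantLock)
    lockMap.get(k).lock()
  }
  try body
  finally ordered.reverse.foreach(k => lockMap.get(k).unlock())
}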
Example 8: DispatcherThreadFactory
// Package declaration and imported dependencies
package knot.core.dispatch

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{ConcurrentHashMap, ThreadFactory}

object DispatcherThreadFactory {
  private val threadNumbers = new ConcurrentHashMap[String, AtomicInteger]()

  def getNewTheadNumber(prefix: String): Int = {
    // one counter per prefix, created atomically on first use
    threadNumbers.computeIfAbsent(prefix, _ => new AtomicInteger(1)).getAndIncrement()
  }
}

class DispatcherThreadFactory(val prefix: String) extends ThreadFactory {
  import DispatcherThreadFactory._

  override def newThread(r: Runnable): Thread = {
    val t = new Thread(r, s"knot-$prefix-${getNewTheadNumber(prefix)}")
    t.setDaemon(true)
    t.setPriority(Thread.NORM_PRIORITY)
    t
  }
}
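For context, such a factory is typically handed to an executor; a short usage sketch with made-up pool size and prefix:

import java.util.concurrent.Executors

val pool = Executors.newFixedThreadPool(4, new DispatcherThreadFactory("worker"))
// threads are now daemons named knot-worker-1, knot-worker-2, ...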
Example 9: get
// Package declaration and imported dependencies
package knot.core.logging

import java.util.concurrent.ConcurrentHashMap

import knot.core.config.{Configs, LogConfig}
import knot.core.util.Utils.Implicits._

import scala.reflect.ClassTag

trait LoggerFactory {
  def get[T: ClassTag](): Logger
  def get[T: ClassTag](config: LogConfig): Logger
  def get(clazz: Class[_]): Logger
  def get(clazz: Class[_], config: LogConfig): Logger
}

class DefaultLoggerFactory extends LoggerFactory {
  val loggerCache = new ConcurrentHashMap[String, Logger]()

  override def get[T: ClassTag](): Logger = {
    get(Configs.log)
  }

  override def get(clazz: Class[_]): Logger = {
    get(clazz, Configs.log)
  }

  override def get[T: ClassTag](config: LogConfig): Logger = {
    val c = implicitly[ClassTag[T]]
    get(c.runtimeClass, config)
  }

  override def get(clazz: Class[_], config: LogConfig): Logger = {
    Option(clazz) match {
      case Some(c) => loggerCache.computeIfAbsent(c.getName, x => StdOutLogger(x, config.logLevel.toIntOrDefault))
      case None => StdOutLogger("unknown")
    }
  }
}
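A brief usage sketch (the client class is hypothetical): a logger is created once per class name and served from the cache afterwards:

class CrawlScheduler // hypothetical client class

val factory = new DefaultLoggerFactory
val log = factory.get[CrawlScheduler]()          // resolved via ClassTag
val same = factory.get(classOf[CrawlScheduler])  // same cached instance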
Example 10: ScheduleActor
// Package declaration and imported dependencies
package com.actorsys.crawler

import java.util.concurrent.ConcurrentHashMap

import akka.actor.{Actor, ActorLogging, ActorRef}
import com.actorsys.service.{ScheduledWebUrl, WebUrl}

class ScheduleActor extends Actor with ActorLogging {
  val config = Map[String, Any](
    "domain.black.list" -> Seq("google.com", "facebook.com", "twitter.com"),
    "crawl.retry.times" -> 3,
    "filter.page.url.suffixes" -> Seq(".zip", ".avi", ".mkv", ".mp4")
  )
  val counter = new ConcurrentHashMap[String, Int]()

  override def receive: Receive = {
    case WebUrl(url) => sender ! ScheduledWebUrl(url, config)
    case (link: String, count: Int) =>
      counter.put(link, count)
      log.info("counter:" + counter.toString)
  }
}

object ScheduleActor {
  def sendFeeds(crawlerActorRef: ActorRef, seeds: Seq[String]) = {
    seeds.foreach(crawlerActorRef ! _)
  }
}
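A hedged usage sketch (system and actor names are made up): the scheduler answers WebUrl messages with a ScheduledWebUrl carrying the config, and records (link, count) pairs in its counter map:

import akka.actor.{ActorSystem, Props}

val system = ActorSystem("crawler")
val scheduler = system.actorOf(Props[ScheduleActor], "scheduler")
scheduler ! ("http://example.com/page", 3) // recorded in the counter map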
Example 11: ProjectQueryHandler
// Package declaration and imported dependencies
package fairshare.backend.project

import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

import fairshare.backend.eventsourcing.Fact
import fairshare.backend.eventsourcing.journals.PollingJournalReader

import scalaz.concurrent.Task
import scalaz.stream.sink

import scala.collection.JavaConverters._

class ProjectQueryHandler(reader: PollingJournalReader[ProjectEvent]) {
  val projectList = new ConcurrentHashMap[ProjectId, String]()

  private def updateProjectList(fact: Fact[ProjectEvent]): Unit = {
    val projectId = ProjectId(UUID.fromString(fact.subject.key))
    val projectName = fact.event match {
      case ProjectEvent.Created(_, name) => Some(name)
      case ProjectEvent.NameModified(name) => Some(name)
      case _ => None
    }
    for {
      name <- projectName
    } projectList.put(projectId, name)
  }

  // continuously fold journal updates into the in-memory projection
  val listUpdater = reader.readUpdates.to(
    sink.lift[Task, Fact[ProjectEvent]] {
      fact =>
        Task.delay {
          updateProjectList(fact)
        }
    }
  )

  listUpdater.run.runAsync(_ => ())

  def getAll: Map[ProjectId, String] = projectList.asScala.toMap
  def getById(projectId: ProjectId): Option[String] = projectList.asScala.get(projectId)
}
Example 12: ConnectionPool
// Package declaration and imported dependencies
package com.redislabs.provider.redis

import redis.clients.jedis.{JedisPoolConfig, Jedis, JedisPool}
import redis.clients.jedis.exceptions.JedisConnectionException
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConversions._

object ConnectionPool {
  @transient private lazy val pools: ConcurrentHashMap[RedisEndpoint, JedisPool] =
    new ConcurrentHashMap[RedisEndpoint, JedisPool]()

  def connect(re: RedisEndpoint): Jedis = {
    // getOrElseUpdate comes from the JavaConversions wrapper; note it is not atomic
    val pool = pools.getOrElseUpdate(re,
      {
        val poolConfig: JedisPoolConfig = new JedisPoolConfig()
        poolConfig.setMaxTotal(250)
        poolConfig.setMaxIdle(32)
        poolConfig.setTestOnBorrow(false)
        poolConfig.setTestOnReturn(false)
        poolConfig.setTestWhileIdle(false)
        poolConfig.setMinEvictableIdleTimeMillis(60000)
        poolConfig.setTimeBetweenEvictionRunsMillis(30000)
        poolConfig.setNumTestsPerEvictionRun(-1)
        new JedisPool(poolConfig, re.host, re.port, re.timeout, re.auth, re.dbNum)
      }
    )
    // retry with exponential backoff (capped at 500 ms) while the server
    // reports that its client limit has been reached
    var sleepTime: Int = 4
    var conn: Jedis = null
    while (conn == null) {
      try {
        conn = pool.getResource
      } catch {
        case e: JedisConnectionException if e.getCause.toString.
          contains("ERR max number of clients reached") =>
          if (sleepTime < 500) sleepTime *= 2
          Thread.sleep(sleepTime)
        case e: Exception => throw e
      }
    }
    conn
  }
}
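Because getOrElseUpdate comes from the JavaConversions wrapper, the lookup-and-create is not atomic: two executors connecting to the same endpoint simultaneously could each build a JedisPool, leaking one. A hedged sketch of an atomic alternative for the same object, assuming Scala 2.12+ SAM conversion (the eviction settings are elided here):

private def poolFor(re: RedisEndpoint): JedisPool =
  // computeIfAbsent builds at most one pool per endpoint
  pools.computeIfAbsent(re, _ => {
    val poolConfig = new JedisPoolConfig()
    poolConfig.setMaxTotal(250)
    poolConfig.setMaxIdle(32)
    new JedisPool(poolConfig, re.host, re.port, re.timeout, re.auth, re.dbNum)
  })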
Example 13: TimedFlowOps
// Package declaration and imported dependencies
package com.flipkart.connekt.busybees.streams.flows.profilers

import java.util.concurrent.{ConcurrentHashMap, TimeUnit}

import akka.stream.scaladsl.GraphDSL.Implicits._
import akka.stream.scaladsl.{BidiFlow, Flow, GraphDSL}
import akka.stream.{BidiShape, FlowShape}
import com.flipkart.connekt.busybees.models.RequestTracker
import com.flipkart.connekt.commons.factories.{ConnektLogger, LogFile}
import com.flipkart.connekt.commons.metrics.Instrumented
import com.flipkart.connekt.commons.utils.StringUtils._

import scala.collection.JavaConverters._
import scala.util.Try

object TimedFlowOps {

  implicit class TimedFlow[I, O, T <: RequestTracker, M](dispatchFlow: Flow[(I, T), (Try[O], T), M]) extends Instrumented {

    val startTimes = new ConcurrentHashMap[T, Long]().asScala

    // a BidiFlow that records the start time on the way out and the elapsed
    // time on the way back in
    private def profilingShape(apiName: String) = BidiFlow.fromGraph(GraphDSL.create() { implicit b =>
      val out = b.add(Flow[(I, T)].map {
        case (request, requestTracker) =>
          startTimes.put(requestTracker, System.currentTimeMillis())
          (request, requestTracker)
      })
      val in = b.add(Flow[(Try[O], T)].map {
        case (response, httpRequestTracker) =>
          startTimes.get(httpRequestTracker).map(start => {
            startTimes.remove(httpRequestTracker)
            val duration = System.currentTimeMillis() - start
            ConnektLogger(LogFile.PROCESSORS).trace(s"TimedFlowOps/$apiName MessageId: ${httpRequestTracker.messageId} took : $duration ms")
            duration
          }).foreach(registry.timer(getMetricName(apiName + Option(httpRequestTracker.provider).map("." + _).orEmpty)).update(_, TimeUnit.MILLISECONDS))
          (response, httpRequestTracker)
      })
      BidiShape.fromFlows(out, in)
    })

    def timedAs(apiName: String) = Flow.fromGraph(GraphDSL.create() { implicit b =>
      val s = b.add(profilingShape(apiName))
      val p = b.add(dispatchFlow)
      s.out1 ~> p ~> s.in2
      FlowShape(s.in1, s.out2)
    })
  }
}
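Thanks to the implicit class, usage is a one-liner. A sketch, assuming some dispatchFlow with the matching Flow[(I, T), (Try[O], T), M] shape already exists in scope:

import com.flipkart.connekt.busybees.streams.flows.profilers.TimedFlowOps._

// every request/response pair flowing through is now timed and reported
// under the "sendApi" metric name
val timedDispatch = dispatchFlow.timedAs("sendApi")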
Example 14: createNewExecutionContext
// Package declaration and imported dependencies
package com.smule.smg

import java.util.concurrent.{Executors, ConcurrentHashMap}

import scala.collection.concurrent
import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext

import play.libs.Akka

// the enclosing object declaration was missing from the snippet; the name
// "ExecutionContexts" is inferred from the log message in initializeUpdateContexts
object ExecutionContexts {
  val rrdGraphCtx: ExecutionContext = Akka.system.dispatchers.lookup("akka-contexts.rrd-graph")
  val monitorCtx: ExecutionContext = Akka.system.dispatchers.lookup("akka-contexts.monitor")

  private val log = SMGLogger

  private val ctxMap: concurrent.Map[Int, ExecutionContext] = new ConcurrentHashMap[Int, ExecutionContext]()

  private def createNewExecutionContext(maxThreads: Int): ExecutionContext = {
    val es = Executors.newFixedThreadPool(maxThreads)
    ExecutionContext.fromExecutorService(es)
  }

  def ctxForInterval(interval: Int): ExecutionContext = {
    ctxMap(interval)
  }

  def initializeUpdateContexts(intervals: Seq[Int], threadsPerIntervalMap: Map[Int, Int], defaultThreadsPerInterval: Int): Unit = {
    intervals.foreach { interval =>
      if (!ctxMap.contains(interval)) {
        val maxThreads = if (threadsPerIntervalMap.contains(interval)) threadsPerIntervalMap(interval) else defaultThreadsPerInterval
        val ec = createNewExecutionContext(maxThreads)
        ctxMap(interval) = ec
        log.info("ExecutionContexts.initializeUpdateContexts: Created ExecutionContext for interval=" + interval +
          " maxThreads=" + maxThreads + " ec.class=" + ec.getClass.getName)
      }
    }
  }
}
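A hedged usage sketch, assuming the ExecutionContexts object name restored above and made-up interval values: one fixed-size context per polling interval, created up front and looked up later:

ExecutionContexts.initializeUpdateContexts(
  intervals = Seq(60, 300),
  threadsPerIntervalMap = Map(60 -> 4),
  defaultThreadsPerInterval = 2)

implicit val ec: ExecutionContext = ExecutionContexts.ctxForInterval(60)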
Example 15: PriorKnowledgeTransporter
// Package declaration and imported dependencies
package com.twitter.finagle.http2.transport

import com.twitter.cache.FutureCache
import com.twitter.finagle.client.Transporter
import com.twitter.finagle.http2.Http2Transporter
import com.twitter.finagle.http2.transport.Http2ClientDowngrader.StreamMessage
import com.twitter.finagle.transport.Transport
import com.twitter.util.Future
import java.net.SocketAddress
import java.util.concurrent.ConcurrentHashMap

private[http2] class PriorKnowledgeTransporter(
  underlying: Transporter[Any, Any])
  extends Transporter[Any, Any] {

  private[this] val cache = new ConcurrentHashMap[SocketAddress, Future[MultiplexedTransporter]]()

  private[this] val fn: SocketAddress => Future[MultiplexedTransporter] = { addr: SocketAddress =>
    underlying(addr).map { transport =>
      val multi = new MultiplexedTransporter(
        Transport.cast[StreamMessage, StreamMessage](transport),
        addr
      )
      // evict the cached entry once the multiplexed session closes
      multi.onClose.ensure {
        cache.remove(addr, multi)
      }
      multi
    }
  }

  private[this] val cachedFn = FutureCache.fromMap(fn, cache)

  def apply(addr: SocketAddress): Future[Transport[Any, Any]] =
    cachedFn(addr).map { multi => Http2Transporter.unsafeCast(multi()) }
}