

Scala Collections Class Code Examples

This article collects typical usage examples of java.util.Collections in Scala. If you are wondering how to use the Collections class from Scala code, or what working examples look like, the curated class code examples below may help.


Fifteen code examples of the Collections class are shown below; by default they are ordered by popularity. Upvoting the examples you like or find useful helps the system recommend better Scala code samples.
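Most of the examples below lean on a handful of Collections factory methods (emptyMap, singletonList, unmodifiableList, synchronizedList) called directly from Scala. As a warm-up, here is a minimal, self-contained sketch, not taken from any of the projects below and with names of our own choosing, that shows those helpers together with the JavaConverters bridge several examples also use:

import java.util.Collections

import scala.collection.JavaConverters._

object CollectionsWarmUp extends App {
  // Immutable empty / single-element Java collections, handy as defaults and in tests
  val noParams: java.util.Map[String, String] = Collections.emptyMap[String, String]()
  val topics: java.util.List[String] = Collections.singletonList("myTopic")

  // Wrap a mutable Java list so that callers cannot modify it
  val backing = new java.util.ArrayList[Int]()
  backing.add(1)
  backing.add(2)
  val readOnly: java.util.List[Int] = Collections.unmodifiableList(backing)

  // Bridge to Scala collections when Scala combinators are more convenient
  println(topics.asScala.map(_.toUpperCase)) // Buffer(MYTOPIC)
  println(readOnly.asScala.sum)              // 3
}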

Example 1: HttpEventSSEHandleTest

// Package declaration and imported dependencies
package mesosphere.marathon
package core.event.impl.stream

import java.util.Collections
import javax.servlet.http.HttpServletRequest

import mesosphere.marathon.test.{ MarathonSpec, Mockito }
import org.eclipse.jetty.servlets.EventSource.Emitter
import org.scalatest.{ GivenWhenThen, Matchers }
import mesosphere.marathon.stream._

class HttpEventSSEHandleTest extends MarathonSpec with Matchers with Mockito with GivenWhenThen {

  test("events should be filtered") {
    Given("An emiter")
    val emitter = mock[Emitter]
    Given("An request with params")
    val req = mock[HttpServletRequest]
    req.getParameterMap returns Map("event_type" -> Array("xyz"))

    Given("handler for request is created")
    val handle = new HttpEventSSEHandle(req, emitter)

    When("Want to sent unwanted event")
    handle.sendEvent("any event", "")

    Then("event should NOT be sent")
    verify(emitter, never).event("any event", "")

    When("Want to sent subscribed event")
    handle.sendEvent("xyz", "")

    Then("event should be sent")
    verify(emitter).event("xyz", "")
  }

  test("events should NOT be filtered") {
    Given("An emiter")
    val emitter = mock[Emitter]

    Given("An request without params")
    val req = mock[HttpServletRequest]
    req.getParameterMap returns Collections.emptyMap()

    Given("handler for request is created")
    val handle = new HttpEventSSEHandle(req, emitter)

    When("Want to sent event")
    handle.sendEvent("any event", "")
    Then("event should NOT be sent")

    verify(emitter).event("any event", "")
    When("Want to sent event")

    handle.sendEvent("xyz", "")

    Then("event should be sent")
    verify(emitter).event("xyz", "")
  }
} 
Developer: xiaozai512, Project: marathon, Lines: 61, Source: HttpEventSSEHandleTest.scala

Example 2: TestAuthFixture

// Package declaration and imported dependencies
package mesosphere.marathon.api

import java.util.Collections
import javax.servlet.http.{ HttpServletRequest, HttpServletResponse }

import mesosphere.marathon.plugin.auth.{ Authenticator, AuthorizedAction, Authorizer, Identity }
import mesosphere.marathon.plugin.http.{ HttpRequest, HttpResponse }
import mesosphere.marathon.test.Mockito

import scala.concurrent.Future

class TestAuthFixture() extends Mockito {

  type Auth = Authenticator with Authorizer

  var identity: Identity = new Identity {}

  var authenticated: Boolean = true
  var authorized: Boolean = true
  var authFn: Any => Boolean = { _ => true }

  val UnauthorizedStatus = 401
  val NotAuthenticatedStatus = 403

  def auth: Auth = new Authorizer with Authenticator {
    override def authenticate(request: HttpRequest): Future[Option[Identity]] = {
      Future.successful(if (authenticated) Some(identity) else None)
    }
    override def handleNotAuthenticated(request: HttpRequest, response: HttpResponse): Unit = {
      response.status(NotAuthenticatedStatus)
    }
    override def handleNotAuthorized(principal: Identity, response: HttpResponse): Unit = {
      response.status(UnauthorizedStatus)
    }
    override def isAuthorized[Resource](
      principal: Identity,
      action: AuthorizedAction[Resource],
      resource: Resource): Boolean = {
      authorized && authFn(resource)
    }
  }

  var request: HttpServletRequest = {
    val req = mock[HttpServletRequest]
    req.getHeaderNames returns Collections.emptyEnumeration()
    req.getHeaders(any) returns Collections.emptyEnumeration()
    req
  }
  var response: HttpServletResponse = mock[HttpServletResponse]
} 
Developer: xiaozai512, Project: marathon, Lines: 51, Source: TestAuthFixture.scala

Example 3: KafkaConsumer

// Package declaration and imported dependencies
package jp.gr.java_conf.massakai.kafka

import java.util.Collections

import kafka.api.{PartitionOffsetRequestInfo, FetchRequestBuilder}
import kafka.common.TopicAndPartition
import kafka.javaapi._
import kafka.javaapi.consumer.SimpleConsumer
import collection.JavaConversions._

object KafkaConsumer {
  def findLeader(bootstraps: Seq[Broker], topic: String, partition: Int): Option[PartitionMetadata] = {
    for (bootstrap <- bootstraps) {
      val consumer = new SimpleConsumer(bootstrap.host, bootstrap.port, 100000, 64 * 1024, "leaderLookup")
      val topics = Collections.singletonList(topic)
      val req = new TopicMetadataRequest(topics)
      val resp = consumer.send(req)
      val metadata: java.util.List[TopicMetadata] = resp.topicsMetadata
      for (topicMetadata: TopicMetadata <- metadata) {
        for (partitionMetadata: PartitionMetadata <- topicMetadata.partitionsMetadata) {
          if (partitionMetadata.partitionId == partition) {
            return Some(partitionMetadata)
          }
        }
      }
    }
    None
  }
}

case class KafkaConsumer(leadBroker: String, port: Int, soTimeout: Int, bufferSize: Int, clientName: String) {
  val consumer = new SimpleConsumer(leadBroker, port, soTimeout, bufferSize, clientName)


  def getMessages(topic: String, partition: Int, offset: Long, fetchSize: Int): FetchResponse = {
    val request = new FetchRequestBuilder()
      .clientId(clientName)
      .addFetch(topic, partition, offset, fetchSize)
      .build()
    consumer.fetch(request)
  }

  def getLastOffset(topic: String, partition: Int, whichTime: Long): Option[Long] = {
    val topicAndPartition = new TopicAndPartition(topic, partition)
    val requestInfo = new java.util.HashMap[TopicAndPartition, PartitionOffsetRequestInfo]()
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1))
    val request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion, clientName)
    val response = consumer.getOffsetsBefore(request)
    if (response.hasError) {
      println("Error fetching data Offset Data the Broker. Reason: " + response.errorCode(topic, partition))
      None
    } else {
      Some(response.offsets(topic, partition)(0))
    }
  }

  // TODO: decide when/where to close the consumer
  // consumer.close()
} 
Developer: massakai, Project: finagle-kafka-sample, Lines: 60, Source: KafkaConsumer.scala

Example 4: Consumer

// Package declaration and imported dependencies
package co.s4n.infrastructure.kafka

import java.util.concurrent._
import java.util.{ Collections, Properties }

import org.apache.kafka.clients.consumer.{ ConsumerConfig, KafkaConsumer }

import scala.collection.JavaConverters._


object Consumer {

  def createConsumerConfig(): Properties = {
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "KafkaProducerExample")
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true")
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000")
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000")
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    props
  }

  def run(): Unit = {
    val consumer = new KafkaConsumer[String, String](createConsumerConfig())
    consumer.subscribe(Collections.singletonList("UsersTopic"))

    Executors.newSingleThreadExecutor.execute(() => {
      while (true) {
        val records = consumer.poll(1000)

        for (record <- records.iterator().asScala) {
          println("\n\n Received message: (" + record.key() + ", " + record.value() + ") at offset " + record.offset())
        }
      }
    })
  }

} 
Developer: bazzo03, Project: users-api, Lines: 41, Source: Consumer.scala

Example 5: QueryMatcher

// Package declaration and imported dependencies
package lert.elasticsearch.matcher

import java.util.{Collections, Date}
import javax.inject.Inject

import com.fasterxml.jackson.databind.ObjectMapper
import lert.core.processor.AlertMessage
import lert.core.status.Status
import lert.elasticsearch.Response
import org.apache.http.entity.ContentType
import org.apache.http.nio.entity.NStringEntity
import org.elasticsearch.client.RestClient
import lert.elasticsearch.ElasticSearchProcessorUtils._

class QueryMatcher @Inject()(implicit objectMapper: ObjectMapper) extends Matcher {
  override def supports(params: Map[String, _]): Boolean =
    params.contains("query")

  override def query(client: RestClient, params: Map[String, _], status: Option[Status]): Seq[AlertMessage] = {
    val lastProcessedTimestamp = status.map(_.lastProcessedTimestamp).getOrElse(new Date(0))
    val query = params("query").toString.replace("{lastProcessedTimestamp}", lastProcessedTimestamp.getTime.toString)
    val body = new NStringEntity(query, ContentType.APPLICATION_JSON)
    client
      .performRequest("GET", s"/${getIndexName(params)}/_search", Collections.emptyMap[String, String](), body)
      .getEntity
      .to[Response]
      .hits.hits
      .map(hit => AlertMessage(hit._source))
  }
} 
Developer: l3rt, Project: l3rt, Lines: 31, Source: QueryMatcher.scala

Example 6: IotHubPartitionSource

// Package declaration and imported dependencies
// Copyright (c) Microsoft. All rights reserved.

package com.microsoft.azure.iot.kafka.connect.source

import java.util.{Collections, Map}

import com.typesafe.scalalogging.LazyLogging
import org.apache.kafka.connect.data.Struct
import org.apache.kafka.connect.errors.ConnectException
import org.apache.kafka.connect.source.SourceRecord

import scala.collection.mutable.ListBuffer
import scala.util.control.NonFatal

class IotHubPartitionSource(val dataReceiver: DataReceiver,
    val partition: String,
    val topic: String,
    val batchSize: Int,
    val sourcePartition: Map[String, String])
  extends LazyLogging
    with JsonSerialization {

  def getRecords: List[SourceRecord] = {

    logger.debug(s"Polling for data from Partition $partition")
    val list = ListBuffer.empty[SourceRecord]
    try {
      val messages: Iterable[IotMessage] = this.dataReceiver.receiveData(batchSize)

      if (messages.isEmpty) {
        logger.debug(s"Finished processing all messages from partition ${this.partition}")
      } else {
        logger.debug(s"Received ${messages.size} messages from partition ${this.partition} " +
          s"(requested $batchSize batch)")

        for (msg: IotMessage <- messages) {

          val kafkaMessage: Struct = IotMessageConverter.getIotMessageStruct(msg)
          val sourceOffset = Collections.singletonMap("EventHubOffset",
            kafkaMessage.getString(IotMessageConverter.offsetKey))
          val sourceRecord = new SourceRecord(sourcePartition, sourceOffset, this.topic, kafkaMessage.schema(),
            kafkaMessage)
          list += sourceRecord
        }
      }
    } catch {
      case NonFatal(e) =>
        val errorMsg = s"Error while getting SourceRecords for partition ${this.partition}. " +
          s"Exception - ${e.toString} Stack trace - ${e.printStackTrace()}"
        logger.error(errorMsg)
        throw new ConnectException(errorMsg, e)
    }
    logger.debug(s"Obtained ${list.length} SourceRecords from IotHub")
    list.toList
  }
} 
Developer: Azure, Project: toketi-kafka-connect-iothub, Lines: 57, Source: IotHubPartitionSource.scala

Example 7: Consumer

// Package declaration and imported dependencies
package com.stulsoft.kafka2.consumer

import java.util.{Collections, Properties}

import com.typesafe.scalalogging.LazyLogging
import org.apache.kafka.clients.consumer.KafkaConsumer


object Consumer extends App with LazyLogging {
  logger.info("Started consumer")

  getMessages()

  def getMessages(): Unit = {
    val props = new Properties
    props.put("bootstrap.servers", "localhost:9092,localhost:9093")
    props.put("group.id", "test")
    props.put("enable.auto.commit", "true")
    props.put("auto.commit.interval.ms", "1000")
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")

    val consumer = new KafkaConsumer[String, String](props)
    consumer.subscribe(Collections.singletonList("myClusterTopic"))
    while (true) {
      val records = consumer.poll(100)
      records.forEach(record => {
        val resultText = s"""Received message.\n\tPartition = ${record.partition()}, offset is ${record.offset}, topic is "${record.topic()}" key is "${record.key}", value is "${record.value}""""
        logger.info(resultText)
      })
      consumer.commitSync()
    }
  }

  logger.info("Finished consumer")
} 
Developer: ysden123, Project: poc, Lines: 37, Source: Consumer.scala

Example 8: Consumer

// Package declaration and imported dependencies
package com.stulsoft.consumer

import org.slf4j.{Logger, LoggerFactory}
import java.util.{Collections, Properties}

import org.apache.kafka.clients.consumer.KafkaConsumer

/**
  * @author Yuriy Stul.
  */
object Consumer extends App {
  val logger: Logger = LoggerFactory.getLogger(Consumer.getClass)
  logger.info("Started consumer")
  readMessages()

  /**
    * Reads messages
    */
  def readMessages(): Unit = {

    val props = new Properties
    props.put("bootstrap.servers", "localhost:9092,localhost:9093")
    props.put("group.id", "test")
    props.put("enable.auto.commit", "true")
    props.put("auto.commit.interval.ms", "1000")
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")

    val consumer = new KafkaConsumer[String, String](props)
    consumer.subscribe(Collections.singletonList("myClusterTopic"))
    while (true) {
      val records = consumer.poll(100)
      records.forEach(record => {
        logger.info(s"offset = ${record.offset}, key = ${record.key}, value = ${record.value}")
      })
      consumer.commitSync()
    }

    //    consumer.unsubscribe()
    //    consumer.close()
  }
} 
Developer: ysden123, Project: poc, Lines: 43, Source: Consumer.scala

Example 9: TravisCommand

// Package declaration and imported dependencies
package com.atomist.rug.commands.travis

import com.atomist.rug.kind.service.ServicesMutableView
import com.atomist.rug.kind.travis.{RealTravisEndpoints, TravisAPIEndpoint, TravisEndpoints}
import com.atomist.rug.spi.Command
import org.springframework.http.HttpHeaders
import java.util.{Collections, Set}

class TravisCommand extends Command[ServicesMutableView] {

  override def nodeTypes: Set[String] = Collections.singleton("Services")

  override def name: String = "travis"

  override def invokeOn(treeNode: ServicesMutableView): AnyRef = {
    new TravisOperations(new RealTravisEndpoints)
  }
}

class TravisOperations(travisEndpoints: TravisEndpoints) {

  def restartBuild(buildId: Int, org: String, token: String): TravisStatus = {
    val api: TravisAPIEndpoint = TravisAPIEndpoint.stringToTravisEndpoint(org)
    val travisToken: String = travisEndpoints.postAuthGitHub(api, token)
    val headers: HttpHeaders = TravisEndpoints.authHeaders(api, travisToken)
    try {
      travisEndpoints.postRestartBuild(api, headers, buildId)
      new TravisStatus(true, s"Successfully restarted build `${buildId}` on Travis CI")
    }
    catch {
      case e: Exception => new TravisStatus(false, e.getMessage)
    }
  }
}

case class TravisStatus(success: Boolean = true, message: String = "") 
Developer: atomist-recycling, Project: travis-rug-type, Lines: 37, Source: TravisCommand.scala

Example 10: TravisEndpointsTest

// Package declaration and imported dependencies
package com.atomist.rug.kind.travis

import java.util.Collections

import com.atomist.rug.InvalidRugParameterPatternException
import org.scalatest.{FlatSpec, Matchers}

class TravisEndpointsTest extends FlatSpec with Matchers {

  import TravisAPIEndpoint._

  "stringToTravisEndpoint" should "accept org" in {
    stringToTravisEndpoint("org") should be(TravisOrgEndpoint)
  }

  it should "accept .org" in {
    stringToTravisEndpoint(".org") should be(TravisOrgEndpoint)
  }

  it should "accept com" in {
    stringToTravisEndpoint("com") should be(TravisComEndpoint)
  }

  it should "accept .com" in {
    stringToTravisEndpoint(".com") should be(TravisComEndpoint)
  }

  it should "throw an exception if not given a valid API type" in {
    an[InvalidRugParameterPatternException] should be thrownBy stringToTravisEndpoint(".blah")
  }

  "RealTravisEndpoints" should "return cached token" in {
    val t: String = "notarealtravistoken"
    val rte: RealTravisEndpoints = new RealTravisEndpoints
    rte.travisTokens = Collections.singletonMap("doesnotmatter", t)
    val api: TravisAPIEndpoint = TravisOrgEndpoint
    rte.postAuthGitHub(api, "doesnotmatter") should be(t)
  }

} 
Developer: atomist-recycling, Project: travis-rug-type, Lines: 41, Source: TravisEndpointsTest.scala

Example 11: Dynomite

// Package declaration and imported dependencies
package com.advancedspark.serving.spark

import com.netflix.dyno.jedis._
import com.netflix.dyno.connectionpool.Host
import com.netflix.dyno.connectionpool.HostSupplier
import com.netflix.dyno.connectionpool.TokenMapSupplier
import com.netflix.dyno.connectionpool.impl.lb.HostToken
import com.netflix.dyno.connectionpool.exception.DynoException
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl
import com.netflix.dyno.connectionpool.impl.ConnectionContextImpl
import com.netflix.dyno.connectionpool.impl.OperationResultImpl
import com.netflix.dyno.connectionpool.impl.utils.ZipUtils

import scala.collection.JavaConversions._
import java.util.Collections
import java.util.Collection
import java.util.Set
import java.util.List

object Dynomite {
  val localhostHost = new Host("127.0.0.1", Host.Status.Up)
  val localhostToken = new HostToken(100000L, localhostHost)

  val localhostHostSupplier = new HostSupplier() {
    override def getHosts(): Collection[Host] = {
      Collections.singletonList(localhostHost)
    }
  }

  val localhostTokenMapSupplier = new TokenMapSupplier() {
    override def getTokens(activeHosts: Set[Host]): List[HostToken] = {
      Collections.singletonList(localhostToken)
    }

    override def getTokenForHost(host: Host, activeHosts: Set[Host]): HostToken = {
      localhostToken
    }
  }

  val redisPort = 6379
  val client = new DynoJedisClient.Builder()
             .withApplicationName("pipeline")
             .withDynomiteClusterName("pipeline-dynomite")
             .withHostSupplier(localhostHostSupplier)
             .withCPConfig(new ConnectionPoolConfigurationImpl("localhostTokenMapSupplier")
                .withTokenSupplier(localhostTokenMapSupplier))
             .withPort(redisPort)
             .build()
} 
Developer: frankiegu, Project: serve.ml, Lines: 53, Source: Dynomite.scala
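A hypothetical way to exercise the client built above, assuming a Dynomite (or plain Redis) node is actually listening on 127.0.0.1:6379 and that DynoJedisClient exposes the usual Jedis-style string commands (get/set), which the snippet itself does not show:

object DynomiteSmokeTest extends App {
  // These calls assume the standard Jedis-style API on DynoJedisClient
  Dynomite.client.set("rating:user42:item7", "4.5")
  println(Dynomite.client.get("rating:user42:item7")) // expected: 4.5
}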

Example 12: ExecutionContextExecutorServiceFactory

// Package declaration and imported dependencies
package com.evolutiongaming.concurrent

import java.util
import java.util.Collections
import java.util.concurrent.AbstractExecutorService

import scala.concurrent.duration.TimeUnit
import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService}

object ExecutionContextExecutorServiceFactory {
  def apply(ec: ExecutionContext): ExecutionContextExecutorService = ec match {
    case eces: ExecutionContextExecutorService => eces
    case other                                 => new AbstractExecutorService with ExecutionContextExecutorService {
      override def prepare(): ExecutionContext = other
      override def isShutdown = false
      override def isTerminated = false
      override def shutdown(): Unit = ()
      override def shutdownNow(): util.List[Runnable] = Collections.emptyList[Runnable]
      override def execute(runnable: Runnable): Unit = other execute runnable
      override def reportFailure(t: Throwable): Unit = other reportFailure t
      override def awaitTermination(length: Long, unit: TimeUnit): Boolean = false
    }
  }
} 
Developer: evolution-gaming, Project: executor-tools, Lines: 25, Source: ExecutionContextExecutorServiceFactory.scala
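A short usage sketch (the object and value names below are ours, not part of the project above): the factory turns any ExecutionContext into a java.util.concurrent.ExecutorService, so it can be handed to Java APIs that expect one. Note that shutdown and shutdownNow are deliberately no-ops in the wrapper, so the underlying ExecutionContext keeps running.

import java.util.concurrent.{Callable, ExecutorService}

import scala.concurrent.ExecutionContext

object ExecutorServiceFactoryUsage extends App {
  // Wrap the global ExecutionContext so a Java API can consume it
  val service: ExecutorService = ExecutionContextExecutorServiceFactory(ExecutionContext.global)

  // Tasks submitted through the Java interface run on the wrapped ExecutionContext
  val answer = service.submit(new Callable[Int] { override def call(): Int = 21 * 2 })
  println(answer.get()) // 42
}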

Example 13: ScanResultIteratorTest

// Package declaration and imported dependencies
package com.mobilerq.awsutil.dynamodb

import java.util.Collections

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB
import com.amazonaws.services.dynamodbv2.model.{AttributeValue, ScanRequest, ScanResult}
import org.junit.Assert._
import org.junit.Test
import org.mockito.ArgumentCaptor
import org.mockito.Mockito._
import org.mockito.ArgumentMatchers._

import scala.collection.JavaConverters._

class ScanResultIteratorTest {
  implicit val client = mock(classOf[AmazonDynamoDB])
  val requestCaptor: ArgumentCaptor[ScanRequest] = ArgumentCaptor.forClass(classOf[ScanRequest])

  @Test
  def testIteration(): Unit = {
    val items = Seq(
      Map("item" ? new AttributeValue("1")),
      Map("item" ? new AttributeValue("2")),
      Map("item" ? new AttributeValue("3"))
    )
    when(client.scan(any(classOf[ScanRequest])))
      .thenReturn(new ScanResult()
        .withLastEvaluatedKey(items(1).asJava)
        .withItems(Seq(items(0).asJava, items(1).asJava).asJavaCollection)
        .withCount(2))
      .thenReturn(new ScanResult()
        .withLastEvaluatedKey(items(2).asJava)
        .withItems(Seq(items(2).asJava).asJavaCollection)
        .withCount(1))
      .thenReturn(new ScanResult()
        .withItems(Collections.emptyList[java.util.Map[String, AttributeValue]]())
        .withCount(0))
      .thenThrow(new RuntimeException("ouch"))
    val result = new ScanResultIterator(new ScanRequest).flatMap(_.getItems.asScala).map(_.asScala("item").getS).toList
    assertEquals(List("1", "2", "3"), result)
  }

} 
Developer: mobilerq, Project: mrq-aws-util, Lines: 44, Source: ScanResultIteratorTest.scala

Example 14: User

// Package declaration and imported dependencies
package org.jboss.perf.model

import java.util
import java.util.Collections
import org.keycloak.representations.idm.{RoleRepresentation, CredentialRepresentation, UserRepresentation}
import scala.collection.JavaConverters._


case class User(val username: String, val password: String, var id: String, val active: Boolean, val realmRoles: List[String]) {

  def this(map: Map[String, String]) {
    this(map("username"), map("password"), map("id"), true, List())
  }

  def getCredentials: CredentialRepresentation = {
    var credentials = new CredentialRepresentation
    credentials.setType(CredentialRepresentation.PASSWORD)
    credentials.setTemporary(false)
    credentials.setValue(password)
    credentials
  }

  def toMap: Map[String, String] =
    Map(("username", username), ("password", password), ("id", id))

  def toRepresentation: UserRepresentation = {
    var representation = new UserRepresentation
    // Id is ignored
    representation.setUsername(username)
    if (active) {
      representation.setFirstName("Johny");
      representation.setLastName("Active");
    } else {
      representation.setFirstName("Bob");
      representation.setLastName("Sleepy")
    }
    representation.setEnabled(active)
    // Actually the credentials will be ignored on server
    representation.setCredentials(Collections.singletonList(getCredentials))
    representation.setRealmRoles(realmRoles.asJava)
    representation
  }

  def getRealmRoles(roleIds : Map[String, RoleRepresentation]): util.List[RoleRepresentation] = {
    realmRoles.map(r => roleIds.get(r).orNull).asJava
  }
} 
Developer: rvansa, Project: keycloak-benchmark, Lines: 48, Source: User.scala

Example 15: CollectionAccumulatorAPIScalaImpl

// Package declaration and imported dependencies
package com.datawizards.sparklocal.impl.scala.accumulator

import java.util.Collections

import com.datawizards.sparklocal.accumulator.{AccumulatorV2API, CollectionAccumulatorAPI}

class CollectionAccumulatorAPIScalaImpl[T](name: Option[String]=None)
  extends AccumulatorV2APIScalaImpl[T, java.util.List[T]](name)
    with CollectionAccumulatorAPI[T] {

  private val _list: java.util.List[T] = Collections.synchronizedList(new java.util.ArrayList[T]())

  override def isZero: Boolean = _list.isEmpty

  override def copy(): AccumulatorV2API[T, java.util.List[T]] = {
    val newAcc = new CollectionAccumulatorAPIScalaImpl[T]
    _list.synchronized {
      newAcc._list.addAll(_list)
    }
    newAcc
  }

  override def reset(): Unit = _list.clear()

  override def add(v: T): Unit = _list.add(v)

  override def merge(other: AccumulatorV2API[T, java.util.List[T]]): Unit = other match {
    case o:CollectionAccumulatorAPIScalaImpl[T] => _list.addAll(o.value)
    case _ => throw new UnsupportedOperationException(
      s"Cannot merge ${this.getClass.getName} with ${other.getClass.getName}")
  }

  override def value: java.util.List[T] = _list.synchronized {
    java.util.Collections.unmodifiableList(new java.util.ArrayList[T](_list))
  }
} 
Developer: piotr-kalanski, Project: spark-local, Lines: 37, Source: CollectionAccumulatorAPIScalaImpl.scala
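A minimal sketch of driving the accumulator above directly. The names below are ours, the surrounding AccumulatorV2API classes from the project are assumed to be on the classpath, and we assume the value accessor that the implementation overrides is declared on AccumulatorV2API:

object CollectionAccumulatorDemo extends App {
  val acc = new CollectionAccumulatorAPIScalaImpl[String](Some("visitedHosts"))
  acc.add("host-a")
  acc.add("host-b")

  val snapshot = acc.copy() // copies the current elements
  acc.reset()               // the original is empty again

  println(acc.isZero)       // true
  println(snapshot.value)   // [host-a, host-b] (unmodifiable view)
}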


Note: The java.util.Collections class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code, and do not republish without permission.