This article collects typical usage examples of the Scala class org.scalatest.time.Millis. If you have been wondering what Millis is for or how it is used in practice, the snippets selected here should help.
The 15 code examples below all use the Millis class and are listed in order of popularity by default.
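Before the examples, here is a minimal sketch (not taken from any of the projects below; the class name PatienceSketchSpec and the trivial Future are made up for illustration) of where Millis fits in: it is a time unit passed to Span, and Spans in turn build the PatienceConfig that ScalaTest's ScalaFutures and Eventually traits consult to decide how long to wait for an asynchronous result and how often to poll for it. Almost every example that follows repeats this pattern.

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{FlatSpec, Matchers}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

// Illustrative spec only: Millis is the unit of the polling interval below.
class PatienceSketchSpec extends FlatSpec with Matchers with ScalaFutures {
  // Wait up to 2 seconds in total, checking the future every 50 milliseconds.
  implicit val defaultPatience: PatienceConfig =
    PatienceConfig(timeout = Span(2, Seconds), interval = Span(50, Millis))

  "futureValue" should "return the result of an asynchronous computation" in {
    Future(21 * 2).futureValue shouldBe 42
  }
}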
Example 1: MemoryBufferSpec
// Set the package name and import the required classes
package akka.stream.alpakka.s3.impl
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.TestKit
import akka.util.ByteString
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.scalatest.concurrent.ScalaFutures
class MemoryBufferSpec(_system: ActorSystem)
extends TestKit(_system)
with FlatSpecLike
with Matchers
with BeforeAndAfterAll
with ScalaFutures {
def this() = this(ActorSystem("MemoryBufferSpec"))
implicit val defaultPatience =
PatienceConfig(timeout = Span(5, Seconds), interval = Span(30, Millis))
implicit val materializer = ActorMaterializer(ActorMaterializerSettings(system).withDebugLogging(true))
"MemoryBuffer" should "emit a chunk on its output containg the concatenation of all input values" in {
val result = Source(Vector(ByteString(1, 2, 3, 4, 5), ByteString(6, 7, 8, 9, 10, 11, 12), ByteString(13, 14)))
.via(new MemoryBuffer(200))
.runWith(Sink.seq)
.futureValue
result should have size (1)
val chunk = result.head
chunk.size should be(14)
chunk.data.runWith(Sink.seq).futureValue should be(Seq(ByteString(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)))
}
it should "fail if more than maxSize bytes are fed into it" in {
whenReady(
Source(Vector(ByteString(1, 2, 3, 4, 5), ByteString(6, 7, 8, 9, 10, 11, 12), ByteString(13, 14)))
.via(new MemoryBuffer(10))
.runWith(Sink.seq)
.failed
) { e =>
e shouldBe an[IllegalStateException]
}
}
}
Example 2: SplitAfterSizeSpec
// Set the package name and import the required classes
package akka.stream.alpakka.s3.impl
import akka.testkit.TestKit
import akka.stream.ActorMaterializerSettings
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import akka.stream.ActorMaterializer
import akka.actor.ActorSystem
import org.scalatest.Matchers
import org.scalatest.FlatSpecLike
import akka.stream.scaladsl.Source
import akka.stream.scaladsl.Flow
import akka.util.ByteString
import akka.stream.scaladsl.Sink
import org.scalatest.time.{Millis, Seconds, Span}
import scala.concurrent.duration._
class SplitAfterSizeSpec(_system: ActorSystem)
extends TestKit(_system)
with FlatSpecLike
with Matchers
with BeforeAndAfterAll
with ScalaFutures {
def this() = this(ActorSystem("SplitAfterSizeSpec"))
implicit val defaultPatience =
PatienceConfig(timeout = Span(5, Seconds), interval = Span(30, Millis))
implicit val materializer = ActorMaterializer(ActorMaterializerSettings(system).withDebugLogging(true))
"SplitAfterSize" should "yield a single empty substream on no input" in {
Source
.empty[ByteString]
.via(
SplitAfterSize(10)(Flow[ByteString]).concatSubstreams
)
.runWith(Sink.seq)
.futureValue should be(Seq.empty)
}
it should "start a new stream after the element that makes it reach a maximum, but not split the element itself" in {
Source(Vector(ByteString(1, 2, 3, 4, 5), ByteString(6, 7, 8, 9, 10, 11, 12), ByteString(13, 14)))
.via(
SplitAfterSize(10)(Flow[ByteString]).prefixAndTail(10).map { case (prefix, tail) => prefix }.concatSubstreams
)
.runWith(Sink.seq)
.futureValue should be(
Seq(
Seq(ByteString(1, 2, 3, 4, 5), ByteString(6, 7, 8, 9, 10, 11, 12)),
Seq(ByteString(13, 14))
)
)
}
}
Example 3: TestBase
// Set the package name and import the required classes
package uk.co.appministry.scathon.client
import org.scalatest.concurrent._
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, Inside, Matchers, WordSpec}
import uk.co.appministry.scathon.testServer.TestMarathon
class TestBase extends WordSpec
with Eventually
with ScalaFutures
with Inside
with Matchers
with BeforeAndAfterAll
with AsyncAssertions {
implicit override val patienceConfig = PatienceConfig(timeout = scaled(Span(10, Seconds)), interval = scaled(Span(100, Millis)))
var client: Client = _
var server: TestMarathon = _
override def beforeAll: Unit = {
server = new TestMarathon
server.start()
client = new Client(port = server.port.get)
}
override def afterAll: Unit = {
server.stop()
}
}
Example 4: executionContext
// Set the package name and import the required classes
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, Supervision}
import akka.testkit.{TestKit, TestKitBase}
import com.taxis99.amazon.sns.SnsClientFactory
import com.taxis99.amazon.sqs.SqsClientFactory
import com.typesafe.config.ConfigFactory
import org.scalatest._
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatest.time.{Millis, Minute, Span}
import scala.concurrent.ExecutionContext
package object it {
trait IntegrationSpec extends AsyncFlatSpec with Matchers with OptionValues with PatienceConfiguration
with TestKitBase with BeforeAndAfterAll {
implicit lazy val system: ActorSystem = ActorSystem("test", ConfigFactory.parseString("""
akka.actor.deployment.default.dispatcher = "akka.test.calling-thread-dispatcher"
"""))
override implicit def executionContext: ExecutionContext = system.dispatcher
override implicit def patienceConfig = PatienceConfig(timeout = Span(1, Minute), interval = Span(5, Millis))
implicit lazy val amazonSqsConn = SqsClientFactory.atLocalhost(9324)
implicit lazy val amazonSnsConn = SnsClientFactory.atLocalhost(9292)
val decider: Supervision.Decider = {
case _ => Supervision.Stop
}
val settings = ActorMaterializerSettings(system).withSupervisionStrategy(decider)
implicit lazy val materializer = ActorMaterializer(settings)
override def afterAll {
TestKit.shutdownActorSystem(system)
}
}
}
Example 5: defaultPatientConfig
// Set the package name and import the required classes
package co.s4n.infrastructure.database
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, TestSuite }
import scala.concurrent.duration._
// The published snippet is truncated: ScalaFutures supplies PatienceConfig, BeforeAndAfterAll supplies
// beforeAll, and `database` is assumed to be provided by the concrete spec that mixes in this trait.
trait CassandraSpec extends TestSuite with ScalaFutures with BeforeAndAfterAll {
implicit val defaultPatientConfig =
PatienceConfig(timeout = Span(15, Seconds), interval = Span(500, Millis))
override def beforeAll(): Unit = {
synchronized {
EmbeddedCassandraServerHelper
.startEmbeddedCassandra("embedded-cassandra.yaml", 120.seconds.toMillis)
database.create(5.seconds)
super.beforeAll()
}
}
}
Example 6: BankProductRepositoryTest
// Set the package name and import the required classes
package com.knol.db.repo
import org.scalatest.concurrent.ScalaFutures
import com.knol.db.connection.H2DBComponent
import org.scalatest.FunSuite
import org.scalatest.time.Seconds
import org.scalatest.time.Millis
import org.scalatest.time.Span
class BankProductRepositoryTest extends FunSuite with BankProductRepository with H2DBComponent with ScalaFutures {
implicit val defaultPatience = PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))
test("Add new Product ") {
val response = create(BankProduct("car loan", 1))
whenReady(response) { productId =>
assert(productId === 3)
}
}
test("Update bank product ") {
val response = update(BankProduct("Home Loan", 1, Some(1)))
whenReady(response) { res =>
assert(res === 1)
}
}
test("Delete bank info ") {
val response = delete(1)
whenReady(response) { res =>
assert(res === 1)
}
}
test("Get product list") {
val products = getAll()
whenReady(products) { result =>
assert(result === List(BankProduct("home loan", 1, Some(1)), BankProduct("eduction loan", 1, Some(2))))
}
}
test("Get bank and their product list") {
val bankProduct = getBankWithProduct()
whenReady(bankProduct) { result =>
assert(result === List((Bank("SBI bank", Some(1)), BankProduct("home loan", 1, Some(1))), (Bank("SBI bank", Some(1)), BankProduct("eduction loan", 1, Some(2)))))
}
}
test("Get all bank and product list") {
val bankProduct = getAllBankWithProduct()
whenReady(bankProduct) { result =>
assert(result === List((Bank("SBI bank", Some(1)), Some(BankProduct("home loan", 1, Some(1)))), (Bank("SBI bank", Some(1)), Some(BankProduct("eduction loan", 1, Some(2)))), (Bank("PNB bank", Some(2)), None)))
}
}
}
Example 7: BankRepositoryTest
// Set the package name and import the required classes
package com.knol.db.repo
import org.scalatest.FunSuite
import com.knol.db.connection.H2DBComponent
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
class BankRepositoryTest extends FunSuite with BankRepository with H2DBComponent with ScalaFutures {
implicit val defaultPatience = PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))
test("Add new bank ") {
val response = create(Bank("ICICI bank"))
whenReady(response) { bankId =>
assert(bankId === 3)
}
}
test("Update SBI bank ") {
val response = update(Bank("SBI Bank", Some(1)))
whenReady(response) { res =>
assert(res === 1)
}
}
test("Delete SBI bank ") {
val response = delete(2)
whenReady(response) { res =>
assert(res === 1)
}
}
test("Get bank list") {
val bankList = getAll()
whenReady(bankList) { result =>
assert(result === List(Bank("SBI bank", Some(1)), Bank("PNB bank", Some(2))))
}
}
}
Example 8: BankInfoRepositoryTest
// Set the package name and import the required classes
package com.knol.db.repo
import org.scalatest.concurrent.ScalaFutures
import com.knol.db.connection.H2DBComponent
import org.scalatest.FunSuite
import org.scalatest.time.Seconds
import org.scalatest.time.Millis
import org.scalatest.time.Span
class BankInfoRepositoryTest extends FunSuite with BankInfoRepository with H2DBComponent with ScalaFutures {
implicit val defaultPatience = PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))
test("Add new bank info") {
val response = create(BankInfo("Goverment", 1000, 1))
whenReady(response) { bankInfoId =>
assert(bankInfoId === 2)
}
}
test("Update bank info ") {
val response = update(BankInfo("goverment", 18989, 1, Some(1)))
whenReady(response) { res =>
assert(res === 1)
}
}
test("Delete bank info ") {
val response = delete(1)
whenReady(response) { res =>
assert(res === 1)
}
}
test("Get bank info list") {
val bankInfo = getAll()
whenReady(bankInfo) { result =>
assert(result === List(BankInfo("goverment", 10000, 1, Some(1))))
}
}
test("Get bank and their info list") {
val bankInfo = getBankWithInfo()
whenReady(bankInfo) { result =>
assert(result === List((Bank("SBI bank", Some(1)), BankInfo("goverment", 10000, 1, Some(1)))))
}
}
test("Get all bank and info list") {
val bankInfo = getAllBankWithInfo()
whenReady(bankInfo) { result =>
assert(result === List((Bank("SBI bank", Some(1)), Some(BankInfo("goverment", 10000, 1, Some(1)))), (Bank("PNB bank", Some(2)), None)))
}
}
}
Example 9: PerpetualStreamTest
// Set the package name and import the required classes
package de.choffmeister.microserviceutils
import akka.Done
import akka.actor.{ActorSystem, Props}
import akka.stream.scaladsl.{Concat, Keep, Sink, Source}
import akka.stream.{KillSwitches, ThrottleMode}
import akka.testkit.{TestKit, TestProbe}
import org.scalatest._
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Seconds, Span}
import scala.collection.mutable.ListBuffer
import scala.concurrent.Future
import scala.concurrent.duration._
class PerpetualStreamTest extends TestKit(ActorSystem()) with FlatSpecLike with Matchers with Eventually with BeforeAndAfterAll {
implicit val defaultPatience = PatienceConfig(timeout = Span(30, Seconds), interval = Span(100, Millis))
"PerpetualStream" should "work" in {
val list = ListBuffer.empty[Int]
val stream = system.actorOf(Props(new NumberGeneratingPerpetualStream(list)))
val probe = new TestProbe(system)
probe.watch(stream)
eventually(require(list.length >= 20, "Must have emitted at least 20 numbers"))
list.take(20) should be((-10 until 10).toList)
GracefulShutdownExtension(system).triggerGracefulShutdown()
probe.expectTerminated(stream)
}
override def afterAll(): Unit = TestKit.shutdownActorSystem(system)
class NumberGeneratingPerpetualStream(list: ListBuffer[Int]) extends PerpetualStream {
var count = 0
override def stream = {
val currentCount = count
count = count + 1
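// Each (re)materialization gets a different source: the first fails on purpose, the second completes
// normally, and later runs use a throttled long source - together they exercise PerpetualStream's restarts.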
val source = currentCount match {
case 0 => Source.combine(Source(-10 until 0), Source.failed[Int](new RuntimeException("ERROR")))(Concat(_))
case 1 => Source(0 until 10)
case _ => Source(10 to 100000).throttle(1000, 100.millis, 100, ThrottleMode.shaping)
}
source
.via(watcher)
.viaMat(KillSwitches.single)(Keep.right)
.map { i => list.append(i); i }
.to(Sink.ignore)
.mapMaterializedValue(ks => () => {
ks.shutdown()
Future.successful(Done)
})
}
}
}
Example 10: StreamUtilsSpec
// Set the package name and import the required classes
package com.bluelabs.akkaaws
import java.security.{DigestInputStream, MessageDigest}
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Source, StreamConverters}
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import akka.testkit.TestKit
import akka.util.ByteString
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{FlatSpecLike, Matchers}
import scala.concurrent.Future
class StreamUtilsSpec(_system: ActorSystem) extends TestKit(_system) with FlatSpecLike with Matchers with ScalaFutures {
def this() = this(ActorSystem("StreamUtilsSpec"))
implicit val materializer = ActorMaterializer(ActorMaterializerSettings(system).withDebugLogging(true))
implicit val defaultPatience =
PatienceConfig(timeout = Span(5, Seconds), interval = Span(30, Millis))
"digest" should "calculate the digest of a short string" in {
val bytes: Array[Byte] = "abcdefghijklmnopqrstuvwxyz".getBytes()
val flow: Future[ByteString] = Source.single(ByteString(bytes)).runWith(StreamUtils.digest())
val testDigest = MessageDigest.getInstance("SHA-256").digest(bytes)
whenReady(flow) { result =>
result should contain theSameElementsInOrderAs testDigest
}
}
it should "calculate the digest of a file" in {
val input = StreamConverters.fromInputStream(() => getClass.getResourceAsStream("/testdata.txt"))
val flow: Future[ByteString] = input.runWith(StreamUtils.digest())
val testDigest = MessageDigest.getInstance("SHA-256")
val dis: DigestInputStream = new DigestInputStream(getClass.getResourceAsStream("/testdata.txt"), testDigest)
val buffer = new Array[Byte](1024)
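// Drain the file through the DigestInputStream so its MessageDigest has seen every byte
// before comparing against the digest computed by the Akka Streams stage.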
var bytesRead: Int = dis.read(buffer)
while (bytesRead > -1) {
bytesRead = dis.read(buffer)
}
whenReady(flow) { result =>
result should contain theSameElementsInOrderAs dis.getMessageDigest.digest()
}
}
}
Example 11: dockerInitPatienceInterval
// Set the package name and import the required classes
package integration
import com.spotify.docker.client.DefaultDockerClient
import com.typesafe.scalalogging.StrictLogging
import com.whisk.docker.DockerFactory
import com.whisk.docker.impl.spotify.SpotifyDockerFactory
import com.whisk.docker.scalatest.DockerTestKit
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Second, Seconds, Span}
import play.api.libs.ws.ahc.StandaloneAhcWSClient
import play.api.test.NoMaterializer
import scala.concurrent.duration._
trait IntegrationSpec extends FlatSpec with Matchers with ScalaFutures with StrictLogging with BeforeAndAfterAll
with DockerKairosDBService
with DockerTestKit {
override val StartContainersTimeout = 30.seconds
implicit val pc = PatienceConfig(Span(20, Seconds), Span(1, Second))
override def dockerInitPatienceInterval = PatienceConfig(scaled(Span(30, Seconds)), scaled(Span(10, Millis)))
override implicit val dockerFactory: DockerFactory = new SpotifyDockerFactory(DefaultDockerClient.fromEnv().build())
implicit val materializer = NoMaterializer
val wsClient = StandaloneAhcWSClient()
override def afterAll(): Unit = {
wsClient.close()
super.afterAll()
}
}
Example 12: StreamingSpec
// Set the package name and import the required classes
package com.beachape.sparkka
import akka.actor.ActorSystem
import akka.stream.{ SourceShape, ActorMaterializer }
import akka.stream.scaladsl._
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ Matchers, FunSpec }
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
class StreamingSpec extends FunSpec with ScalaFutures with Matchers with Eventually {
override implicit val patienceConfig: PatienceConfig = PatienceConfig(Span(30, Seconds), Span(150, Millis))
describe("streamConnection") {
it("should allow both the original flow and the connected InputDStream to receive all expected values") {
implicit val actorSystem = ActorSystem()
implicit val materializer = ActorMaterializer()
implicit val ssc = LocalContext.ssc
// InputDStream can then be used to build elements of the graph that require integration with Spark
val (inputDStream, feedDInput) = Streaming.connection[Int]()
val source = Source.fromGraph(GraphDSL.create() { implicit builder =>
import GraphDSL.Implicits._
val source = Source(1 to 10)
val bCast = builder.add(Broadcast[Int](2))
val merge = builder.add(Merge[Int](2))
val add1 = Flow[Int].map(_ + 1)
val times3 = Flow[Int].map(_ * 3)
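// Broadcast every element: one copy goes through add1 straight into the merge, the other is
// multiplied by 3 and pushed into Spark via feedDInput before rejoining the merge.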
source ~> bCast ~> add1 ~> merge
bCast ~> times3 ~> feedDInput ~> merge
SourceShape(merge.out)
})
val reducedFlow = source.runWith(Sink.fold(0)(_ + _))
whenReady(reducedFlow)(_ shouldBe 230)
val sharedVar = ssc.sparkContext.accumulator(0)
inputDStream.foreachRDD { rdd =>
rdd.foreach { i =>
sharedVar += i
}
}
ssc.start()
eventually(sharedVar.value shouldBe 165)
}
}
}
Example 13: StatusApiSpec
// Set the package name and import the required classes
package name.denyago.yasc.integration.httpapi
import name.denyago.yasc.Main
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{Exceptional, FunSpec, Matchers, Outcome}
import scala.util.Try
import scalaj.http.Http
class StatusApiSpec extends FunSpec with Matchers with Eventually {
override def withFixture(test: NoArgTest): Outcome = {
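// Boot the full application for each test, capture any thrown exception as the test outcome,
// and always stop the app afterwards.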
val app = new Main(Array.empty[String])
app.run()
val outcome = Try { super.withFixture(test) }.recover {
case t: Throwable => Exceptional(t)
}.getOrElse(
Exceptional(new RuntimeException("No test outcome present"))
)
app.stop()
outcome
}
describe("Status HTTP API") {
implicit val patienceConfig =
PatienceConfig(timeout = scaled(Span(5, Seconds)), interval = scaled(Span(100, Millis)))
it("should return OK status when the server up and running") {
eventually {
val response = Http("http://localhost:8888/status").asString
response.code shouldEqual 200
response.body shouldEqual "{\"status\":\"OK\"}"
}
}
}
}
Example 14: OneSpecToRuleThemAll
// Set the package name and import the required classes
package com.vatbox.epoximise
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import org.scalatest.time.{Millis, Seconds, Span}
import scala.collection.mutable
class OneSpecToRuleThemAll extends AsyncFreeSpecLike with ScalaFutures with GeneratorDrivenPropertyChecks with Matchers with BeforeAndAfterAll {
val dao = SimpleDao
override implicit val patienceConfig: PatienceConfig = PatienceConfig(Span(10, Seconds), Span(100, Millis))
"Generate random objects and insert them to DB" in {
var buffer = mutable.Buffer[UpperLevel]()
forAll(Generators.upperLevelGen, MinSuccessful(100)) { obj =>
val future = dao.insert(obj)
future.map { _ =>
buffer += obj
}
}
dao.find[UpperLevel]().map { seq =>
seq should contain theSameElementsAs buffer
}
}
override protected def beforeAll(): Unit = {
dao.clean().futureValue
}
}
Example 15: TimeFetchTest
// Set the package name and import the required classes
package rere.driver
import java.time.{LocalDateTime, ZoneOffset}
import akka.actor.{ActorSystem, Terminated}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{Matchers, WordSpec}
import rere.driver.pool.ConnectionPool
import scala.concurrent.ExecutionContext
class TimeFetchTest extends WordSpec with ScalaFutures with Matchers {
implicit val defaultPatience =
PatienceConfig(timeout = Span(15, Seconds), interval = Span(100, Millis))
"driver" should {
"fetch time" in {
implicit val ec = ExecutionContext.global
implicit val system = ActorSystem("rere")
val credentials = Credentials("admin", "")
val settings = ConnectionSettings("127.0.0.1", 28015, ConnectionSettings.noSslConnection)
val poolSize = 1
val pool = ConnectionPool.create(credentials, settings, "pool", poolSize)
import rere.driver.runners.all._
import rere.ql.queries.all._
val utcOffset = ZoneOffset.UTC
val now = LocalDateTime.now(utcOffset)
whenReady(r.now().run(pool).future()) { result =>
Math.abs(
result.toLocalDateTime.toInstant(utcOffset).getEpochSecond - now.toInstant(utcOffset).getEpochSecond
) should be < 10L
Math.abs(
result.toInstant.getEpochSecond - now.toInstant(utcOffset).getEpochSecond
) should be < 10L
whenReady(pool.shutdown()) { shutdownResult =>
shutdownResult.queriesStarted shouldBe 1L
shutdownResult.connectionsTurnedOff shouldBe poolSize
whenReady(system.terminate()) { terminationResult =>
terminationResult shouldBe an[Terminated]
}
}
}
}
}
}