This article collects typical usage examples of the Scala class akka.stream.scaladsl.Sink. If you are unsure what the Sink class does or how to use it, the examples selected here should help.
The sections below present 15 code examples of the Sink class, ordered by popularity.
Example 1: ProcessingKafkaApplication
// Package declaration and imports
package com.packt.chapter8
import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerSettings, Subscriptions}
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source}
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, StringDeserializer, StringSerializer}
import scala.concurrent.duration._
object ProcessingKafkaApplication extends App {
implicit val actorSystem = ActorSystem("SimpleStream")
implicit val actorMaterializer = ActorMaterializer()
val bootstrapServers = "localhost:9092"
val kafkaTopic = "akka_streams_topic"
val partition = 0
val subscription = Subscriptions.assignment(new TopicPartition(kafkaTopic, partition))
val consumerSettings = ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer)
.withBootstrapServers(bootstrapServers)
.withGroupId("akka_streams_group")
.withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
val producerSettings = ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
.withBootstrapServers(bootstrapServers)
val runnableGraph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
import GraphDSL.Implicits._
val tickSource = Source.tick(0 seconds, 5 seconds, "Hello from Akka Streams using Kafka!")
val kafkaSource = Consumer.plainSource(consumerSettings, subscription)
val kafkaSink = Producer.plainSink(producerSettings)
val printlnSink = Sink.foreach(println)
val mapToProducerRecord = Flow[String].map(elem => new ProducerRecord[Array[Byte], String](kafkaTopic, elem))
val mapFromConsumerRecord = Flow[ConsumerRecord[Array[Byte], String]].map(record => record.value())
tickSource ~> mapToProducerRecord ~> kafkaSink
kafkaSource ~> mapFromConsumerRecord ~> printlnSink
ClosedShape
})
runnableGraph.run()
}
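Sink.foreach, used for printlnSink above, runs a procedure for every element and materializes a Future[Done] that completes when the stream terminates. A minimal, self-contained sketch (names are illustrative):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object SinkForeachSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // Sink.foreach materializes a Future[Done] that completes once all elements are processed
  Source(1 to 3)
    .runWith(Sink.foreach(println))
    .onComplete(_ => system.terminate())
}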
Example 2: Pusher
// Package declaration and imports
package reactivehub.akka.stream.apns.pusher
import akka.actor.ActorSystem
import akka.kafka.ConsumerSettings
import akka.kafka.scaladsl.Consumer
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink}
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.handler.ssl.SslContext
import org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_OFFSET_RESET_CONFIG
import reactivehub.akka.stream.apns.Environment._
import reactivehub.akka.stream.apns.TlsUtil._
import reactivehub.akka.stream.apns._
import reactivehub.akka.stream.apns.marshallers.SprayJsonSupport
object Pusher extends SprayJsonSupport {
val kafka = "192.168.99.100:9092"
val clientId = "pusher1"
val consumerGroup = "pusher"
val topics = Set("notifications")
implicit val system = ActorSystem("system")
implicit val materializer = ActorMaterializer()
import system.dispatcher
def main(args: Array[String]): Unit = {
val group = new NioEventLoopGroup()
val apns = ApnsExt(system).connection[Long](Development, sslContext, group)
Consumer.atMostOnceSource(consumerSettings)
.map(msg => msg.key -> toNotification(msg.value))
.filter(_._2.deviceToken.bytes.length < 100)
.viaMat(apns)(Keep.right)
.log("pusher", _.toString())
.to(Sink.ignore).run()
.onComplete { _ =>
group.shutdownGracefully()
system.terminate()
}
}
private def sslContext: SslContext =
loadPkcs12FromResource("/cert.p12", "password")
private def consumerSettings: ConsumerSettings[Long, PushData] =
ConsumerSettings(system, ScalaLongDeserializer, PushDataDeserializer, topics)
.withBootstrapServers(kafka)
.withClientId(clientId)
.withGroupId(consumerGroup)
.withProperty(AUTO_OFFSET_RESET_CONFIG, "earliest")
private def toNotification(pushData: PushData): Notification = {
var builder = Payload.Builder()
pushData.alert.foreach(alert => builder = builder.withAlert(alert))
pushData.badge.foreach(badge => builder = builder.withBadge(badge))
Notification(DeviceToken(pushData.token), builder.result)
}
}
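Pusher runs its stream purely for side effects, so Sink.ignore supplies the completion signal that drives the shutdown hook. A reduced sketch of the same materialized-value plumbing, without Kafka or APNs:

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import scala.concurrent.Future

object IgnoreMatValueSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // Keep.right selects the sink's Future[Done] as the graph's materialized value
  val done: Future[Done] = Source(1 to 100).toMat(Sink.ignore)(Keep.right).run()
  done.onComplete(_ => system.terminate()) // same cleanup hook as Pusher's onComplete
}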
Example 3: HTableStage
// Package declaration and imports
package akka.stream.alpakka.hbase.javadsl
import akka.stream.alpakka.hbase.HTableSettings
import akka.stream.alpakka.hbase.internal.HBaseFlowStage
import akka.stream.scaladsl.{Flow, Keep, Sink}
import akka.{Done, NotUsed}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.Put
import scala.collection.immutable
import scala.concurrent.Future
object HTableStage {
def table[T](conf: Configuration,
tableName: TableName,
columnFamilies: java.util.List[String],
converter: java.util.function.Function[T, Put]): HTableSettings[T] = {
import scala.compat.java8.FunctionConverters._
import scala.collection.JavaConverters._
HTableSettings(conf, tableName, immutable.Seq(columnFamilies.asScala: _*), asScalaFromFunction(converter))
}
def sink[A](config: HTableSettings[A]): akka.stream.javadsl.Sink[A, Future[Done]] =
Flow[A].via(flow(config)).toMat(Sink.ignore)(Keep.right).asJava
def flow[A](settings: HTableSettings[A]): akka.stream.javadsl.Flow[A, A, NotUsed] =
Flow.fromGraph(new HBaseFlowStage[A](settings)).asJava
}
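HTableStage.sink shows the standard recipe for deriving a Sink from a Flow: run the flow into Sink.ignore, keep the right-hand materialized value, and (for the javadsl wrapper) convert with asJava. A sketch of the same composition with plain types:

import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import scala.concurrent.Future

object ComposedSinkSketch {
  // Run a processing Flow into Sink.ignore, keeping the sink's Future[Done]
  val sink: Sink[String, Future[Done]] =
    Flow[String].map(_.trim).toMat(Sink.ignore)(Keep.right)

  // scaladsl stages convert to their javadsl counterparts with .asJava
  val javaSink: akka.stream.javadsl.Sink[String, Future[Done]] = sink.asJava
}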
Example 4: PlayerServiceImpl
// Package declaration and imports
package com.chriswk.gameranker.player.impl
import java.util.UUID
import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import com.chriswk.gameranker.player.api
import com.chriswk.gameranker.player.api.PlayerService
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.api.transport.NotFound
import com.lightbend.lagom.scaladsl.persistence.PersistentEntityRegistry
import scala.concurrent.ExecutionContext
class PlayerServiceImpl(registry: PersistentEntityRegistry, system: ActorSystem)(implicit ec: ExecutionContext, mat: Materializer) extends PlayerService {
private val currentIdsQuery = PersistenceQuery(system).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
override def createPlayer = ServiceCall { createPlayer =>
val playerId = UUID.randomUUID()
refFor(playerId).ask(CreatePlayer(createPlayer.name)).map { _ =>
api.Player(playerId, createPlayer.name)
}
}
override def getPlayer(playerId: UUID) = ServiceCall { _ =>
refFor(playerId).ask(GetPlayer).map {
case Some(player) => api.Player(playerId, player.name)
case None => throw NotFound(s"Player with id $playerId")
}
}
private def refFor(playerId: UUID) = registry.refFor[PlayerEntity](playerId.toString)
override def getPlayers = ServiceCall { _ =>
currentIdsQuery.currentPersistenceIds()
.filter(_.startsWith("PlayerEntity|"))
.mapAsync(4) { id =>
val entityId = id.split("\\|", 2).last
registry.refFor[PlayerEntity](entityId)
.ask(GetPlayer)
.map(_.map(player => api.Player(UUID.fromString(entityId), player.name)))
}
.collect {
case Some(p) => p
}
.runWith(Sink.seq)
}
}
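getPlayers drains the whole persistence-query stream with Sink.seq, which materializes a Future of all elements. A minimal sketch of that collector outside Lagom:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import scala.concurrent.Future

object SinkSeqSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // Sink.seq gathers every element into an immutable Seq once the stream completes
  val names: Future[Seq[String]] =
    Source(List("alice", "bob")).map(_.capitalize).runWith(Sink.seq)
  names.foreach { ns => println(ns); system.terminate() }
}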
Example 5: MovieListPipeline
// Package declaration and imports
package com.stacktrace.yo.scrapeline.imdb.pipelines
import java.nio.file.Paths
import akka.NotUsed
import akka.stream.scaladsl.{FileIO, Flow, Keep, Sink, Source}
import akka.stream.{ActorMaterializer, IOResult}
import akka.util.ByteString
import com.stacktrace.yo.scrapeline.core.ScrapeClient.jsoup
import com.stacktrace.yo.scrapeline.core._
import com.stacktrace.yo.scrapeline.imdb.Domain.MovieNameAndDetailUrl
import net.ruippeixotog.scalascraper.dsl.DSL._
import net.ruippeixotog.scalascraper.model.Document
import net.ruippeixotog.scalascraper.scraper.ContentExtractors.elementList
import scala.concurrent.Future
class MovieListPipeline(implicit val m: ActorMaterializer) {
def getPipelineSource: Source[jsoup.DocumentType, NotUsed] = Source.single(ScrapeClient.scrape("http://www.the-numbers.com/movie/budgets/all"))
def getParseFlow: Flow[Document, MovieNameAndDetailUrl, NotUsed] = {
Flow[Document]
.mapConcat(doc => {
val table = doc >> elementList("table tr")
val movieLinkTuples = table.flatMap(tr => {
val name = tr >> elementList("tr b a")
name.map(
link => {
MovieNameAndDetailUrl(link.text, "http://www.the-numbers.com/" + link.attr("href"))
}
)
})
movieLinkTuples
})
}
def getPipeOut: Sink[MovieNameAndDetailUrl, Future[IOResult]] = Flow[MovieNameAndDetailUrl]
.map(s => ByteString(s.name + "\n"))
.toMat(FileIO.toPath(Paths.get("movie.txt")))(Keep.right)
def buildAndRun: Future[IOResult] = {
getPipelineSource
.via(getParseFlow)
.runWith(getPipeOut)
}
}
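getPipeOut adapts FileIO.toPath, a Sink[ByteString, Future[IOResult]], by prepending a serializing Flow. A compact stand-alone sketch (the output file name is illustrative):

import java.nio.file.Paths
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, IOResult}
import akka.stream.scaladsl.{FileIO, Source}
import akka.util.ByteString
import scala.concurrent.Future

object FileSinkSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // FileIO.toPath writes incoming ByteStrings and reports bytes written via Future[IOResult]
  val result: Future[IOResult] =
    Source(List("first", "second"))
      .map(line => ByteString(line + "\n"))
      .runWith(FileIO.toPath(Paths.get("lines.txt"))) // hypothetical output file
  result.onComplete(_ => system.terminate())
}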
Example 6: offerBlocking
// Package declaration and imports
package sample.stream
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, OverflowStrategy, QueueOfferResult, ThrottleMode}
import akka.stream.scaladsl.{Sink, Source, SourceQueue, SourceQueueWithComplete}
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.util.Random
// The enclosing object and the SyncQueue class header were lost when this example was
// extracted; they are reconstructed here (with assumed names) so the code compiles.
object SourceQueueSample {
  implicit val system = ActorSystem("SourceQueueSample")
  implicit val materializer = ActorMaterializer()

  class SyncQueue[T](q: SourceQueue[T]) {
    def offerBlocking(elem: T, maxWait: Duration = 1.second): Future[QueueOfferResult] =
      synchronized {
        // offer returns a Future, which completes with the result of the enqueue operation
        // must only be used from a single thread
        val result = q.offer(elem)
        Await.ready(result, maxWait)
        result
      }
  }

  def asyncOp(userID: Long): Future[String] = {
    Thread.sleep(10) // without waiting time we see a lot of "Future(<not completed>)"
    // the original wrapped this in a try/catch whose result was discarded;
    // rewritten so the failure branch actually returns
    if (Random.nextInt() % 2 == 0) {
      println("asyncOp: random exception")
      Future.successful("Exception")
    } else Future.successful(s"user: $userID")
  }

  def main(args: Array[String]): Unit = {
    val targetQueue =
      Source.queue[Future[String]](Int.MaxValue, OverflowStrategy.backpressure)
        .to(Sink.foreach(println))
        .run()
    val targetSyncQueue = new SyncQueue(targetQueue)
    Source(1 to Int.MaxValue)
      //.throttle(1000, 1.second, 1, ThrottleMode.shaping)
      .mapAsync(1)(x => targetSyncQueue.offerBlocking(asyncOp(x)))
      .runWith(Sink.ignore)
  }
}
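SyncQueue exists because SourceQueue.offer must not be called concurrently: each offer's Future should complete before the next call, which offerBlocking enforces with Await. A reduced sketch of driving a queue-backed stream, chaining offers instead of blocking:

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, OverflowStrategy, QueueOfferResult}
import akka.stream.scaladsl.{Sink, Source}

object QueueSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // Materialize a queue feeding the stream; backpressure is surfaced via each offer's Future
  val queue = Source.queue[Int](bufferSize = 16, OverflowStrategy.backpressure)
    .to(Sink.foreach(println))
    .run()

  // Chain offers sequentially so only one is in flight at a time
  queue.offer(1).flatMap(_ => queue.offer(2)).foreach {
    case QueueOfferResult.Enqueued => system.terminate()
    case other                     => println(other); system.terminate()
  }
}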
Example 7: FlowFromGraph
// Package declaration and imports
package sample.graphDSL
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Merge, Sink, Source}
import akka.stream.{ActorMaterializer, FlowShape, UniformFanInShape, UniformFanOutShape}
object FlowFromGraph {
def main(args: Array[String]): Unit = {
implicit val system = ActorSystem("FlowFromGraph")
implicit val ec = system.dispatcher
implicit val materializer = ActorMaterializer()
val processorFlow1: Flow[Int, Int, NotUsed] = Flow[Int].map(_ * 2)
val processorFlow2: Flow[Int, Int, NotUsed] = Flow[Int].map(_ * 3)
val listOfFlows = List(processorFlow1, processorFlow2)
def compoundFlowFrom[T](indexFlows: Seq[Flow[T, T, NotUsed]]): Flow[T, T, NotUsed] = {
require(indexFlows.nonEmpty, "Cannot create compound flow without any flows to combine")
Flow.fromGraph(GraphDSL.create() { implicit b =>
import akka.stream.scaladsl.GraphDSL.Implicits._
val broadcast: UniformFanOutShape[T, T] = b.add(Broadcast(indexFlows.size))
val merge: UniformFanInShape[T, T] = b.add(Merge(indexFlows.size))
indexFlows.foreach(broadcast ~> _ ~> merge)
FlowShape(broadcast.in, merge.out)
})
}
val compoundFlow = compoundFlowFrom(listOfFlows)
Source(1 to 10)
.via(compoundFlow)
.runWith(Sink.foreach(println(_)))
.onComplete(_ => system.terminate())
}
}
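The hand-built Broadcast/Merge graph above generalizes fan-out across flows; when the targets are sinks rather than flows, Sink.combine does the same wiring in one call. A sketch (names illustrative):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Broadcast, Sink, Source}

object CombinedSinkSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()

  // Sink.combine builds one Sink that fans each element out to both targets via Broadcast
  val fanOut = Sink.combine(
    Sink.foreach[Int](i => println(s"sink1: $i")),
    Sink.foreach[Int](i => println(s"sink2: $i"))
  )(Broadcast[Int](_))

  Source(1 to 3).runWith(fanOut)
  // Sink.combine materializes NotUsed, so there is no completion future to hook;
  // a real application would terminate the system elsewhere
}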
Example 8: Main
// Package declaration and imports
package edu.uw.at.iroberts.wirefugue.sensor
import java.nio.file.Paths
import akka.actor.ActorSystem
import akka.kafka.{ProducerMessage, ProducerSettings}
import akka.kafka.scaladsl.Producer
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{FileIO, Sink}
import com.typesafe.config.ConfigFactory
import edu.uw.at.iroberts.wirefugue.pcap.PcapFileRaw.LinkType
import edu.uw.at.iroberts.wirefugue.pcap._
import edu.uw.at.iroberts.wirefugue.kafka.producer.{KafkaKey, PacketProducer}
import edu.uw.at.iroberts.wirefugue.kafka.serdes.PacketSerializer
import edu.uw.at.iroberts.wirefugue.protocol.overlay.{Ethernet, IPV4Datagram}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{ByteArraySerializer, IntegerSerializer}
import scala.concurrent.Await
import scala.concurrent.duration._
object Main {
def main(args: Array[String]): Unit = {
if (args.length < 1) {
println("Please specify a filename as the first argument")
System.exit(1)
}
val config = ConfigFactory.load("application.conf")
implicit val system = ActorSystem("stream-producer-system", config)
implicit val materializer = ActorMaterializer()
val producerSettings = ProducerSettings[Integer, Packet](system, None, None)
val doneF = PcapSource(Paths.get(args(0)).toUri)
.filter( p => p.network == LinkType.ETHERNET && p.ip.isDefined )
.map( packet => new ProducerRecord[Integer, Packet]("packets", packet.key.##, packet))
.map( pr => new ProducerMessage.Message[Integer, Packet, Unit](pr, ()))
.via(Producer.flow(producerSettings))
.runWith(Sink.foreach(println))
try {
Await.ready(doneF, 10 seconds)
}
finally {
system.terminate()
}
}
}
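Producer.flow is the pass-through variant; when the per-message metadata is not needed, the same pipeline collapses into Producer.plainSink. A hedged sketch (broker address and topic are illustrative):

import akka.actor.ActorSystem
import akka.kafka.ProducerSettings
import akka.kafka.scaladsl.Producer
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.StringSerializer

object PlainSinkSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
    .withBootstrapServers("localhost:9092") // assumed broker address

  // Producer.plainSink materializes a Future[Done] that completes when the stream finishes
  Source(1 to 10)
    .map(n => new ProducerRecord[String, String]("packets", n.toString)) // topic name assumed
    .runWith(Producer.plainSink(producerSettings))
    .onComplete(_ => system.terminate())
}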
Example 9: stringPersister
// Package declaration and imports
package services
import javax.inject.{Inject, Singleton}
import akka.NotUsed
import akka.stream._
import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Sink}
import play.api.Logger
import play.api.libs.concurrent.Execution.Implicits._
import scala.concurrent.Future
import scala.util.{Failure, Success}
// The enclosing class header was lost when this example was extracted; a minimal
// wrapper (with an assumed name) is reconstructed here so the code compiles.
@Singleton
class StringPersister @Inject() () {
  def stringPersister(pf: String => Future[Unit]): Flow[String, String, NotUsed] =
    Flow.fromGraph(GraphDSL.create() { implicit builder =>
      import GraphDSL.Implicits._
      val persistenceSink = Sink.foreach[String] { content =>
        val f = pf(content)
        f.onComplete {
          case Success(u) => Logger.debug(s"Persisted content: '$content'")
          case Failure(t) => Logger.error(s"Failed to persist content: '$content'", t)
        }
      }
      val bcast = builder.add(Broadcast[String](2))
      bcast.out(1) ~> persistenceSink
      FlowShape(bcast.in, bcast.out(0))
    })
}
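The GraphDSL broadcast above implements a pass-through flow with a side-effecting branch; Akka Streams also ships this pattern as the alsoTo operator. A sketch of the equivalent, assuming the same pf callback (logging omitted for brevity):

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Sink}
import scala.concurrent.Future

object AlsoToSketch {
  // Each element is copied into the side sink and also emitted downstream unchanged,
  // mirroring the Broadcast-based stringPersister above
  def stringPersister(pf: String => Future[Unit]): Flow[String, String, NotUsed] =
    Flow[String].alsoTo(Sink.foreach[String](content => pf(content)))
}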
Example 10: toPath
// Package declaration and imports
package akka.stream.alpakka.ftp.scaladsl
import akka.NotUsed
import akka.stream.IOResult
import akka.stream.alpakka.ftp.impl.{FtpLike, FtpSourceFactory, FtpSourceParams, FtpsSourceParams, SftpSourceParams}
import akka.stream.alpakka.ftp.{FtpFile, RemoteFileSettings}
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import net.schmizz.sshj.SSHClient
import org.apache.commons.net.ftp.FTPClient
import scala.concurrent.Future
sealed trait FtpApi[FtpClient] { _: FtpSourceFactory[FtpClient] =>
def toPath(
path: String,
connectionSettings: S,
append: Boolean = false
): Sink[ByteString, Future[IOResult]] =
Sink.fromGraph(createIOSink(path, connectionSettings, append))
protected[this] implicit def ftpLike: FtpLike[FtpClient, S]
}
object Ftp extends FtpApi[FTPClient] with FtpSourceParams
object Ftps extends FtpApi[FTPClient] with FtpsSourceParams
object Sftp extends FtpApi[SSHClient] with SftpSourceParams
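Sink.fromGraph, used by toPath to lift the IO graph stage, accepts any Graph[SinkShape[T], M]. A self-contained sketch that builds a small SinkShape graph with GraphDSL instead of a custom stage:

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, SinkShape}
import akka.stream.scaladsl.{Flow, GraphDSL, Sink, Source}

object SinkFromGraphSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // Sink.fromGraph lifts any Graph[SinkShape[T], M] into a Sink[T, M],
  // the same way toPath lifts the FTP IO stage
  val loggingSink = Sink.fromGraph(GraphDSL.create(Sink.foreach[String](println)) {
    implicit b => printSink =>
      import GraphDSL.Implicits._
      val prefix = b.add(Flow[String].map("ftp> " + _))
      prefix.out ~> printSink.in
      SinkShape(prefix.in)
  })

  Source(List("a.txt", "b.txt")).runWith(loggingSink).onComplete(_ => system.terminate())
}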
Example 11: listFiles
// Package declaration and imports
package akka.stream.alpakka.ftp
import akka.NotUsed
import akka.stream.alpakka.ftp.FtpCredentials.AnonFtpCredentials
import akka.stream.alpakka.ftp.scaladsl.Sftp
import akka.stream.IOResult
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import scala.concurrent.Future
import java.net.InetAddress
trait BaseSftpSpec extends SftpSupportImpl with BaseSpec {
//#create-settings
val settings = SftpSettings(
InetAddress.getByName("localhost"),
getPort,
AnonFtpCredentials,
strictHostKeyChecking = false,
knownHosts = None,
sftpIdentity = None
)
//#create-settings
protected def listFiles(basePath: String): Source[FtpFile, NotUsed] =
Sftp.ls(basePath, settings)
protected def retrieveFromPath(path: String): Source[ByteString, Future[IOResult]] =
Sftp.fromPath(path, settings)
protected def storeToPath(path: String, append: Boolean): Sink[ByteString, Future[IOResult]] =
Sftp.toPath(path, settings, append)
}
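storeToPath simply forwards to Sftp.toPath, whose materialized Future[IOResult] reports the bytes written. A hedged usage sketch (the path and payload are placeholders):

import akka.stream.{IOResult, Materializer}
import akka.stream.alpakka.ftp.SftpSettings
import akka.stream.alpakka.ftp.scaladsl.Sftp
import akka.stream.scaladsl.Source
import akka.util.ByteString
import scala.concurrent.Future

object SftpUploadSketch {
  // Writes one ByteString to a remote path; the settings argument is whatever
  // SftpSettings your environment provides (see the spec above)
  def uploadOnce(path: String, settings: SftpSettings)(implicit mat: Materializer): Future[IOResult] =
    Source.single(ByteString("hello sftp")).runWith(Sftp.toPath(path, settings, append = false))
}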
Example 12: listFiles
// Package declaration and imports
package akka.stream.alpakka.ftp
import akka.NotUsed
import akka.stream.IOResult
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Inside, Matchers, WordSpecLike}
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, Future}
trait BaseSpec
extends WordSpecLike
with Matchers
with BeforeAndAfter
with BeforeAndAfterAll
with ScalaFutures
with IntegrationPatience
with Inside
with AkkaSupport
with FtpSupport {
protected def listFiles(basePath: String): Source[FtpFile, NotUsed]
protected def retrieveFromPath(path: String): Source[ByteString, Future[IOResult]]
protected def storeToPath(path: String, append: Boolean): Sink[ByteString, Future[IOResult]]
protected def startServer(): Unit
protected def stopServer(): Unit
after {
cleanFiles()
}
override protected def beforeAll() = {
super.beforeAll()
startServer()
}
override protected def afterAll() = {
stopServer()
Await.ready(getSystem.terminate(), 42.seconds)
super.afterAll()
}
}
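A concrete spec mixing in this trait would drive the abstract sink and sources with ScalaFutures. A hedged sketch, assuming it lives in the same package and that an implicit materializer can be built from getSystem (AkkaSupport's contents are not shown here):

import akka.stream.{ActorMaterializer, Materializer}
import akka.stream.scaladsl.Source
import akka.util.ByteString

trait RoundTripSpec { this: BaseSpec =>
  implicit lazy val mat: Materializer = ActorMaterializer()(getSystem) // assumed wiring

  "storeToPath and retrieveFromPath" should {
    "round-trip a payload" in {
      val payload = ByteString("ftp round trip")
      Source.single(payload).runWith(storeToPath("/roundtrip.txt", append = false)).futureValue
      retrieveFromPath("/roundtrip.txt")
        .runFold(ByteString.empty)(_ ++ _)
        .futureValue shouldBe payload
    }
  }
}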
Example 13: IronMqPullStageSpec
// Package declaration and imports
package akka.stream.alpakka.ironmq
import akka.stream.scaladsl.{Sink, Source}
import scala.concurrent.ExecutionContext.Implicits.global
class IronMqPullStageSpec extends UnitSpec with IronMqFixture with AkkaStreamFixture {
"IronMqSourceStage" when {
"there are messages" should {
"consume all messages" in {
val queue = givenQueue()
val messages = (1 to 100).map(i => PushMessage(s"test-$i"))
ironMqClient.pushMessages(queue.name, messages: _*).futureValue
val source = Source.fromGraph(new IronMqPullStage(queue.name, IronMqSettings()))
val receivedMessages = source.take(100).runWith(Sink.seq).map(_.map(_.message.body)).futureValue
val expectedMessages = messages.map(_.body)
receivedMessages should contain theSameElementsInOrderAs expectedMessages
}
}
}
}
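take(100) bounds the pull source so that Sink.seq, which only completes when the stream does, can deliver its Seq. The pattern in isolation:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object TakeThenSeqSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // Without take(n), Sink.seq over an unbounded source would never complete
  Source.repeat("message")
    .take(100)
    .runWith(Sink.seq)
    .foreach { seq => println(s"received ${seq.size} elements"); system.terminate() }
}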
Example 14: MemoryBufferSpec
// Package declaration and imports
package akka.stream.alpakka.s3.impl
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.TestKit
import akka.util.ByteString
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.scalatest.concurrent.ScalaFutures
class MemoryBufferSpec(_system: ActorSystem)
extends TestKit(_system)
with FlatSpecLike
with Matchers
with BeforeAndAfterAll
with ScalaFutures {
def this() = this(ActorSystem("MemoryBufferSpec"))
implicit val defaultPatience =
PatienceConfig(timeout = Span(5, Seconds), interval = Span(30, Millis))
implicit val materializer = ActorMaterializer(ActorMaterializerSettings(system).withDebugLogging(true))
"MemoryBuffer" should "emit a chunk on its output containg the concatenation of all input values" in {
val result = Source(Vector(ByteString(1, 2, 3, 4, 5), ByteString(6, 7, 8, 9, 10, 11, 12), ByteString(13, 14)))
.via(new MemoryBuffer(200))
.runWith(Sink.seq)
.futureValue
result should have size (1)
val chunk = result.head
chunk.size should be(14)
chunk.data.runWith(Sink.seq).futureValue should be(Seq(ByteString(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)))
}
it should "fail if more than maxSize bytes are fed into it" in {
whenReady(
Source(Vector(ByteString(1, 2, 3, 4, 5), ByteString(6, 7, 8, 9, 10, 11, 12), ByteString(13, 14)))
.via(new MemoryBuffer(10))
.runWith(Sink.seq)
.failed
) { e =>
e shouldBe a[IllegalStateException]
}
}
}
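The second test leans on Future.failed: the materialized future of Sink.seq fails when the stream fails, and .failed projects that exception for inspection. A minimal stand-alone version:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object FailedStreamSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // Sink.seq's materialized Future fails when the stream fails;
  // .failed projects that error so it can be inspected directly
  Source(1 to 5)
    .map(n => if (n == 3) throw new IllegalStateException("boom") else n)
    .runWith(Sink.seq)
    .failed
    .foreach { e => println(s"stream failed with: $e"); system.terminate() }
}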
Example 15: DynamoClientImpl
// Package declaration and imports
package akka.stream.alpakka.dynamodb.impl
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.MediaType.NotCompressible
import akka.http.scaladsl.model.{ContentType, MediaType}
import akka.stream.Materializer
import akka.stream.alpakka.dynamodb.AwsOp
import akka.stream.alpakka.dynamodb.impl.AwsClient.{AwsConnect, AwsRequestMetadata}
import akka.stream.scaladsl.{Sink, Source}
import com.amazonaws.AmazonServiceException
import com.amazonaws.http.HttpResponseHandler
class DynamoClientImpl(
val settings: DynamoSettings,
val errorResponseHandler: HttpResponseHandler[AmazonServiceException]
)(implicit protected val system: ActorSystem, implicit protected val materializer: Materializer)
extends AwsClient[DynamoSettings] {
override protected val service = "dynamodb"
override protected val defaultContentType =
ContentType.Binary(MediaType.customBinary("application", "x-amz-json-1.0", NotCompressible))
override protected implicit val ec = system.dispatcher
override protected val connection: AwsConnect =
if (settings.port == 443)
Http().cachedHostConnectionPoolHttps[AwsRequestMetadata](settings.host)(materializer)
else
Http().cachedHostConnectionPool[AwsRequestMetadata](settings.host, settings.port)(materializer)
def single(op: AwsOp) = Source.single(op).via(flow).map(_.asInstanceOf[op.B]).runWith(Sink.head)
}
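single() pushes one request through the connection flow and takes the first response with Sink.head, which materializes a Future of the first element (failing if the stream is empty) and then cancels upstream. In isolation:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import scala.concurrent.Future

object SinkHeadSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // Sink.head completes with the first element, then cancels the upstream
  val first: Future[Int] = Source(1 to 100).runWith(Sink.head)
  first.foreach { n => println(s"first element: $n"); system.terminate() }
}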