This article collects typical usage examples of the Scala class org.apache.kafka.clients.consumer.ConsumerRecord. If you are wondering what the ConsumerRecord class does and how to use it in Scala, the curated examples below should help.
The following presents 15 code examples of the ConsumerRecord class, sorted by popularity by default.
Example 1: ProcessingKafkaApplication
// Package declaration and imported dependencies
package com.packt.chapter8
import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerSettings, Subscriptions}
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source}
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, StringDeserializer, StringSerializer}
import scala.concurrent.duration._
object ProcessingKafkaApplication extends App {
implicit val actorSystem = ActorSystem("SimpleStream")
implicit val actorMaterializer = ActorMaterializer()
val bootstrapServers = "localhost:9092"
val kafkaTopic = "akka_streams_topic"
val partition = 0
val subscription = Subscriptions.assignment(new TopicPartition(kafkaTopic, partition))
val consumerSettings = ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer)
.withBootstrapServers(bootstrapServers)
.withGroupId("akka_streams_group")
.withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
val producerSettings = ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
.withBootstrapServers(bootstrapServers)
val runnableGraph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
import GraphDSL.Implicits._
val tickSource = Source.tick(0 seconds, 5 seconds, "Hello from Akka Streams using Kafka!")
val kafkaSource = Consumer.plainSource(consumerSettings, subscription)
val kafkaSink = Producer.plainSink(producerSettings)
val printlnSink = Sink.foreach(println)
val mapToProducerRecord = Flow[String].map(elem => new ProducerRecord[Array[Byte], String](kafkaTopic, elem))
val mapFromConsumerRecord = Flow[ConsumerRecord[Array[Byte], String]].map(record => record.value())
tickSource ~> mapToProducerRecord ~> kafkaSink
kafkaSource ~> mapFromConsumerRecord ~> printlnSink
ClosedShape
})
runnableGraph.run()
}
Example 2: KafkaUtility
// Package declaration and imported dependencies
package com.knoldus.streaming.kafka
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
object KafkaUtility {
//TODO It should read from config
private val kafkaParams = Map(
"bootstrap.servers" -> "localhost:9092",
"key.deserializer" -> classOf[StringDeserializer],
"value.deserializer" -> classOf[StringDeserializer],
"auto.offset.reset" -> "earliest",
"group.id" -> "tweet-consumer"
)
private val preferredHosts = LocationStrategies.PreferConsistent
def createDStreamFromKafka(ssc: StreamingContext, topics: List[String]): InputDStream[ConsumerRecord[String, String]] =
KafkaUtils.createDirectStream[String, String](
ssc,
preferredHosts,
ConsumerStrategies.Subscribe[String, String](topics.distinct, kafkaParams)
)
}
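A minimal usage sketch for KafkaUtility follows; the topic name, Spark master, and batch interval are assumptions, not part of the original example.
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object TweetStreamingApp extends App {
  // Hypothetical driver that wires KafkaUtility into a streaming job
  val conf = new SparkConf().setMaster("local[*]").setAppName("TweetStreamingApp")
  val ssc = new StreamingContext(conf, Seconds(5))
  KafkaUtility.createDStreamFromKafka(ssc, List("tweets"))
    .map(_.value())   // extract the message payload from each ConsumerRecord
    .print()
  ssc.start()
  ssc.awaitTermination()
}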
Example 3: PacketConsumer
// Package declaration and imported dependencies
package edu.uw.at.iroberts.wirefugue.kafka.consumer
import akka.actor.ActorSystem
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import com.typesafe.config.ConfigFactory
import edu.uw.at.iroberts.wirefugue.kafka.producer.KafkaKey
import edu.uw.at.iroberts.wirefugue.kafka.serdes.{PacketDeserializer, PacketSerde}
import edu.uw.at.iroberts.wirefugue.pcap.Packet
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.serialization.IntegerDeserializer
import scala.concurrent.Await
import scala.concurrent.duration._
object PacketConsumer extends App {
type PacketRecord = ConsumerRecord[KafkaKey, Array[Byte]]
val config = ConfigFactory.load("application.conf")
implicit val system = ActorSystem("stream-consumer-system", config)
implicit val materializer = ActorMaterializer()
val consumerSettings = ConsumerSettings(system, new IntegerDeserializer, new PacketDeserializer)
.withGroupId("group1")
.withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
// Separate streams for each partition
val maxPartitions = 100
val consumerGroup = Consumer.plainPartitionedSource(consumerSettings, Subscriptions.topics("packets"))
val done = consumerGroup.map {
case (topicPartition, source) =>
val p: Int = topicPartition.partition
source
.map { (cr: ConsumerRecord[Integer, Packet]) => cr.value() }
.filter(_.ip.isDefined)
.toMat(Sink.foreach(packet => println(s"[$p] $packet")))(Keep.both)
.run()
}
.mapAsyncUnordered(maxPartitions)(_._2)
.runWith(Sink.ignore)
Await.result(done, Duration.Inf)
system.terminate()
}
Example 4: StreamConsumer
// Package declaration and imported dependencies
package consumers
import akka.Done
import akka.actor.ActorSystem
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Sink
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import cats.data.Xor
import com.typesafe.config.ConfigFactory
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
import io.circe._
import io.circe.generic.auto._
import cats.data.Xor.{Left, Right}
import model.Employee
import scala.concurrent.Future
object StreamConsumer extends App {
implicit val actorSystem = ActorSystem("consumer-actors", ConfigFactory.load())
implicit val materializer = ActorMaterializer(ActorMaterializerSettings(actorSystem))
lazy val consumerSettings = ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer)
.withBootstrapServers("localhost:9092")
.withGroupId("group13")
.withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")//"latest")
.withProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true")
.withProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000")
lazy val subscription = Subscriptions.topics("raw-data-1")
lazy val db = new Processor()
Consumer.plainSource(consumerSettings, subscription)
.mapAsync(4){
db.processMessage
}
.runWith(Sink.ignore)
}
class Processor {
def processMessage(record: ConsumerRecord[Array[Byte], String]): Future[Done] ={
println(s"DB.save: ${record.value()}")
Option(record.value()).foreach{ jsonString =>
val mayBeEmp: Xor[Error, Employee] = jawn.decode[Employee](jsonString)
mayBeEmp match {
case Left(error) => println(error)
case Right(emp) => println(s"employee name: ${emp.name}")
}
}
Future.successful(Done)
}
}
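The Employee type imported from the model package is not shown in the example; a minimal sketch that would satisfy the circe decoding above could look like this (only the name field is known from the output, any other fields are guesses).
package model

// Hypothetical model class; only `name` is referenced by StreamConsumer
case class Employee(name: String)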
Example 5: TopicHandler
// Package declaration and imported dependencies
package org.hpi.esb.datavalidator.kafka
import akka.actor.ActorSystem
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Source
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.hpi.esb.util.OffsetManagement
case class TopicHandler(topicName: String, numberOfMessages: Long, topicSource: Source[ConsumerRecord[String, String], Consumer.Control])
object TopicHandler {
def create(topicName: String, system: ActorSystem): TopicHandler = {
val uuid = java.util.UUID.randomUUID.toString
val consumerSettings: ConsumerSettings[String, String] = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
.withBootstrapServers("192.168.30.208:9092,192.168.30.207:9092,192.168.30.141:9092")
.withGroupId(uuid)
.withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
.withProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Int.MaxValue.toString)
.withProperty(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, Int.MaxValue.toString)
.withProperty(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "20485000")
val partition = 0
val topicSource = createSource(consumerSettings, topicName, partition)
val numberOfMessages = OffsetManagement.getNumberOfMessages(topicName, partition)
new TopicHandler(topicName, numberOfMessages, topicSource)
}
def createSource(consumerSettings: ConsumerSettings[String, String], topicName: String, partition: Int) = {
val subscription = Subscriptions.assignmentWithOffset(
new TopicPartition(topicName, partition) -> 0L
)
Consumer.plainSource(consumerSettings, subscription)
}
}
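A sketch of how the handler's source might be consumed; the topic name is a placeholder and the materializer setup is assumed.
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink

object TopicHandlerExample extends App {
  implicit val system = ActorSystem("topic-handler-example")
  implicit val materializer = ActorMaterializer()

  val handler = TopicHandler.create("in-topic", system)  // "in-topic" is a placeholder
  handler.topicSource
    .take(handler.numberOfMessages)                      // complete once the expected count is read
    .map(_.value())
    .runWith(Sink.foreach(println))
}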
Example 6: OrderProcessingService
// Package declaration and imported dependencies
package com.github.simonthecat.eventdrivenorders.orderservice
import java.util
import domain.Order
import org.apache.kafka.clients.consumer.{ConsumerRecord, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import scala.collection.JavaConversions._
import scala.util.Try
class OrderProcessingService(orderConsumer: KafkaConsumer[String, String],
orderConsumerTopic: String,
storeUpdateProducer: KafkaProducer[String, String],
storeUpdateTopic: String) {
import com.owlike.genson.defaultGenson._
var running = true
def start() = {
orderConsumer.subscribe(util.Arrays.asList(orderConsumerTopic))
while (running) {
val records = orderConsumer.poll(100)
records.iterator().foreach(processOrder)
}
}
def processOrder(record: ConsumerRecord[String, String]): Unit = {
println(s"Processing ${record.value()}")
for {
order <- Try(fromJson[Order](record.value()))
_ <- Try {
println(s"Sending to store service: $order")
storeUpdateProducer.send(new ProducerRecord[String, String](storeUpdateTopic, toJson(order)))
}
} yield Unit
println(s"Processing ${record.value()}")
}
def stop() = {
orderConsumer.close()
running = false
}
}
Example 7: ConfirmationService
// Package declaration and imported dependencies
package com.github.simonthecat.eventdrivenorders.orderservice
import java.util
import domain.{OrderStatus, UpdateStoreStatus}
import org.apache.kafka.clients.consumer.{ConsumerRecord, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.Try
class ConfirmationService(confirmationConsumer: KafkaConsumer[String, String],
confirmationTopic: String,
replyProducer: KafkaProducer[String, String],
replyTopic: String) {
import com.owlike.genson.defaultGenson._
var running = true
def start() = {
confirmationConsumer.subscribe(util.Arrays.asList(confirmationTopic))
Future {
while (running) {
val records = confirmationConsumer.poll(100)
records.iterator().foreach(processConfirmation)
}
}.recover {
case ex => ex.printStackTrace()
}
}
def processConfirmation(record: ConsumerRecord[String, String]): Unit = {
println(s"Processing ${record.value()}")
for {
status <- Try(fromJson[UpdateStoreStatus](record.value()))
_ <- Try {
println(s"Replying $status")
replyProducer.send(new ProducerRecord(replyTopic, toJson(OrderStatus(status.orderId, status.success))))
}
} yield Unit
println(s"Processed ${record.value()}")
}
def stop() = {
confirmationConsumer.close()
running = false
}
}
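Examples 6 and 7 both depend on a domain package that is not shown. Below is a minimal sketch compatible with the fields referenced above; orderId and success are taken from the code, while the Order fields are guesses.
package domain

// Hypothetical domain model shared by OrderProcessingService and ConfirmationService
case class Order(id: Long, product: String, quantity: Int)
case class UpdateStoreStatus(orderId: Long, success: Boolean)
case class OrderStatus(orderId: Long, success: Boolean)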
Example 8: RsvpStreaming
// Package declaration and imported dependencies
package com.github.mmolimar.asks.streaming
import java.util.UUID
import com.github.mmolimar.askss.common.implicits._
import com.typesafe.scalalogging.LazyLogging
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming._
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
object RsvpStreaming extends App with LazyLogging {
val filter = config.getString("spark.filter").toLowerCase.split(",").toList
val ssc = new StreamingContext(buildSparkConfig, Seconds(5))
//TODO
kafkaStream(ssc)
.map(_.value())
.map(_.toEvent)
.filter(rsvp => {
filter.exists(rsvp.event.get.event_name.contains(_))
})
.print()
ssc.start()
ssc.awaitTermination()
def buildSparkConfig(): SparkConf = {
new SparkConf()
.setMaster(config.getString("spark.master"))
.setAppName("RsvpStreaming")
.set("spark.streaming.ui.retainedBatches", "5")
.set("spark.streaming.backpressure.enabled", "true")
.set("spark.sql.parquet.compression.codec", "snappy")
.set("spark.sql.parquet.mergeSchema", "true")
.set("spark.sql.parquet.binaryAsString", "true")
}
def kafkaStream(ssc: StreamingContext): InputDStream[ConsumerRecord[String, String]] = {
val topics = Set(config.getString("kafka.topic"))
val kafkaParams = Map[String, Object](
"metadata.broker.list" -> config.getString("kafka.brokerList"),
"enable.auto.commit" -> config.getBoolean("kafka.autoCommit").toString,
"auto.offset.reset" -> config.getString("kafka.autoOffset"),
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> config.getString("kafka.brokerList"),
ConsumerConfig.GROUP_ID_CONFIG -> s"consumer-${UUID.randomUUID}",
"key.deserializer" -> classOf[StringDeserializer],
"value.deserializer" -> classOf[StringDeserializer]
)
val consumerStrategy = ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
KafkaUtils.createDirectStream[String, String](ssc, LocationStrategies.PreferConsistent, consumerStrategy)
}
}
Example 9: ConsumerLoop
// Package declaration and imported dependencies
package org.hpi.esb.datavalidator.consumer
import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord, ConsumerRecords, KafkaConsumer}
import org.apache.kafka.common.TopicPartition
import org.hpi.esb.datavalidator.config.KafkaConsumerConfig
import org.hpi.esb.datavalidator.util.Logging
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
class ConsumerLoop(topic: String, config: KafkaConsumerConfig, results: ListBuffer[ConsumerRecord[String, String]]) extends Runnable with Logging {
private val props = createConsumerProps()
private val consumer = new KafkaConsumer[String, String](props)
initializeConsumer()
override def run(): Unit = {
var running = true
var zeroCount = 0
while (running) {
val records: ConsumerRecords[String, String] = consumer.poll(1000)
if (records.count() == 0) {
logger.debug(s"Received 0 records from Kafka.")
zeroCount += 1
if (zeroCount == 3) {
logger.debug("Received 0 records from Kafka for the third time. We assume the stream has finished and terminate.")
running = false
}
}
for (record <- records) {
results.append(record)
}
}
consumer.close()
}
private def initializeConsumer(): Unit = {
val topicPartitions = List(new TopicPartition(topic, 0))
consumer.assign(topicPartitions)
consumer.seekToBeginning(topicPartitions)
}
private def createConsumerProps(): Properties = {
val props = new Properties()
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, config.bootstrapServers)
props.put(ConsumerConfig.GROUP_ID_CONFIG, s"Validator")
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, config.autoCommit)
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, config.autoCommitInterval)
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, config.sessionTimeout)
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, config.keyDeserializerClass)
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, config.valueDeserializerClass)
props
}
}
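ConsumerLoop is a Runnable, so it is presumably driven from a dedicated thread. A sketch, assuming a KafkaConsumerConfig instance named consumerConfig is already available.
import org.apache.kafka.clients.consumer.ConsumerRecord
import scala.collection.mutable.ListBuffer

// `consumerConfig` and the topic name are assumptions
val results = ListBuffer[ConsumerRecord[String, String]]()
val loop = new ConsumerLoop("in-topic", consumerConfig, results)
val thread = new Thread(loop)
thread.start()
thread.join()  // blocks until the loop sees three empty polls and stops
println(s"Collected ${results.size} records")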
Example 10: IdentityValidation
// Package declaration and imported dependencies
package org.hpi.esb.datavalidator.validation
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.hpi.esb.datavalidator.config.Configurable
import org.hpi.esb.datavalidator.util.Logging
import scala.collection.mutable.ListBuffer
class IdentityValidation(inRecords: ListBuffer[ConsumerRecord[String, String]],
resultRecords: ListBuffer[ConsumerRecord[String, String]])
extends ResultValidation(inRecords, resultRecords) with Configurable with Logging {
override def fulfillsRequirements(): Boolean = {
if (inRecords.size != resultRecords.size) {
logger.info(s"Invalid identity query result. Expected 'OUT' size: ${inRecords.size} Actual: ${resultRecords.size}")
false
}
else {
inRecords.zip(resultRecords)
.foreach { case (r1, r2) =>
if (r1.value() != r2.value()) {
logger.info(s"Invalid identity query result: Expected value: ${r1.value()} but found value: ${r2.value()}.")
return false
}
}
logger.info(s"Valid identity query results. Both topics have size ${inRecords.size}.")
true
}
}
}
Example 11: ResultValidation
// Package declaration and imported dependencies
package org.hpi.esb.datavalidator.validation
import org.apache.kafka.clients.consumer.ConsumerRecord
import scala.collection.mutable.ListBuffer
import org.hpi.esb.datavalidator.util.Logging
abstract class ResultValidation(inRecords: ListBuffer[ConsumerRecord[String, String]],
resultRecords: ListBuffer[ConsumerRecord[String, String]]) extends Logging {
protected def recordsAreIncomplete(): Boolean = {
if (inRecords.isEmpty) {
logger.info(s"'In' records list is empty. Can not validate correctly.")
return true
}
if (resultRecords.isEmpty) {
logger.info(s"'Results' records list is empty. Can not validate correctly.")
return true
}
false
}
def fulfillsRequirements(): Boolean
}
Example 12: createConsumerRecordList
// Package declaration and imported dependencies
package org.hpi.esb.datavalidator.validation
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.record.TimestampType
import scala.collection.mutable.ListBuffer
trait ValidationTestHelpers {
// create a list of ConsumerRecord objects
def createConsumerRecordList(topic: String, values: List[(Long, String)]): ListBuffer[ConsumerRecord[String, String]] = {
val l = new ListBuffer[ConsumerRecord[String, String]]()
values.foreach { case (timestamp, value) => l.append(createConsumerRecord(topic, timestamp, value)) }
l
}
def createConsumerRecord(topic: String, timestamp: Long, value: String): ConsumerRecord[String, String] = {
val partition = 0
val offset = 0
val checksum = 0
val serializedKeySize = 0
val serializedValueSize = 0
val key = "0"
new ConsumerRecord[String, String](topic, partition, offset, timestamp, TimestampType.CREATE_TIME, checksum, serializedKeySize, serializedValueSize, key, value)
}
}
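A sketch of how these helpers might be combined with IdentityValidation from example 10 in a ScalaTest suite; the test framework choice is an assumption, and the Configurable trait is assumed to need no extra setup.
package org.hpi.esb.datavalidator.validation

import org.scalatest.FunSuite

class IdentityValidationSpec extends FunSuite with ValidationTestHelpers {
  test("identical record lists fulfill the identity requirement") {
    val values = List((1L, "42"), (2L, "43"))
    val inRecords = createConsumerRecordList("in", values)
    val resultRecords = createConsumerRecordList("out", values)
    assert(new IdentityValidation(inRecords, resultRecords).fulfillsRequirements())
  }
}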
Example 13: PlainSourceConsumerMain
// Package declaration and imported dependencies
package com.example.consumer
import java.util.concurrent.atomic.AtomicLong
import akka.Done
import akka.actor.ActorSystem
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
object PlainSourceConsumerMain extends App {
implicit val system = ActorSystem("PlainSourceConsumerMain")
implicit val materializer = ActorMaterializer()
//TODO: move to configuration application.conf
val consumerSettings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
.withBootstrapServers("localhost:9092")
.withGroupId("PlainSourceConsumer")
.withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
val db = new DB
db.loadOffset().foreach { fromOffset =>
val partition = 0
val subscription = Subscriptions.assignmentWithOffset(
new TopicPartition("topic1", partition) -> fromOffset
)
val done =
Consumer.plainSource(consumerSettings, subscription)
.mapAsync(1)(db.save)
.runWith(Sink.ignore)
}
}
//External Offset Storage
class DB {
private val offset = new AtomicLong(2)
def save(record: ConsumerRecord[Array[Byte], String]): Future[Done] = {
println(s"DB.save: ${record.value}")
offset.set(record.offset)
Future.successful(Done)
}
def loadOffset(): Future[Long] =
Future.successful(offset.get)
def update(data: String): Future[Done] = {
println(s"DB.update: $data")
Future.successful(Done)
}
}
Example 14: KafkaEngine
// Package declaration and imported dependencies
package com.lljv.analytics.hadoopengine
import java.util.HashMap
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import scala.util.control.NonFatal
class KafkaEngine(val settings: KafkaSettings) extends Serializable {
var producer: Option[KafkaProducer[String, String]] = None
def getStreamingParameters(): Map[String, String] = {
val parameters = Map[String, String](
"metadata.broker.list" -> settings.kafkaBroker,
"bootstrap.servers" -> settings.kafkaBroker,
"key.serializer" -> settings.stringSerializerType,
"value.serializer" -> settings.stringSerializerType,
"key.deserializer" -> settings.stringDeserializerType,
"value.deserializer" -> settings.stringDeserializerType,
"group.id" -> settings.consumerGroupId
)
return parameters
}
def startStream(
topicName: String,
sparkEngine: SparkStreamEngine
): Option[InputDStream[ConsumerRecord[String, String]]] =
{
val stream: Option[InputDStream[ConsumerRecord[String, String]]] = try {
Some(KafkaUtils.createDirectStream[String,String](
sparkEngine.streamingContext.get,
PreferConsistent,
Subscribe[String, String](Array(topicName), this.getStreamingParameters())
))
} catch {
case NonFatal(exc) => {
// printf(exc.getMessage())
// TODO: logging
None
}
}
return stream
}
}
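A usage sketch for KafkaEngine; KafkaSettings and SparkStreamEngine are project types that are not shown here, so settings and sparkEngine below stand in for project-specific setup.
// Hypothetical driver; `settings` and `sparkEngine` are assumptions
val engine = new KafkaEngine(settings)
engine.startStream("events", sparkEngine) match {
  case Some(stream) =>
    stream.map(_.value()).print()  // print the raw payload of each micro-batch
    sparkEngine.streamingContext.foreach { ssc =>
      ssc.start()
      ssc.awaitTermination()
    }
  case None =>
    println("Failed to create the Kafka direct stream")
}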
Example 15: produceRecord
// Package declaration and imported dependencies
package de.choffmeister.microserviceutils.kafka.testkit
import java.util.UUID
import akka.Done
import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerSettings, Subscriptions}
import akka.stream.Materializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{Deserializer, Serializer}
import scala.concurrent.Future
trait KafkaTest {
def produceRecord[K, V](topic: String, keySerializer: Serializer[K], valueSerializer: Serializer[V], key: K, value: V)(implicit system: ActorSystem, mat: Materializer): Future[Done] = {
val producerSettings = ProducerSettings(system, keySerializer, valueSerializer)
.withBootstrapServers(system.settings.config.getString("kafka.bootstrap-servers"))
Source.single(new ProducerRecord("mail.command.send", key, value))
.toMat(Producer.plainSink(producerSettings))(Keep.right)
.run()
}
def consumeRecordPF[K, V, Out](topic: String, keyDeserializer: Deserializer[K], valueDeserializer: Deserializer[V])(pf: PartialFunction[ConsumerRecord[K, V], Out])(implicit system: ActorSystem, mat: Materializer): Future[Out] = {
val consumerSettings = ConsumerSettings(system, keyDeserializer, valueDeserializer)
.withBootstrapServers(system.settings.config.getString("kafka.bootstrap-servers"))
.withGroupId(UUID.randomUUID.toString)
.withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
Consumer.plainSource(consumerSettings, Subscriptions.topics(topic))
.filter(pf.isDefinedAt)
.map(pf)
.toMat(Sink.head)(Keep.right)
.run()
}
}
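A sketch of how the trait might be used, assuming string keys and values and a round trip through a single topic; the names below are placeholders.
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
import scala.concurrent.Future

object KafkaTestExample extends KafkaTest {
  implicit val system: ActorSystem = ActorSystem("kafka-test-example")
  implicit val mat: Materializer = ActorMaterializer()

  // Produce one record, then wait for it to come back from the same topic
  produceRecord("greetings", new StringSerializer, new StringSerializer, "key-1", "hello")
  val roundTripped: Future[String] =
    consumeRecordPF("greetings", new StringDeserializer, new StringDeserializer) {
      case record if record.key() == "key-1" => record.value()
    }
}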