

Scala KafkaProducer Class Code Examples

This article collects typical usage examples of the Scala class org.apache.kafka.clients.producer.KafkaProducer. If you are wondering what the KafkaProducer class does, or how to use it in practice, the curated examples below should help.


The following presents 15 code examples of the KafkaProducer class, collected from open-source projects and sorted by popularity.
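Most of the examples below share one basic pattern: fill a java.util.Properties with bootstrap.servers and serializer settings, construct the producer, send ProducerRecords, and close the producer. As a baseline, here is a minimal, self-contained sketch of that pattern (the broker address and topic name are placeholders):

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

object MinimalProducer extends App {
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:9092") // placeholder broker address
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

  val producer = new KafkaProducer[String, String](props)
  try producer.send(new ProducerRecord[String, String]("demo-topic", "key", "value"))
  finally producer.close() // flushes buffered records and releases resources
}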

Example 1: SimpleKafkaProducer

// Package declaration and imported dependencies
package com.example

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.json4s.{DefaultFormats, jackson}

// NOTE: `Socket` is a project-specific type whose import is missing from the
// original; its toString is expected to render "host:port". The `brokers`
// parameter is unused.
class SimpleKafkaProducer(kafkaSocket: Socket, topic: String, brokers: Int = 1) {

  private val serializer = "org.apache.kafka.common.serialization.StringSerializer"

  private def configuration = {
    val props = new Properties()
    props.put("bootstrap.servers", kafkaSocket.toString())
    props.put("key.serializer", serializer)
    props.put("value.serializer", serializer)
    props
  }

  def send[T <: AnyRef](message: T) = {
    implicit val serialization = jackson.Serialization
    implicit val formats = DefaultFormats

    val producer = new KafkaProducer[String, String](configuration) // note: a new producer per message (see the variant below)
    val jsonMessage = serialization.write[T](message)
    val data = new ProducerRecord[String, String](topic, jsonMessage)

    producer.send(data)
    producer.close()
  }
} 
Author: frossi85, Project: financial-statistics-crawler, Lines: 31, Source: SimpleKafkaProducer.scala
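One caveat about this example: send constructs and closes a new KafkaProducer for every message, which defeats batching and adds connection overhead per call. A variant that hoists the producer into a field avoids that. The sketch below is not code from the original project, and it replaces the project-specific Socket type with a plain "host:port" string to stay self-contained:

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.json4s.{DefaultFormats, jackson}

class ReusableKafkaProducer(bootstrapServers: String, topic: String) {
  private val serializer = "org.apache.kafka.common.serialization.StringSerializer"
  private val props = new Properties()
  props.put("bootstrap.servers", bootstrapServers)
  props.put("key.serializer", serializer)
  props.put("value.serializer", serializer)

  // one producer per instance; KafkaProducer is thread-safe and batches sends
  private val producer = new KafkaProducer[String, String](props)

  def send[T <: AnyRef](message: T): Unit = {
    implicit val formats = DefaultFormats
    producer.send(new ProducerRecord[String, String](topic, jackson.Serialization.write[T](message)))
  }

  def close(): Unit = producer.close()
}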

Example 2: sendToKafkaWithNewProducer

// Package declaration and imported dependencies
package pl.touk.nussknacker.engine.kafka

import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success, Try}

trait EspSimpleKafkaProducer {
  val kafkaConfig: KafkaConfig

  def sendToKafkaWithNewProducer(topic: String, key: Array[Byte], value: Array[Byte]): Future[RecordMetadata] = {
    var producer: KafkaProducer[Array[Byte], Array[Byte]] = null
    try {
      producer = createProducer()
      sendToKafka(topic, key, value)(producer)
    } finally {
      if (producer != null) {
        producer.close()
      }
    }
  }

  // a method with this signature already exists in "net.cakesolutions" %% "scala-kafka-client" % "0.9.0.0", but I struggled to add that dependency...
  def sendToKafka(topic: String, key: Array[Byte], value: Array[Byte])(producer: KafkaProducer[Array[Byte], Array[Byte]]): Future[RecordMetadata] = {
    val promise = Promise[RecordMetadata]()
    producer.send(new ProducerRecord(topic, key, value), producerCallback(promise))
    promise.future
  }

  def createProducer(): KafkaProducer[Array[Byte], Array[Byte]] = {
    new KafkaProducer[Array[Byte], Array[Byte]](KafkaEspUtils.toProducerProperties(kafkaConfig))
  }

  private def producerCallback(promise: Promise[RecordMetadata]): Callback =
    new Callback {
      override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = {
        val result = if (exception == null) Success(metadata) else Failure(exception)
        promise.complete(result)
      }
    }
} 
Author: TouK, Project: nussknacker, Lines: 41, Source: EspSimpleKafkaProducer.scala
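The Callback-to-Promise bridge above is a standard way to expose the producer's asynchronous send as a Scala Future. A self-contained usage sketch of the same pattern outside the trait (the broker address and topic are placeholders):

import java.util.Properties
import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}
import scala.concurrent.duration._
import scala.concurrent.{Await, Promise}

object PromiseSendExample extends App {
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:9092") // placeholder
  props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer")
  val producer = new KafkaProducer[Array[Byte], Array[Byte]](props)

  val promise = Promise[RecordMetadata]()
  producer.send(new ProducerRecord("test-topic", "key".getBytes, "value".getBytes), new Callback {
    override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit =
      if (exception == null) promise.success(metadata) else promise.failure(exception)
  })

  // block only for demonstration; production code would compose the Future instead
  val metadata = Await.result(promise.future, 10.seconds)
  println(s"written to ${metadata.topic()}-${metadata.partition()}@${metadata.offset()}")
  producer.close()
}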

Example 3: ReadyKafkaProducer

// Package declaration and imported dependencies
package com.bencassedy.readykafka.producer

import java.util.Properties
import java.util.concurrent.TimeUnit

import org.apache.kafka.clients.producer.{ProducerRecord, KafkaProducer}
import org.apache.kafka.common.serialization.{StringSerializer, StringDeserializer}


class ReadyKafkaProducer {
  case class KafkaProducerConfigs(brokerList: String = "127.0.0.1:9092") {
    val properties = new Properties()
    properties.put("bootstrap.servers", brokerList)
    properties.put("key.serializer", classOf[StringSerializer])
    properties.put("value.serializer", classOf[StringSerializer])
//    properties.put("serializer.class", classOf[StringDeserializer])
//    properties.put("batch.size", 16384)
//    properties.put("linger.ms", 1)
//    properties.put("buffer.memory", 33554432)
  }

  val producer = new KafkaProducer[String, String](KafkaProducerConfigs().properties)

  def produce(topic: String, messages: Iterable[String]): Unit = {
    messages.foreach { m =>
      producer.send(new ProducerRecord[String, String](topic, m))
    }
    // closes the shared producer, so each instance can produce only once;
    // close(long, TimeUnit) is deprecated on newer clients (use close(Duration))
    producer.close(100L, TimeUnit.MILLISECONDS)
  }
} 
Author: bencassedy, Project: ready-kafka, Lines: 31, Source: ReadyKafkaProducer.scala

Example 4: getNewProducer

// Package declaration and imported dependencies
package com.hadooparchitecturebook.taxi360.common

import java.util.Properties

import org.apache.kafka.clients.producer.KafkaProducer

object KafkaProducerUntil {
  def getNewProducer(brokerList:String,
                     acks:Int,
                     lingerMs:Int,
                     producerType:String,
                     batchSize:Int): KafkaProducer[String, String] = {
    val kafkaProps = new Properties
    kafkaProps.put("bootstrap.servers", brokerList)
    kafkaProps.put("metadata.broker.list", brokerList)
    kafkaProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    kafkaProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    kafkaProps.put("acks", acks.toString)
    kafkaProps.put("retries", "3")
    kafkaProps.put("producer.type", producerType)
    kafkaProps.put("linger.ms", lingerMs.toString)
    kafkaProps.put("batch.size", batchSize.toString)

    println("brokerList:" + brokerList)
    println("acks:" + acks)
    println("lingerMs:" + lingerMs)
    println("batchSize:" + batchSize)
    println("producerType:" + producerType)
    println(kafkaProps)

    new KafkaProducer[String, String](kafkaProps)
  }
} 
Author: hadooparchitecturebook, Project: Taxi360, Lines: 35, Source: KafkaProducerUntil.scala

Example 5: Application

// Package declaration and imported dependencies
import java.text.SimpleDateFormat
import java.util.concurrent.TimeUnit
import java.util.{Date, Properties}

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}



object Application extends App {
  val formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
  val simIDs = 10000 to 99999 //99000
  val brokers = "192.168.100.211:6667,192.168.100.212:6667,192.168.100.213:6667"
  val topic = "newTest"
  val props = new Properties
  props.put("bootstrap.servers", brokers)
  props.put("client.id", "Producer")
  props.put("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  val producer = new KafkaProducer[Integer, String](props)
  while (true) {
    for (simID <- simIDs) {
      // Data is a project-specific case class (definition not shown here)
      val data = Data(
        "64846867247",
        "?D" + simID,
        formatter.format(new Date()),
        121.503,
        31.3655,
        78,
        0,
        42,
        52806.7
      )

//      println(Data.getString(data))
      producer.send(new ProducerRecord[Integer, String](topic, Data.getString(data)))
//          TimeUnit.NANOSECONDS.sleep(100)

    }
    println("-------------------------------"+new Date())
    TimeUnit.MINUTES.sleep(18)
  }
} 
Author: qiuwsh, Project: dataSimulator, Lines: 43, Source: Application.scala

Example 6: Generator

// Package declaration and imported dependencies
package data.processing.kafkagenerator

import java.util.Properties
import java.util.concurrent.TimeUnit

import com.typesafe.config.ConfigFactory
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import com.github.andr83.scalaconfig._
import com.yammer.metrics.core.{MetricName, MetricsRegistry}
import data.processing.avro.AvroEncoder

import scala.concurrent.forkjoin.ThreadLocalRandom


object Generator {

  val metricsRegistry = new MetricsRegistry

  val config = ConfigFactory.load()
  val props = config.getConfig("kafka-client").as[Properties]
  val topic = config.getString("kafka-client.topic")
  val numberOfUsers = config.getInt("generator.number.of.users")
  val urls = config.getStringList("generator.urls")
  val eventTypes = config.getStringList("generator.event.types")
  val throughput = config.getInt("generator.throughput")

  val avroEncoder = new AvroEncoder("/event-record.json")

  def generateEvent() = {
    val id = ThreadLocalRandom.current().nextLong()
    val ts = java.lang.System.currentTimeMillis()
    val userId = ThreadLocalRandom.current().nextInt(numberOfUsers).toHexString
    val url = urls.get(ThreadLocalRandom.current().nextInt(urls.size()))
    val eventType = eventTypes.get(ThreadLocalRandom.current().nextInt(eventTypes.size()))

    (id, avroEncoder.encode((id, ts, userId, url, eventType)))
  }

  def main(args: Array[String]): Unit = {
    val meter = metricsRegistry.newMeter(new MetricName("", "", ""), "", TimeUnit.SECONDS)
    val producer = new KafkaProducer[String, Array[Byte]](props)
    while(true) {
      if (meter.meanRate < throughput) {
        meter.mark()
        val event = generateEvent()
        producer.send(new ProducerRecord[String, Array[Byte]](topic, event._1.toString, event._2))
      }
      else {
        Thread.sleep(1)
      }
    }
    // NOTE: unreachable, because the `while (true)` loop above never exits;
    // see the shutdown-hook sketch below
    producer.flush()
    producer.close()
  }
} 
Author: ipogudin, Project: data-processing-examples, Lines: 56, Source: Generator.scala
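Because the send loop never terminates, the producer.flush() and producer.close() calls in main are unreachable. One way to make them run on Ctrl-C is a JVM shutdown hook; the sketch below shows the idea (it is an assumption layered on the example, not code from the original project, and it stubs out the event generation):

import java.util.Properties
import org.apache.kafka.clients.producer.KafkaProducer

object GracefulGenerator extends App {
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:9092") // placeholder
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer")
  val producer = new KafkaProducer[String, Array[Byte]](props)

  // runs when the JVM shuts down (e.g. on SIGINT), so buffered records are not lost
  sys.addShutdownHook {
    producer.flush()
    producer.close()
  }

  while (true) {
    // ... generate and send events as in the Generator example above ...
    Thread.sleep(1)
  }
}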

Example 7: ProducerExample

// Package declaration and imported dependencies
package edu.uw.at.iroberts.wirefugue.kafka.producer

import java.util.Properties

import akka.util.ByteString
import edu.uw.at.iroberts.wirefugue.pcap.IPAddress
import org.apache.kafka.clients.producer.{KafkaProducer, Producer, ProducerRecord}


class ProducerExample {
  val props: Properties = new Properties()
  props.put("bootstrap.servers", "localhost:9092")
  props.put("acks", "all")
  props.put("retries", 0.asInstanceOf[java.lang.Integer])
  props.put("batch.size", 16384.asInstanceOf[java.lang.Integer])
  props.put("linger.ms", 1.asInstanceOf[java.lang.Integer])
  props.put("buffer.memory", 33554432.asInstanceOf[java.lang.Integer])
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

  type IPProto = Byte
  type Port = Short
  type ProducerKey = (IPProto, IPAddress, Port, IPAddress, Port)
  type PacketProducerRecord = ProducerRecord[ProducerKey, ByteString]

  // NOTE: the StringSerializer settings above match neither ProducerKey nor
  // Array[Byte]; sends would fail at runtime with a ClassCastException
  // (see the sketch after this example for one consistent setup)
  val producer: Producer[ProducerKey, Array[Byte]] = new KafkaProducer(props)
  for (i <- 0 until 100) {
    val key: ProducerKey = (4.toByte, IPAddress("192.168.0.1"), 25563.toShort, IPAddress("192.168.0.2"), 80.toShort)
    val someByteString: ByteString = ??? // packet payload elided in the original
    val value: Array[Byte] = someByteString.toArray
    producer.send(new ProducerRecord[ProducerKey, Array[Byte]]("ipv4-packets", key, value))
  }

  producer.close()
} 
Author: robertson-tech, Project: wirefugue, Lines: 36, Source: ProducerExample.scala
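As the note in the example says, the configured StringSerializer cannot handle a tuple key or a byte-array value. One consistent alternative, offered here only as a sketch rather than the original author's approach, is to encode the key to bytes up front and use ByteArraySerializer for both key and value (the IP addresses are omitted from the key for brevity):

import java.nio.ByteBuffer
import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

object BytesProducerExample extends App {
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:9092") // placeholder
  props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer")
  val producer = new KafkaProducer[Array[Byte], Array[Byte]](props)

  // pack (protocol, source port, destination port) into a fixed-width binary key
  def encodeKey(proto: Byte, srcPort: Short, dstPort: Short): Array[Byte] =
    ByteBuffer.allocate(5).put(proto).putShort(srcPort).putShort(dstPort).array()

  val key = encodeKey(4.toByte, 25563.toShort, 80.toShort)
  val payload: Array[Byte] = Array[Byte](1, 2) // stand-in for a captured packet
  producer.send(new ProducerRecord("ipv4-packets", key, payload))
  producer.close()
}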

Example 8: SimpleProducer

// Package declaration and imported dependencies
package producers

import java.util.Properties

import model.Employee
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}


object SimpleProducer extends App {

  lazy val producer: KafkaProducer[String, String] = new KafkaProducer(getKafkaConfigProperties)
  lazy val testEmpObjects: List[Employee] = (0 to 1000).map(x => Employee("John" + x, x)).toList

  testEmpObjects.foreach { emp =>
    producer.send(new ProducerRecord[String, String]("raw-data-1", emp.id.toString, Employee.asJson(emp)))
  }

  producer.close() // flush buffered records before the App exits

  def getKafkaConfigProperties: Properties = {
    val config = new Properties()

    config.put("bootstrap.servers", "localhost:9092")
    config.put("group.id", "group1")
    config.put("client.id", "client1")
    config.put("enable.auto.commit", "true")
    config.put("session.timeout.ms", "10000")
    config.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    config.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    config.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    config.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

    config
  }
} 
Author: ajit-scala, Project: kafka-consumers, Lines: 34, Source: SimpleProducer.scala

Example 9: DataProducer

// Package declaration and imported dependencies
package org.hpi.esb.datasender

import java.util.concurrent.{ScheduledFuture, ScheduledThreadPoolExecutor, TimeUnit}

import org.apache.kafka.clients.producer.KafkaProducer
import org.hpi.esb.commons.util.Logging
import org.hpi.esb.datasender.config.Configurable
import org.hpi.esb.datasender.output.writers.DatasenderRunResultWriter
import org.hpi.esb.util.OffsetManagement


class DataProducer(resultHandler: DatasenderRunResultWriter, kafkaProducer: KafkaProducer[String, String],
                   dataReader: DataReader, topics: List[String], numberOfThreads: Int,
                   sendingInterval: Int, sendingIntervalTimeUnit: TimeUnit,
                   duration: Long, durationTimeUnit: TimeUnit, singleColumnMode: Boolean) extends Logging with Configurable {


  val executor: ScheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor(numberOfThreads)
  val producerThread = new DataProducerThread(this, kafkaProducer, dataReader, topics,
    singleColumnMode, duration, durationTimeUnit)

  val topicOffsets = getTopicOffsets()

  var t: ScheduledFuture[_] = _

  def shutDown(): Unit = {
    t.cancel(false)
    dataReader.close()
    kafkaProducer.close()
    executor.shutdown()
    logger.info("Shut data producer down.")
    val expectedRecordNumber = producerThread.numberOfRecords
    resultHandler.outputResults(topicOffsets, expectedRecordNumber)
  }

  def execute(): Unit = {
    val initialDelay = 0
    t = executor.scheduleAtFixedRate(producerThread, initialDelay, sendingInterval, sendingIntervalTimeUnit)
    val allTopics = topics.mkString(" ")
    logger.info(s"Sending records to following topics: $allTopics")
  }

  def getTopicOffsets(): Map[String, Long] = {
    topics.map(topic => {
      val currentOffset = OffsetManagement.getNumberOfMessages(topic, partition = 0)
      topic -> currentOffset
    }).toMap[String, Long]
  }
} 
Author: BenReissaus, Project: EnterpriseStreamingBenchmark, Lines: 50, Source: DataProducer.scala

Example 10: DataProducerThread

// Package declaration and imported dependencies
package org.hpi.esb.datasender

import java.util.concurrent.TimeUnit

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.hpi.esb.commons.util.Logging

class DataProducerThread(dataProducer: DataProducer, kafkaProducer: KafkaProducer[String, String],
                         dataReader: DataReader, topics: List[String], singleColumnMode: Boolean,
                         duration: Long, durationTimeUnit: TimeUnit) extends Runnable with Logging {

  var numberOfRecords: Int = 0
  val startTime: Long = currentTime
  val endTime: Long = startTime + durationTimeUnit.toMillis(duration)

  def currentTime: Long = System.currentTimeMillis()

  def run(): Unit = {
    if (currentTime < endTime) {
      send(dataReader.readRecords)
    } else {
      logger.info(s"Shut down after $durationTimeUnit: $duration.")
      dataProducer.shutDown()
    }
  }

  def send(messagesOption: Option[List[String]]): Unit = {
    messagesOption.foreach(messages => {
      numberOfRecords += 1
      if (singleColumnMode) {
        sendSingleColumn(messages)
      } else {
        sendMultiColumns(messages)
      }
    })
  }

  def sendSingleColumn(messages: List[String]): Unit = {
    val message = messages.head
    topics.foreach(
      topic => {
        sendToKafka(topic = topic, message = message)
      })
  }

  def sendToKafka(topic: String, message: String): Unit = {
    val record = new ProducerRecord[String, String](topic, message)
    kafkaProducer.send(record)
    logger.debug(s"Sent value $message to topic $topic.")
  }

  def sendMultiColumns(messages: List[String]): Unit = {
    messages.zip(topics)
      .foreach {
        case (message, topic) =>
          sendToKafka(topic = topic, message = message)
      }
  }
} 
Author: BenReissaus, Project: EnterpriseStreamingBenchmark, Lines: 60, Source: DataProducerThread.scala

Example 11: KafkaProducerMetrics

// Package declaration and imported dependencies
package org.hpi.esb.datasender.metrics

import org.apache.kafka.clients.producer.KafkaProducer
import org.hpi.esb.commons.output.Util._


import scala.collection.JavaConversions._

class KafkaProducerMetrics(kafkaProducer: KafkaProducer[String, String]) extends Metric {
  val desiredMetrics = List("batch-size-avg", "record-send-rate", "records-per-request-avg",
    "record-error-rate", "record-queue-time-avg", "buffer-exhausted-rate",
    "bufferpool-wait-ratio", "request-latency-max", "waiting-threads",
    "buffer-available-bytes")

  override def getMetrics(): Map[String, String] = filterMetric(desiredMetrics)

  
  def filterMetric(desiredMetrics: List[String]): Map[String, String] = {
    val accMetrics = Map[String, String]()
    kafkaProducer.metrics().foldLeft(accMetrics) {
      case (acc, (metricName, metric)) =>
        if (desiredMetrics.contains(metricName.name()) &&
          metricName.group() == "producer-metrics") {
          val key = metricName.name()
          val value = round(metric.value(), precision = 2)
          acc ++ Map[String, String](key -> value.toString)
        } else {
          acc
        }
    }
  }

} 
Author: BenReissaus, Project: EnterpriseStreamingBenchmark, Lines: 36, Source: KafkaProducerMetrics.scala
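A minimal usage sketch for the class above; it assumes a live KafkaProducer[String, String] named producer built elsewhere, as in the other examples:

// hypothetical wiring: `producer` must be an active KafkaProducer[String, String]
val producerMetrics = new KafkaProducerMetrics(producer)
producerMetrics.getMetrics().foreach { case (name, value) =>
  println(s"$name = $value")
}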

Example 12: MetricHandler

// Package declaration and imported dependencies
package org.hpi.esb.datasender.metrics

import org.apache.kafka.clients.producer.KafkaProducer
import org.hpi.esb.commons.util.Logging

class MetricHandler(kafkaProducer: KafkaProducer[String, String], topicStartOffsets: Map[String, Long],
                    expectedRecordNumber: Long) extends Logging {


  def fetchMetrics(): Map[String, String] = {
    // get all metrics produced by the kafka producer module
    val kafkaProducerMetrics = new KafkaProducerMetrics(kafkaProducer)
    val kafkaProducerMetricsValues = kafkaProducerMetrics.getMetrics()

    // get all metrics produced by the data sender
    val sendMetrics = new SendMetrics(topicStartOffsets, expectedRecordNumber)
    val sendMetricsValues = sendMetrics.getMetrics()

    kafkaProducerMetricsValues ++ sendMetricsValues
  }
} 
Author: BenReissaus, Project: EnterpriseStreamingBenchmark, Lines: 22, Source: MetricHandler.scala

Example 13: DataDriver

// Package declaration and imported dependencies
package org.hpi.esb.datasender

import java.util.Properties

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig}
import org.hpi.esb.commons.config.Configs
import org.hpi.esb.commons.util.Logging
import org.hpi.esb.datasender.config._
import org.hpi.esb.datasender.output.writers.DatasenderRunResultWriter

import scala.io.Source

class DataDriver() extends Logging {

  private val topics = Configs.benchmarkConfig.sourceTopics
  private val config = ConfigHandler.config
  private val dataReader = createDataReader(config.dataReaderConfig)
  private val kafkaProducerProperties = createKafkaProducerProperties(config.kafkaProducerConfig)
  private val kafkaProducer = new KafkaProducer[String, String](kafkaProducerProperties)
  private val resultHandler = new DatasenderRunResultWriter(config, Configs.benchmarkConfig, kafkaProducer)
  private val dataProducer = createDataProducer(kafkaProducer, dataReader, resultHandler)

  def run(): Unit = {
    dataProducer.execute()
  }

  def createKafkaProducerProperties(kafkaProducerConfig: KafkaProducerConfig): Properties = {

    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaProducerConfig.bootstrapServers.get)
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, kafkaProducerConfig.keySerializerClass.get)
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, kafkaProducerConfig.valueSerializerClass.get)
    props.put(ProducerConfig.ACKS_CONFIG, kafkaProducerConfig.acks.get)
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, kafkaProducerConfig.batchSize.get.toString)
    props.put(ProducerConfig.LINGER_MS_CONFIG, kafkaProducerConfig.lingerTime.toString)
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, kafkaProducerConfig.bufferMemorySize.toString)
    props
  }

  def createDataReader(dataReaderConfig: DataReaderConfig): DataReader = {
    new DataReader(Source.fromFile(dataReaderConfig.dataInputPath.get),
      dataReaderConfig.columns.get,
      dataReaderConfig.columnDelimiter.get,
      dataReaderConfig.dataColumnStart.get,
      dataReaderConfig.readInRam)
  }

  def createDataProducer(kafkaProducer: KafkaProducer[String, String], dataReader: DataReader,
                         resultHandler: DatasenderRunResultWriter): DataProducer = {

    val numberOfThreads = config.dataSenderConfig.numberOfThreads.get
    val sendingInterval = Configs.benchmarkConfig.sendingInterval
    val sendingIntervalTimeUnit = Configs.benchmarkConfig.getSendingIntervalTimeUnit()
    val duration = Configs.benchmarkConfig.duration
    val durationTimeUnit = Configs.benchmarkConfig.getDurationTimeUnit()
    val singleColumnMode = config.dataSenderConfig.singleColumnMode

    new DataProducer(resultHandler, kafkaProducer, dataReader, topics, numberOfThreads,
      sendingInterval, sendingIntervalTimeUnit, duration, durationTimeUnit, singleColumnMode)
  }
} 
Author: BenReissaus, Project: EnterpriseStreamingBenchmark, Lines: 62, Source: DataDriver.scala

Example 14: DatasenderRunResultWriter

// Package declaration and imported dependencies
package org.hpi.esb.datasender.output.writers

import java.text.SimpleDateFormat
import java.util.Date

import org.apache.kafka.clients.producer.KafkaProducer
import org.hpi.esb.commons.config.Configs
import org.hpi.esb.commons.config.Configs.BenchmarkConfig
import org.hpi.esb.commons.output.{CSVOutput, Tabulator}
import org.hpi.esb.commons.util.Logging
import org.hpi.esb.datasender.config._
import org.hpi.esb.datasender.metrics.MetricHandler
import org.hpi.esb.datasender.output.model.{ConfigValues, DatasenderResultRow, ResultValues}

class DatasenderRunResultWriter(config: Config, benchmarkConfig: BenchmarkConfig,
                                kafkaProducer: KafkaProducer[String, String]) extends Logging {

  val currentTime = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date())

  def outputResults(topicOffsets: Map[String, Long], expectedRecordNumber: Int): Unit = {
    val metricHandler = new MetricHandler(kafkaProducer, topicOffsets, expectedRecordNumber)
    val metrics = metricHandler.fetchMetrics()

    val configValues = ConfigValues.get(ConfigHandler.config, Configs.benchmarkConfig)
    val resultValues = new ResultValues(metrics)

    val dataSenderResultRow = DatasenderResultRow(configValues, resultValues)

    val table = dataSenderResultRow.toTable()
    CSVOutput.write(table, ConfigHandler.resultsPath, ConfigHandler.resultFileName(currentTime))
    logger.info(Tabulator.format(table))
  }
} 
Author: BenReissaus, Project: EnterpriseStreamingBenchmark, Lines: 34, Source: DatasenderRunResultWriter.scala

Example 15: OrderProcessingService

// Package declaration and imported dependencies
package com.github.simonthecat.eventdrivenorders.orderservice

import java.util

import domain.Order
import org.apache.kafka.clients.consumer.{ConsumerRecord, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

import scala.collection.JavaConversions._
import scala.util.Try


class OrderProcessingService(orderConsumer: KafkaConsumer[String, String],
                             orderConsumerTopic: String,
                             storeUpdateProducer: KafkaProducer[String, String],
                             storeUpdateTopic: String) {

  import com.owlike.genson.defaultGenson._

  var running = true

  def start() = {
    orderConsumer.subscribe(util.Arrays.asList(orderConsumerTopic))

    while (running) {
      val records = orderConsumer.poll(100)
      records.iterator().foreach(processOrder)
    }
  }

  def processOrder(record: ConsumerRecord[String, String]): Unit = {
    println(s"Processing ${record.value()}")

    for {
      order <- Try(fromJson[Order](record.value()))
      _ <- Try {
        println(s"Sending to store service: $order")
        storeUpdateProducer.send(new ProducerRecord[String, String](storeUpdateTopic, toJson(order)))
      }
    } yield ()

    println(s"Processing ${record.value()}")

  }

  def stop() = {
    running = false // stop the poll loop before closing the consumer
    orderConsumer.close()
  }
} 
Author: simonko91, Project: event-driven-orders, Lines: 51, Source: OrderProcessingService.scala
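A wiring sketch showing how the service might be constructed and shut down; the broker address, group id, and topic names are placeholders, and the Genson JSON codec is assumed to be on the classpath as in the original:

import java.util.Properties
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.clients.producer.KafkaProducer

object OrderServiceMain extends App {
  val consumerProps = new Properties()
  consumerProps.put("bootstrap.servers", "localhost:9092") // placeholder
  consumerProps.put("group.id", "order-service")           // placeholder
  consumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
  consumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")

  val producerProps = new Properties()
  producerProps.put("bootstrap.servers", "localhost:9092") // placeholder
  producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

  val service = new OrderProcessingService(
    new KafkaConsumer[String, String](consumerProps),
    "orders",        // placeholder input topic
    new KafkaProducer[String, String](producerProps),
    "store-updates"  // placeholder output topic
  )

  sys.addShutdownHook(service.stop())
  service.start() // blocks in the poll loop until stop() is called
}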


Note: The org.apache.kafka.clients.producer.KafkaProducer class examples in this article were compiled from open-source code and documentation hosted on GitHub/MSDocs and similar platforms; the snippets were selected from open-source projects contributed by various developers. Copyright of the source code remains with the original authors; consult each project's license before distributing or using the code. Do not reproduce without permission.