This article collects typical usage examples of the org.apache.kafka.clients.producer.ProducerRecord class in Scala. If you are wondering what ProducerRecord is for or how to use it in Scala, the selected class code examples below should help.
The following presents 15 code examples of the ProducerRecord class, sorted by popularity by default.
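Before the examples, here is a minimal self-contained sketch of the most common ProducerRecord constructors and a basic send with KafkaProducer; the broker address "localhost:9092", the topic "demo-topic" and the object name are illustrative assumptions, not taken from the examples below.
import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer
object ProducerRecordQuickStart extends App {
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:9092") // assumed local broker
  props.put("key.serializer", classOf[StringSerializer].getName)
  props.put("value.serializer", classOf[StringSerializer].getName)
  val producer = new KafkaProducer[String, String](props)
  // topic + value: key is null, the partitioner picks the partition
  val valueOnly = new ProducerRecord[String, String]("demo-topic", "hello")
  // topic + key + value: records with the same key land on the same partition
  val keyed = new ProducerRecord[String, String]("demo-topic", "user-42", "hello")
  // topic + explicit partition + key + value
  val partitioned = new ProducerRecord[String, String]("demo-topic", Integer.valueOf(0), "user-42", "hello")
  producer.send(valueOnly)
  producer.send(keyed)
  producer.send(partitioned)
  producer.close() // flushes any buffered records before shutting down
}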
Example 1: ProcessingKafkaApplication
// Set up the package name and import the dependent classes
package com.packt.chapter8
import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerSettings, Subscriptions}
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source}
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, StringDeserializer, StringSerializer}
import scala.concurrent.duration._
object ProcessingKafkaApplication extends App {
implicit val actorSystem = ActorSystem("SimpleStream")
implicit val actorMaterializer = ActorMaterializer()
val bootstrapServers = "localhost:9092"
val kafkaTopic = "akka_streams_topic"
val partition = 0
val subscription = Subscriptions.assignment(new TopicPartition(kafkaTopic, partition))
val consumerSettings = ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer)
.withBootstrapServers(bootstrapServers)
.withGroupId("akka_streams_group")
.withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
val producerSettings = ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
.withBootstrapServers(bootstrapServers)
val runnableGraph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
import GraphDSL.Implicits._
val tickSource = Source.tick(0 seconds, 5 seconds, "Hello from Akka Streams using Kafka!")
val kafkaSource = Consumer.plainSource(consumerSettings, subscription)
val kafkaSink = Producer.plainSink(producerSettings)
val printlnSink = Sink.foreach(println)
val mapToProducerRecord = Flow[String].map(elem => new ProducerRecord[Array[Byte], String](kafkaTopic, elem))
val mapFromConsumerRecord = Flow[ConsumerRecord[Array[Byte], String]].map(record => record.value())
tickSource ~> mapToProducerRecord ~> kafkaSink
kafkaSource ~> mapFromConsumerRecord ~> printlnSink
ClosedShape
})
runnableGraph.run()
}
Example 2: SimpleKafkaProducer
// Set up the package name and import the dependent classes
package com.example
import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.json4s.{DefaultFormats, jackson}
// Note: `Socket` here is an application-specific host/port holder, not java.net.Socket; its toString() must yield "host:port" because it is used as bootstrap.servers below
class SimpleKafkaProducer(kafkaSocket: Socket, topic: String, brokers: Int = 1) {
private val serializer = "org.apache.kafka.common.serialization.StringSerializer"
private def configuration = {
val props = new Properties()
props.put("bootstrap.servers", kafkaSocket.toString())
props.put("key.serializer", serializer)
props.put("value.serializer", serializer)
props
}
def send[T <: AnyRef](message: T) = {
implicit val serialization = jackson.Serialization
implicit val formats = DefaultFormats
val producer = new KafkaProducer[String, String](configuration)
val jsonMessage = serialization.write[T](message)
val data = new ProducerRecord[String, String](topic, jsonMessage)
producer.send(data)
producer.close()
}
}
Example 3: sendToKafkaWithNewProducer
// Set up the package name and import the dependent classes
package pl.touk.nussknacker.engine.kafka
import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success, Try}
trait EspSimpleKafkaProducer {
val kafkaConfig: KafkaConfig
def sendToKafkaWithNewProducer(topic: String, key: Array[Byte], value: Array[Byte]): Future[RecordMetadata] = {
var producer: KafkaProducer[Array[Byte], Array[Byte]] = null
try {
producer = createProducer()
sendToKafka(topic, key, value)(producer)
} finally {
if (producer != null) {
producer.close()
}
}
}
//method with such signature already exists in "net.cakesolutions" %% "scala-kafka-client" % "0.9.0.0" but I struggled to add this dependency...
def sendToKafka(topic: String, key: Array[Byte], value: Array[Byte])(producer: KafkaProducer[Array[Byte], Array[Byte]]): Future[RecordMetadata] = {
val promise = Promise[RecordMetadata]()
producer.send(new ProducerRecord(topic, key, value), producerCallback(promise))
promise.future
}
def createProducer(): KafkaProducer[Array[Byte], Array[Byte]] = {
new KafkaProducer[Array[Byte], Array[Byte]](KafkaEspUtils.toProducerProperties(kafkaConfig))
}
private def producerCallback(promise: Promise[RecordMetadata]): Callback =
new Callback {
override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = {
val result = if (exception == null) Success(metadata) else Failure(exception)
promise.complete(result)
}
}
}
Example 4: ReadyKafkaProducer
// Set up the package name and import the dependent classes
package com.bencassedy.readykafka.producer
import java.util.Properties
import java.util.concurrent.TimeUnit
import org.apache.kafka.clients.producer.{ProducerRecord, KafkaProducer}
import org.apache.kafka.common.serialization.{StringSerializer, StringDeserializer}
class ReadyKafkaProducer {
case class KafkaProducerConfigs(brokerList: String = "127.0.0.1:9092") {
val properties = new Properties()
properties.put("bootstrap.servers", brokerList)
properties.put("key.serializer", classOf[StringSerializer])
properties.put("value.serializer", classOf[StringSerializer])
// properties.put("serializer.class", classOf[StringDeserializer])
// properties.put("batch.size", 16384)
// properties.put("linger.ms", 1)
// properties.put("buffer.memory", 33554432)
}
val producer = new KafkaProducer[String, String](KafkaProducerConfigs().properties)
def produce(topic: String, messages: Iterable[String]): Unit = {
messages.foreach { m =>
producer.send(new ProducerRecord[String, String](topic, m))
}
producer.close(100L, TimeUnit.MILLISECONDS)
}
}
Example 5: Application
// Set up the package name and import the dependent classes
import java.text.SimpleDateFormat
import java.util.concurrent.TimeUnit
import java.util.{Date, Properties}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
object Application extends App {
val formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
val simIDs = 10000 to 99999 //99000
val brokers = "192.168.100.211:6667,192.168.100.212:6667,192.168.100.213:6667"
val topic = "newTest"
val props = new Properties
props.put("bootstrap.servers", brokers)
props.put("client.id", "Producer")
props.put("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer")
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
val producer = new KafkaProducer[Integer, String](props)
while (true) {
for (simID <- simIDs) {
val data = Data(
"64846867247",
"?D" + simID,
formatter.format(new Date()),
121.503,
31.3655,
78,
0,
42,
52806.7
)
// println(Data.getString(data))
producer.send(new ProducerRecord[Integer, String](topic, Data.getString(data)))
// TimeUnit.NANOSECONDS.sleep(100)
}
println("-------------------------------"+new Date())
TimeUnit.MINUTES.sleep(18)
}
}
Example 6: Generator
// Set up the package name and import the dependent classes
package data.processing.kafkagenerator
import java.util.Properties
import java.util.concurrent.TimeUnit
import com.typesafe.config.ConfigFactory
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import com.github.andr83.scalaconfig._
import com.yammer.metrics.core.{MetricName, MetricsRegistry}
import data.processing.avro.AvroEncoder
import scala.concurrent.forkjoin.ThreadLocalRandom
object Generator {
val metricsRegistry = new MetricsRegistry
val config = ConfigFactory.load()
val props = config.getConfig("kafka-client").as[Properties]
val topic = config.getString("kafka-client.topic")
val numberOfUsers = config.getInt("generator.number.of.users")
val urls = config.getStringList("generator.urls")
val eventTypes = config.getStringList("generator.event.types")
val throughput = config.getInt("generator.throughput")
val avroEncoder = new AvroEncoder("/event-record.json")
def generateEvent() = {
val id = ThreadLocalRandom.current().nextLong()
val ts = java.lang.System.currentTimeMillis()
val userId = ThreadLocalRandom.current().nextInt(numberOfUsers).toHexString
val url = urls.get(ThreadLocalRandom.current().nextInt(urls.size()))
val eventType = eventTypes.get(ThreadLocalRandom.current().nextInt(eventTypes.size()))
(id, avroEncoder.encode((id, ts, userId, url, eventType)))
}
def main(args: Array[String]): Unit = {
val meter = metricsRegistry.newMeter(new MetricName("", "", ""), "", TimeUnit.SECONDS)
val producer = new KafkaProducer[String, Array[Byte]](props)
while(true) {
if (meter.meanRate < throughput) {
meter.mark()
val event = generateEvent()
producer.send(new ProducerRecord[String, Array[Byte]](topic, event._1.toString, event._2))
}
else {
Thread.sleep(1)
}
}
// never reached: the while (true) loop above runs indefinitely
producer.flush()
producer.close()
}
}
Example 7: ProducerExample
// Set up the package name and import the dependent classes
package edu.uw.at.iroberts.wirefugue.kafka.producer
import java.util.Properties
import akka.util.ByteString
import edu.uw.at.iroberts.wirefugue.pcap.IPAddress
import org.apache.kafka.clients.producer.{KafkaProducer, Producer, ProducerRecord}
class ProducerExample {
val props: Properties = new Properties()
props.put("bootstrap.servers", "localhost:9092")
props.put("acks", "all")
props.put("retries", 0.asInstanceOf[java.lang.Integer])
props.put("batch.size", 16384.asInstanceOf[java.lang.Integer])
props.put("linger.ms", 1.asInstanceOf[java.lang.Integer])
props.put("buffer.memory", 33554432.asInstanceOf[java.lang.Integer])
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
type IPProto = Byte
type Port = Short
type ProducerKey = (IPProto, IPAddress, Port, IPAddress, Port)
type PacketProducerRecord = ProducerRecord[ProducerKey, ByteString]
val producer: Producer[ProducerKey, Array[Byte]] = new KafkaProducer(props)
for (i <- 0 until 100) {
val key: ProducerKey = (4.toByte, IPAddress("192.168.0.1"), 25563.toShort, IPAddress("192.168.0.2"), 80.toShort)
val someByteString: ByteString = ??? // placeholder: throws NotImplementedError until real packet bytes are supplied
val value: Array[Byte] = someByteString.toArray
producer.send(new ProducerRecord[ProducerKey, Array[Byte]]("ipv4-packets", key, value))
}
producer.close()
}
Example 8: SimpleProducer
// Set up the package name and import the dependent classes
package producers
import java.util.Properties
import model.Employee
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
object SimpleProducer extends App{
lazy val producer: KafkaProducer[String, String] = new KafkaProducer(getKafkaConfigProperties)
lazy val testEmpObjects:List[Employee] = (0 to 1000).map(x=>Employee("John"+x, x)).toList
testEmpObjects.foreach { emp =>
producer.send(new ProducerRecord[String, String]("raw-data-1", emp.id.toString, Employee.asJson(emp)))
}
def getKafkaConfigProperties: Properties = {
val config = new Properties()
config.put("bootstrap.servers", "localhost:9092")
config.put("group.id", "group1")
config.put("client.id", "client1")
config.put("enable.auto.commit", "true")
config.put("session.timeout.ms", "10000")
config.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
config.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
config.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
config.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
config
}
}
Example 9: KafkaFeedsExporter
// Set up the package name and import the dependent classes
package ru.fediq.scrapingkit.backend
import cakesolutions.kafka.KafkaProducer
import org.apache.kafka.clients.producer.{ProducerRecord, RecordMetadata}
import org.apache.kafka.common.serialization.StringSerializer
import ru.fediq.scrapingkit.scraper.ScrapedEntity
import scala.concurrent.Future
class KafkaFeedsExporter(
val bootstrapServer: String,
val topic: String
) extends FeedExporter {
val producer = KafkaProducer(KafkaProducer.Conf(new StringSerializer(), new StringSerializer, bootstrapServer))
override def store[T <: ScrapedEntity](entity: T): Future[RecordMetadata] = {
producer.send(new ProducerRecord(topic, entity.dump))
}
override def close() = producer.close()
}
Example 10: DataProducerThread
// Set up the package name and import the dependent classes
package org.hpi.esb.datasender
import java.util.concurrent.TimeUnit
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.hpi.esb.commons.util.Logging
class DataProducerThread(dataProducer: DataProducer, kafkaProducer: KafkaProducer[String, String],
dataReader: DataReader, topics: List[String], singleColumnMode: Boolean,
duration: Long, durationTimeUnit: TimeUnit) extends Runnable with Logging {
var numberOfRecords: Int = 0
val startTime: Long = currentTime
val endTime: Long = startTime + durationTimeUnit.toMillis(duration)
def currentTime: Long = System.currentTimeMillis()
def run() {
if (currentTime < endTime) {
send(dataReader.readRecords)
} else {
logger.info(s"Shut down after $durationTimeUnit: $duration.")
dataProducer.shutDown()
}
}
def send(messagesOption: Option[List[String]]): Unit = {
messagesOption.foreach(messages => {
numberOfRecords += 1
if (singleColumnMode) {
sendSingleColumn(messages)
} else {
sendMultiColumns(messages)
}
})
}
def sendSingleColumn(messages: List[String]): Unit = {
val message = messages.head
topics.foreach(
topic => {
sendToKafka(topic = topic, message = message)
})
}
def sendToKafka(topic: String, message: String): Unit = {
val record = new ProducerRecord[String, String](topic, message)
kafkaProducer.send(record)
logger.debug(s"Sent value $message to topic $topic.")
}
def sendMultiColumns(messages: List[String]): Unit = {
messages.zip(topics)
.foreach {
case (message, topic) =>
sendToKafka(topic = topic, message = message)
}
}
}
Example 11: OrderProcessingService
// Set up the package name and import the dependent classes
package com.github.simonthecat.eventdrivenorders.orderservice
import java.util
import domain.Order
import org.apache.kafka.clients.consumer.{ConsumerRecord, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import scala.collection.JavaConversions._
import scala.util.Try
class OrderProcessingService(orderConsumer: KafkaConsumer[String, String],
orderConsumerTopic: String,
storeUpdateProducer: KafkaProducer[String, String],
storeUpdateTopic: String) {
import com.owlike.genson.defaultGenson._
var running = true
def start() = {
orderConsumer.subscribe(util.Arrays.asList(orderConsumerTopic))
while (running) {
val records = orderConsumer.poll(100)
records.iterator().foreach(processOrder)
}
}
def processOrder(record: ConsumerRecord[String, String]): Unit = {
println(s"Processing ${record.value()}")
for {
order <- Try(fromJson[Order](record.value()))
_ <- Try {
println(s"Sending to store service: $order")
storeUpdateProducer.send(new ProducerRecord[String, String](storeUpdateTopic, toJson(order)))
}
} yield Unit
println(s"Processing ${record.value()}")
}
def stop() = {
orderConsumer.close()
running = false
}
}
Example 12: ConfirmationService
// Set up the package name and import the dependent classes
package com.github.simonthecat.eventdrivenorders.orderservice
import java.util
import domain.{OrderStatus, UpdateStoreStatus}
import org.apache.kafka.clients.consumer.{ConsumerRecord, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.Try
class ConfirmationService(confirmationConsumer: KafkaConsumer[String, String],
confirmationTopic: String,
replyProducer: KafkaProducer[String, String],
replyTopic: String) {
import com.owlike.genson.defaultGenson._
var running = true
def start() = {
confirmationConsumer.subscribe(util.Arrays.asList(confirmationTopic))
Future {
while (running) {
val records = confirmationConsumer.poll(100)
records.iterator().foreach(processConfirmation)
}
}.recover {
case ex => ex.printStackTrace()
}
}
def processConfirmation(record: ConsumerRecord[String, String]): Unit = {
println(s"Processing ${record.value()}")
for {
status <- Try(fromJson[UpdateStoreStatus](record.value()))
_ <- Try {
println(s"Replying $status")
replyProducer.send(new ProducerRecord(replyTopic, toJson(OrderStatus(status.orderId, status.success))))
}
} yield Unit
println(s"Processed ${record.value()}")
}
def stop() = {
confirmationConsumer.close()
running = false
}
}
Example 13: OrderService
// Set up the package name and import the dependent classes
package com.github.eventdrivenorders.api
import domain.Order
import org.apache.kafka.clients.producer.{ProducerRecord, KafkaProducer}
import com.github.eventdrivenorders.api.json.OrderFormats._
import spray.json._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class OrderService(orderProducer: KafkaProducer[String, String],
orderTopic: String,
orderStatusService: OrderStatusService) {
def submit(order: Order) = Future {
println(s"Submitting order: $order")
val orderJson = order.toJson.toString()
// orderStatusService.addPending(order.id)
orderProducer.send(new ProducerRecord[String, String](orderTopic, orderJson))
}
}
Example 14: running
// Set up the package name and import the dependent classes
package producers
import akka.kafka.ProducerSettings
import akka.kafka.scaladsl.Producer
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Source}
import akka.{Done, NotUsed}
import broker.ActorBroker
import config.AppConfig
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}
import scala.concurrent.Future
trait Producerable extends ActorBroker {
val config: AppConfig
implicit val materializer = ActorMaterializer()
val producerSettings = ProducerSettings(context.system, new ByteArraySerializer, new StringSerializer)
.withBootstrapServers(s"${config.kafkaConfig.uri}:${config.kafkaConfig.port}")
def running(): Receive = {
case Stop =>
log.info("Stopping Kafka producer stream and actor")
context.stop(self)
}
def sendToSink(message: String): Unit = {
import context.dispatcher // ExecutionContext needed by Future(...) and onFailure below; context comes from the underlying Actor
log.info(s"Attempting to produce message on topic $topicName")
val kafkaSink = Producer.plainSink(producerSettings)
val stringToProducerRecord: ProducerRecord[Array[Byte], String] = new ProducerRecord[Array[Byte], String](topicName, message)
val (a, future): (NotUsed, Future[Done]) = Source.fromFuture(Future(stringToProducerRecord))
.toMat(kafkaSink)(Keep.both)
.run()
future.onFailure {
case ex =>
log.error("Stream failed due to error, restarting", ex)
throw ex
}
context.become(running())
log.info(s"Writer now running, writing random numbers to topic $topicName")
}
case object Stop
}
Example 15: Main
// Set up the package name and import the dependent classes
import java.util.concurrent.TimeUnit.SECONDS
import akka.actor.ActorSystem
import akka.kafka.ProducerSettings
import akka.kafka.scaladsl.Producer
import akka.stream.scaladsl.Source
import akka.stream.{ActorMaterializer, ThrottleMode}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}
import scala.concurrent.duration.FiniteDuration
import scala.language.postfixOps
object Main {
def main(args: Array[String]): Unit = {
implicit val system = ActorSystem.apply("akka-stream-kafka")
implicit val materializer = ActorMaterializer()
val producerSettings = ProducerSettings(system, new ByteArraySerializer, new StringSerializer)
.withBootstrapServers("localhost:9092,localhost:9093") // bootstrap servers are comma-separated
Source.repeat(0)
.scan(0)((next, _) => next + 1)
.throttle(1, FiniteDuration(2L, SECONDS), 1, ThrottleMode.Shaping)
.map(nextInt => {
val topicName = "topic1"
val partitionCount = 2
val partition = nextInt % partitionCount
new ProducerRecord[Array[Byte], String](topicName, nextInt.toString.getBytes, nextInt.toString)
// new ProducerRecord[Array[Byte], String](topicName, partition, null, nextInt.toString)
})
.runWith(Producer.plainSink(producerSettings))
}
}