This article collects typical usage examples of the Scala class kafka.server.KafkaConfig. If you have been wondering what KafkaConfig is for, how it is used, or what real-world usage looks like, the curated class examples below should help.
Five code examples of the KafkaConfig class are shown below, sorted by popularity by default.
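Before the examples, here is a minimal sketch of the pattern they all share: building a KafkaConfig from a java.util.Properties object. The property values are illustrative placeholders, not taken from the examples below.

import java.util.Properties
import kafka.server.KafkaConfig

// Minimal sketch: construct a broker config from Properties.
// All values below are illustrative placeholders.
val props = new Properties()
props.setProperty(KafkaConfig.BrokerIdProp, "0")
props.setProperty(KafkaConfig.ZkConnectProp, "localhost:2181")
props.setProperty(KafkaConfig.LogDirProp, "/tmp/kafka-logs")
val config = new KafkaConfig(props)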
Example 1: ClientProducerRequest
// Set up the package name and import the required classes
package com.github.mmolimar.vkitm.common.cache

import java.util.Properties

import kafka.server.KafkaConfig
import org.apache.kafka.clients.ManualMetadataUpdater
import org.apache.kafka.common.metrics.Metrics

private[cache] trait CacheEntry {
}

case class ClientProducerRequest(clientId: String,
                                 brokerList: String,
                                 acks: Short)
                                (val props: Properties = new Properties) extends CacheEntry

case class NetworkClientRequest(clientId: String)
                               (val metadataUpdater: ManualMetadataUpdater,
                                val config: KafkaConfig,
                                val metrics: Metrics) extends CacheEntry
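One detail worth noting here: because the Properties argument lives in a second (curried) parameter list, it is excluded from the case class's generated equals and hashCode, so cache keys compare only on the first parameter list. A small illustrative sketch, not part of the source:

import com.github.mmolimar.vkitm.common.cache.ClientProducerRequest

// Illustrative sketch: the curried parameter list does not participate in a
// case class's equals/hashCode, so these two keys are equal even though
// their Properties instances differ.
val a = ClientProducerRequest("client-1", "localhost:9092", 1.toShort)()
val b = ClientProducerRequest("client-1", "localhost:9092", 1.toShort)()
assert(a == b)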
Example 2: EmbeddedVKitM
// Set up the package name and import the required classes
package com.github.mmolimar.vkitm.embedded

import java.util.Properties

import com.github.mmolimar.vkitm.server.{VKitMConfig, VKitMServer}
import com.github.mmolimar.vkitm.utils.TestUtils
import kafka.server.KafkaConfig
import kafka.utils.Logging
import org.apache.kafka.clients.producer.ProducerConfig

class EmbeddedVKitM(zkConnection: String,
                    brokerList: String,
                    port: Int = TestUtils.getAvailablePort) extends Logging {

  private var vkitmServer: VKitMServer = null

  def startup() {
    info("Starting up VKitM server")

    val serverProps = new Properties
    serverProps.setProperty(KafkaConfig.ZkConnectProp, zkConnection)
    serverProps.setProperty(KafkaConfig.HostNameProp, "localhost")
    serverProps.setProperty(KafkaConfig.PortProp, port.toString)
    serverProps.setProperty(KafkaConfig.ListenersProp, "PLAINTEXT://localhost:" + port)

    val producerProps = new Properties
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)

    val brokerPort = brokerList.split(":")(1)
    val consumerProps = new Properties
    consumerProps.setProperty(KafkaConfig.ZkConnectProp, zkConnection)
    consumerProps.setProperty(KafkaConfig.HostNameProp, "localhost")
    consumerProps.setProperty(KafkaConfig.PortProp, brokerPort)
    consumerProps.setProperty(KafkaConfig.ListenersProp, "PLAINTEXT://localhost:" + brokerPort)

    vkitmServer = new VKitMServer(VKitMConfig.fromProps(serverProps, producerProps, consumerProps))
    vkitmServer.startup()

    info("Started embedded VKitM server")
  }

  def shutdown() {
    vkitmServer.shutdown()
  }

  def getPort: Int = port

  def getBrokerList: String = "localhost:" + getPort

  def getServer: VKitMServer = vkitmServer

  override def toString: String = {
    val sb: StringBuilder = StringBuilder.newBuilder
    sb.append("VKitM{")
    sb.append("config='").append(vkitmServer.config).append('\'')
    sb.append('}')
    sb.toString
  }
}
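A possible way to use this helper in a test, sketched under the assumption that a real Kafka broker is reachable at localhost:9092 and ZooKeeper at localhost:2181 (both addresses are placeholders):

import com.github.mmolimar.vkitm.embedded.EmbeddedVKitM

// Hypothetical usage sketch: point the proxy at an existing broker,
// then have clients connect to the proxy's address instead.
val vkitm = new EmbeddedVKitM("localhost:2181", "localhost:9092")
vkitm.startup()
try {
  println(s"VKitM broker list: ${vkitm.getBrokerList}")
} finally {
  vkitm.shutdown()
}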
Example 3: send
// Set up the package name and import the required classes
package it.agilelab.bigdata.wasp.core.kafka

import java.util.Properties

import it.agilelab.bigdata.wasp.core.WaspEvent
import it.agilelab.bigdata.wasp.core.WaspEvent.WaspMessageEnvelope
import it.agilelab.bigdata.wasp.core.models.configuration.{TinyKafkaConfig, KafkaConfigModel}
import kafka.producer.{DefaultPartitioner, KeyedMessage, Producer, ProducerConfig}
import kafka.serializer.StringEncoder
import kafka.server.KafkaConfig

// Note: the class header and producer field below are reconstructed; the
// original excerpt started at `send` and omitted the enclosing declaration.
class WaspKafkaWriter[K, V](producerConfig: ProducerConfig) {

  private val producer = new Producer[K, V](producerConfig)

  def send(topic: String, key: K, message: V): Unit =
    batchSend(topic, key, Seq(message))

  def batchSend(topic: String, key: K, batch: Seq[V]): Unit = {
    val messages = batch map (msg => new KeyedMessage[K, V](topic, key, msg))
    producer.send(messages.toArray: _*)
  }

  def close(): Unit = producer.close()
}

object WaspKafkaWriter {

  def createConfig(brokers: Set[String], batchSize: Int, producerType: String, serializerFqcn: String, keySerializerFqcn: String, partitionerFqcn: String): ProducerConfig = {
    val props = new Properties()
    props.put("metadata.broker.list", brokers.mkString(","))
    props.put("serializer.class", serializerFqcn)
    props.put("key.serializer.class", keySerializerFqcn)
    props.put("partitioner.class", partitionerFqcn)
    props.put("producer.type", producerType)
    props.put("request.required.acks", "1")
    props.put("batch.num.messages", batchSize.toString)
    new ProducerConfig(props)
  }

  def defaultConfig(config: KafkaConfig): ProducerConfig =
    createConfig(Set(s"${config.hostName}:${config.port}"), 100, "async", classOf[StringEncoder].getName, classOf[StringEncoder].getName, classOf[DefaultPartitioner].getName)
}
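A hedged usage sketch, assuming a broker listening on localhost:9092 and String keys and values; the topic name and broker address are placeholders, and the class header above is a reconstruction (see the comment in the example):

import it.agilelab.bigdata.wasp.core.kafka.WaspKafkaWriter
import kafka.producer.DefaultPartitioner
import kafka.serializer.StringEncoder

// Hypothetical usage sketch: build a legacy producer config and send one message.
val producerConfig = WaspKafkaWriter.createConfig(
  brokers = Set("localhost:9092"),
  batchSize = 100,
  producerType = "async",
  serializerFqcn = classOf[StringEncoder].getName,
  keySerializerFqcn = classOf[StringEncoder].getName,
  partitionerFqcn = classOf[DefaultPartitioner].getName)
val writer = new WaspKafkaWriter[String, String](producerConfig)
writer.send("test-topic", "key-1", "hello")
writer.close()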
Example 4: TestingKafkaCluster
// Set up the package name and import the required classes
package com.saikocat.test

import java.io.IOException
import java.util.Properties

import kafka.server.KafkaConfig
import kafka.server.KafkaServerStartable
import kafka.utils.TestUtils
import org.apache.curator.test.TestingServer

class TestingKafkaCluster(val kafkaServer: KafkaServerStartable,
                          val zkServer: TestingServer) {

  def start(): Unit = {
    kafkaServer.startup()
  }

  def kafkaConfig(): KafkaConfig = kafkaServer.serverConfig

  def kafkaBrokerString(): String = s"localhost:${kafkaServer.serverConfig.port}"

  def zkConnectString(): String = zkServer.getConnectString()

  def kafkaPort(): Int = kafkaServer.serverConfig.port

  @throws(classOf[IOException])
  def stop(): Unit = {
    kafkaServer.shutdown()
    zkServer.stop()
  }
}

object TestingKafkaCluster {

  @throws(classOf[Exception])
  def apply(): TestingKafkaCluster = {
    val zkServer = new TestingServer()
    val config: KafkaConfig = getKafkaConfig(zkServer.getConnectString())
    val kafkaServer = new KafkaServerStartable(config)
    new TestingKafkaCluster(kafkaServer, zkServer)
  }

  def getKafkaConfig(zkConnectString: String): KafkaConfig = {
    val propsI: scala.collection.Iterator[Properties] =
      TestUtils.createBrokerConfigs(1).iterator
    assert(propsI.hasNext)

    val props: Properties = propsI.next()
    assert(props.containsKey("zookeeper.connect"))

    props.put("zookeeper.connect", zkConnectString)
    props.put("host.name", "localhost")
    new KafkaConfig(props)
  }
}
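Putting it together, a typical test would spin the cluster up, read its connection strings, and tear it down afterwards; for example:

import com.saikocat.test.TestingKafkaCluster

// Usage sketch: start the in-process cluster, inspect its endpoints, stop it.
val cluster = TestingKafkaCluster()
cluster.start()
try {
  println(s"Kafka: ${cluster.kafkaBrokerString()}, ZooKeeper: ${cluster.zkConnectString()}")
} finally {
  cluster.stop()
}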
Example 5: EmbeddedKafka
// Set up the package name and import the required classes
package nl.bigdatarepublic.streaming.embedded.adapter.kafka

import java.util.Properties

import com.typesafe.scalalogging.LazyLogging
import kafka.server.{KafkaConfig, KafkaServerStartable}
import nl.bigdatarepublic.streaming.embedded.entity.EmbeddedService

import scala.collection.JavaConverters._
import scala.reflect.io.Path
import scala.util.{Failure, Success, Try}

class EmbeddedKafka(props: Map[String, String], clearState: Boolean) extends LazyLogging with EmbeddedService {

  val kafka: KafkaServerStartable = new KafkaServerStartable(KafkaConfig(props.asJava))

  def start(): Unit = {
    // Clear out the existing Kafka data dir upon startup.
    if (clearState) {
      logger.info("Cleaning Kafka data dir before start...")
      kafka.serverConfig.logDirs.foreach { x =>
        Try(Path(x).deleteRecursively()) match {
          case Success(true)  => logger.info("Successfully cleaned Kafka data dir...")
          case Success(false) => logger.info("Failed to clean Kafka data dir...")
          case Failure(e)     => logger.warn("Failed to clean Kafka data dir", e)
        }
      }
    }
    logger.info("Starting embedded Kafka...")
    kafka.startup()
    logger.info("Successfully started embedded Kafka")
  }

  def stop(): Unit = {
    logger.info("Stopping embedded Kafka...")
    kafka.shutdown()
    logger.info("Successfully stopped embedded Kafka")
  }
}

object EmbeddedKafka {

  def apply(props: Map[String, String], clearState: Boolean): EmbeddedKafka = new EmbeddedKafka(props, clearState)

  def apply(props: Map[String, String]): EmbeddedKafka = new EmbeddedKafka(props, false)

  // Java compatibility
  def apply(props: Properties, clearState: Boolean): EmbeddedKafka = new EmbeddedKafka(props.asScala.toMap, clearState)

  def apply(props: Properties): EmbeddedKafka = new EmbeddedKafka(props.asScala.toMap, false)
}