This article collects typical usage examples of the Java class kafka.server.KafkaConfig. If you are wondering what the KafkaConfig class is for, how to use it, or are looking for examples of it in practice, the curated code samples below may help.
The KafkaConfig class belongs to the kafka.server package. A total of 15 code examples of the KafkaConfig class are shown below, sorted by popularity by default.
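Most of the examples below follow the same basic pattern: assemble broker settings in a java.util.Properties object, wrap them in a KafkaConfig, and pass the config to an embedded broker class such as KafkaServer or KafkaServerStartable. The following minimal sketch illustrates that pattern; the host, port and log directory are placeholder values, and it assumes the Scala broker classes that ship with Kafka 0.8–0.10, as used in the examples.

import java.util.Properties;

import kafka.server.KafkaConfig;
import kafka.server.KafkaServerStartable;

public class EmbeddedBrokerSketch {
    public static void main(String[] args) {
        // Placeholder broker settings; real tests usually pick a free port
        // and a fresh temporary log directory instead.
        Properties props = new Properties();
        props.setProperty("zookeeper.connect", "localhost:2181");
        props.setProperty("broker.id", "0");
        props.setProperty("log.dirs", "/tmp/kafka-logs-sketch");
        props.setProperty("listeners", "PLAINTEXT://localhost:9092");

        // Wrap the settings in a KafkaConfig and start an embedded broker.
        KafkaServerStartable broker = new KafkaServerStartable(new KafkaConfig(props));
        broker.startup();

        // ... exercise the broker ...

        broker.shutdown();
        broker.awaitShutdown();
    }
}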
Example 1: initialize
import kafka.server.KafkaConfig; // import the required package/class
public void initialize() {
    if (initialized) {
        throw new IllegalStateException("Context has been already initialized");
    }
    zkServer = new EmbeddedZookeeper(TestZKUtils.zookeeperConnect());
    zkClient = new ZkClient(zkServer.connectString(), 10000, 10000, ZKStringSerializer$.MODULE$);
    port = TestUtils.choosePort();
    KafkaConfig config = new KafkaConfig(TestUtils.createBrokerConfig(brokerId, port, true));
    Time mock = new MockTime();
    kafkaServer = new KafkaServer(config, mock);
    kafkaServer.startup();
    initialized = true;
}
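The snippet above does not show the matching teardown. A minimal sketch of one, assuming the same zkServer, zkClient, kafkaServer and initialized fields and the 0.8-era test utilities used above, might look like this:

// Hypothetical teardown counterpart to initialize(); field names are
// assumptions carried over from the snippet above.
public void shutdown() {
    if (!initialized) {
        return;
    }
    kafkaServer.shutdown();
    kafkaServer.awaitShutdown();
    zkClient.close();
    zkServer.shutdown();
    initialized = false;
}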
Example 2: setUpClass
import kafka.server.KafkaConfig; // import the required package/class
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);
    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafkaUtils-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    // create topics
    AdminUtils.createTopic(zkUtils, TOPIC_R, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
    AdminUtils.createTopic(zkUtils, TOPIC_S, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
}
Example 3: startKafkaBrokers
import kafka.server.KafkaConfig; // import the required package/class
public static void startKafkaBrokers(int numberOfBrokers) {
    kafkaServers = new ArrayList<>(numberOfBrokers);
    kafkaProps = new HashMap<>();
    // setup Brokers
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < numberOfBrokers; i++) {
        int port = TestUtils.choosePort();
        Properties props = TestUtils.createBrokerConfig(i, port);
        props.put("auto.create.topics.enable", "false");
        kafkaServers.add(TestUtils.createServer(new KafkaConfig(props), new MockTime()));
        sb.append("localhost:").append(port).append(",");
    }
    metadataBrokerURI = sb.deleteCharAt(sb.length() - 1).toString();
    LOG.info("Setting metadataBrokerList and auto.offset.reset for test case");
    kafkaProps.put("auto.offset.reset", "smallest");
}
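A matching shutdown helper is not part of this snippet; a plausible sketch, assuming kafkaServers holds the kafka.server.KafkaServer instances created above, would be:

// Hypothetical cleanup for the brokers started above; the field name mirrors
// startKafkaBrokers and is an assumption.
public static void shutdownKafkaBrokers() {
    for (KafkaServer server : kafkaServers) {
        server.shutdown();
        server.awaitShutdown();
    }
    kafkaServers.clear();
}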
Example 4: setUpClass
import kafka.server.KafkaConfig; // import the required package/class
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);
    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    // create topic
    AdminUtils.createTopic(zkUtils, TOPIC, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
}
Example 5: start
import kafka.server.KafkaConfig; // import the required package/class
public void start() throws IOException {
    String zkConnect = TestZKUtils.zookeeperConnect();
    zkServer = new EmbeddedZookeeper(zkConnect);
    zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
    brokerPort = TestUtils.choosePort();
    logger.info("{}", brokerPort);
    Properties props = TestUtils.createBrokerConfig(BROKER_ID, brokerPort, true);
    // props.setProperty("zookeeper.connect", String.valueOf(zkPort));
    props.setProperty("zookeeper.session.timeout.ms", "30000");
    props.setProperty("zookeeper.connection.timeout.ms", "30000");
    logger.info("{}", props);
    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
}
Example 6: startServer
import kafka.server.KafkaConfig; // import the required package/class
public static void startServer() throws RuntimeException {
    if (serverStarted && serverClosed) {
        throw new RuntimeException("Kafka test server has already been closed. Cannot generate Kafka server twice.");
    }
    if (!serverStarted) {
        serverStarted = true;
        zkConnect = TestZKUtils.zookeeperConnect();
        zkServer = new EmbeddedZookeeper(zkConnect);
        zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
        kafkaPort = TestUtils.choosePort();
        Properties props = TestUtils.createBrokerConfig(brokerId, kafkaPort, true);
        KafkaConfig config = new KafkaConfig(props);
        Time mock = new MockTime();
        kafkaServer = TestUtils.createServer(config, mock);
    }
}
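The serverStarted and serverClosed flags suggest a companion close method elsewhere in the class, which is not shown here. A hedged sketch of what it might look like, reusing the fields above:

// Hypothetical counterpart that sets serverClosed; the real class may differ.
public static void closeServer() {
    if (serverStarted && !serverClosed) {
        serverClosed = true;
        kafkaServer.shutdown();
        kafkaServer.awaitShutdown();
        zkClient.close();
        zkServer.shutdown();
    }
}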
Example 7: createKafkaCluster
import kafka.server.KafkaConfig; // import the required package/class
public final String createKafkaCluster() throws IOException {
    System.setProperty("zookeeper.preAllocSize", Integer.toString(128));
    zkServer = new EmbeddedZookeeper();
    String zkConnect = ZK_HOST + ':' + zkServer.port();
    ZkClient zkClient = new ZkClient(zkConnect, SESSION_TIMEOUT, CONNECTION_TIMEOUT, ZKStringSerializer$.MODULE$);
    zkUtils = ZkUtils.apply(zkClient, false);
    brokerPort = getRandomPort();
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKER_HOST + ':' + brokerPort);
    brokerProps.setProperty("offsets.topic.replication.factor", "1");
    brokerProps.setProperty("offsets.topic.num.partitions", "1");
    // we need this to avoid OOME while running tests, see https://issues.apache.org/jira/browse/KAFKA-3872
    brokerProps.setProperty("log.cleaner.dedupe.buffer.size", Long.toString(2 * 1024 * 1024L));
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    return BROKER_HOST + ':' + brokerPort;
}
Example 8: setUpClass
import kafka.server.KafkaConfig; // import the required package/class
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);
    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    // create topic
    AdminUtils.createTopic(zkUtils, TOPIC_OOS, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
}
Example 9: setup
import kafka.server.KafkaConfig; // import the required package/class
@Before
public void setup() throws IOException {
    zkServer = new EmbeddedZookeeper();
    String zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    //AdminUtils.createTopic(zkUtils, TOPIC, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
    JMeterContext jmcx = JMeterContextService.getContext();
    jmcx.setVariables(new JMeterVariables());
}
Example 10: KafkaSourceEmbeddedKafka
import kafka.server.KafkaConfig; // import the required package/class
public KafkaSourceEmbeddedKafka(Properties properties) {
    zookeeper = new KafkaSourceEmbeddedZookeeper(zkPort);
    dir = new File(System.getProperty("java.io.tmpdir"), "kafka_log-" + UUID.randomUUID());
    try {
        FileUtils.deleteDirectory(dir);
    } catch (IOException e) {
        e.printStackTrace();
    }
    Properties props = new Properties();
    props.put("zookeeper.connect", zookeeper.getConnectString());
    props.put("broker.id", "1");
    props.put("host.name", "localhost");
    props.put("port", String.valueOf(serverPort));
    props.put("log.dir", dir.getAbsolutePath());
    if (properties != null) {
        props.putAll(properties);
    }
    KafkaConfig config = new KafkaConfig(props);
    kafkaServer = new KafkaServerStartable(config);
    kafkaServer.startup();
    initProducer();
}
Example 11: startKafkaServers
import kafka.server.KafkaConfig; // import the required package/class
private void startKafkaServers() throws IOException {
    for (int i = 0; i < numberOfKafkaBrokers; i++) {
        int port = kafkaPort + i;
        File logDir = folder.mkSubDir(String.format("kafka-local-%s-%s", kafkaPort, i));
        Properties properties = new Properties();
        properties.setProperty(KafkaConfig.ZkConnectProp(), String.format("%s:%s", LOCALHOST, zookeeperPort));
        properties.setProperty(KafkaConfig.BrokerIdProp(), String.valueOf(i + 1));
        properties.setProperty(KafkaConfig.ListenersProp(), String.format("PLAINTEXT://%s:%s", LOCALHOST, port));
        properties.setProperty(KafkaConfig.LogDirProp(), logDir.getAbsolutePath());
        properties.setProperty(KafkaConfig.LogFlushIntervalMessagesProp(), String.valueOf(1));
        properties.setProperty(KafkaConfig.LogFlushIntervalMsProp(), String.valueOf(Long.MAX_VALUE));
        properties.setProperty(KafkaConfig.ControlledShutdownEnableProp(), String.valueOf(false));
        properties.setProperty(KafkaConfig.DeleteTopicEnableProp(), String.valueOf(true));
        properties.setProperty(KafkaConfig.NumPartitionsProp(), String.valueOf(numberOfKafkaBrokers));
        KafkaServerStartable broker = startBroker(properties);
        brokers.add(broker);
    }
}
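The startBroker helper called above is not shown in this snippet. Assuming it simply wraps the per-broker Properties in a KafkaConfig and launches a KafkaServerStartable, it could look like this:

// Hypothetical implementation of the startBroker helper used above.
private KafkaServerStartable startBroker(Properties properties) {
    KafkaServerStartable broker = new KafkaServerStartable(new KafkaConfig(properties));
    broker.startup();
    return broker;
}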
Example 12: start
import kafka.server.KafkaConfig; // import the required package/class
/**
 * Starts the Kafka broker.
 *
 * @throws IOException if an error occurs during initialization
 */
public synchronized void start() throws IOException {
    log.info("Starting Kafka broker on port {}", port);
    logsDir = Files.createTempDirectory(LocalKafkaBroker.class.getSimpleName());
    logsDir.toFile().deleteOnExit();
    kafkaServer = new KafkaServerStartable(new KafkaConfig(ConfigUtils.keyValueToProperties(
        "broker.id", TEST_BROKER_ID,
        "log.dirs", logsDir.toAbsolutePath(),
        "listeners", "PLAINTEXT://:" + port,
        "zookeeper.connect", "localhost:" + zkPort,
        // Above are for Kafka 0.8; following are for 0.9+
        "message.max.bytes", 1 << 26,
        "replica.fetch.max.bytes", 1 << 26
    ), false));
    kafkaServer.startup();
}
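The corresponding stop method is not included in this example. A minimal sketch of one, assuming the same kafkaServer, log and port fields and leaving the temporary log directory to the deleteOnExit hook set above:

// Hypothetical stop counterpart to start(); the real LocalKafkaBroker may
// also clean up logsDir explicitly.
public synchronized void stop() {
    log.info("Stopping Kafka broker on port {}", port);
    if (kafkaServer != null) {
        kafkaServer.shutdown();
        kafkaServer.awaitShutdown();
        kafkaServer = null;
    }
}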
Example 13: startServer
import kafka.server.KafkaConfig; // import the required package/class
public static KafkaServerStartable startServer(final int port, final int brokerId,
                                               final String zkStr, final Properties configuration) {
    // Create the ZK nodes for Kafka, if needed
    int indexOfFirstSlash = zkStr.indexOf('/');
    if (indexOfFirstSlash != -1) {
        String bareZkUrl = zkStr.substring(0, indexOfFirstSlash);
        String zkNodePath = zkStr.substring(indexOfFirstSlash);
        ZkClient client = new ZkClient(bareZkUrl);
        client.createPersistent(zkNodePath, true);
        client.close();
    }
    File logDir = new File("/tmp/kafka-" + Double.toHexString(Math.random()));
    logDir.mkdirs();
    configureKafkaPort(configuration, port);
    configureZkConnectionString(configuration, zkStr);
    configureBrokerId(configuration, brokerId);
    configureKafkaLogDirectory(configuration, logDir);
    KafkaConfig config = new KafkaConfig(configuration);
    KafkaServerStartable serverStartable = new KafkaServerStartable(config);
    serverStartable.startup();
    return serverStartable;
}
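A typical call site for this helper might look like the test below; the port, broker id, ZooKeeper chroot and property override are placeholder values chosen for illustration only:

// Illustrative usage of startServer(); all argument values are placeholders.
@Test
public void startsAndStopsEmbeddedBroker() {
    Properties overrides = new Properties();
    overrides.setProperty("auto.create.topics.enable", "true");
    KafkaServerStartable broker = startServer(19092, 0, "localhost:2181/kafka-test", overrides);
    try {
        // ... exercise the broker ...
    } finally {
        broker.shutdown();
        broker.awaitShutdown();
    }
}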
Example 14: createKafkaServer
import kafka.server.KafkaConfig; // import the required package/class
private KafkaServer createKafkaServer(KafkaConfig kafkaConfig) {
    return new KafkaServer(kafkaConfig, new Time() {
        @Override
        public long milliseconds() {
            return System.currentTimeMillis();
        }

        @Override
        public long nanoseconds() {
            return System.nanoTime();
        }

        @Override
        public void sleep(long ms) {
            try {
                Thread.sleep(ms);
            } catch (InterruptedException e) {
                Thread.interrupted();
            }
        }
    });
}
Example 15: createMonitoringTopicIfNotExists
import kafka.server.KafkaConfig; // import the required package/class
/**
 * Create the topic that the monitor uses to monitor the cluster. This method attempts to create the topic so that
 * every broker in the cluster ends up with partitionToBrokerRatio partitions. If the topic already exists but has
 * different parameters, this method does nothing to update them.
 *
 * TODO: Do we care about rack-aware mode? Probably not, because we want to spread the topic over all brokers.
 * @param zkUrl zookeeper connection url
 * @param topic topic name
 * @param replicationFactor the replication factor for the topic
 * @param partitionToBrokerRatio multiplied by the number of brokers to compute the number of partitions in the topic
 * @param topicConfig additional configuration for the topic, for example min.insync.replicas
 * @return the number of partitions created
 */
public static int createMonitoringTopicIfNotExists(String zkUrl, String topic, int replicationFactor,
                                                   double partitionToBrokerRatio, Properties topicConfig) {
    ZkUtils zkUtils = ZkUtils.apply(zkUrl, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());
    try {
        if (AdminUtils.topicExists(zkUtils, topic)) {
            return getPartitionNumForTopic(zkUrl, topic);
        }
        int brokerCount = zkUtils.getAllBrokersInCluster().size();
        int partitionCount = (int) Math.ceil(brokerCount * partitionToBrokerRatio);
        try {
            AdminUtils.createTopic(zkUtils, topic, partitionCount, replicationFactor, topicConfig, RackAwareMode.Enforced$.MODULE$);
        } catch (TopicExistsException e) {
            // There is a race condition with the consumer.
            LOG.debug("Monitoring topic " + topic + " already exists in cluster " + zkUrl, e);
            return getPartitionNumForTopic(zkUrl, topic);
        }
        LOG.info("Created monitoring topic " + topic + " in cluster " + zkUrl + " with " + partitionCount
            + " partitions, min ISR of " + topicConfig.get(KafkaConfig.MinInSyncReplicasProp())
            + " and replication factor of " + replicationFactor + ".");
        return partitionCount;
    } finally {
        zkUtils.close();
    }
}
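A hedged example of how this method might be invoked; the ZooKeeper URL, topic name, replication factor and partition-to-broker ratio are placeholder values:

// Illustrative call site; all argument values are placeholders.
public static void ensureMonitoringTopic() {
    Properties topicConfig = new Properties();
    topicConfig.setProperty("min.insync.replicas", "1");
    int partitions = createMonitoringTopicIfNotExists(
        "localhost:2181", "kafka-monitor-topic", 1, 2.0, topicConfig);
    System.out.println("Monitoring topic has " + partitions + " partitions");
}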