This article collects typical usage examples of the Java class kafka.utils.ZkUtils. If you are unsure what ZkUtils is for or how to use it, the curated code examples below should help.
ZkUtils lives in the kafka.utils package. Fifteen code examples of the class are shown below, ordered by popularity by default; upvoting the examples you find useful helps the site recommend better Java code samples.
Example 1: setUpClass
import kafka.utils.ZkUtils; // import the required package/class
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);
    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafkaUtils-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    // create topics
    AdminUtils.createTopic(zkUtils, TOPIC_R, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
    AdminUtils.createTopic(zkUtils, TOPIC_S, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
}
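A test class built this way usually also needs a class-level teardown that releases the embedded broker and ZooKeeper. The following is a minimal sketch, not part of the original example, assuming the same static fields (kafkaServer, zkClient, zkServer) and org.junit.AfterClass:
@AfterClass
public static void tearDownClass() {
    // shut down in reverse order of creation: broker first, then the ZooKeeper client and server
    if (kafkaServer != null) {
        kafkaServer.shutdown();
        kafkaServer.awaitShutdown();
    }
    if (zkClient != null) {
        zkClient.close();
    }
    if (zkServer != null) {
        zkServer.shutdown();
    }
}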
Example 2: setUpClass
import kafka.utils.ZkUtils; // import the required package/class
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);
    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    // create topic
    AdminUtils.createTopic(zkUtils, TOPIC, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
}
Example 3: setUpClass
import kafka.utils.ZkUtils; // import the required package/class
@BeforeClass
public static void setUpClass() throws Exception {
    int zkConnectionTimeout = 6000;
    int zkSessionTimeout = 6000;
    zookeeper = new EmbeddedZookeeper();
    zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
    zkUtils = ZkUtils.apply(
        zkConnect, zkSessionTimeout, zkConnectionTimeout,
        JaasUtils.isZkSecurityEnabled());
    port = NetworkUtils.getRandomPort();
    kafkaServer = TestUtil09.createKafkaServer(port, zkConnect);
    for (int i = 0; i < topics.length; i++) {
        topics[i] = UUID.randomUUID().toString();
        AdminUtils.createTopic(zkUtils, topics[i], 1, 1, new Properties());
        TestUtils.waitUntilMetadataIsPropagated(
            scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(kafkaServer)),
            topics[i], 0, 5000);
    }
}
Example 4: createKafkaCluster
import kafka.utils.ZkUtils; // import the required package/class
public final String createKafkaCluster() throws IOException {
    System.setProperty("zookeeper.preAllocSize", Integer.toString(128));
    zkServer = new EmbeddedZookeeper();
    String zkConnect = ZK_HOST + ':' + zkServer.port();
    ZkClient zkClient = new ZkClient(zkConnect, SESSION_TIMEOUT, CONNECTION_TIMEOUT, ZKStringSerializer$.MODULE$);
    zkUtils = ZkUtils.apply(zkClient, false);
    brokerPort = getRandomPort();
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKER_HOST + ':' + brokerPort);
    brokerProps.setProperty("offsets.topic.replication.factor", "1");
    brokerProps.setProperty("offsets.topic.num.partitions", "1");
    // we need this to avoid an OOME while running tests, see https://issues.apache.org/jira/browse/KAFKA-3872
    brokerProps.setProperty("log.cleaner.dedupe.buffer.size", Long.toString(2 * 1024 * 1024L));
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    return BROKER_HOST + ':' + brokerPort;
}
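The returned host:port string can be wired straight into producer or consumer configuration. A hedged usage sketch, not from the original source, assuming a helper instance named kafkaCluster that exposes the method above and the standard org.apache.kafka.clients.producer classes (the topic name is illustrative):
// start the embedded cluster and point a producer at it
String bootstrapServers = kafkaCluster.createKafkaCluster();
Properties producerProps = new Properties();
producerProps.setProperty("bootstrap.servers", bootstrapServers);
producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
try (KafkaProducer<String, String> producer = new KafkaProducer<>(producerProps)) {
    producer.send(new ProducerRecord<>("test-topic", "key", "value"));
}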
Example 5: setUpClass
import kafka.utils.ZkUtils; // import the required package/class
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);
    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    // create topic
    AdminUtils.createTopic(zkUtils, TOPIC_OOS, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
}
Example 6: getTopicProperties
import kafka.utils.ZkUtils; // import the required package/class
/**
 * Get topic configuration
 *
 * @param connection connection
 * @param topicName  topic name
 * @return topic properties
 */
public Properties getTopicProperties(final ZkUtils connection, final String topicName) {
    try {
        return AdminUtils.fetchEntityConfig(connection, ConfigType.Topic(), topicName);
    } catch (IllegalArgumentException | KafkaException e) {
        throw new TopicOperationException(topicName, e.getMessage(), e, this.getClass());
    }
}
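A hedged usage sketch, not part of the original example: from inside the same service class, open a ZkUtils connection, read a topic's overrides, and close the connection afterwards. AdminUtils.fetchEntityConfig returns only explicit per-topic overrides; keys that are not set fall back to broker defaults. The ZooKeeper address, timeouts, and topic name below are illustrative:
ZkUtils connection = ZkUtils.apply("localhost:2181", 30000, 30000, false);
try {
    Properties topicProps = getTopicProperties(connection, "my-topic");
    // null if no retention.ms override has been set on the topic
    System.out.println("retention.ms override: " + topicProps.getProperty("retention.ms"));
} finally {
    connection.close();
}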
Example 7: createTopic
import kafka.utils.ZkUtils; // import the required package/class
/**
 * Create a topic
 *
 * @param connection        Connection
 * @param topicName         Topic name
 * @param partitions        The number of partitions for the topic being created
 * @param replicationFactor The replication factor for each partition in the topic being created
 * @param topicProperties   Topic-level configuration overrides to apply at creation time
 * @throws TopicOperationException if the topic was not created.
 */
public void createTopic(final ZkUtils connection, final String topicName,
                        final int partitions,
                        final int replicationFactor,
                        final Properties topicProperties) {
    try {
        AdminUtils.createTopic(connection,
                topicName,
                partitions,
                replicationFactor,
                topicProperties);
    } catch (IllegalArgumentException | KafkaException | AdminOperationException e) {
        throw new TopicOperationException(topicName, e.getMessage(), e, this.getClass());
    }
}
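A hedged usage sketch, not from the original source: creating a compacted topic through the helper above, assuming a ZkUtils connection is already open (the topic name, partition count, and config are illustrative):
Properties topicProperties = new Properties();
topicProperties.setProperty("cleanup.policy", "compact");
// 3 partitions, replication factor 1 (e.g. a single-broker test cluster)
createTopic(connection, "offsets-cache", 3, 1, topicProperties);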
Example 8: closeZkUtilsWithTimeout
import kafka.utils.ZkUtils; // import the required package/class
public static void closeZkUtilsWithTimeout(ZkUtils zkUtils, long timeoutMs) {
    Thread t = new Thread() {
        @Override
        public void run() {
            zkUtils.close();
        }
    };
    t.setDaemon(true);
    t.start();
    try {
        t.join(timeoutMs);
    } catch (InterruptedException e) {
        // let it go
    }
    if (t.isAlive()) {
        t.interrupt();
    }
}
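The close is delegated to a daemon thread so a ZooKeeper connection that hangs on shutdown cannot block the caller for longer than timeoutMs; if the close has not finished by then, the thread is interrupted and abandoned. A hedged usage sketch, not from the original source (connection string and timeouts are illustrative):
ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 30000, 30000, false);
try {
    // ... read or modify cluster metadata through zkUtils ...
} finally {
    // never let shutdown block on ZooKeeper for more than 10 seconds
    closeZkUtilsWithTimeout(zkUtils, 10000);
}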
Example 9: ensureTopicCreated
import kafka.utils.ZkUtils; // import the required package/class
private void ensureTopicCreated(Map<String, ?> config) {
    ZkUtils zkUtils = createZkUtils(config);
    Map<String, List<PartitionInfo>> topics = _consumers.get(0).listTopics();
    long snapshotWindowMs = Long.parseLong((String) config.get(KafkaCruiseControlConfig.LOAD_SNAPSHOT_WINDOW_MS_CONFIG));
    int numSnapshotWindows = Integer.parseInt((String) config.get(KafkaCruiseControlConfig.NUM_LOAD_SNAPSHOTS_CONFIG));
    long retentionMs = (numSnapshotWindows * ADDITIONAL_SNAPSHOT_WINDOW_TO_RETAIN_FACTOR) * snapshotWindowMs;
    Properties props = new Properties();
    props.setProperty(LogConfig.RetentionMsProp(), Long.toString(retentionMs));
    props.setProperty(LogConfig.CleanupPolicyProp(), DEFAULT_CLEANUP_POLICY);
    int replicationFactor = Math.min(2, zkUtils.getAllBrokersInCluster().size());
    if (!topics.containsKey(_partitionMetricSampleStoreTopic)) {
        AdminUtils.createTopic(zkUtils, _partitionMetricSampleStoreTopic, 32, replicationFactor, props, RackAwareMode.Safe$.MODULE$);
    } else {
        AdminUtils.changeTopicConfig(zkUtils, _partitionMetricSampleStoreTopic, props);
    }
    if (!topics.containsKey(_brokerMetricSampleStoreTopic)) {
        AdminUtils.createTopic(zkUtils, _brokerMetricSampleStoreTopic, 32, replicationFactor, props, RackAwareMode.Safe$.MODULE$);
    } else {
        AdminUtils.changeTopicConfig(zkUtils, _brokerMetricSampleStoreTopic, props);
    }
    KafkaCruiseControlUtils.closeZkUtilsWithTimeout(zkUtils, 10000);
}
Example 10: execute
import kafka.utils.ZkUtils; // import the required package/class
/**
 * Start the actual execution of the proposals.
 */
private void execute() {
    _zkUtils = ZkUtils.apply(_zkConnect, 30000, 30000, false);
    try {
        _state.set(ExecutorState.State.REPLICA_MOVEMENT_TASK_IN_PROGRESS);
        moveReplicas();
        // Start leader movements.
        _state.set(ExecutorState.State.LEADER_MOVEMENT_TASK_IN_PROGRESS);
        moveLeaders();
    } catch (Throwable t) {
        LOG.error("Executor got exception during execution", t);
    } finally {
        // The null check is needed for unit tests.
        if (_loadMonitor != null) {
            _loadMonitor.resumeMetricSampling();
        }
        _stopRequested = false;
        _executionTaskManager.clear();
        KafkaCruiseControlUtils.closeZkUtilsWithTimeout(_zkUtils, 10000);
        _state.set(ExecutorState.State.NO_TASK_IN_PROGRESS);
    }
}
Example 11: lookupBootstrap
import kafka.utils.ZkUtils; // import the required package/class
/**
 * Generates the Kafka bootstrap connection string from the metadata stored in Zookeeper.
 * Allows for backwards compatibility of the zookeeperConnect configuration.
 */
private String lookupBootstrap(String zookeeperConnect, SecurityProtocol securityProtocol) {
    ZkUtils zkUtils = ZkUtils.apply(zookeeperConnect, ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT,
                                    JaasUtils.isZkSecurityEnabled());
    try {
        List<BrokerEndPoint> endPoints =
            asJavaListConverter(zkUtils.getAllBrokerEndPointsForChannel(securityProtocol)).asJava();
        List<String> connections = new ArrayList<>();
        for (BrokerEndPoint endPoint : endPoints) {
            connections.add(endPoint.connectionString());
        }
        return StringUtils.join(connections, ',');
    } finally {
        zkUtils.close();
    }
}
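A hedged usage sketch, not from the original source: inside the same class, the ZooKeeper-derived broker list can stand in for a bootstrap.servers setting the user did not supply (the ZooKeeper connection string is illustrative):
// fall back to brokers registered in ZooKeeper when bootstrap.servers is not configured
String bootstrapServers = lookupBootstrap("zk1:2181,zk2:2181/kafka", SecurityProtocol.PLAINTEXT);
Properties consumerProps = new Properties();
consumerProps.setProperty("bootstrap.servers", bootstrapServers);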
Example 12: getZookeeperOffsets
import kafka.utils.ZkUtils; // import the required package/class
private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(ZkUtils client,
                                                                   String topicStr) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    ZKGroupTopicDirs topicDirs = new ZKGroupTopicDirs(groupId, topicStr);
    List<String> partitions = asJavaListConverter(
        client.getChildrenParentMayNotExist(topicDirs.consumerOffsetDir())).asJava();
    for (String partition : partitions) {
        TopicPartition key = new TopicPartition(topicStr, Integer.valueOf(partition));
        Option<String> data = client.readDataMaybeNull(
            topicDirs.consumerOffsetDir() + "/" + partition)._1();
        if (data.isDefined()) {
            Long offset = Long.valueOf(data.get());
            offsets.put(key, new OffsetAndMetadata(offset));
        }
    }
    return offsets;
}
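A hedged usage sketch, not from the original source: offsets read from ZooKeeper this way can be committed into Kafka's own offset storage so that a consumer group using the new consumer continues where the old one stopped. It assumes an open ZkUtils named client and a KafkaConsumer named consumer configured with the same group.id; the topic name is illustrative:
Map<TopicPartition, OffsetAndMetadata> zkOffsets = getZookeeperOffsets(client, "my-topic");
if (!zkOffsets.isEmpty()) {
    // write the legacy ZooKeeper offsets into Kafka-managed offsets for this group
    consumer.commitSync(zkOffsets);
}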
Example 13: getZookeeperOffsets
import kafka.utils.ZkUtils; // import the required package/class
private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(ZkUtils client) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    ZKGroupTopicDirs topicDirs = new ZKGroupTopicDirs(groupId, topicStr);
    List<String> partitions = asJavaListConverter(
        client.getChildrenParentMayNotExist(topicDirs.consumerOffsetDir())).asJava();
    for (String partition : partitions) {
        TopicPartition key = new TopicPartition(topicStr, Integer.valueOf(partition));
        Option<String> data = client.readDataMaybeNull(
            topicDirs.consumerOffsetDir() + "/" + partition)._1();
        if (data.isDefined()) {
            Long offset = Long.valueOf(data.get());
            offsets.put(key, new OffsetAndMetadata(offset));
        }
    }
    return offsets;
}
Example 14: createTopic
import kafka.utils.ZkUtils; // import the required package/class
/**
 * Create a Kafka topic with the given parameters.
 *
 * @param topic       The name of the topic.
 * @param partitions  The number of partitions for this topic.
 * @param replication The replication factor for (partitions of) this topic.
 * @param topicConfig Additional topic-level configuration settings.
 */
public void createTopic(final String topic,
                        final int partitions,
                        final int replication,
                        final Properties topicConfig) {
    log.debug("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }",
        topic, partitions, replication, topicConfig);
    // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then
    // createTopic() will only seem to work (it will return without error). The topic will exist
    // only in ZooKeeper and will be returned when listing topics, but Kafka itself does not
    // create the topic.
    final ZkClient zkClient = new ZkClient(
        zookeeperConnect(),
        DEFAULT_ZK_SESSION_TIMEOUT_MS,
        DEFAULT_ZK_CONNECTION_TIMEOUT_MS,
        ZKStringSerializer$.MODULE$);
    final boolean isSecure = false;
    final ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect()), isSecure);
    AdminUtils.createTopic(zkUtils, topic, partitions, replication, topicConfig, RackAwareMode.Enforced$.MODULE$);
    zkClient.close();
}
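A hedged usage sketch, not from the original source, assuming this method lives on an embedded-cluster helper referenced here as cluster (the topic name and settings are illustrative):
// a single-partition, non-replicated, compacted topic on the embedded cluster
Properties topicConfig = new Properties();
topicConfig.setProperty("cleanup.policy", "compact");
cluster.createTopic("user-profiles", 1, 1, topicConfig);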
Example 15: maybeDeleteInternalTopics
import kafka.utils.ZkUtils; // import the required package/class
private void maybeDeleteInternalTopics(final ZkUtils zkUtils) {
    System.out.println("Deleting all internal/auto-created topics for application " + options.valueOf(applicationIdOption));
    for (final String topic : allTopics) {
        if (isInternalTopic(topic)) {
            try {
                if (!dryRun) {
                    final TopicCommand.TopicCommandOptions commandOptions = new TopicCommand.TopicCommandOptions(new String[]{
                        "--zookeeper", options.valueOf(zookeeperOption),
                        "--delete", "--topic", topic});
                    TopicCommand.deleteTopic(zkUtils, commandOptions);
                } else {
                    System.out.println("Topic: " + topic);
                }
            } catch (final RuntimeException e) {
                System.err.println("ERROR: Deleting topic " + topic + " failed.");
                throw e;
            }
        }
    }
    System.out.println("Done.");
}
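The isInternalTopic check referenced above is not shown in this snippet. A plausible sketch follows, assuming the Kafka Streams convention that internal topics are prefixed with the application id and suffixed with -changelog or -repartition; this is an assumption for illustration, not the original implementation:
// hypothetical helper: treats <application.id>-...-changelog/-repartition topics as internal
private boolean isInternalTopic(final String topicName) {
    final String applicationId = options.valueOf(applicationIdOption);
    return topicName.startsWith(applicationId + "-")
        && (topicName.endsWith("-changelog") || topicName.endsWith("-repartition"));
}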