This article collects typical usage examples of the Java method kafka.utils.ZkUtils.close. If you are wondering what ZkUtils.close does, how to use it, or where to find usage examples, the curated method examples below may help. You can also explore the enclosing class, kafka.utils.ZkUtils, in more detail.
A total of 15 code examples of the ZkUtils.close method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
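Every example below follows the same lifecycle: acquire a ZkUtils handle, use it, and close it in a finally block so the underlying ZooKeeper session is always released. A minimal sketch of that pattern, assuming a local ZooKeeper and 30-second timeouts (both placeholders):

import kafka.utils.ZkUtils;

ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 30000, 30000, false);
try {
    // ... read or write Kafka metadata stored in ZooKeeper ...
} finally {
    zkUtils.close(); // release the ZooKeeper session even if the work above throws
}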
Example 1: closeZkUtilsWithTimeout
import kafka.utils.ZkUtils; // import the package/class the method depends on

public static void closeZkUtilsWithTimeout(ZkUtils zkUtils, long timeoutMs) {
    Thread t = new Thread() {
        @Override
        public void run() {
            zkUtils.close();
        }
    };
    t.setDaemon(true);
    t.start();
    try {
        t.join(timeoutMs);
    } catch (InterruptedException e) {
        // let it go
    }
    if (t.isAlive()) {
        t.interrupt();
    }
}
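A hypothetical call site for this helper; the connection string and timeout are placeholders:

ZkUtils zkUtils = ZkUtils.apply("zk1:2181,zk2:2181", 30000, 30000, false);
// ... use zkUtils ...
// Close the handle, but give up after five seconds if ZooKeeper is unresponsive.
closeZkUtilsWithTimeout(zkUtils, 5000L);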
Example 2: lookupBootstrap
import kafka.utils.ZkUtils; // import the package/class the method depends on

/**
 * Generates the Kafka bootstrap connection string from the metadata stored in Zookeeper.
 * Allows for backwards compatibility with the zookeeperConnect configuration.
 */
private String lookupBootstrap(String zookeeperConnect, SecurityProtocol securityProtocol) {
    ZkUtils zkUtils = ZkUtils.apply(zookeeperConnect, ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT,
            JaasUtils.isZkSecurityEnabled());
    try {
        List<BrokerEndPoint> endPoints =
                asJavaListConverter(zkUtils.getAllBrokerEndPointsForChannel(securityProtocol)).asJava();
        List<String> connections = new ArrayList<>();
        for (BrokerEndPoint endPoint : endPoints) {
            connections.add(endPoint.connectionString());
        }
        return StringUtils.join(connections, ',');
    } finally {
        zkUtils.close();
    }
}
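Assuming this private method belongs to a Kafka client class that is migrating from a zookeeperConnect setting to bootstrap servers, a call inside that class might look like this (kafkaProps and the host names are placeholders):

// Resolve broker addresses from ZooKeeper, then hand them to a Kafka client.
String bootstrapServers = lookupBootstrap("zk1:2181,zk2:2181/kafka", SecurityProtocol.PLAINTEXT);
kafkaProps.put("bootstrap.servers", bootstrapServers);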
Example 3: getOffsets
import kafka.utils.ZkUtils; // import the package/class the method depends on

/**
 * @param zkServers Zookeeper server string: host1:port1[,host2:port2,...]
 * @param groupID consumer group to get offsets for
 * @param topic topic to get offsets for
 * @return mapping of (topic and) partition to offset
 */
public static Map<Pair<String,Integer>,Long> getOffsets(String zkServers,
                                                        String groupID,
                                                        String topic) {
    ZKGroupTopicDirs topicDirs = new ZKGroupTopicDirs(groupID, topic);
    Map<Pair<String,Integer>,Long> offsets = new HashMap<>();
    ZkUtils zkUtils = ZkUtils.apply(zkServers, ZK_TIMEOUT_MSEC, ZK_TIMEOUT_MSEC, false);
    try {
        List<?> partitions = JavaConversions.seqAsJavaList(
            zkUtils.getPartitionsForTopics(
                JavaConversions.asScalaBuffer(Collections.singletonList(topic))).head()._2());
        partitions.forEach(partition -> {
            String partitionOffsetPath = topicDirs.consumerOffsetDir() + "/" + partition;
            Option<String> maybeOffset = zkUtils.readDataMaybeNull(partitionOffsetPath)._1();
            Long offset = maybeOffset.isDefined() ? Long.valueOf(maybeOffset.get()) : null;
            offsets.put(new Pair<>(topic, Integer.valueOf(partition.toString())), offset);
        });
    } finally {
        zkUtils.close();
    }
    return offsets;
}
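A hypothetical invocation; note that a partition maps to null when no offset has been committed yet. Host, group, and topic names are placeholders:

Map<Pair<String,Integer>,Long> offsets =
        getOffsets("zk1:2181,zk2:2181", "my-consumer-group", "my-topic");
offsets.forEach((topicPartition, offset) ->
        System.out.println(topicPartition + " -> " + offset));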
Example 4: setOffsets
import kafka.utils.ZkUtils; // import the package/class the method depends on

/**
 * @param zkServers Zookeeper server string: host1:port1[,host2:port2,...]
 * @param groupID consumer group to update
 * @param offsets mapping of (topic and) partition to offset to push to Zookeeper
 */
@SuppressWarnings("deprecation")
public static void setOffsets(String zkServers,
                              String groupID,
                              Map<Pair<String,Integer>,Long> offsets) {
    ZkUtils zkUtils = ZkUtils.apply(zkServers, ZK_TIMEOUT_MSEC, ZK_TIMEOUT_MSEC, false);
    try {
        offsets.forEach((topicAndPartition, offset) -> {
            ZKGroupTopicDirs topicDirs = new ZKGroupTopicDirs(groupID, topicAndPartition.getFirst());
            int partition = topicAndPartition.getSecond();
            String partitionOffsetPath = topicDirs.consumerOffsetDir() + "/" + partition;
            // TODO replace call below with defaultAcls(false, "") when < 0.10.2 is supported
            zkUtils.updatePersistentPath(partitionOffsetPath,
                                         Long.toString(offset),
                                         ZkUtils$.MODULE$.DefaultAcls(false));
        });
    } finally {
        zkUtils.close();
    }
}
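A hypothetical counterpart to the getOffsets example above, rewinding partition 0 of one topic; the values are placeholders:

Map<Pair<String,Integer>,Long> offsets = new HashMap<>();
offsets.put(new Pair<>("my-topic", 0), 42L); // partition 0 -> offset 42
setOffsets("zk1:2181,zk2:2181", "my-consumer-group", offsets);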
Example 5: createMonitoringTopicIfNotExists
import kafka.utils.ZkUtils; // import the package/class the method depends on

/**
 * Create the topic that the monitor uses to monitor the cluster. This method attempts to create the topic so that
 * all the brokers in the cluster will have partitionToBrokerRatio partitions. If the topic already exists but was
 * created with different parameters, this method does nothing to update them.
 *
 * TODO: Do we care about rack aware mode? I would think no because we want to spread the topic over all brokers.
 * @param zkUrl zookeeper connection url
 * @param topic topic name
 * @param replicationFactor the replication factor for the topic
 * @param partitionToBrokerRatio this is multiplied by the number of brokers to compute the number of partitions in the topic.
 * @param topicConfig additional configuration for the topic, for example min.insync.replicas
 * @return the number of partitions created
 */
public static int createMonitoringTopicIfNotExists(String zkUrl, String topic, int replicationFactor,
                                                   double partitionToBrokerRatio, Properties topicConfig) {
    ZkUtils zkUtils = ZkUtils.apply(zkUrl, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());
    try {
        if (AdminUtils.topicExists(zkUtils, topic)) {
            return getPartitionNumForTopic(zkUrl, topic);
        }
        int brokerCount = zkUtils.getAllBrokersInCluster().size();
        int partitionCount = (int) Math.ceil(brokerCount * partitionToBrokerRatio);
        try {
            AdminUtils.createTopic(zkUtils, topic, partitionCount, replicationFactor, topicConfig, RackAwareMode.Enforced$.MODULE$);
        } catch (TopicExistsException e) {
            // There is a race condition with the consumer.
            LOG.debug("Monitoring topic " + topic + " already exists in cluster " + zkUrl, e);
            return getPartitionNumForTopic(zkUrl, topic);
        }
        LOG.info("Created monitoring topic " + topic + " in cluster " + zkUrl + " with " + partitionCount + " partitions, min ISR of "
            + topicConfig.get(KafkaConfig.MinInSyncReplicasProp()) + " and replication factor of " + replicationFactor + ".");
        return partitionCount;
    } finally {
        zkUtils.close();
    }
}
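A hypothetical call creating roughly two partitions per broker with replication factor 3; the ZooKeeper URL, topic name, and config values are placeholders:

Properties topicConfig = new Properties();
topicConfig.put("min.insync.replicas", "2");
int partitionCount = createMonitoringTopicIfNotExists(
        "zk1:2181/kafka", "kafka-monitor-topic", 3, 2.0, topicConfig);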
Example 6: getLeaderToShutDown
import kafka.utils.ZkUtils; // import the package/class the method depends on

@Override
public int getLeaderToShutDown(String topic) throws Exception {
    ZkUtils zkUtils = getZkUtils();
    try {
        PartitionMetadata firstPart = null;
        do {
            if (firstPart != null) {
                LOG.info("Unable to find leader. error code {}", firstPart.errorCode());
                // not the first try. Sleep a bit
                Thread.sleep(150);
            }
            Seq<PartitionMetadata> partitionMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils).partitionsMetadata();
            firstPart = partitionMetadata.head();
        }
        while (firstPart.errorCode() != 0);
        return firstPart.leader().get().id();
    } finally {
        zkUtils.close();
    }
}
Example 7: deleteTestTopic
import kafka.utils.ZkUtils; // import the package/class the method depends on

@Override
public void deleteTestTopic(String topic) {
    ZkUtils zkUtils = getZkUtils();
    try {
        LOG.info("Deleting topic {}", topic);
        ZkClient zk = new ZkClient(zookeeperConnectionString, Integer.valueOf(standardProps.getProperty("zookeeper.session.timeout.ms")),
                Integer.valueOf(standardProps.getProperty("zookeeper.connection.timeout.ms")), new ZooKeeperStringSerializer());
        AdminUtils.deleteTopic(zkUtils, topic);
        zk.close();
    } finally {
        zkUtils.close();
    }
}
Example 8: getLeaderToShutDown
import kafka.utils.ZkUtils; // import the package/class the method depends on

@Override
public int getLeaderToShutDown(String topic) throws Exception {
    ZkUtils zkUtils = getZkUtils();
    try {
        MetadataResponse.PartitionMetadata firstPart = null;
        do {
            if (firstPart != null) {
                LOG.info("Unable to find leader. error code {}", firstPart.error().code());
                // not the first try. Sleep a bit
                Thread.sleep(150);
            }
            List<MetadataResponse.PartitionMetadata> partitionMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils).partitionMetadata();
            firstPart = partitionMetadata.get(0);
        }
        while (firstPart.error().code() != 0);
        return firstPart.leader().id();
    } finally {
        zkUtils.close();
    }
}
Example 9: putOffsetInfoIntoZk
import kafka.utils.ZkUtils; // import the package/class the method depends on

private static void putOffsetInfoIntoZk(String groupId, Map<String, Map<Integer, Long>> topicOffsetsMap) {
    ZkUtils zkUtils =
        ZkUtils.apply(AuditConfig.INGESTER_ZK_CONNECT, Integer.valueOf(AuditConfig.INGESTER_ZK_SESSION_TIMEOUT_MS),
            Integer.valueOf(AuditConfig.INGESTER_ZK_SESSION_TIMEOUT_MS), false);
    try {
        for (Map.Entry<String, Map<Integer, Long>> topicEntry : topicOffsetsMap.entrySet()) {
            String zkPath = String.format("%s/%s/offsets/%s/", ZkUtils.ConsumersPath(), groupId, topicEntry.getKey());
            for (Map.Entry<Integer, Long> offsetEntry : topicEntry.getValue().entrySet()) {
                logger.info("Put offset={} to partition={} with znode path={}", offsetEntry.getValue(), offsetEntry.getKey(),
                    zkPath + offsetEntry.getKey());
                zkUtils.updatePersistentPath(zkPath + offsetEntry.getKey(), offsetEntry.getValue().toString(),
                    zkUtils.DefaultAcls());
            }
        }
    } catch (Exception e) {
        logger.error("Got exception while putting offsets, with zkPathPrefix={}",
            String.format("%s/%s/offsets", ZkUtils.ConsumersPath(), groupId));
        throw e;
    } finally {
        zkUtils.close();
    }
}
Example 10: removeOffsetInfoFromZk
import kafka.utils.ZkUtils; // import the package/class the method depends on

private static void removeOffsetInfoFromZk(final String groupId) {
    ZkUtils zkUtils =
        ZkUtils.apply(AuditConfig.INGESTER_ZK_CONNECT, Integer.valueOf(AuditConfig.INGESTER_ZK_SESSION_TIMEOUT_MS),
            Integer.valueOf(AuditConfig.INGESTER_ZK_SESSION_TIMEOUT_MS), false);
    try {
        String[] targets = new String[] {"offsets", "owners"};
        for (String target : targets) {
            String zkPath = String.format("%s/%s/%s", ZkUtils.ConsumersPath(), groupId, target);
            logger.info("Remove {} with znode path={}", target, zkPath);
            zkUtils.deletePathRecursive(zkPath);
        }
    } catch (Exception e) {
        logger.error("Got exception while removing offsets or owners from ZooKeeper, with zkPathPrefix={}",
            String.format("%s/%s/", ZkUtils.ConsumersPath(), groupId));
        throw e;
    } finally {
        zkUtils.close();
    }
}
Example 11: createTopic
import kafka.utils.ZkUtils; // import the package/class the method depends on

/**
 * Creates each of the provided Kafka topics if it does not already exist.
 *
 * @param zookeeperServers - The Zookeeper servers that are used by the Kafka Streams program. (not null)
 * @param topicNames - The topics that will be created. (not null)
 * @param partitions - The number of partitions that each of the topics will have.
 * @param replicationFactor - The replication factor of the topics that are created.
 */
public static void createTopic(
        final String zookeeperServers,
        final Set<String> topicNames,
        final int partitions,
        final int replicationFactor) {
    requireNonNull(zookeeperServers);
    requireNonNull(topicNames);
    ZkUtils zkUtils = null;
    try {
        zkUtils = ZkUtils.apply(new ZkClient(zookeeperServers, 30000, 30000, ZKStringSerializer$.MODULE$), false);
        for (final String topicName : topicNames) {
            if (!AdminUtils.topicExists(zkUtils, topicName)) {
                AdminUtils.createTopic(zkUtils, topicName, partitions, replicationFactor, new Properties(), RackAwareMode.Disabled$.MODULE$);
            }
        }
    } finally {
        if (zkUtils != null) {
            zkUtils.close();
        }
    }
}
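A hypothetical call that ensures two topics exist, each with four partitions and a replication factor of two (java.util.HashSet and Arrays imports assumed; names are placeholders):

Set<String> topicNames = new HashSet<>(Arrays.asList("topic-a", "topic-b"));
createTopic("zk1:2181,zk2:2181", topicNames, 4, 2);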
Example 12: createAtlasTopic
import kafka.utils.ZkUtils; // import the package/class the method depends on

/**
 * Create an Atlas topic.
 *
 * The topic will get created based on the following conditions:
 * {@link #ATLAS_NOTIFICATION_CREATE_TOPICS_KEY} is set to true.
 * The topic does not already exist.
 * Note that despite this, there could be multiple topic creation calls that happen in parallel because hooks
 * run in a distributed fashion. Exceptions are caught and logged by this method to prevent the startup of
 * the hooks from failing.
 * @param atlasProperties {@link Configuration} containing properties to be used for creating topics.
 * @param topicNames list of topics to create
 */
public void createAtlasTopic(Configuration atlasProperties, String... topicNames) {
    if (atlasProperties.getBoolean(ATLAS_NOTIFICATION_CREATE_TOPICS_KEY, true)) {
        if (!handleSecurity(atlasProperties)) {
            return;
        }
        ZkUtils zkUtils = createZkUtils(atlasProperties);
        for (String topicName : topicNames) {
            try {
                LOG.warn("Attempting to create topic {}", topicName);
                if (!ifTopicExists(topicName, zkUtils)) {
                    createTopic(atlasProperties, topicName, zkUtils);
                } else {
                    LOG.warn("Ignoring call to create topic {}, as it already exists.", topicName);
                }
            } catch (Throwable t) {
                LOG.error("Failed while creating topic {}", topicName, t);
            }
        }
        zkUtils.close();
    } else {
        LOG.info("Not creating topics {} as {} is false", StringUtils.join(topicNames, ","),
                ATLAS_NOTIFICATION_CREATE_TOPICS_KEY);
    }
}
Example 13: createTopic
import kafka.utils.ZkUtils; // import the package/class the method depends on

public void createTopic(String name, int numPartitions, boolean waitUntilMetadataIsPropagated) throws InterruptedException {
    ZkUtils zkUtils = null;
    Level oldLevel = UnitTestHelper.getJavaLoggingLevel();
    try {
        UnitTestHelper.setJavaLoggingLevel(Level.OFF);
        zkUtils = ZkUtils.apply(zookeeperConnectString, 30000, 30000, false);
        AdminUtilsWrapper.createTopic(zkUtils, name, numPartitions, 1, new Properties());
        if (waitUntilMetadataIsPropagated) {
            waitUntilMetadataIsPropagated(name, numPartitions);
        }
    } catch (TopicExistsException tee) {
        // the topic already exists; nothing to do
    } finally {
        if (zkUtils != null) {
            zkUtils.close();
        }
        UnitTestHelper.setJavaLoggingLevel(oldLevel);
    }
}
Example 14: createKafkaTopicIfNecessary
import kafka.utils.ZkUtils; // import the package/class the method depends on

public static boolean createKafkaTopicIfNecessary(String brokerUri, int replFactor, int numPartitions, String topic)
        throws IOException {
    URI zkUri = URI.create(brokerUri);
    Preconditions.checkArgument("zk".equals(zkUri.getScheme()));
    String zkServerList = zkUri.getAuthority() + zkUri.getPath();
    ZkUtils zkUtils = ZkUtils.apply(zkServerList, ZK_SESSION_TIMEOUT_MS,
            ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());
    try {
        if (AdminUtils.topicExists(zkUtils, topic)) {
            return false;
        }
        try {
            AdminUtils.createTopic(zkUtils, topic, numPartitions, replFactor, new Properties());
        } catch (TopicExistsException ignored) {
            return false;
        } catch (RuntimeException e) {
            throw new IOException(e);
        }
    } finally {
        if (zkUtils != null) {
            zkUtils.close();
        }
    }
    return true;
}
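A hypothetical invocation; note the method requires a zk:// URI whose authority and path point at ZooKeeper (host and topic name are placeholders):

boolean created = createKafkaTopicIfNecessary("zk://zk1:2181/kafka", 2, 4, "my-topic");
// true if the topic was created, false if it already existed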
Example 15: initializeBrokerMetaData
import kafka.utils.ZkUtils; // import the package/class the method depends on

/**
 * Generates a map of <broker_id, rack_id>.
 *
 * @param zookeeper
 *            zookeeper observer url
 * @param isRackEnabled
 *            true if the cluster is rack aware, else false
 */
private void initializeBrokerMetaData(String zookeeper, boolean isRackEnabled) {
    ZkUtils zkUtils = ZkUtils.apply(zookeeper, ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, false);
    try {
        Seq<BrokerMetadata> brokersMetadataSeq = AdminUtils.getBrokerMetadatas(zkUtils,
                RackAwareMode.Enforced$.MODULE$, scala.Option.apply(null));
        List<BrokerMetadata> brokersMetadata = scalaSeqToJavaList(brokersMetadataSeq);
        for (BrokerMetadata brokerMetadata : brokersMetadata) {
            Option<String> rack = brokerMetadata.rack();
            String rackId;
            if (!isRackEnabled) {
                if (rack != null && !rack.isEmpty()) {
                    throw new UnsupportedOperationException("Non RackAware mode selected but rack: " + rack.get()
                            + " defined for broker: " + brokerMetadata.id() + ", cannot proceed, aborting. Run balancer with --rack-aware flag");
                }
                rackId = null;
            } else if (rack == null || rack.isEmpty()) {
                throw new UnsupportedOperationException("RackAware mode selected but rack not defined for broker: "
                        + brokerMetadata.id() + ", cannot proceed, aborting");
            } else {
                rackId = rack.get();
            }
            brokerMetaDataMap.put(brokerMetadata.id(), new BrokerMetaData(rackId, 0, 0));
        }
    } finally {
        zkUtils.close();
    }
}