This article collects typical usage examples of the Java class org.apache.kafka.clients.CommonClientConfigs. If you are unsure what CommonClientConfigs is for or how to use it, the curated examples below should help.
The CommonClientConfigs class belongs to the org.apache.kafka.clients package. The sections below present 14 code examples of the class, sorted roughly by popularity.
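Before diving into the examples, here is a minimal sketch of what the class is for: CommonClientConfigs holds the configuration keys (and a few defaults) shared by all Kafka clients, so producer, consumer, and admin code can reference settings such as bootstrap.servers by constant rather than by raw string. The broker address below is a placeholder.
import java.util.Properties;

import org.apache.kafka.clients.CommonClientConfigs;

public class CommonClientConfigsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG is the constant for "bootstrap.servers"
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        // "client.id", shared by producers, consumers, and admin clients alike
        props.put(CommonClientConfigs.CLIENT_ID_CONFIG, "demo-client");
        // "security.protocol"; PLAINTEXT is the Kafka default
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
        props.forEach((key, value) -> System.out.println(key + " = " + value));
    }
}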
Example 1: testOldProperties
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
@Test
public void testOldProperties() {
KafkaSink kafkaSink = new KafkaSink();
Context context = new Context();
context.put("topic", "test-topic");
context.put(OLD_BATCH_SIZE, "300");
context.put(BROKER_LIST_FLUME_KEY, "localhost:9092,localhost:9092");
context.put(REQUIRED_ACKS_FLUME_KEY, "all");
Configurables.configure(kafkaSink, context);
Properties kafkaProps = kafkaSink.getKafkaProps();
assertEquals(kafkaSink.getTopic(), "test-topic");
assertEquals(kafkaSink.getBatchSize(), 300);
assertEquals(kafkaProps.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG),
"localhost:9092,localhost:9092");
assertEquals(kafkaProps.getProperty(ProducerConfig.ACKS_CONFIG), "all");
}
Example 2: getDefaultParameters
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
/**
* Set the default parameters and their values.
*
* @return the default {@link Arguments} for this sampler
*/
@Override
public Arguments getDefaultParameters() {
Arguments defaultParameters = new Arguments();
defaultParameters.addArgument(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, ProducerKeys.BOOTSTRAP_SERVERS_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerKeys.ZOOKEEPER_SERVERS, ProducerKeys.ZOOKEEPER_SERVERS_DEFAULT);
defaultParameters.addArgument(ProducerKeys.KAFKA_TOPIC_CONFIG, ProducerKeys.KAFKA_TOPIC_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ProducerKeys.KEY_SERIALIZER_CLASS_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ProducerKeys.VALUE_SERIALIZER_CLASS_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.COMPRESSION_TYPE_CONFIG, ProducerKeys.COMPRESSION_TYPE_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.BATCH_SIZE_CONFIG, ProducerKeys.BATCH_SIZE_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.LINGER_MS_CONFIG, ProducerKeys.LINGER_MS_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.BUFFER_MEMORY_CONFIG, ProducerKeys.BUFFER_MEMORY_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.ACKS_CONFIG, ProducerKeys.ACKS_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.SEND_BUFFER_CONFIG, ProducerKeys.SEND_BUFFER_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.RECEIVE_BUFFER_CONFIG, ProducerKeys.RECEIVE_BUFFER_CONFIG_DEFAULT);
defaultParameters.addArgument(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.PLAINTEXT.name);
defaultParameters.addArgument(PropsKeys.MESSAGE_PLACEHOLDER_KEY, PropsKeys.MSG_PLACEHOLDER);
defaultParameters.addArgument(ProducerKeys.KERBEROS_ENABLED, ProducerKeys.KERBEROS_ENABLED_DEFULAT);
defaultParameters.addArgument(ProducerKeys.JAVA_SEC_AUTH_LOGIN_CONFIG, ProducerKeys.JAVA_SEC_AUTH_LOGIN_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerKeys.JAVA_SEC_KRB5_CONFIG, ProducerKeys.JAVA_SEC_KRB5_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerKeys.SASL_KERBEROS_SERVICE_NAME, ProducerKeys.SASL_KERBEROS_SERVICE_NAME_DEFAULT);
defaultParameters.addArgument(ProducerKeys.SASL_MECHANISM, ProducerKeys.SASL_MECHANISM_DEFAULT);
return defaultParameters;
}
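getDefaultParameters() comes from JMeter's Java sampler API and pre-populates the sampler GUI with the arguments above. As a hedged sketch of the other half of that contract (the class and helper below are hypothetical; JavaSamplerContext.getParameter is the standard JMeter accessor), the runTest side would read the arguments back like this:
import java.util.Properties;

import org.apache.jmeter.protocol.java.sampler.JavaSamplerContext;
import org.apache.kafka.clients.producer.ProducerConfig;

public final class SamplerArgsSketch {
    // Hypothetical helper: copy the GUI-supplied arguments into producer Properties.
    static Properties toProducerProps(JavaSamplerContext context) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
                context.getParameter(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG));
        props.put(ProducerConfig.ACKS_CONFIG,
                context.getParameter(ProducerConfig.ACKS_CONFIG));
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG,
                context.getParameter(ProducerConfig.COMPRESSION_TYPE_CONFIG));
        return props;
    }
}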
Example 3: overridingProps
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
@Override
public Properties overridingProps() {
Properties props = new Properties();
int port = findLocalPort();
// We need to convert all the properties to the Cruise Control properties.
setSecurityConfigs(props, "producer");
for (String configName : ProducerConfig.configNames()) {
Object value = props.get(configName);
if (value != null) {
props.remove(configName);
props.put(appendPrefix(configName), value);
}
}
props.setProperty("metric.reporters", CruiseControlMetricsReporter.class.getName());
props.setProperty("listeners", "SSL://127.0.0.1:" + port);
props.setProperty(CruiseControlMetricsReporterConfig.config(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), "127.0.0.1:" + port);
props.setProperty(CruiseControlMetricsReporterConfig.config(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG), "SSL");
props.setProperty(CRUISE_CONTROL_METRICS_REPORTING_INTERVAL_MS_CONFIG, "100");
props.setProperty(CRUISE_CONTROL_METRICS_TOPIC_CONFIG, TOPIC);
return props;
}
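The appendPrefix and CruiseControlMetricsReporterConfig.config calls namespace plain client keys so the reporter can pick its own settings out of the broker configuration. A hypothetical sketch of the idea; the exact prefix string is an assumption, not taken from the source:
public final class ReporterPrefixSketch {
    // Assumed prefix; the real value is defined by CruiseControlMetricsReporterConfig.
    private static final String PREFIX = "cruise.control.metrics.reporter.";

    static String appendPrefix(String configName) {
        return PREFIX + configName;
    }

    public static void main(String[] args) {
        // A plain producer key becomes a reporter-scoped broker property:
        System.out.println(appendPrefix("bootstrap.servers"));
        // -> cruise.control.metrics.reporter.bootstrap.servers
    }
}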
Example 4: initialize
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
public void initialize(String servers) {
if (isInitialized.get()) {
logger.warn("Already initialized");
return;
}
Properties props = new Properties();
props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, servers);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, SixtPartitioner.class.getName());
props.put(ProducerConfig.RETRIES_CONFIG, "3");
props.put(ProducerConfig.ACKS_CONFIG, "all");
properties.forEach(props::put);
realProducer = new KafkaProducer<>(props);
isInitialized.set(true);
}
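Once initialize has run, the wrapped producer behaves like any KafkaProducer. A minimal standalone sketch of the equivalent send path using only the standard client API (the address and topic are placeholders, and the custom SixtPartitioner is omitted for simplicity):
import java.util.Properties;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerSendSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.ACKS_CONFIG, "all"); // same durability choice as above
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value"),
                    (metadata, exception) -> {
                        if (exception != null) {
                            exception.printStackTrace();
                        }
                    });
        } // close() flushes any buffered records
    }
}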
Example 5: create
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
/**
* Create a new AdminClient instance.
* @param clusterConfig the cluster to connect to.
* @param clientId the client id to associate with the connection.
*/
public AdminClient create(final ClusterConfig clusterConfig, final String clientId) {
// Build the AdminClient configuration map
final Map<String, Object> config = new HashMap<>();
config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterConfig.getConnectString());
config.put(AdminClientConfig.CLIENT_ID_CONFIG, clientId);
config.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeout);
if (clusterConfig.isUseSsl()) {
config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
config.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, keyStoreRootPath + "/" + clusterConfig.getKeyStoreFile());
config.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, clusterConfig.getKeyStorePassword());
config.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, keyStoreRootPath + "/" + clusterConfig.getTrustStoreFile());
config.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, clusterConfig.getTrustStorePassword());
}
return KafkaAdminClient.create(config);
}
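The map built here is ordinary AdminClient configuration, so the result can be used like any admin client. A minimal sketch with an inlined plaintext config (the address is a placeholder; the SSL branch above would add the keystore/truststore entries instead):
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class AdminClientSketch {
    public static void main(String[] args) throws Exception {
        Map<String, Object> config = new HashMap<>();
        config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
        try (AdminClient admin = AdminClient.create(config)) {
            Set<String> topics = admin.listTopics().names().get(); // blocks on the future
            System.out.println(topics);
        }
    }
}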
Example 6: testOldConfig
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
@Test
public void testOldConfig() throws Exception {
Context context = new Context();
context.put(BROKER_LIST_FLUME_KEY, testUtil.getKafkaServerUrl());
context.put(GROUP_ID_FLUME, "flume-something");
context.put(READ_SMALLEST_OFFSET, "true");
context.put("topic", topic);
final KafkaChannel channel = new KafkaChannel();
Configurables.configure(channel, context);
Properties consumerProps = channel.getConsumerProps();
Properties producerProps = channel.getProducerProps();
Assert.assertEquals(producerProps.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG),
testUtil.getKafkaServerUrl());
Assert.assertEquals(consumerProps.getProperty(ConsumerConfig.GROUP_ID_CONFIG),
"flume-something");
Assert.assertEquals(consumerProps.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG),
"earliest");
}
Example 7: getConsumerConfigs
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
/**
* Get the configs for the {@link KafkaConsumer consumer}.
* Properties using the prefix {@link #CONSUMER_PREFIX} will be used in favor of their non-prefixed versions
* except in the case of {@link ConsumerConfig#BOOTSTRAP_SERVERS_CONFIG}, where we always use the non-prefixed
* version, as we only support reading from and writing to the same Kafka cluster.
*
* @param streamThread the {@link StreamThread} creating a consumer
* @param groupId consumer groupId
* @param clientId clientId
* @return Map of the consumer configuration.
* @throws ConfigException if {@code "enable.auto.commit"} was set to {@code false} by the user
*/
public Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId) throws ConfigException {
final Map<String, Object> consumerProps = getCommonConsumerConfigs();
// add client id with stream client id prefix, and group id
consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer");
// add configs required for stream partition assignor
consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread);
consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG));
consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG));
consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName());
consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG));
consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG));
return consumerProps;
}
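The precedence rule from the javadoc is visible from the caller's side: a consumer.-prefixed key overrides its plain counterpart, but for the consumer only. A minimal sketch using the public StreamsConfig helpers (application id and servers are placeholders):
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.streams.StreamsConfig;

public class ConsumerPrefixSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "demo-app");           // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder
        // Un-prefixed: applies to every client the streams app creates.
        props.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, 32 * 1024);
        // consumer.-prefixed: wins over the plain key, but for the consumer only.
        props.put(StreamsConfig.consumerPrefix(ConsumerConfig.RECEIVE_BUFFER_CONFIG), 64 * 1024);
        new StreamsConfig(props); // validates the combined configuration
    }
}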
Example 8: getProducerConfigs
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
/**
* Get the configs for the {@link KafkaProducer producer}.
* Properties using the prefix {@link #PRODUCER_PREFIX} will be used in favor of their non-prefixed versions
* except in the case of {@link ProducerConfig#BOOTSTRAP_SERVERS_CONFIG}, where we always use the non-prefixed
* version, as we only support reading from and writing to the same Kafka cluster.
*
* @param clientId clientId
* @return Map of the producer configuration.
*/
public Map<String, Object> getProducerConfigs(final String clientId) {
final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames());
if (eosEnabled) {
if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) {
throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG
+ "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled.");
}
if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) {
throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION
+ "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection.");
}
}
// generate producer configs from original properties and overridden maps
final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES);
props.putAll(clientProvidedProps);
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG));
// add client id with stream client id prefix
props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer");
return props;
}
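The two ConfigException checks exist because exactly-once fixes idempotence and the in-flight request count, so the caller simply enables the guarantee and leaves those producer keys alone. A minimal sketch (placeholders as before):
import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;

public class EosConfigSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "eos-demo");           // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder
        // Selecting exactly-once is sufficient; also setting
        // producer.enable.idempotence or producer.max.in.flight.requests.per.connection
        // would make getProducerConfigs throw a ConfigException.
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
        new StreamsConfig(props);
    }
}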
Example 9: testSerializerClose
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
@Test
public void testSerializerClose() throws Exception {
Map<String, Object> configs = new HashMap<>();
configs.put(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
configs.put(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL);
final int oldInitCount = MockSerializer.INIT_COUNT.get();
final int oldCloseCount = MockSerializer.CLOSE_COUNT.get();
KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(
configs, new MockSerializer(), new MockSerializer());
assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get());
assertEquals(oldCloseCount, MockSerializer.CLOSE_COUNT.get());
producer.close();
assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get());
assertEquals(oldCloseCount + 2, MockSerializer.CLOSE_COUNT.get());
}
Example 10: defaultProps
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
public Properties defaultProps() {
Properties props = new Properties();
props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, HopsUtil.getBrokerEndpoints());
props.setProperty("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
// configure the SSL parameters
props.setProperty(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
props.setProperty(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, HopsUtil.getTrustStore());
props.setProperty(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, HopsUtil.getTruststorePwd());
props.setProperty(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, HopsUtil.getKeyStore());
props.setProperty(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, HopsUtil.getKeystorePwd());
props.setProperty(SslConfigs.SSL_KEY_PASSWORD_CONFIG, HopsUtil.getKeystorePwd());
return props;
}
Example 11: getSparkStructuredStreamingKafkaProps
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
/**
* Build the option map for Spark Structured Streaming's Kafka source.
*
* @param userOptions additional options that override the generated defaults; may be {@code null}
* @return the combined option map
*/
public Map<String, String> getSparkStructuredStreamingKafkaProps(Map<String, String> userOptions) {
// Create the option map for the Kafka source
Map<String, String> options = new HashMap<>();
options.put("kafka." + ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, HopsUtil.getBrokerEndpoints());
options.put("subscribe", HopsUtil.getTopicsAsCSV());
options.put("kafka." + CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
options.put("kafka." + SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, HopsUtil.getTrustStore());
options.put("kafka." + SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, HopsUtil.getTruststorePwd());
options.put("kafka." + SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, HopsUtil.getKeyStore());
options.put("kafka." + SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, HopsUtil.getKeystorePwd());
options.put("kafka." + SslConfigs.SSL_KEY_PASSWORD_CONFIG, HopsUtil.getKeystorePwd());
if (userOptions != null) {
options.putAll(userOptions);
}
return options;
}
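Every Kafka-specific key carries the kafka. prefix because Spark's Kafka source strips that prefix and hands the remainder to the underlying consumer, while unprefixed keys such as subscribe are interpreted by Spark itself. A hedged sketch of feeding the map into a structured streaming read (the SparkSession is assumed, and the spark-sql-kafka connector must be on the classpath):
import java.util.Map;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class KafkaStreamSketch {
    // `options` would come from getSparkStructuredStreamingKafkaProps(null).
    static Dataset<Row> openStream(SparkSession spark, Map<String, String> options) {
        return spark.readStream()
                .format("kafka")  // Spark's Kafka source
                .options(options) // subscribe + kafka.*-prefixed security settings
                .load();
    }
}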
Example 12: configureKafkaMetrics
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
/**
* Method for setting Kafka-related properties, required for proper updating of metrics.
*
* @param configs {@link Map} with Kafka-specific properties, required to initialize the appropriate consumer/producer.
* @param gaugeService reference to an instance of Spring's {@link GaugeService}, used to publish the collected metrics
* @param prefix initial part of the metric's label
* @param executorService reference to an instance of {@link ScheduledExecutorService}, used to schedule the periodic recalculation of metric values
* @param updateInterval interval at which the whole set of tracked metrics is recalculated and resubmitted
*/
public static void configureKafkaMetrics(Map<String, Object> configs, GaugeService gaugeService, String prefix, ScheduledExecutorService executorService, Long updateInterval) {
if (gaugeService == null) {
throw new IllegalArgumentException("Initializing GaugeService as null is meaningless!");
}
configs.put(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, Collections.singletonList(KafkaStatisticsProvider.class.getName()));
configs.put(KafkaStatisticsProvider.METRICS_GAUGE_SERVICE_IMPL, gaugeService);
LOGGER.debug("Set property {} with provided GaugeService instance reference", KafkaStatisticsProvider.METRICS_GAUGE_SERVICE_IMPL);
if (executorService != null) {
configs.put(KafkaStatisticsProvider.METRICS_UPDATE_EXECUTOR_IMPL, executorService);
LOGGER.debug("Set property {} with provided ScheduledExecutorService instance reference", KafkaStatisticsProvider.METRICS_UPDATE_EXECUTOR_IMPL);
}
if (updateInterval != null) {
configs.put(KafkaStatisticsProvider.METRICS_UPDATE_INTERVAL_PARAM, updateInterval);
LOGGER.debug("Set property {} with value {}", KafkaStatisticsProvider.METRICS_UPDATE_INTERVAL_PARAM, updateInterval);
}
if (prefix != null) {
configs.put(KafkaStatisticsProvider.METRICS_PREFIX_PARAM, prefix);
LOGGER.debug("Set property {} with value {}", KafkaStatisticsProvider.METRICS_PREFIX_PARAM, prefix);
}
}
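A hedged sketch of calling the helper when building a consumer configuration; the prefix and interval values are placeholders, KafkaConfigUtils is assumed to be on the classpath (it is the class exercised by the two tests below), and the GaugeService would normally be injected by Spring:
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

import org.springframework.boot.actuate.metrics.GaugeService;

public class MetricsWiringSketch {
    static Map<String, Object> buildConfigs(GaugeService gaugeService) {
        Map<String, Object> configs = new HashMap<>();
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        // Registers KafkaStatisticsProvider as the metric reporter and stashes the
        // collaborators it needs under its custom config keys (see the method above).
        KafkaConfigUtils.configureKafkaMetrics(configs, gaugeService, "demo.kafka", executor, 30_000L);
        return configs;
    }
}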
Example 13: configureKafkaMetrics_withGaugeServiceAndPrefix
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
@Test
public void configureKafkaMetrics_withGaugeServiceAndPrefix() {
Map<String, Object> config = new HashMap<>();
assertThat(config).isEmpty();
GaugeService gaugeService = mockGaugeService();
String prefix = "test.prefix";
KafkaConfigUtils.configureKafkaMetrics(config, gaugeService, prefix, null, null);
assertThat(config).hasSize(3);
assertThat(config.get(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG))
.asList()
.contains(KafkaStatisticsProvider.class.getCanonicalName());
assertThat(config.get(KafkaStatisticsProvider.METRICS_GAUGE_SERVICE_IMPL)).isSameAs(gaugeService);
assertThat(config.get(KafkaStatisticsProvider.METRICS_PREFIX_PARAM)).isEqualTo(prefix);
assertThat(config.get(KafkaStatisticsProvider.METRICS_UPDATE_EXECUTOR_IMPL)).isNull();
assertThat(config.get(KafkaStatisticsProvider.METRICS_UPDATE_INTERVAL_PARAM)).isNull();
}
Example 14: configureKafkaMetrics_withGaugeServiceAndInterval
import org.apache.kafka.clients.CommonClientConfigs; // import the required package/class
@Test
public void configureKafkaMetrics_withGaugeServiceAndInterval() {
Map<String, Object> config = new HashMap<>();
assertThat(config).isEmpty();
GaugeService gaugeService = mockGaugeService();
Long universalAnswer = 42L;
KafkaConfigUtils.configureKafkaMetrics(config, gaugeService, null, null, universalAnswer);
assertThat(config).hasSize(3);
assertThat(config.get(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG))
.asList()
.contains(KafkaStatisticsProvider.class.getCanonicalName());
assertThat(config.get(KafkaStatisticsProvider.METRICS_GAUGE_SERVICE_IMPL)).isSameAs(gaugeService);
assertThat(config.get(KafkaStatisticsProvider.METRICS_PREFIX_PARAM)).isNull();
assertThat(config.get(KafkaStatisticsProvider.METRICS_UPDATE_EXECUTOR_IMPL)).isNull();
assertThat(config.get(KafkaStatisticsProvider.METRICS_UPDATE_INTERVAL_PARAM)).isEqualTo(universalAnswer);
}