This article collects typical usage examples of the Java class org.apache.kafka.clients.producer.ProducerConfig. If you are wondering what the ProducerConfig class is for or how to use it, the curated examples below may help.
The ProducerConfig class belongs to the org.apache.kafka.clients.producer package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code examples.
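Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: ProducerConfig supplies String constants naming the producer settings, which are collected into a Properties object and handed to a KafkaProducer. The broker address and topic name below are placeholders.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerConfigSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // ProducerConfig constants are the canonical names of the producer settings
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.ACKS_CONFIG, "all"); // wait for the full ISR to acknowledge
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("example-topic", "key", "value"));
        }
    }
}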
Example 1: testOldProperties
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
@Test
public void testOldProperties() {
KafkaSink kafkaSink = new KafkaSink();
Context context = new Context();
context.put("topic", "test-topic");
context.put(OLD_BATCH_SIZE, "300");
context.put(BROKER_LIST_FLUME_KEY, "localhost:9092,localhost:9092");
context.put(REQUIRED_ACKS_FLUME_KEY, "all");
Configurables.configure(kafkaSink, context);
Properties kafkaProps = kafkaSink.getKafkaProps();
assertEquals(kafkaSink.getTopic(), "test-topic");
assertEquals(kafkaSink.getBatchSize(), 300);
assertEquals(kafkaProps.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG),
"localhost:9092,localhost:9092");
assertEquals(kafkaProps.getProperty(ProducerConfig.ACKS_CONFIG), "all");
}
Example 2: getDefaultParameters
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
/**
 * Set default parameters and their values.
 *
 * @return the default {@link Arguments} for the Kafka producer sampler
 */
@Override
public Arguments getDefaultParameters() {
Arguments defaultParameters = new Arguments();
defaultParameters.addArgument(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, ProducerKeys.BOOTSTRAP_SERVERS_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerKeys.ZOOKEEPER_SERVERS, ProducerKeys.ZOOKEEPER_SERVERS_DEFAULT);
defaultParameters.addArgument(ProducerKeys.KAFKA_TOPIC_CONFIG, ProducerKeys.KAFKA_TOPIC_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ProducerKeys.KEY_SERIALIZER_CLASS_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ProducerKeys.VALUE_SERIALIZER_CLASS_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.COMPRESSION_TYPE_CONFIG, ProducerKeys.COMPRESSION_TYPE_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.BATCH_SIZE_CONFIG, ProducerKeys.BATCH_SIZE_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.LINGER_MS_CONFIG, ProducerKeys.LINGER_MS_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.BUFFER_MEMORY_CONFIG, ProducerKeys.BUFFER_MEMORY_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.ACKS_CONFIG, ProducerKeys.ACKS_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.SEND_BUFFER_CONFIG, ProducerKeys.SEND_BUFFER_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerConfig.RECEIVE_BUFFER_CONFIG, ProducerKeys.RECEIVE_BUFFER_CONFIG_DEFAULT);
defaultParameters.addArgument(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.PLAINTEXT.name);
defaultParameters.addArgument(PropsKeys.MESSAGE_PLACEHOLDER_KEY, PropsKeys.MSG_PLACEHOLDER);
defaultParameters.addArgument(ProducerKeys.KERBEROS_ENABLED, ProducerKeys.KERBEROS_ENABLED_DEFULAT);
defaultParameters.addArgument(ProducerKeys.JAVA_SEC_AUTH_LOGIN_CONFIG, ProducerKeys.JAVA_SEC_AUTH_LOGIN_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerKeys.JAVA_SEC_KRB5_CONFIG, ProducerKeys.JAVA_SEC_KRB5_CONFIG_DEFAULT);
defaultParameters.addArgument(ProducerKeys.SASL_KERBEROS_SERVICE_NAME, ProducerKeys.SASL_KERBEROS_SERVICE_NAME_DEFAULT);
defaultParameters.addArgument(ProducerKeys.SASL_MECHANISM, ProducerKeys.SASL_MECHANISM_DEFAULT);
return defaultParameters;
}
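Example 2 only registers defaults; in a JMeter sampler these values are read back at run time through JavaSamplerContext. A hedged sketch of that read-back (an assumed pattern, not pepperbox's actual code; the 'producer' field is assumed):

// Hedged sketch: reading the defaults back inside the sampler (assumed pattern)
@Override
public void setupTest(JavaSamplerContext context) {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
            context.getParameter(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG));
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            context.getParameter(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG));
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
            context.getParameter(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG));
    producer = new KafkaProducer<>(props); // 'producer' is an assumed field
}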
Example 3: main
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
public static void main(String[] args) throws ExecutionException, InterruptedException {
Map<String, Object> props = new HashMap<>();
// list of host:port pairs used for establishing the initial connections
// to the Kafka cluster
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
"kafka-local:9092");
// props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
// JsonSerializer.class);
// props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
// JsonSerializer.class);
// maximum time that send() and related calls may block, after which a TimeoutException is thrown
props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 50000);
// ProducerConfig and AdminClientConfig share the "bootstrap.servers" key,
// so the same properties map can bootstrap an AdminClient
AdminClient adminClient = AdminClient.create(props);
adminClient.describeCluster();
Collection<TopicListing> topicListings = adminClient.listTopics().listings().get();
System.out.println(topicListings);
adminClient.close();
}
Example 4: run
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
public void run(Configuration configuration, Environment environment) throws Exception {
final CollectorRegistry collectorRegistry = new CollectorRegistry();
collectorRegistry.register(new DropwizardExports(environment.metrics()));
environment.admin()
.addServlet("metrics", new MetricsServlet(collectorRegistry))
.addMapping("/metrics");
final PrometheusMetricsReporter reporter = PrometheusMetricsReporter.newMetricsReporter()
.withCollectorRegistry(collectorRegistry)
.withConstLabel("service", getName())
.build();
final Tracer tracer = getTracer();
final Tracer metricsTracer = io.opentracing.contrib.metrics.Metrics.decorate(tracer, reporter);
GlobalTracer.register(metricsTracer);
final DynamicFeature tracing = new ServerTracingDynamicFeature.Builder(metricsTracer).build();
environment.jersey().register(tracing);
final Properties producerConfigs = new Properties();
producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "tweets-kafka:9092");
producerConfigs.put(ProducerConfig.ACKS_CONFIG, "all");
producerConfigs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
final KafkaProducer<Long, String> kafkaProducer =
new KafkaProducer<>(producerConfigs, new LongSerializer(), new StringSerializer());
final Producer<Long, String> tracingKafkaProducer =
new TracingKafkaProducer<>(kafkaProducer, metricsTracer);
final ObjectMapper objectMapper = environment.getObjectMapper();
final TweetEventRepository tweetRepository = new KafkaTweetEventRepository(tracingKafkaProducer, objectMapper);
final TweetsService tweetsService = new TweetsService(tweetRepository);
final TweetsResource tweetsResource = new TweetsResource(tweetsService);
environment.jersey().register(tweetsResource);
}
Example 5: createProducer
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
public void createProducer(String bootstrapServer) {
long numberOfEvents = 5;
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
KafkaProducer<String, String> producer = new KafkaProducer<>(
props);
for (int i = 0; i < numberOfEvents; i++) {
String key = "testContainers";
String value = "AreAwesome";
ProducerRecord<String, String> record = new ProducerRecord<>(
"hello_world_topic", key, value);
try {
producer.send(record).get();
} catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
}
System.out.printf("key = %s, value = %s\n", key, value);
}
producer.close();
}
Example 6: KmqClient
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
public KmqClient(KmqConfig config, KafkaClients clients,
Class<? extends Deserializer<K>> keyDeserializer,
Class<? extends Deserializer<V>> valueDeserializer,
long msgPollTimeout) {
this.config = config;
this.msgPollTimeout = msgPollTimeout;
this.msgConsumer = clients.createConsumer(config.getMsgConsumerGroupId(), keyDeserializer, valueDeserializer);
// Using the custom partitioner, each offset-partition will contain markers only from a single queue-partition.
this.markerProducer = clients.createProducer(
MarkerKey.MarkerKeySerializer.class, MarkerValue.MarkerValueSerializer.class,
Collections.singletonMap(ProducerConfig.PARTITIONER_CLASS_CONFIG, ParititionFromMarkerKey.class));
LOG.info(String.format("Subscribing to topic: %s, using group id: %s", config.getMsgTopic(), config.getMsgConsumerGroupId()));
msgConsumer.subscribe(Collections.singletonList(config.getMsgTopic()));
}
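Example 6 selects a custom partitioner through ProducerConfig.PARTITIONER_CLASS_CONFIG. For readers unfamiliar with that hook, a minimal sketch of the Partitioner interface follows; kmq's real ParititionFromMarkerKey instead derives the partition directly from the marker key.

import java.util.Map;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.utils.Utils;

// Minimal sketch of a custom partitioner: hash the serialized key over the
// topic's partitions. kmq's actual class reads the partition out of the key.
public class ExamplePartitioner implements Partitioner {
    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionsForTopic(topic).size();
        return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
    }

    @Override
    public void close() {
        // no resources to release
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // no configuration needed
    }
}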
Example 7: overridingProps
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
@Override
public Properties overridingProps() {
Properties props = new Properties();
int port = findLocalPort();
// Re-key every producer config with the Cruise Control metrics reporter prefix so the reporter picks it up.
setSecurityConfigs(props, "producer");
for (String configName : ProducerConfig.configNames()) {
Object value = props.get(configName);
if (value != null) {
props.remove(configName);
props.put(appendPrefix(configName), value);
}
}
props.setProperty("metric.reporters", CruiseControlMetricsReporter.class.getName());
props.setProperty("listeners", "SSL://127.0.0.1:" + port);
props.setProperty(CruiseControlMetricsReporterConfig.config(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), "127.0.0.1:" + port);
props.setProperty(CruiseControlMetricsReporterConfig.config(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG), "SSL");
props.setProperty(CRUISE_CONTROL_METRICS_REPORTING_INTERVAL_MS_CONFIG, "100");
props.setProperty(CRUISE_CONTROL_METRICS_TOPIC_CONFIG, TOPIC);
return props;
}
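The appendPrefix helper above is defined elsewhere in the Cruise Control test; a plausible sketch (an assumption, not the project's actual code) is that it simply re-keys the config under the metrics reporter prefix:

// Hypothetical sketch of the helper used above; the real implementation may differ
private String appendPrefix(String configName) {
    return CruiseControlMetricsReporterConfig.config(configName);
}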
Example 8: setUp
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
@Before
public void setUp() {
super.setUp();
Properties props = new Properties();
props.setProperty(ProducerConfig.ACKS_CONFIG, "-1");
AtomicInteger failed = new AtomicInteger(0);
try (Producer<String, String> producer = createProducer(props)) {
for (int i = 0; i < 10; i++) {
producer.send(new ProducerRecord<>("TestTopic", Integer.toString(i)), new Callback() {
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
if (e != null) {
failed.incrementAndGet();
}
}
});
}
}
assertEquals(0, failed.get());
}
Example 9: initialize
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
public void initialize(String servers) {
if (isInitialized.get()) {
logger.warn("Already initialized");
return;
}
Properties props = new Properties();
props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, servers);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, SixtPartitioner.class.getName());
props.put(ProducerConfig.RETRIES_CONFIG, "3");
props.put(ProducerConfig.ACKS_CONFIG, "all");
properties.forEach(props::put);
realProducer = new KafkaProducer<>(props);
isInitialized.set(true);
}
Example 10: produce
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
public void produce(UUID key, Object value) {
ConfigurationService configService = ServiceLocator
.findService(ConfigurationService.class);
Properties kafkaProps = new Properties();
kafkaProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
configService.getVOConfigurationSetting(
ConfigurationKey.KAFKA_BOOTSTRAP_SERVERS, "global")
.getValue());
this.producer = new KafkaProducer<>(kafkaProps, new UUIDSerializer(),
new DataSerializer(value.getClass()));
try {
producer.send(new ProducerRecord<>(TOPIC, key, value));
} catch (Exception e) {
LOGGER.error("Producer closed");
e.printStackTrace();
} finally {
producer.close();
LOGGER.debug("Producer closed");
}
}
Example 11: publishDummyData
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
public void publishDummyData() {
final String topic = "TestTopic";
// Create publisher
final Map<String, Object> config = new HashMap<>();
config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
final KafkaProducer<String, String> producer = new KafkaProducer<>(config);
// Publish 26 records whose key and value are the single characters 'A' through 'Z'
for (int charCode = 65; charCode < 91; charCode++) {
final char[] key = new char[1];
key[0] = (char) charCode;
producer.send(new ProducerRecord<>(topic, new String(key), new String(key)));
}
producer.flush();
producer.close();
}
Example 12: publishDummyDataNumbers
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
public void publishDummyDataNumbers() {
final String topic = "NumbersTopic";
// Create publisher
final Map<String, Object> config = new HashMap<>();
config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
final KafkaProducer<Integer, Integer> producer = new KafkaProducer<>(config);
// Publish 10,000 records whose key and value are the same integer
for (int value = 0; value < 10000; value++) {
producer.send(new ProducerRecord<>(topic, value, value));
}
producer.flush();
producer.close();
}
Example 13: Producer
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
public Producer() {
LOGGER.log(Level.INFO, "Kafka Producer running in thread {0}", Thread.currentThread().getName());
Properties kafkaProps = new Properties();
String defaultClusterValue = "localhost:9092";
String kafkaCluster = System.getProperty(KAFKA_CLUSTER_ENV_VAR_NAME, defaultClusterValue);
LOGGER.log(Level.INFO, "Kafka cluster {0}", kafkaCluster);
kafkaProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster);
kafkaProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
kafkaProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
kafkaProps.put(ProducerConfig.ACKS_CONFIG, "0"); // acks=0: fire-and-forget, no broker acknowledgement
this.kafkaProducer = new KafkaProducer<>(kafkaProps);
}
Example 14: KafkaLogbackAppender
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
public KafkaLogbackAppender(final Properties producerConfig,
final String topic) {
this.topic = topic;
// Build default properties for the Kafka producer; the caller-supplied config overrides them below
this.producerConfig = new Properties();
this.producerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
this.producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0);
this.producerConfig.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
this.producerConfig.put(ProducerConfig.LINGER_MS_CONFIG, 1);
this.producerConfig.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
this.producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
org.apache.kafka.common.serialization.StringSerializer.class.getName());
this.producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
org.apache.kafka.common.serialization.ByteArraySerializer.class.getName());
this.producerConfig.putAll(producerConfig);
}
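Because putAll runs after the defaults are set, any key in the caller-supplied Properties wins. A brief usage sketch (hypothetical values):

// Hypothetical usage: caller-supplied properties override the defaults above
Properties userProps = new Properties();
userProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
userProps.put(ProducerConfig.RETRIES_CONFIG, 5); // overrides the default of 0
KafkaLogbackAppender appender = new KafkaLogbackAppender(userProps, "application-logs");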
Example 15: produceRecords
import org.apache.kafka.clients.producer.ProducerConfig; // import the required package/class
private static void produceRecords(String bootstrapServers) {
Properties properties = new Properties();
properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class.getName());
properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
Producer<Long, byte[]> producer = new KafkaProducer<>(properties);
LongStream.rangeClosed(1, 100).boxed()
        .map(number ->
                new ProducerRecord<>(
                        TOPIC,                                          // topic
                        number,                                         // key
                        String.format("record-%d", number).getBytes())) // value
        .forEach(producer::send);
producer.close();
}