This article compiles typical usage examples of the Java class org.apache.kafka.clients.consumer.ConsumerConfig. If you are struggling with questions such as what ConsumerConfig is for, how to use it, or where to find examples of it in context, the curated code samples below may help.
The ConsumerConfig class belongs to the org.apache.kafka.clients.consumer package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
Example 1: createSystemConsumer
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
private void createSystemConsumer(String name, MessageListener<String, String> consumeEvent) {
    log.info("Creating kafka consumer for topic {}", name);
    ContainerProperties containerProps = new ContainerProperties(name);
    Map<String, Object> props = kafkaProperties.buildConsumerProperties();
    if (name.equals(applicationProperties.getKafkaSystemTopic())) {
        // a random group id gives each instance its own copy of system-topic events
        props.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    }
    ConsumerFactory<String, String> factory = new DefaultKafkaConsumerFactory<>(props);
    ConcurrentMessageListenerContainer<String, String> container =
            new ConcurrentMessageListenerContainer<>(factory, containerProps);
    container.setupMessageListener(consumeEvent);
    container.start();
    log.info("Successfully created kafka consumer for topic {}", name);
}
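For context, a caller can register such a consumer with a lambda, since spring-kafka's MessageListener has a single onMessage method. A minimal sketch (the topic name and log line are illustrative, not from the original project):

// Hypothetical call site: subscribe to the system topic with an inline listener.
createSystemConsumer("system-events", record ->
        log.info("system event key={} value={}", record.key(), record.value()));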
Example 2: setup
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
@Before
public void setup() {
    this.time = new MockTime();
    this.subscriptions = new SubscriptionState(OffsetResetStrategy.EARLIEST);
    this.metadata = new Metadata(0, Long.MAX_VALUE, true);
    this.metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    this.client = new MockClient(time, metadata);
    this.consumerClient = new ConsumerNetworkClient(client, metadata, time, 100, 1000);
    this.metrics = new Metrics(time);
    this.rebalanceListener = new MockRebalanceListener();
    this.mockOffsetCommitCallback = new MockCommitCallback();
    this.partitionAssignor.clear();
    client.setNode(node);
    this.coordinator = buildCoordinator(metrics, assignors,
            ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, autoCommitEnabled, true);
}
Example 3: getConsumer
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
private KafkaConsumer<String, Serializable> getConsumer(String groupId) {
    // reuse a cached consumer for this group id if one already exists
    KafkaConsumer<String, Serializable> kafkaConsumer = kafkaConsumers.get(groupId);
    if (kafkaConsumer != null) {
        return kafkaConsumer;
    }
    Properties properties = new Properties();
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
            StringDeserializer.class.getName());
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
            StringDeserializer.class.getName());
    kafkaConsumer = new KafkaConsumer<>(properties);
    kafkaConsumers.put(groupId, kafkaConsumer);
    return kafkaConsumer;
}
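A hypothetical caller (group and topic names invented for illustration) would then subscribe and poll; String values satisfy the Serializable type parameter because StringDeserializer is configured:

// Hypothetical usage of the cached consumer, using the legacy poll(long) API.
KafkaConsumer<String, Serializable> consumer = getConsumer("demo-group");
consumer.subscribe(Collections.singletonList("demo-topic"));
ConsumerRecords<String, Serializable> records = consumer.poll(1000L);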
Example 4: execute
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
@Override
public void execute(ServiceContext ctx) throws Exception {
    active = true;
    receivedIds = new GridConcurrentHashSet<>();
    Properties config = new Properties();
    config.putAll(dataRecoveryConfig.getConsumerConfig());
    config.put(ConsumerConfig.GROUP_ID_CONFIG, ReceivedTransactionsListenerImpl.class.getSimpleName());
    config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
    try (Consumer<ByteBuffer, ByteBuffer> consumer = kafkaFactory.consumer(config)) {
        consumer.subscribe(Arrays.asList(dataRecoveryConfig.getRemoteTopic(),
                dataRecoveryConfig.getReconciliationTopic()));
        while (active) {
            ConsumerRecords<ByteBuffer, ByteBuffer> poll = consumer.poll(500);
            for (ConsumerRecord<ByteBuffer, ByteBuffer> record : poll) {
                // track every transaction id seen on either topic
                TransactionMetadata metadata = serializer.deserialize(record.key());
                receivedIds.add(metadata.getTransactionId());
            }
            consumer.commitSync();
        }
    }
}
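The loop spins until the active flag is cleared. The matching shutdown path is not shown in the example, but for an Ignite service it would plausibly look like this sketch:

// Hypothetical companion method: clears the flag so the poll loop exits
// and the try-with-resources block closes the consumer.
@Override
public void cancel(ServiceContext ctx) {
    active = false;
}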
Example 5: main
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-pipe");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    // set offset reset to earliest so the demo can be re-run with the same pre-loaded data
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KStreamBuilder builder = new KStreamBuilder();
    builder.stream("streams-file-input").to("streams-pipe-output");
    KafkaStreams streams = new KafkaStreams(builder, props);
    streams.start();
    // usually a streams application runs forever; this example processes finite
    // input, so we let it run briefly and then stop
    Thread.sleep(5000L);
    streams.close();
}
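KStreamBuilder is the pre-1.0 Streams API; on newer clients the same pipe is expressed with StreamsBuilder. A sketch for comparison (assuming kafka-streams 1.0+, not part of the original example):

// Equivalent topology on the newer API (assumption: kafka-streams 1.0 or later).
StreamsBuilder newBuilder = new StreamsBuilder();
newBuilder.stream("streams-file-input").to("streams-pipe-output");
KafkaStreams newStreams = new KafkaStreams(newBuilder.build(), props);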
Example 6: builder
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
public KafkaSpoutConfig.Builder<String, String> builder(String bootstrapServers, String topic, Class<?> klass) {
    if (bootstrapServers == null || bootstrapServers.isEmpty()) {
        throw new IllegalArgumentException("bootstrap servers cannot be null or empty");
    }
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(KafkaSpoutConfig.Consumer.GROUP_ID, groupId);
    props.put(KafkaSpoutConfig.Consumer.KEY_DESERIALIZER, keyDeserializer);
    props.put(KafkaSpoutConfig.Consumer.VALUE_DESERIALIZER, valueDeserializer);
    props.put(KafkaSpoutConfig.Consumer.ENABLE_AUTO_COMMIT, String.valueOf(enableAutoCommit));
    KafkaSpoutStreams streams = new KafkaSpoutStreamsNamedTopics.Builder(
            new KafkaSpoutStream(getFields(klass), topic)).build();
    KafkaSpoutTuplesBuilder<String, String> tuplesBuilder = new KafkaSpoutTuplesBuilderNamedTopics.Builder<String, String>(
            new TuplesBuilder(topic, klass)).build();
    return new KafkaSpoutConfig.Builder<>(props, streams, tuplesBuilder);
}
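The returned builder is typically finished with build() and handed to a spout; a sketch assuming the storm-kafka-client 1.x API used above (the event class name is invented):

// Hypothetical usage: finish the builder and construct the spout.
KafkaSpoutConfig<String, String> spoutConfig =
        builder("localhost:9092", "events", EventBean.class).build();
KafkaSpout<String, String> spout = new KafkaSpout<>(spoutConfig);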
Example 7: main
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
public static void main(String[] args) {
    Properties config = new Properties();
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-starter-app");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    KStreamBuilder builder = new KStreamBuilder();
    KStream<String, String> kStream = builder.stream("streams-file-input");
    // do stuff
    kStream.to("streams-wordcount-output");
    KafkaStreams streams = new KafkaStreams(builder, config);
    streams.cleanUp(); // only do this in dev - not in prod
    streams.start();
    // print the topology
    System.out.println(streams.toString());
    // shutdown hook to close the streams application cleanly
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
Example 8: testAutoCommitManualAssignment
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
@Test
public void testAutoCommitManualAssignment() {
    ConsumerCoordinator coordinator = buildCoordinator(new Metrics(), assignors,
            ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, true, true);
    subscriptions.assignFromUser(singleton(t1p));
    subscriptions.seek(t1p, 100);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE)));
    time.sleep(autoCommitIntervalMs);
    coordinator.poll(time.milliseconds(), Long.MAX_VALUE);
    assertEquals(100L, subscriptions.committed(t1p).offset());
}
Example 9: buildStreamsConfig
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
private StreamsConfig buildStreamsConfig(String appId, final Map<String, Object> additionalProps) {
    Map<String, Object> props = new HashMap<>();
    // application.id is mandatory for StreamsConfig
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, appId);
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaBootstrapServers());
    props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, getStreamsCommitIntervalMs());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, getAutoOffsetReset());
    //TODO not clear whether this is needed or not. Plain Kafka doesn't need it, but Streams might;
    //leaving it in seems to cause zookeeper connection warnings in the tests, which seem to work fine without it
    // props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeperConfig.getQuorum());
    // add any additional props, overwriting any from above
    props.putAll(additionalProps);
    props.forEach((key, value) ->
            LOGGER.info("Setting Kafka Streams property {} for appId {} to [{}]", key, appId, value.toString())
    );
    return new StreamsConfig(props);
}
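A hypothetical call, layering one extra property over the defaults through additionalProps:

// Hypothetical usage: override the thread count, everything else comes from the defaults.
StreamsConfig config = buildStreamsConfig("my-stream-app",
        Collections.singletonMap(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2));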
Example 10: translateOldProperties
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
private void translateOldProperties(Context ctx) {
    // old topic parameter
    String topic = ctx.getString(KafkaSourceConstants.TOPIC);
    if (topic != null && !topic.isEmpty()) {
        subscriber = new TopicListSubscriber(topic);
        log.warn("{} is deprecated. Please use the parameter {}",
                KafkaSourceConstants.TOPIC, KafkaSourceConstants.TOPICS);
    }
    // old groupId parameter
    groupId = ctx.getString(KafkaSourceConstants.OLD_GROUP_ID);
    if (groupId != null && !groupId.isEmpty()) {
        log.warn("{} is deprecated. Please use the parameter {}",
                KafkaSourceConstants.OLD_GROUP_ID,
                KafkaSourceConstants.KAFKA_CONSUMER_PREFIX + ConsumerConfig.GROUP_ID_CONFIG);
    }
}
Example 11: testAutoCommitManualAssignmentCoordinatorUnknown
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
@Test
public void testAutoCommitManualAssignmentCoordinatorUnknown() {
    ConsumerCoordinator coordinator = buildCoordinator(new Metrics(), assignors,
            ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, true, true);
    subscriptions.assignFromUser(singleton(t1p));
    subscriptions.seek(t1p, 100);
    // no commit initially since the coordinator is unknown
    consumerClient.poll(0);
    time.sleep(autoCommitIntervalMs);
    consumerClient.poll(0);
    assertNull(subscriptions.committed(t1p));
    // now find the coordinator
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    // sleep only for the retry backoff
    time.sleep(retryBackoffMs);
    client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE)));
    coordinator.poll(time.milliseconds(), Long.MAX_VALUE);
    assertEquals(100L, subscriptions.committed(t1p).offset());
}
Example 12: testOldConfig
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
@Test
public void testOldConfig() throws Exception {
    Context context = new Context();
    context.put(BROKER_LIST_FLUME_KEY, testUtil.getKafkaServerUrl());
    context.put(GROUP_ID_FLUME, "flume-something");
    context.put(READ_SMALLEST_OFFSET, "true");
    context.put("topic", topic);
    final KafkaChannel channel = new KafkaChannel();
    Configurables.configure(channel, context);
    Properties consumerProps = channel.getConsumerProps();
    Properties producerProps = channel.getProducerProps();
    Assert.assertEquals(producerProps.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG),
            testUtil.getKafkaServerUrl());
    Assert.assertEquals(consumerProps.getProperty(ConsumerConfig.GROUP_ID_CONFIG),
            "flume-something");
    Assert.assertEquals(consumerProps.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG),
            "earliest");
}
Example 13: fetch
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public List<EntityCommand<?>> fetch(String txId) {
    List<EntityCommand<?>> transactionOperations = new ArrayList<>();
    Map<String, Object> consumerConfigs = (Map<String, Object>) configuration.get("kafkaConsumerConfiguration");
    // a random group id keeps this read isolated from other consumers' committed offsets
    consumerConfigs.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(consumerConfigs);
    kafkaConsumer.subscribe(Arrays.asList(txId));
    ConsumerRecords<String, String> records = kafkaConsumer.poll(kafkaConsumerPollTimeout);
    for (ConsumerRecord<String, String> record : records) {
        LOG.info("offset = {}, key = {}, value = {}", record.offset(), record.key(), record.value());
        try {
            transactionOperations.add(serializer.readFromString(record.value()));
        } catch (SerializationFailedException e) {
            LOG.error("Unable to deserialize [{}] because of: {}", record.value(), e.getMessage());
        }
    }
    kafkaConsumer.close();
    return transactionOperations;
}
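Because the method subscribes to a topic named after the transaction id itself, replaying a transaction is a single call (the id below is invented for illustration):

// Hypothetical usage: fetch every command recorded for one transaction.
List<EntityCommand<?>> commands = fetch("tx-42");
commands.forEach(cmd -> LOG.info("replaying {}", cmd));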
Author: jotorren · Project: microservices-transactions-tcc · Lines: 26 · Source: CompositeTransactionManagerKafkaImpl.java
Example 14: consumerFactory
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
public ConsumerFactory<String, String> consumerFactory() {
    Map<String, Object> properties = new HashMap<>();
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
    // the commit interval only takes effect when auto-commit is enabled
    properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
    properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, group);
    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    return new DefaultKafkaConsumerFactory<>(properties);
}
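In a Spring Kafka setup this factory is usually wired into a listener container factory so that @KafkaListener methods can use it; a minimal companion bean might look like:

// Hypothetical companion bean for @KafkaListener support.
@Bean
public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    return factory;
}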
Example 15: kafkaConsumerConfig
import org.apache.kafka.clients.consumer.ConsumerConfig; // import the required package/class
@Bean
public Map<String, Object> kafkaConsumerConfig() {
    final Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.GROUP_ID_CONFIG, this.kafkaGroupId);
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.kafkaHosts);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, this.kafkaAutoCommit);
    if (this.kafkaAutoCommit) {
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, this.kafkaAutoCommitInterval);
    }
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, this.kafkaSessionTimeout);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, this.kafkaKeyDeserializerClass);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, this.kafkaValueDeserializerClass);
    // application-level settings, not consumed by the Kafka client itself
    props.put(Constants.KAFKA_POLL_TIMEOUT, this.kafkaPollTimeout);
    props.put(Constants.KAFKA_SUBSCRIBED_TOPICS, this.kafkaSubscribedTopics);
    return props;
}
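Note that the map mixes raw Kafka consumer settings with application-level Constants entries; before handing it to a KafkaConsumer, the custom keys would need to be stripped. A sketch of that split (an assumption, not shown in the original project):

// Hypothetical consumer construction: drop the application-only keys first.
Map<String, Object> consumerProps = new HashMap<>(kafkaConsumerConfig());
consumerProps.remove(Constants.KAFKA_POLL_TIMEOUT);
consumerProps.remove(Constants.KAFKA_SUBSCRIBED_TOPICS);
KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(consumerProps);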