This article collects typical usage examples of the Java class org.apache.kafka.streams.StreamsConfig, intended to help answer: what is StreamsConfig for, and how is it used?
The StreamsConfig class belongs to the org.apache.kafka.streams package. 15 code examples are shown below, sorted by popularity by default.
Example 1: buildStreamsConfig
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
private StreamsConfig buildStreamsConfig(String appId, final Map<String, Object> additionalProps) {
    Map<String, Object> props = new HashMap<>();
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaBootstrapServers());
    props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, getStreamsCommitIntervalMs());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, getAutoOffsetReset());
    //TODO It is not clear whether this is needed: plain Kafka doesn't need it, but Streams might.
    //Leaving it in seems to cause ZooKeeper connection warnings in the tests, which appear to work
    //fine without it.
    // props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeperConfig.getQuorum());
    // Add any additional props, overwriting any from above
    props.putAll(additionalProps);
    props.forEach((key, value) ->
            LOGGER.info("Setting Kafka Streams property {} for appId {} to [{}]", key, appId, value.toString())
    );
    return new StreamsConfig(props);
}
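A minimal usage sketch (not from the original source; the app id, the extra property, and the calling context are placeholders) showing how the helper above might be invoked:
// Hypothetical caller of buildStreamsConfig(...) above; values are illustrative.
Map<String, Object> additionalProps = new HashMap<>();
additionalProps.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2); // wins over any default, since putAll runs last
StreamsConfig streamsConfig = buildStreamsConfig("my-streams-app", additionalProps);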
Example 2: buildKafkaStreamsDefaults
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
private Properties buildKafkaStreamsDefaults() {
    Properties properties = new Properties();
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG,
            String.format("%stext-processor-app", HEROKU_KAFKA_PREFIX));
    properties.put(StreamsConfig.CLIENT_ID_CONFIG,
            String.format("%stext-processor-client", HEROKU_KAFKA_PREFIX));
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    properties.put(
            StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG,
            Serdes.String().getClass().getName());
    properties.put(
            StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG,
            Serdes.String().getClass().getName());
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000);
    properties.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG,
            WallclockTimestampExtractor.class);
    return properties;
}
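A sketch of how these defaults might be wired into a running application (an assumption, not from the original project: a Kafka 1.0+ client where StreamsBuilder replaces the older KStreamBuilder; the topology itself is elided):
// Sketch: combine the defaults above with a topology and start the app.
Properties props = buildKafkaStreamsDefaults();
StreamsBuilder builder = new StreamsBuilder();
// ... define the text-processor topology on `builder` here ...
KafkaStreams streams = new KafkaStreams(builder.build(), props);
streams.start();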
Example 3: shouldAddUserDefinedEndPointToSubscription
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
@Test
public void shouldAddUserDefinedEndPointToSubscription() throws Exception {
    final Properties properties = configProps();
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:8080");
    final StreamsConfig config = new StreamsConfig(properties);
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);
    builder.addSource("source", "input");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addSink("sink", "output", "processor");
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId,
            client1, uuid1, new Metrics(), Time.SYSTEM,
            new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
    final PartitionAssignor.Subscription subscription = partitionAssignor.subscription(Utils.mkSet("input"));
    final SubscriptionInfo subscriptionInfo = SubscriptionInfo.decode(subscription.userData());
    assertEquals("localhost:8080", subscriptionInfo.userEndPoint);
}
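For context, APPLICATION_SERVER_CONFIG advertises a host:port pair for interactive queries, and the endpoint set in this test is what peer instances later discover. A hedged sketch of that discovery (assuming a running KafkaStreams instance named streams; on older clients StreamsMetadata lives in org.apache.kafka.streams.state):
// Sketch: list the advertised endpoints of all instances of this application.
for (StreamsMetadata metadata : streams.allMetadata()) {
    System.out.println(metadata.host() + ":" + metadata.port());
}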
Example 4: main
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
public static void main(String[] args) throws Exception {
    Properties kafkaStreamProperties = new Properties();
    kafkaStreamProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-stream-wordCount");
    kafkaStreamProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    kafkaStreamProperties.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    kafkaStreamProperties.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    kafkaStreamProperties.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    Serde<String> stringSerde = Serdes.String();
    Serde<Long> longSerde = Serdes.Long();

    KStreamBuilder streamTopology = new KStreamBuilder();
    KStream<String, String> topicRecords = streamTopology.stream(stringSerde, stringSerde, "input");
    KStream<String, Long> wordCounts = topicRecords
            .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
            .map((key, word) -> new KeyValue<>(word, word))
            .countByKey("Count")
            .toStream();
    wordCounts.to(stringSerde, longSerde, "wordCount");

    KafkaStreams streamManager = new KafkaStreams(streamTopology, kafkaStreamProperties);
    streamManager.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streamManager::close));
}
Author: PacktPublishing, Project: Building-Data-Streaming-Applications-with-Apache-Kafka, Lines: 26, Source: KafkaStreamWordCount.java
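This example targets the pre-1.0 Streams API: KStreamBuilder, KEY_SERDE_CLASS_CONFIG/VALUE_SERDE_CLASS_CONFIG, ZOOKEEPER_CONNECT_CONFIG, and countByKey were all removed in later releases. The following is a sketch (not from the original project) of the same word-count topology on Kafka 2.1 or newer:
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.Produced;

public class KafkaStreamWordCountModern {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-stream-wordCount");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // no ZooKeeper setting anymore
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()))
               .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
               .groupBy((key, word) -> word, Grouped.with(Serdes.String(), Serdes.String()))
               .count() // replaces the removed countByKey("Count")
               .toStream()
               .to("wordCount", Produced.with(Serdes.String(), Serdes.Long()));

        KafkaStreams streams = new KafkaStreams(builder.build(), props);
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}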
Example 5: main
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
public static void main(String[] args) {
    Properties config = new Properties();
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-starter-app");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    KStreamBuilder builder = new KStreamBuilder();
    KStream<String, String> kStream = builder.stream("streams-file-input");
    // do stuff
    kStream.to("streams-wordcount-output");

    KafkaStreams streams = new KafkaStreams(builder, config);
    streams.cleanUp(); // only do this in dev - not in prod
    streams.start();

    // print the topology
    System.out.println(streams.toString());

    // shutdown hook to correctly close the streams application
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
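The hook above closes the application when the JVM exits. A common variant (a sketch, not from the original source) blocks the main thread on a latch so the program explicitly waits for shutdown:
// Requires java.util.concurrent.CountDownLatch; main must declare or handle InterruptedException.
final CountDownLatch latch = new CountDownLatch(1);
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    streams.close();
    latch.countDown();
}));
streams.start();
latch.await(); // block until the shutdown hook fires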
Example 6: init
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
@PostConstruct
public void init() {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-streams-repo");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:9092");
    props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "zookeeper:2181");
    props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, SpecificAvroSerde.class);
    props.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "http://schema-registry:8081");

    KStreamBuilder builder = new KStreamBuilder();
    builder.table(Serdes.Long(), Serdes.String(), "processed-tweets", STORE_NAME);

    streams = new KafkaStreams(builder, props);
    streams.start();
}
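Because builder.table(...) materializes the topic under STORE_NAME, the table can be queried interactively. A sketch (assumptions: the instance has finished rebalancing, and the client predates Kafka 2.5, where this store(String, QueryableStoreType) overload was deprecated):
// Needs org.apache.kafka.streams.state.QueryableStoreTypes and ReadOnlyKeyValueStore.
ReadOnlyKeyValueStore<Long, String> store =
        streams.store(STORE_NAME, QueryableStoreTypes.<Long, String>keyValueStore());
String tweetText = store.get(42L); // 42L is an illustrative tweet id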
Example 7: init
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
@PostConstruct
public void init() {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-streams-processor");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:9092");
    props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "zookeeper:2181");
    props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, SpecificAvroSerde.class);
    props.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "http://schema-registry:8081");

    KStreamBuilder builder = new KStreamBuilder();
    builder.stream("tweets")
            .map((k, v) -> {
                Tweet tweet = (Tweet) SpecificData.get().deepCopy(Tweet.getClassSchema(), v);
                return new KeyValue<>(tweet.getId(), tweet.getText().toString());
            })
            .to(Serdes.Long(), Serdes.String(), "processed-tweets");

    KafkaStreams streams = new KafkaStreams(builder, props);
    streams.start();
}
Example 8: StandbyTask
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
/**
 * Create {@link StandbyTask} with its assigned partitions
 *
 * @param id              the ID of this task
 * @param applicationId   the ID of the stream processing application
 * @param partitions      the collection of assigned {@link TopicPartition}
 * @param topology        the instance of {@link ProcessorTopology}
 * @param consumer        the instance of {@link Consumer}
 * @param changelogReader the instance of {@link ChangelogReader}
 * @param config          the {@link StreamsConfig} specified by the user
 * @param metrics         the {@link StreamsMetrics} created by the thread
 * @param stateDirectory  the {@link StateDirectory} created by the thread
 */
StandbyTask(final TaskId id,
            final String applicationId,
            final Collection<TopicPartition> partitions,
            final ProcessorTopology topology,
            final Consumer<byte[], byte[]> consumer,
            final ChangelogReader changelogReader,
            final StreamsConfig config,
            final StreamsMetrics metrics,
            final StateDirectory stateDirectory) {
    super(id, applicationId, partitions, topology, consumer, changelogReader, true, stateDirectory, null, config);

    // initialize the topology with its own context
    processorContext = new StandbyContextImpl(id, applicationId, config, stateMgr, metrics);

    log.debug("{} Initializing", logPrefix);
    initializeStateStores();
    processorContext.initialized();
    checkpointedOffsets = Collections.unmodifiableMap(stateMgr.checkpointed());
}
Example 9: GlobalStreamThread
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
public GlobalStreamThread(final ProcessorTopology topology,
                          final StreamsConfig config,
                          final Consumer<byte[], byte[]> globalConsumer,
                          final StateDirectory stateDirectory,
                          final Metrics metrics,
                          final Time time,
                          final String threadClientId) {
    super(threadClientId);
    this.time = time;
    this.config = config;
    this.topology = topology;
    this.consumer = globalConsumer;
    this.stateDirectory = stateDirectory;
    long cacheSizeBytes = Math.max(0, config.getLong(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG) /
            (config.getInt(StreamsConfig.NUM_STREAM_THREADS_CONFIG) + 1));
    this.streamsMetrics = new StreamsMetricsImpl(metrics, threadClientId, Collections.singletonMap("client-id", threadClientId));
    this.cache = new ThreadCache(threadClientId, cacheSizeBytes, streamsMetrics);
}
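The "+ 1" in the divisor gives the global thread an equal share of the configured cache alongside the regular stream threads. A worked example with illustrative values:
// CACHE_MAX_BYTES_BUFFERING_CONFIG = 10 MiB, NUM_STREAM_THREADS_CONFIG = 4:
long totalCacheBytes = 10L * 1024 * 1024;
int numStreamThreads = 4;
long cacheSizeBytes = Math.max(0, totalCacheBytes / (numStreamThreads + 1));
// => 2 MiB each: one share per stream thread, plus one for this global thread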
Example 10: getStreamsConfig
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
public static Properties getStreamsConfig(final String applicationId,
                                          final String bootstrapServers,
                                          final String keySerdeClassName,
                                          final String valueSerdeClassName,
                                          final Properties additional) {
    Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    streamsConfiguration.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "1000");
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, keySerdeClassName);
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, valueSerdeClassName);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    streamsConfiguration.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100);
    streamsConfiguration.putAll(additional);
    return streamsConfiguration;
}
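A usage sketch for the helper above (the app id, servers, and override are hypothetical); any key set in `additional` wins because putAll runs last:
Properties additional = new Properties();
additional.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2); // illustrative override
Properties config = getStreamsConfig(
        "my-test-app",
        "localhost:9092",
        Serdes.String().getClass().getName(),
        Serdes.String().getClass().getName(),
        additional);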
Example 11: commitInvalidOffsets
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
private void commitInvalidOffsets() {
    final KafkaConsumer consumer = new KafkaConsumer(TestUtils.consumerConfig(
            CLUSTER.bootstrapServers(),
            streamsConfiguration.getProperty(StreamsConfig.APPLICATION_ID_CONFIG),
            StringDeserializer.class,
            StringDeserializer.class));

    final Map<TopicPartition, OffsetAndMetadata> invalidOffsets = new HashMap<>();
    invalidOffsets.put(new TopicPartition(TOPIC_1_2, 0), new OffsetAndMetadata(5, null));
    invalidOffsets.put(new TopicPartition(TOPIC_2_2, 0), new OffsetAndMetadata(5, null));
    invalidOffsets.put(new TopicPartition(TOPIC_A_2, 0), new OffsetAndMetadata(5, null));
    invalidOffsets.put(new TopicPartition(TOPIC_C_2, 0), new OffsetAndMetadata(5, null));
    invalidOffsets.put(new TopicPartition(TOPIC_Y_2, 0), new OffsetAndMetadata(5, null));
    invalidOffsets.put(new TopicPartition(TOPIC_Z_2, 0), new OffsetAndMetadata(5, null));

    consumer.commitSync(invalidOffsets);
    consumer.close();
}
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 20, Source: KStreamsFineGrainedAutoResetIntegrationTest.java
Example 12: buildKafkaStreamsDefaults
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
private Properties buildKafkaStreamsDefaults() {
    Properties properties = new Properties();
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG,
            String.format("%sanomaly-detector-app", HEROKU_KAFKA_PREFIX));
    properties.put(StreamsConfig.CLIENT_ID_CONFIG,
            String.format("%sanomaly-detector-client", HEROKU_KAFKA_PREFIX));
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    properties.put(
            StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG,
            Serdes.String().getClass().getName());
    properties.put(
            StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG,
            Serdes.String().getClass().getName());
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000);
    properties.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG,
            WallclockTimestampExtractor.class);
    return properties;
}
Example 13: before
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
@Before
public void before() throws InterruptedException {
    testNo++;
    String applicationId = "kstream-repartition-join-test-" + testNo;
    builder = new KStreamBuilder();
    createTopics();

    streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, COMMIT_INTERVAL_MS);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    streamsConfiguration.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 3);
    streamsConfiguration.put(IntegrationTestUtils.INTERNAL_LEAVE_GROUP_ON_CLOSE, true);

    streamOne = builder.stream(Serdes.Long(), Serdes.Integer(), streamOneInput);
    streamTwo = builder.stream(Serdes.Integer(), Serdes.String(), streamTwoInput);
    streamFour = builder.stream(Serdes.Integer(), Serdes.String(), streamFourInput);

    keyMapper = MockKeyValueMapper.SelectValueKeyValueMapper();
}
Example 14: verifyRepartitionOnJoinOperations
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
private void verifyRepartitionOnJoinOperations(final int cacheSizeBytes) throws Exception {
    streamsConfiguration.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, cacheSizeBytes);
    produceMessages();
    final ExpectedOutputOnTopic mapOne = mapStreamOneAndJoin();
    final ExpectedOutputOnTopic mapBoth = mapBothStreamsAndJoin();
    final ExpectedOutputOnTopic mapMapJoin = mapMapJoin();
    final ExpectedOutputOnTopic selectKeyJoin = selectKeyAndJoin();
    final ExpectedOutputOnTopic flatMapJoin = flatMapJoin();
    final ExpectedOutputOnTopic mapRhs = joinMappedRhsStream();
    final ExpectedOutputOnTopic mapJoinJoin = joinTwoMappedStreamsOneThatHasBeenPreviouslyJoined();
    final ExpectedOutputOnTopic leftJoin = mapBothStreamsAndLeftJoin();

    startStreams();

    verifyCorrectOutput(mapOne);
    verifyCorrectOutput(mapBoth);
    verifyCorrectOutput(mapMapJoin);
    verifyCorrectOutput(selectKeyJoin);
    verifyCorrectOutput(flatMapJoin);
    verifyCorrectOutput(mapRhs);
    verifyCorrectOutput(mapJoinJoin);
    verifyCorrectOutput(leftJoin);
}
Example 15: before
import org.apache.kafka.streams.StreamsConfig; // import the required package/class
@Before
public void before() throws InterruptedException {
    testNo++;
    userClicksTopic = "user-clicks-" + testNo;
    userRegionsTopic = "user-regions-" + testNo;
    userRegionsStoreName = "user-regions-store-name-" + testNo;
    outputTopic = "output-topic-" + testNo;
    CLUSTER.createTopics(userClicksTopic, userRegionsTopic, outputTopic);

    streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "join-integration-test-" + testNo);
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, COMMIT_INTERVAL_MS);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG,
            TestUtils.tempDirectory().getPath());
    streamsConfiguration.put(IntegrationTestUtils.INTERNAL_LEAVE_GROUP_ON_CLOSE, true);
}
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 22, Source: KStreamKTableJoinIntegrationTest.java
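The configuration above backs a KStream-KTable join test. A sketch of the kind of topology such a test drives (not the test's actual topology; the value types and join logic are illustrative, written against the same pre-1.0 KStreamBuilder API used elsewhere in this test class):
// Join each click event against the latest region for its user key.
KStreamBuilder builder = new KStreamBuilder();
KStream<String, String> userClicks = builder.stream(Serdes.String(), Serdes.String(), userClicksTopic);
KTable<String, String> userRegions =
        builder.table(Serdes.String(), Serdes.String(), userRegionsTopic, userRegionsStoreName);
userClicks
        .leftJoin(userRegions, (click, region) -> (region == null ? "UNKNOWN" : region) + "/" + click)
        .to(Serdes.String(), Serdes.String(), outputTopic);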