This article collects and summarizes typical usage examples of the Java class org.apache.kafka.streams.kstream.TimeWindows. If you are unsure what the TimeWindows class is for or how to use it, the curated examples below should help.
The TimeWindows class belongs to the org.apache.kafka.streams.kstream package. A total of 15 code examples are shown below, ordered by popularity by default.
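Before diving in, here is a minimal sketch of typical TimeWindows usage on the post-1.0 DSL, counting records per key in tumbling one-minute windows. The topic and store names here are placeholders, not taken from the examples below:
import java.time.Duration;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Printed;
import org.apache.kafka.streams.kstream.TimeWindows;

StreamsBuilder builder = new StreamsBuilder();
builder.<String, String>stream("input-topic")            // placeholder topic name
    .groupByKey()
    .windowedBy(TimeWindows.of(Duration.ofMinutes(1)))   // tumbling 1-minute windows
    .count(Materialized.as("minute-counts"))             // placeholder store name
    .toStream()                                          // KStream<Windowed<String>, Long>
    .print(Printed.toSysOut());                          // toString-based output avoids windowed-serde setup
Note that many of the examples below use the older TimeWindows.of(long sizeMs) overload or the pre-1.0 count(Windows, String) style; Kafka 2.1 added Duration-based overloads, and Kafka 3.0 deprecated of() in favor of ofSizeWithNoGrace / ofSizeAndGrace.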
Example 1: main
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
public static void main(String[] args) throws CertificateException, NoSuchAlgorithmException,
    KeyStoreException, IOException, URISyntaxException {
  Properties streamsConfig = new AggregatorConfig().getProperties();

  final StreamsBuilder builder = new StreamsBuilder();
  final KStream<Windowed<String>, String> words =
      builder.stream(String.format("%swords", HEROKU_KAFKA_PREFIX));

  words
      .groupBy((key, word) -> word)
      .windowedBy(TimeWindows.of(TimeUnit.SECONDS.toMillis(10)))
      .count(Materialized.as("windowed-counts"))
      .toStream()
      .process(PostgresSink::new);

  final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfig);
  streams.cleanUp();
  streams.start();
  Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
Example 2: main
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
public static void main(String[] args) {
  StreamsBuilder builder = new StreamsBuilder();
  KStream<Long, byte[]> order = builder.stream(MallConstants.ORDER_COMMITED_TOPIC);

  // Extract the user id from the serialized order so orders can be grouped per user.
  KeyValueMapper<Long, byte[], Long> userSelector = new KeyValueMapper<Long, byte[], Long>() {
    @Override
    public Long apply(Long key, byte[] value) {
      Order o = JsonSerializable.decode(value, Order.class);
      return null == o ? null : o.getUserId();
    }
  };

  long sizeMs = 30L * 24 * 60 * 60 * 1000; // 30 days; the long literal avoids int overflow
  order.groupBy(userSelector, Serialized.with(new LongSerde(), new Serdes.ByteArraySerde()))
      .windowedBy(TimeWindows.of(sizeMs))
      .aggregate(() -> 0L, (aggKey, value, aggregate) -> aggregate + 1L)
      .toStream()
      .to(OrderConstants.USER_ORDER_COUNT_TOPIC);
}
Example 3: createCountStream
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
/**
 * Creates a typical word count topology.
 */
private KafkaStreams createCountStream(final String inputTopic, final String outputTopic, final Properties streamsConfiguration) {
  final KStreamBuilder builder = new KStreamBuilder();
  final Serde<String> stringSerde = Serdes.String();
  final KStream<String, String> textLines = builder.stream(stringSerde, stringSerde, inputTopic);

  final KGroupedStream<String, String> groupedByWord = textLines
      .flatMapValues(new ValueMapper<String, Iterable<String>>() {
        @Override
        public Iterable<String> apply(final String value) {
          return Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+"));
        }
      })
      .groupBy(MockKeyValueMapper.<String, String>SelectValueMapper());

  // Create a state store for the all-time word count
  groupedByWord.count("word-count-store-" + inputTopic).to(Serdes.String(), Serdes.Long(), outputTopic);

  // Create a windowed state store that contains the word count for every 1 minute
  groupedByWord.count(TimeWindows.of(WINDOW_SIZE), "windowed-word-count-store-" + inputTopic);

  return new KafkaStreams(builder, streamsConfiguration);
}
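Example 3 uses the pre-1.0 KStreamBuilder API, whose count(Windows, String) and to(Serde, Serde, String) overloads were removed in later releases. A rough sketch of the same windowed count on the current StreamsBuilder API, assuming the same store naming and that WINDOW_SIZE was one minute:
builder.<String, String>stream(inputTopic)
    .flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+")))
    .groupBy((key, word) -> word)
    .windowedBy(TimeWindows.of(Duration.ofMinutes(1)))   // assumption: WINDOW_SIZE = 1 minute
    .count(Materialized.as("windowed-word-count-store-" + inputTopic));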
Example 4: start
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
@Override
public void start(Future<Void> startFuture) throws Exception {
  Properties props = new Properties();
  props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kiqr");
  props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
  props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class);
  props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.LongSerde.class);
  props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, "0");

  KStreamBuilder builder = new KStreamBuilder();
  KTable<String, Long> table = builder.table(Serdes.String(), Serdes.Long(), "visits", "visitStore");

  // TimeWindows.of takes the window size in milliseconds, so this is a 60 ms window.
  KTable<Windowed<String>, Long> windowedCount = table.toStream().groupByKey().count(TimeWindows.of(60), "visitCount");

  vertx.deployVerticle(RestKiqrServerVerticle.Builder.serverBuilder(builder, props).withPort(2901).build(), res -> {
    if (res.succeeded()) {
      startFuture.complete();
    } else {
      startFuture.fail(res.cause());
    }
  });
}
Example 5: configure
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
/**
 * {@inheritDoc}
 * @see com.heliosapm.streams.metrics.router.nodes.MetricStreamNode#configure(org.apache.kafka.streams.kstream.KStreamBuilder)
 */
@Override
public void configure(final KStreamBuilder streamBuilder) {
  rateDivisor = TimeUnit.MILLISECONDS.toSeconds(windowDuration);
  KStream<String, StreamedMetric> rawMetrics = streamBuilder.stream(HeliosSerdes.STRING_SERDE, HeliosSerdes.STREAMED_METRIC_SERDE, sourceTopics);
  window = rawMetrics
      .aggregateByKey(new SMAggInit(), new SMAgg(), TimeWindows.of(STORE_NAME, windowDuration), HeliosSerdes.STRING_SERDE, HeliosSerdes.STREAMED_METRIC_VALUE_SERDE);
  window.toStream()
      .flatMap(new KeyValueMapper<Windowed<String>, StreamedMetricValue, Iterable<KeyValue<String, StreamedMetricValue>>>() {
        // Tracks the most recent value seen per metric key and window.
        protected final NonBlockingHashMap<String, NonBlockingHashMap<Window, StreamedMetricValue>> lastEntry = newlastEntry();
        @Override
        public Iterable<KeyValue<String, StreamedMetricValue>> apply(final Windowed<String> key, final StreamedMetricValue value) {
          NonBlockingHashMap<Window, StreamedMetricValue> newMap = lastEntry.putIfAbsent(key.key(), NBHM_PLACEHOLDER);
          if (newMap == null || newMap == NBHM_PLACEHOLDER) {
            newMap = new NonBlockingHashMap<Window, StreamedMetricValue>();
            lastEntry.replace(key.key(), newMap);
          }
          newMap.put(key.window(), value);
          return Collections.singletonList(new KeyValue<String, StreamedMetricValue>(key.key(), value));
        }
      });
  if (System.getProperties().containsKey("streams.debug")) {
    streamBuilder.stream(HeliosSerdes.STRING_SERDE, HeliosSerdes.STREAMED_METRIC_SERDE, sinkTopic)
        .foreach((k, v) -> System.err.println("[" + new Date() + "] WWWW: [" + new Date(v.getTimestamp()) + "] [" + v.metricKey() + "]:" + v.forValue(0D).getValueNumber()));
  }
}
Example 6: shouldCreateTumblingWindowAggregate
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
@Test
public void shouldCreateTumblingWindowAggregate() {
  final KGroupedStream stream = EasyMock.createNiceMock(KGroupedStream.class);
  final TimeWindowedKStream windowedKStream = EasyMock.createNiceMock(TimeWindowedKStream.class);
  final UdafAggregator aggregator = EasyMock.createNiceMock(UdafAggregator.class);
  final TumblingWindowExpression windowExpression = new TumblingWindowExpression(10, TimeUnit.SECONDS);
  final Initializer initializer = () -> 0;
  final Materialized<String, GenericRow, WindowStore<Bytes, byte[]>> store = Materialized.as("store");

  EasyMock.expect(stream.windowedBy(TimeWindows.of(10000L))).andReturn(windowedKStream);
  EasyMock.expect(windowedKStream.aggregate(same(initializer), same(aggregator), same(store))).andReturn(null);
  EasyMock.replay(stream, windowedKStream);

  windowExpression.applyAggregate(stream, initializer, aggregator, store);

  EasyMock.verify(stream, windowedKStream);
}
Example 7: shouldCreateHoppingWindowAggregate
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
@Test
public void shouldCreateHoppingWindowAggregate() {
  final KGroupedStream stream = EasyMock.createNiceMock(KGroupedStream.class);
  final TimeWindowedKStream windowedKStream = EasyMock.createNiceMock(TimeWindowedKStream.class);
  final UdafAggregator aggregator = EasyMock.createNiceMock(UdafAggregator.class);
  final HoppingWindowExpression windowExpression = new HoppingWindowExpression(10, TimeUnit.SECONDS, 4, TimeUnit.MILLISECONDS);
  final Initializer initializer = () -> 0;
  final Materialized<String, GenericRow, WindowStore<Bytes, byte[]>> store = Materialized.as("store");

  EasyMock.expect(stream.windowedBy(TimeWindows.of(10000L).advanceBy(4L))).andReturn(windowedKStream);
  EasyMock.expect(windowedKStream.aggregate(same(initializer), same(aggregator), same(store))).andReturn(null);
  EasyMock.replay(stream, windowedKStream);

  windowExpression.applyAggregate(stream, initializer, aggregator, store);

  EasyMock.verify(stream, windowedKStream);
}
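Together, Examples 6 and 7 show the two TimeWindows shapes: TimeWindows.of(size) alone produces tumbling (non-overlapping) windows, because the advance interval defaults to the window size, while of(size).advanceBy(advance) with advance < size produces hopping windows that overlap. In the hopping test above, 10-second windows advance every 4 milliseconds, so each record belongs to many windows at once.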
Example 8: main
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
public static void main(String[] args) throws Exception {
  Properties props = new Properties();
  props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streaming-example");
  props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
  props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
  props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
  props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1500);
  // To get data produced before the process started:
  // props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  // props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);

  KStreamBuilder builder = new KStreamBuilder();
  KStream<String, String> source = builder.stream("data-in");
  KStream<String, String> stats = source.groupByKey()
      .aggregate(KafkaStreamingStatistics::new,
          (k, v, clusterstats) -> clusterstats.add(v),
          TimeWindows.of(60000).advanceBy(10000),
          Serdes.serdeFrom(new MySerde(), new MySerde()),
          "data-store")
      .toStream((key, value) -> key.key().toString() + " " + key.window().start())
      .mapValues((job) -> job.computeAvgTime().toString());
  stats.to(Serdes.String(), Serdes.String(), "data-out");

  KafkaStreams streams = new KafkaStreams(builder, props);
  streams.cleanUp();
  streams.start();
  Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
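Example 8 relies on the pre-1.0 aggregate(Initializer, Aggregator, Windows, Serde, String) overload. On the current API the window moves to windowedBy and the store name and value serde move into Materialized; a sketch of the equivalent call chain, reusing MySerde from the example (imports for Duration, WindowStore, and Bytes omitted):
source.groupByKey()
    .windowedBy(TimeWindows.of(Duration.ofSeconds(60)).advanceBy(Duration.ofSeconds(10)))
    .aggregate(KafkaStreamingStatistics::new,
        (k, v, clusterstats) -> clusterstats.add(v),
        Materialized.<String, KafkaStreamingStatistics, WindowStore<Bytes, byte[]>>as("data-store")
            .withValueSerde(Serdes.serdeFrom(new MySerde(), new MySerde())));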
Example 9: shouldCountWindowed
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
@Test
public void shouldCountWindowed() throws Exception {
  final List<KeyValue<Windowed<String>, Long>> results = new ArrayList<>();
  groupedStream.count(
      TimeWindows.of(500L),
      "aggregate-by-key-windowed")
      .foreach(new ForeachAction<Windowed<String>, Long>() {
        @Override
        public void apply(final Windowed<String> key, final Long value) {
          results.add(KeyValue.pair(key, value));
        }
      });

  doCountWindowed(results);
}
Example 10: shouldCountWindowedWithInternalStoreName
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
@Test
public void shouldCountWindowedWithInternalStoreName() throws Exception {
  final List<KeyValue<Windowed<String>, Long>> results = new ArrayList<>();
  groupedStream.count(
      TimeWindows.of(500L))
      .foreach(new ForeachAction<Windowed<String>, Long>() {
        @Override
        public void apply(final Windowed<String> key, final Long value) {
          results.add(KeyValue.pair(key, value));
        }
      });

  doCountWindowed(results);
}
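The only difference from Example 9 is the omitted store name: Kafka Streams then generates an internal name for the windowed count store. The resulting stream is identical, but an internally named store is not intended to be looked up by name through interactive queries.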
Example 11: main
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
public static void main(String[] args) throws CertificateException, NoSuchAlgorithmException,
    KeyStoreException, IOException, URISyntaxException {
  Properties streamsConfig = new AnomalyDetectorConfig().getProperties();

  final StreamsBuilder builder = new StreamsBuilder();
  final KStream<String, String> loglines =
      builder.stream(String.format("%sloglines", HEROKU_KAFKA_PREFIX));

  KStream<Windowed<String>, Long> anomalies = loglines
      .filter((key, value) -> value.contains("login failed"))
      .selectKey((key, value) -> value.split("\\|")[0])
      .groupByKey()
      .windowedBy(TimeWindows.of(TimeUnit.SECONDS.toMillis(10)))
      .count(Materialized.as("windowed-counts"))
      .toStream();

  @SuppressWarnings("unchecked")
  KStream<Windowed<String>, Long>[] branches = anomalies
      .branch(
          (key, value) -> value > 1,
          (key, value) -> value > 0
      );
  branches[0].process(AlertSink::new);
  branches[1].process(EmailSink::new);

  final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfig);
  streams.cleanUp();
  streams.start();
  Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
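KStream.branch evaluates its predicates in order and routes each record to the first one that matches, so here windows with more than one failed login go to the AlertSink, and windows with exactly one fall through to the EmailSink.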
Example 12: windowStateTopology
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
public static KStreamBuilder windowStateTopology(KStreamBuilder builder) {
  builder.stream(stringSerde, integerSerde, WINDOW_TOPIC)
      .groupByKey(stringSerde, integerSerde)
      .count(TimeWindows.of(1), TEST_STORE);
  return builder;
}
Example 13: configuredTimeWindow
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
@Bean
@ConditionalOnProperty("spring.cloud.stream.kstream.timeWindow.length")
public TimeWindows configuredTimeWindow(KStreamApplicationSupportProperties processorProperties) {
  return processorProperties.getTimeWindow().getAdvanceBy() > 0
      ? TimeWindows.of(processorProperties.getTimeWindow().getLength()).advanceBy(processorProperties.getTimeWindow().getAdvanceBy())
      : TimeWindows.of(processorProperties.getTimeWindow().getLength());
}
Developer: spring-cloud, project: spring-cloud-stream-binder-kafka, lines of code: 8, source file: KStreamApplicationSupportAutoConfiguration.java
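The bean above is only registered when the window length property is present. A hypothetical application.properties that would activate it might look as follows; the advanceBy key is an assumption inferred from the getter names, not taken from the project's documentation:
# hypothetical configuration; key names inferred from the bean above
spring.cloud.stream.kstream.timeWindow.length=60000
spring.cloud.stream.kstream.timeWindow.advanceBy=10000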
Example 14: process
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
@StreamListener("input")
@SendTo("output")
public KStream<Integer, String> process(KStream<Object, Product> input) {
return input
.filter((key, product) -> product.getId() == 123)
.map((key, value) -> new KeyValue<>(value, value))
.groupByKey(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class))
.count(TimeWindows.of(5000), "id-count-store")
.toStream()
.map((key, value) -> new KeyValue<>(key.key().id, "Count for product with ID 123: " + value));
}
Developer: spring-cloud, project: spring-cloud-stream-binder-kafka, lines of code: 13, source file: KstreamBinderPojoInputStringOutputIntegrationTests.java
Example 15: process
import org.apache.kafka.streams.kstream.TimeWindows; // import the required package/class
@StreamListener("input")
@SendTo("output")
public KStream<Integer, Long> process(KStream<Object, Product> input) {
return input
.filter((key, product) -> product.getId() == 123)
.map((key, value) -> new KeyValue<>(value, value))
.groupByKey(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class))
.count(TimeWindows.of(5000), "id-count-store")
.toStream()
.map((key, value) -> new KeyValue<>(key.key().id, value));
}
Developer: spring-cloud, project: spring-cloud-stream-binder-kafka, lines of code: 12, source file: KStreamBinderPojoInputAndPrimitiveTypeOutputTests.java