

Java KafkaStreams.cleanUp Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.streams.KafkaStreams.cleanUp. If you have been wondering what KafkaStreams.cleanUp does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.kafka.streams.KafkaStreams.


A total of 7 code examples of the KafkaStreams.cleanUp method are shown below, sorted by popularity by default.
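For orientation before the examples: KafkaStreams.cleanUp() deletes the application's local state directory (located under the state.dir config, keyed by application.id). It may only be called while the instance is not running, that is, before start() or after close(); calling it on a running instance throws an IllegalStateException. Below is a minimal sketch of the usual call pattern; the application id, broker address, and topic names are placeholders, not taken from any of the projects cited here.

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public class CleanUpSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "cleanup-sketch");    // hypothetical app id
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    StreamsBuilder builder = new StreamsBuilder();
    builder.<String, String>stream("input-topic").to("output-topic");    // placeholder topics

    KafkaStreams streams = new KafkaStreams(builder.build(), props);
    streams.cleanUp(); // wipe local state while stopped; throws IllegalStateException if running
    streams.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
  }
}

Most of the examples below follow exactly this pattern: cleanUp() immediately before start(), so each demo run begins from clean local state. As the comment in Example 2 notes, this is a development convenience; in production it forces a full state restore from Kafka on every restart.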

Example 1: main

import org.apache.kafka.streams.KafkaStreams; // import the package/class this method depends on
public static void main(String[] args) throws CertificateException, NoSuchAlgorithmException,
    KeyStoreException, IOException, URISyntaxException {
  Properties streamsConfig = new AggregatorConfig().getProperties();

  final StreamsBuilder builder = new StreamsBuilder();

  final KStream<Windowed<String>, String> words =
      builder.stream(String.format("%swords", HEROKU_KAFKA_PREFIX));

  words
      .groupBy((key, word) -> word)
      .windowedBy(TimeWindows.of(TimeUnit.SECONDS.toMillis(10)))
      .count(Materialized.as("windowed-counts"))
      .toStream()
      .process(PostgresSink::new);

  final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfig);

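  // delete this application's local state directory so each demo run starts from scratch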
  streams.cleanUp();
  streams.start();

  Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Developer ID: kissaten, Project: kafka-streams-on-heroku, Lines: 24, Source: Aggregator.java

Example 2: main

import org.apache.kafka.streams.KafkaStreams; // import the package/class this method depends on
public static void main(String[] args) {

        Properties config = new Properties();
        config.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-starter-app");
        config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        KStreamBuilder builder = new KStreamBuilder();

        KStream<String, String> kStream = builder.stream("streams-file-input");
        // do stuff
        kStream.to("streams-wordcount-output");

        KafkaStreams streams = new KafkaStreams(builder, config);
        streams.cleanUp(); // only do this in dev - not in prod
        streams.start();

        // print the topology
        System.out.println(streams.toString());

        // shutdown hook to correctly close the streams application
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));

    }
 
Developer ID: kaiwaehner, Project: kafka-streams-machine-learning-examples, Lines: 27, Source: StreamsStarterApp.java

Example 3: main

import org.apache.kafka.streams.KafkaStreams; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streaming-example");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1500);

//        To also consume data produced before this process started:
//        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
//        props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);

        KStreamBuilder builder = new KStreamBuilder();

        KStream<String, String> source = builder.stream("data-in");

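        // aggregate values per key over 60-second windows advancing every 10 seconds, using a custom serde for the aggregate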
        KStream<String, String> stats = source.groupByKey()
                .aggregate(KafkaStreamingStatistics::new,
                    (k, v, clusterstats) -> clusterstats.add(v),
                    TimeWindows.of(60000).advanceBy(10000),
                    Serdes.serdeFrom(new MySerde(), new MySerde()),
                    "data-store")
                .toStream((key, value) -> key.key().toString() + " " + key.window().start())
                .mapValues((job) -> job.computeAvgTime().toString());

        stats.to(Serdes.String(), Serdes.String(), "data-out");

        KafkaStreams streams = new KafkaStreams(builder, props);

        streams.cleanUp();
        streams.start();

        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
 
Developer ID: ebi-wp, Project: kafka-streams-api-websockets, Lines: 36, Source: KafkaStreamingMain.java

Example 4: main

import org.apache.kafka.streams.KafkaStreams; // import the package/class this method depends on
public static void main(String[] args) throws CertificateException, NoSuchAlgorithmException,
    KeyStoreException, IOException, URISyntaxException {
  Properties streamsConfig = new AnomalyDetectorConfig().getProperties();

  final StreamsBuilder builder = new StreamsBuilder();

  final KStream<String, String> loglines =
      builder.stream( String.format("%sloglines", HEROKU_KAFKA_PREFIX));

  KStream<Windowed<String>, Long> anomalies = loglines
      .filter((key, value) -> value.contains("login failed"))
      .selectKey((key, value) -> value.split("\\|")[0])
      .groupByKey()
      .windowedBy(TimeWindows.of(TimeUnit.SECONDS.toMillis(10)))
      .count(Materialized.as("windowed-counts"))
      .toStream();

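  // branch() sends each windowed count to the first predicate it matches:
  // more than one failed login in the window raises an alert, a single failure sends an email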
  @SuppressWarnings("unchecked")
  KStream<Windowed<String>, Long>[] branches = anomalies
      .branch(
          (key, value) -> value > 1,
          (key, value) -> value > 0
      );

  branches[0].process(AlertSink::new);
  branches[1].process(EmailSink::new);

  final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfig);

  streams.cleanUp();
  streams.start();

  Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Developer ID: kissaten, Project: kafka-streams-on-heroku, Lines: 35, Source: AnomalyDetector.java

Example 5: main

import org.apache.kafka.streams.KafkaStreams; // import the package/class this method depends on
public static void main(String[] args) throws CertificateException, NoSuchAlgorithmException,
    KeyStoreException, IOException, URISyntaxException {
  Properties streamsConfig = new TextProcessorConfig().getProperties();

  final Serde<String> stringSerde = Serdes.String();

  final StreamsBuilder builder = new StreamsBuilder();

  final KStream<String, String> textLines =
      builder.stream(String.format("%stextlines", HEROKU_KAFKA_PREFIX));

  final Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS);

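  // split each text line into lower-case words and publish them to the words topic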
  textLines
      .flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase())))
      .to(String.format("%swords", HEROKU_KAFKA_PREFIX), Produced.with(stringSerde, stringSerde));

  final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfig);

  streams.cleanUp();
  streams.start();

  Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Developer ID: kissaten, Project: kafka-streams-on-heroku, Lines: 25, Source: TextProcessor.java

Example 6: main

import org.apache.kafka.streams.KafkaStreams; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

        // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
        // Note: To re-run the demo, you need to use the offset reset tool:
        // https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Streams+Application+Reset+Tool
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // work-around for an issue around timing of creating internal topics
        // Fixed in Kafka 0.10.2.0
        // don't use in large production apps - this increases network load
        // props.put(CommonClientConfigs.METADATA_MAX_AGE_CONFIG, 500);

        KStreamBuilder builder = new KStreamBuilder();

        KStream<String, String> source = builder.stream("wordcount-input");


        final Pattern pattern = Pattern.compile("\\W+");
        KStream counts = source.flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase())))
                .map((key, value) -> new KeyValue<Object, Object>(value, value))
                .filter((key, value) -> (!value.equals("the")))
                .groupByKey()
                .count("CountStore").mapValues(value->Long.toString(value)).toStream();
        counts.to("wordcount-output");

        KafkaStreams streams = new KafkaStreams(builder, props);

        // This is for reset to work. Don't use in production - it causes the app to re-load the state from Kafka on every start
        streams.cleanUp();

        streams.start();

        // usually the stream application would be running forever,
        // in this example we just let it run for some time and stop since the input data is finite.
        Thread.sleep(5000L);

        streams.close();

    }
 
Developer ID: gwenshap, Project: kafka-streams-wordcount, Lines: 46, Source: WordCountExample.java

Example 7: main

import org.apache.kafka.streams.KafkaStreams; // import the package/class this method depends on
public static void main(String[] args) {
    Properties streamsConfiguration = new Properties();

    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.77.7:9092,192.168.77.7:9093,192.168.77.7:9094");

    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "simple-stream-demo");
    streamsConfiguration.put(StreamsConfig.CLIENT_ID_CONFIG, "simple-stream-demo-client");

    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 10 * 1000);
    streamsConfiguration.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);

    final StreamsBuilder builder = new StreamsBuilder();

    final KStream<String, String> textLines = builder.stream("test");

    textLines.filter((key, value) -> Integer.valueOf(value) % 2 == 0).print(Printed.toSysOut());

    final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);

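    // wipe local state on startup so the demo always begins fresh (avoid in production)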
    streams.cleanUp();
    streams.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));

}
 
Developer ID: bpark, Project: kafka-docker-demo, Lines: 29, Source: SimpleStreamsDemo.java


Note: The org.apache.kafka.streams.KafkaStreams.cleanUp method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by the community; copyright in the source code remains with the original authors, and distribution or use should follow each project's License. Please do not reproduce without permission.