

Java Serdes Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.common.serialization.Serdes. If you are struggling with questions such as: What exactly does the Java Serdes class do? How is it used? What do real examples look like? Then the curated class code examples here should help.


The Serdes class belongs to the org.apache.kafka.common.serialization package. The sections below present 15 code examples of the class, sorted by popularity by default.
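Before diving into the examples, here is a minimal sketch of what a Serde is: a pairing of a Serializer and a Deserializer for one type. This snippet is illustrative only (it is not taken from any of the projects below; the class and topic names are made up) and round-trips a String through the built-in Serdes.String() serde:

import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;

public class SerdesRoundTrip {
    public static void main(String[] args) {
        // Serdes.String() is a factory method returning a ready-made Serde<String>
        Serde<String> stringSerde = Serdes.String();

        // serializer() and deserializer() expose the two halves of the serde;
        // both take the topic name, which serdes may use for per-topic configuration
        byte[] bytes = stringSerde.serializer().serialize("demo-topic", "hello serdes"); // "demo-topic" is a made-up name
        String restored = stringSerde.deserializer().deserialize("demo-topic", bytes);

        System.out.println(restored); // prints "hello serdes"
    }
}

The examples below use the same factory methods (Serdes.String(), Serdes.Long(), Serdes.Integer()) to configure Kafka Streams topologies, state stores, and tests.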

Example 1: main

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-pipe");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    KStreamBuilder builder = new KStreamBuilder();

    builder.stream("streams-file-input").to("streams-pipe-output");

    KafkaStreams streams = new KafkaStreams(builder, props);
    streams.start();

    // Usually the stream application would run forever; in this example we
    // let it run for some time and then stop, since the input data is finite.
    Thread.sleep(5000L);

    streams.close();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 24, Source: PipeDemo.java

Example 2: createIterator

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
private MergedSortedCacheWindowStoreKeyValueIterator<String, String> createIterator(
    final Iterator<KeyValue<Windowed<Bytes>, byte[]>> storeKvs,
    final Iterator<KeyValue<Bytes, LRUCacheEntry>> cacheKvs
) {
    final DelegatingPeekingKeyValueIterator<Windowed<Bytes>, byte[]> storeIterator
        = new DelegatingPeekingKeyValueIterator<>("store", new KeyValueIteratorStub<>(storeKvs));

    final PeekingKeyValueIterator<Bytes, LRUCacheEntry> cacheIterator
        = new DelegatingPeekingKeyValueIterator<>("cache", new KeyValueIteratorStub<>(cacheKvs));
    return new MergedSortedCacheWindowStoreKeyValueIterator<>(
        cacheIterator,
        storeIterator,
        new StateSerdes<>("name", Serdes.String(), Serdes.String()),
        WINDOW_SIZE,
        SINGLE_SEGMENT_CACHE_FUNCTION
    );
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 18, Source: MergedSortedCacheWrappedWindowStoreKeyValueIteratorTest.java

Example 3: defaultSerdeShouldBeConfigured

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
@Test
public void defaultSerdeShouldBeConfigured() {
    final Map<String, Object> serializerConfigs = new HashMap<>();
    serializerConfigs.put("key.serializer.encoding", "UTF8");
    serializerConfigs.put("value.serializer.encoding", "UTF-16");
    final Serializer<String> serializer = Serdes.String().serializer();

    final String str = "my string for testing";
    final String topic = "my topic";

    serializer.configure(serializerConfigs, true);
    assertEquals("Should get the original string after serialization and deserialization with the configured encoding",
            str, streamsConfig.defaultKeySerde().deserializer().deserialize(topic, serializer.serialize(topic, str)));

    serializer.configure(serializerConfigs, false);
    assertEquals("Should get the original string after serialization and deserialization with the configured encoding",
            str, streamsConfig.defaultValueSerde().deserializer().deserialize(topic, serializer.serialize(topic, str)));
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 19, Source: StreamsConfigTest.java

Example 4: main

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
public static void main(String[] args) throws Exception {
    Properties kafkaStreamProperties = new Properties();
    kafkaStreamProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-stream-wordCount");
    kafkaStreamProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    kafkaStreamProperties.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    kafkaStreamProperties.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    kafkaStreamProperties.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    Serde<String> stringSerde = Serdes.String();
    Serde<Long> longSerde = Serdes.Long();

    KStreamBuilder streamTopology = new KStreamBuilder();
    KStream<String, String> topicRecords = streamTopology.stream(stringSerde, stringSerde, "input");
    KStream<String, Long> wordCounts = topicRecords
            .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
            .map((key, word) -> new KeyValue<>(word, word))
            .countByKey("Count")
            .toStream();
    wordCounts.to(stringSerde, longSerde, "wordCount");

    KafkaStreams streamManager = new KafkaStreams(streamTopology, kafkaStreamProperties);
    streamManager.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streamManager::close));
}
 
Developer: PacktPublishing, Project: Building-Data-Streaming-Applications-with-Apache-Kafka, Lines: 26, Source: KafkaStreamWordCount.java

Example 5: doTestKTable

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
private void doTestKTable(final KStreamBuilder builder, final KTable<String, Integer> table2,
                          final KTable<String, Integer> table3, final String topic1) {
    MockProcessorSupplier<String, Integer> proc2 = new MockProcessorSupplier<>();
    MockProcessorSupplier<String, Integer> proc3 = new MockProcessorSupplier<>();
    table2.toStream().process(proc2);
    table3.toStream().process(proc3);

    driver = new KStreamTestDriver(builder, stateDir, Serdes.String(), Serdes.Integer());

    driver.process(topic1, "A", 1);
    driver.process(topic1, "B", 2);
    driver.process(topic1, "C", 3);
    driver.process(topic1, "D", 4);
    driver.flushState();
    driver.process(topic1, "A", null);
    driver.process(topic1, "B", null);
    driver.flushState();

    proc2.checkAndClearProcessResult("A:null", "B:2", "C:null", "D:4", "A:null", "B:null");
    proc3.checkAndClearProcessResult("A:1", "B:null", "C:3", "D:null", "A:null", "B:null");
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 22, Source: KTableFilterTest.java

Example 6: successfulWindowQuery

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
@Test
public void successfulWindowQuery() throws Exception {
    GenericBlockingKiqrClient client = new GenericBlockingRestKiqrClientImpl("localhost", port);

    Map<Long, Long> result = client.getWindow("window", String.class, "key1", Long.class, Serdes.String(), Serdes.Long(), 0L, 100001L);
    assertThat(result.entrySet(), hasSize(2));
    assertThat(result, hasEntry(0L, 2L));
    assertThat(result, hasEntry(100000L, 1L));

    Map<Long, Long> resultKey2 = client.getWindow("window", String.class, "key2", Long.class, Serdes.String(), Serdes.Long(), 0L, 100001L);
    assertThat(resultKey2.entrySet(), hasSize(2));
    assertThat(resultKey2, hasEntry(0L, 1L));
    assertThat(resultKey2, hasEntry(100000L, 2L));

    Map<Long, Long> resultKey3 = client.getWindow("window", String.class, "key3", Long.class, Serdes.String(), Serdes.Long(), 0L, 100001L);
    assertThat(resultKey3.entrySet(), hasSize(3));
    assertThat(resultKey3, hasEntry(0L, 1L));
    assertThat(resultKey3, hasEntry(50000L, 1L));
    assertThat(resultKey3, hasEntry(100000L, 1L));

    Map<Long, Long> resultKey4 = client.getWindow("window", String.class, "key4", Long.class, Serdes.String(), Serdes.Long(), 0L, 100001L);
    assertThat(resultKey4.entrySet(), hasSize(1));
    assertThat(resultKey4, hasEntry(0L, 3L));
}
 
Developer: ftrossbach, Project: kiqr, Lines: 28, Source: GenericClientDistributedIntegrationITCase.java

Example 7: doNotChangeOutputOrder

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
@SuppressWarnings("unchecked")
@Test
public void doNotChangeOutputOrder() throws EmptyOutputSizeException, NoTopologyException, EmptyInputException {
    List<Message<Integer, Integer>> input = of(1, 2, 3, 4, 5, 6, 7)
        .map(i -> new Message<>(i, i))
        .collect(toList());

    Serde<Integer> integerSerde = Serdes.Integer();

    List<Message<Integer, Integer>> output = Mockafka
        .builder()
        .topology(builder ->
            builder.stream(integerSerde, integerSerde, "numbersTopic")
                .filter((key, value) -> value % 2 == 1)
                .to(integerSerde, integerSerde, "oddNumbersTopic")
        )
        .input("numbersTopic", integerSerde, integerSerde, input.toArray(new Message[]{}))
        .output("oddNumbersTopic", integerSerde, integerSerde, 4);

    List<Message<Integer, Integer>> expected = Arrays.asList(new Message<>(1, 1), new Message<>(3, 3), new Message<>(5, 5), new Message<>(7, 7));
    assertEquals(4, output.size());
    assertEquals(expected, output);
}
 
Developer: carlosmenezes, Project: mockafka, Lines: 24, Source: MockafkaBuilderTest.java

Example 8: shouldGetInstanceWithKey

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
@Test
public void shouldGetInstanceWithKey() throws Exception {
    final TopicPartition tp4 = new TopicPartition("topic-three", 1);
    hostToPartitions.put(hostTwo, Utils.mkSet(topic2P0, tp4));

    discovery.onChange(hostToPartitions, cluster.withPartitions(Collections.singletonMap(tp4, new PartitionInfo("topic-three", 1, null, null, null))));

    final StreamsMetadata expected = new StreamsMetadata(hostThree, Utils.mkSet(globalTable, "table-three"),
            Collections.singleton(topic3P0));

    final StreamsMetadata actual = discovery.getMetadataWithKey("table-three",
                                                                "the-key",
                                                                Serdes.String().serializer());

    assertEquals(expected, actual);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 17, Source: StreamsMetadataStateTest.java

Example 9: fetch

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
@Override
public synchronized WindowStoreIterator<V> fetch(final K key, final long timeFrom, final long timeTo) {
    // since this method may not access the underlying inner store,
    // we need to validate that the store is open here as well
    validateStoreOpen();

    final Bytes keyBytes = Bytes.wrap(serdes.rawKey(key));
    final WindowStoreIterator<byte[]> underlyingIterator = underlying.fetch(keyBytes, timeFrom, timeTo);

    final Bytes cacheKeyFrom = cacheFunction.cacheKey(keySchema.lowerRangeFixedSize(keyBytes, timeFrom));
    final Bytes cacheKeyTo = cacheFunction.cacheKey(keySchema.upperRangeFixedSize(keyBytes, timeTo));
    final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = cache.range(name, cacheKeyFrom, cacheKeyTo);

    final HasNextCondition hasNextCondition = keySchema.hasNextCondition(keyBytes,
                                                                         keyBytes,
                                                                         timeFrom,
                                                                         timeTo);
    final PeekingKeyValueIterator<Bytes, LRUCacheEntry> filteredCacheIterator = new FilteredCacheIterator(
        cacheIterator, hasNextCondition, cacheFunction
    );

    return new MergedSortedCacheWindowStoreIterator<>(filteredCacheIterator,
                                                      underlyingIterator,
                                                      new StateSerdes<>(serdes.topic(), Serdes.Long(), serdes.valueSerde()));
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 26, Source: CachingWindowStore.java

Example 10: notFoundWithNoResult

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
@Test
public void notFoundWithNoResult(TestContext context) {
    KafkaStreams streamMock = mock(KafkaStreams.class);
    ReadOnlyKeyValueStore<Object, Object> storeMock = mock(ReadOnlyKeyValueStore.class);
    when(streamMock.store(eq("store"), any(QueryableStoreType.class))).thenReturn(storeMock);
    SimpleKeyValueIterator iterator = new SimpleKeyValueIterator();
    when(storeMock.all()).thenReturn(iterator);

    rule.vertx().deployVerticle(new AllKeyValuesQueryVerticle("host", streamMock), context.asyncAssertSuccess(deployment -> {

        StoreWideQuery query = new StoreWideQuery("store", Serdes.String().getClass().getName(), Serdes.String().getClass().getName());

        rule.vertx().eventBus().send(Config.ALL_KEY_VALUE_QUERY_ADDRESS_PREFIX + "host", query, context.asyncAssertSuccess(reply -> {

            context.assertTrue(reply.body() instanceof MultiValuedKeyValueQueryResponse);
            MultiValuedKeyValueQueryResponse response = (MultiValuedKeyValueQueryResponse) reply.body();
            context.assertEquals(0, response.getResults().size());
            context.assertTrue(iterator.closed);
        }));
    }));
}
 
Developer: ftrossbach, Project: kiqr, Lines: 27, Source: AllKeyValuesQueryVerticleTest.java

Example 11: buildKafkaStreamsDefaults

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
private Properties buildKafkaStreamsDefaults() {
  Properties properties = new Properties();
  properties.put(StreamsConfig.APPLICATION_ID_CONFIG,
      String.format("%sanomaly-detector-app", HEROKU_KAFKA_PREFIX));
  properties.put(StreamsConfig.CLIENT_ID_CONFIG,
      String.format("%sanomaly-detector-client", HEROKU_KAFKA_PREFIX));
  properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
  properties.put(
      StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG,
      Serdes.String().getClass().getName());
  properties.put(
      StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG,
      Serdes.String().getClass().getName());
  properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000);
  properties.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG,
      WallclockTimestampExtractor.class);

  return properties;
}
 
Developer: kissaten, Project: kafka-streams-on-heroku, Lines: 20, Source: AnomalyDetectorConfig.java

Example 12: before

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
@Before
public void before() throws InterruptedException {
    testNo++;
    String applicationId = "kstream-repartition-join-test-" + testNo;
    builder = new KStreamBuilder();
    createTopics();
    streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, COMMIT_INTERVAL_MS);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    streamsConfiguration.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 3);
    streamsConfiguration.put(IntegrationTestUtils.INTERNAL_LEAVE_GROUP_ON_CLOSE, true);

    streamOne = builder.stream(Serdes.Long(), Serdes.Integer(), streamOneInput);
    streamTwo = builder.stream(Serdes.Integer(), Serdes.String(), streamTwoInput);
    streamFour = builder.stream(Serdes.Integer(), Serdes.String(), streamFourInput);

    keyMapper = MockKeyValueMapper.SelectValueKeyValueMapper();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 22, Source: KStreamRepartitionJoinTest.java

Example 13: mapBothStreamsAndLeftJoin

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
private ExpectedOutputOnTopic mapBothStreamsAndLeftJoin() throws Exception {
    final KStream<Integer, Integer> map1 = streamOne.map(keyMapper);

    final KStream<Integer, String> map2 = streamTwo.map(MockKeyValueMapper.<Integer, String>NoOpKeyValueMapper());

    final String outputTopic = "left-join-" + testNo;
    CLUSTER.createTopic(outputTopic);
    map1.leftJoin(map2,
        TOSTRING_JOINER,
        getJoinWindow(),
        Serdes.Integer(),
        Serdes.Integer(),
        Serdes.String())
        .filterNot(new Predicate<Integer, String>() {
            @Override
            public boolean test(Integer key, String value) {
                // filter not left-only join results
                return value.substring(2).equals("null");
            }
        })
        .to(Serdes.Integer(), Serdes.String(), outputTopic);

    return new ExpectedOutputOnTopic(expectedStreamOneTwoJoin, outputTopic);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 26, Source: KStreamRepartitionJoinTest.java

Example 14: createCountStream

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
/**
 * Creates a typical word count topology
 */
private KafkaStreams createCountStream(final String inputTopic, final String outputTopic, final Properties streamsConfiguration) {
    final KStreamBuilder builder = new KStreamBuilder();
    final Serde<String> stringSerde = Serdes.String();
    final KStream<String, String> textLines = builder.stream(stringSerde, stringSerde, inputTopic);

    final KGroupedStream<String, String> groupedByWord = textLines
        .flatMapValues(new ValueMapper<String, Iterable<String>>() {
            @Override
            public Iterable<String> apply(final String value) {
                return Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+"));
            }
        })
        .groupBy(MockKeyValueMapper.<String, String>SelectValueMapper());

    // Create a State Store for the all time word count
    groupedByWord.count("word-count-store-" + inputTopic).to(Serdes.String(), Serdes.Long(), outputTopic);

    // Create a Windowed State Store that contains the word count for every 1 minute
    groupedByWord.count(TimeWindows.of(WINDOW_SIZE), "windowed-word-count-store-" + inputTopic);

    return new KafkaStreams(builder, streamsConfiguration);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 26, Source: QueryableStateIntegrationTest.java

Example 15: testQueryableJoin

import org.apache.kafka.common.serialization.Serdes; // import the required package/class
@Test
public void testQueryableJoin() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();

    final int[] expectedKeys = new int[]{0, 1, 2, 3};

    final KTable<Integer, String> table1;
    final KTable<Integer, String> table2;
    final KTable<Integer, String> joined;
    final MockProcessorSupplier<Integer, String> processor;

    processor = new MockProcessorSupplier<>();
    table1 = builder.table(intSerde, stringSerde, topic1, storeName1);
    table2 = builder.table(intSerde, stringSerde, topic2, storeName2);
    joined = table1.join(table2, MockValueJoiner.TOSTRING_JOINER, Serdes.String(), "anyQueryableName");
    joined.toStream().process(processor);

    doTestJoin(builder, expectedKeys, processor, joined);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 20, Source: KTableKTableJoinTest.java


Note: The org.apache.kafka.common.serialization.Serdes class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Refer to each project's license before redistributing or using the code; do not republish without permission.