

Java Serdes.Long Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.common.serialization.Serdes.Long. If you have been wondering what exactly Serdes.Long does, how to use it, or where it is used in practice, the curated code examples below may help. You can also explore further usage examples of org.apache.kafka.common.serialization.Serdes, the class this method belongs to.


The following presents 15 code examples of the Serdes.Long method, ordered by popularity by default.
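
Before the examples, a quick orientation: Serdes.Long() returns a Serde<Long> that pairs Kafka's built-in LongSerializer and LongDeserializer, which encode a Long as 8 big-endian bytes. The minimal round-trip sketch below is our own (class and topic names are illustrative, not taken from any example on this page) and shows what the returned Serde provides:

import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;

public class SerdesLongRoundTrip {
    public static void main(String[] args) {
        final Serde<Long> longSerde = Serdes.Long();

        // Serializer and deserializer come as a matched pair from the Serde.
        // The topic argument is required by the API but ignored by the
        // built-in numeric (de)serializers.
        final byte[] bytes = longSerde.serializer().serialize("any-topic", 42L);
        final Long value = longSerde.deserializer().deserialize("any-topic", bytes);

        System.out.println(value); // prints 42
    }
}

Every example that follows passes this same Serde wherever Kafka Streams needs to read or write Long values: store suppliers, MockProcessorContext, StateSerdes, or output topics.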

Example 1: setUp

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
    final SessionKeySchema schema = new SessionKeySchema();
    schema.init("topic");
    final int retention = 60000;
    final int numSegments = 3;
    underlying = new RocksDBSegmentedBytesStore("test", retention, numSegments, schema);
    final RocksDBSessionStore<Bytes, byte[]> sessionStore = new RocksDBSessionStore<>(underlying, Serdes.Bytes(), Serdes.ByteArray());
    cachingStore = new CachingSessionStore<>(sessionStore,
                                             Serdes.String(),
                                             Serdes.Long(),
                                             Segments.segmentInterval(retention, numSegments)
                                             );
    cache = new ThreadCache("testCache", MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics()));
    context = new MockProcessorContext(TestUtils.tempDirectory(), null, null, (RecordCollector) null, cache);
    context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, "topic"));
    cachingStore.init(context, cachingStore);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 19, Source: CachingSessionStoreTest.java

Example 2: main

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Properties kafkaStreamProperties = new Properties();
    kafkaStreamProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-stream-wordCount");
    kafkaStreamProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    kafkaStreamProperties.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    kafkaStreamProperties.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    kafkaStreamProperties.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    Serde<String> stringSerde = Serdes.String();
    Serde<Long> longSerde = Serdes.Long();

    KStreamBuilder streamTopology = new KStreamBuilder();
    KStream<String, String> topicRecords = streamTopology.stream(stringSerde, stringSerde, "input");
    KStream<String, Long> wordCounts = topicRecords
            .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
            .map((key, word) -> new KeyValue<>(word, word))
            .countByKey("Count")
            .toStream();
    wordCounts.to(stringSerde, longSerde, "wordCount");

    KafkaStreams streamManager = new KafkaStreams(streamTopology, kafkaStreamProperties);
    streamManager.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streamManager::close));
}
 
Developer: PacktPublishing, Project: Building-Data-Streaming-Applications-with-Apache-Kafka, Lines: 26, Source: KafkaStreamWordCount.java
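
A note on API vintage: Example 2 is written against the pre-1.0 Streams API; KStreamBuilder, countByKey, and StreamsConfig.ZOOKEEPER_CONNECT_CONFIG were all removed in later releases. As a sketch only (assuming Kafka Streams 2.x or later; this is not code from the book), the same word count on the StreamsBuilder API could look like:

import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Produced;

public class ModernWordCount {
    public static void main(String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-stream-wordCount");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Per-type default serdes replaced KEY_SERDE_CLASS_CONFIG /
        // VALUE_SERDE_CLASS_CONFIG; the ZooKeeper setting is gone entirely.
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

        final StreamsBuilder builder = new StreamsBuilder();
        builder.<String, String>stream("input")
               .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
               .groupBy((key, word) -> word)   // re-key by word; default String serdes apply
               .count()                        // KTable<String, Long>
               .toStream()
               .to("wordCount", Produced.with(Serdes.String(), Serdes.Long()));

        final KafkaStreams streams = new KafkaStreams(builder.build(), props);
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}

The role of Serdes.Long() is unchanged across versions: count() produces Long values, so writing the output topic still takes an explicit long serde, here via Produced.with.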

Example 3: fetch

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Override
public synchronized WindowStoreIterator<V> fetch(final K key, final long timeFrom, final long timeTo) {
    // since this function may not access the underlying inner store, we need to
    // validate that the store is open here as well.
    validateStoreOpen();

    final Bytes keyBytes = Bytes.wrap(serdes.rawKey(key));
    final WindowStoreIterator<byte[]> underlyingIterator = underlying.fetch(keyBytes, timeFrom, timeTo);

    final Bytes cacheKeyFrom = cacheFunction.cacheKey(keySchema.lowerRangeFixedSize(keyBytes, timeFrom));
    final Bytes cacheKeyTo = cacheFunction.cacheKey(keySchema.upperRangeFixedSize(keyBytes, timeTo));
    final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = cache.range(name, cacheKeyFrom, cacheKeyTo);

    final HasNextCondition hasNextCondition = keySchema.hasNextCondition(keyBytes,
                                                                         keyBytes,
                                                                         timeFrom,
                                                                         timeTo);
    final PeekingKeyValueIterator<Bytes, LRUCacheEntry> filteredCacheIterator = new FilteredCacheIterator(
        cacheIterator, hasNextCondition, cacheFunction
    );

    return new MergedSortedCacheWindowStoreIterator<>(filteredCacheIterator,
                                                      underlyingIterator,
                                                      new StateSerdes<>(serdes.topic(), Serdes.Long(), serdes.valueSerde()));
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 26, Source: CachingWindowStore.java

Example 4: before

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Before
public void before() {
    final NoOpRecordCollector collector = new NoOpRecordCollector() {
        @Override
        public <K, V> void send(final String topic,
                                K key,
                                V value,
                                Integer partition,
                                Long timestamp,
                                Serializer<K> keySerializer,
                                Serializer<V> valueSerializer) {
            sent.put(key, value);
        }
    };
    context = new MockProcessorContext(
        TestUtils.tempDirectory(),
        Serdes.String(),
        Serdes.Long(),
        collector,
        new ThreadCache("testCache", 0, new MockStreamsMetrics(new Metrics())));
    context.setTime(0);
    store.init(context, store);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 24, Source: ChangeLoggingKeyValueBytesStoreTest.java

Example 5: before

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Before
public void before() {
    final SessionKeySchema schema = new SessionKeySchema();
    schema.init("topic");
    bytesStore = new RocksDBSegmentedBytesStore(storeName,
                                                retention,
                                                numSegments,
                                                schema);

    stateDir = TestUtils.tempDirectory();
    context = new MockProcessorContext(
        stateDir,
        Serdes.String(),
        Serdes.Long(),
        new NoOpRecordCollector(),
        new ThreadCache("testCache", 0, new MockStreamsMetrics(new Metrics())));
    bytesStore.init(context, bytesStore);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 19, Source: RocksDBSegmentedBytesStoreTest.java

Example 6: initStore

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
private void initStore(final boolean enableCaching) {
    final RocksDBSessionStoreSupplier<String, Long> supplier =
            new RocksDBSessionStoreSupplier<>(STORE_NAME,
                                              GAP_MS * 3,
                                              Serdes.String(),
                                              Serdes.Long(),
                                              false,
                                              Collections.<String, String>emptyMap(),
                                              enableCaching);
    sessionStore = (SessionStore<String, Long>) supplier.get();
    sessionStore.init(context, sessionStore);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 13, Source: KStreamSessionWindowAggregateProcessorTest.java

Example 7: createContext

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Before
public void createContext() {
    context = new MockProcessorContext(TestUtils.tempDirectory(),
                                       Serdes.String(),
                                       Serdes.Long(),
                                       new NoOpRecordCollector(),
                                       new ThreadCache("testCache", 0, new MockStreamsMetrics(new Metrics())));
    segments = new Segments("test", 4 * 60 * 1000, NUM_SEGMENTS);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 10, Source: SegmentsTest.java

Example 8: successfulAllQuery

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Test
public void successfulAllQuery() throws Exception {

    SpecificBlockingKiqrClient<String, Long> client = new SpecificBlockingRestKiqrClientImpl<>("localhost", 44321, "kv", String.class, Long.class, Serdes.String(), Serdes.Long());

    Map<String, Long> result = client.getAllKeyValues();
    assertThat(result.entrySet(), hasSize(4));
    assertThat(result, hasEntry("key1", 3L));
    assertThat(result, hasEntry("key2", 6L));
    assertThat(result, hasEntry("key3", 9L));
    assertThat(result, hasEntry("key4", 12L));
}
 
Developer: ftrossbach, Project: kiqr, Lines: 14, Source: SpecificClientIntegrationITCase.java

Example 9: shouldCompactTopicsForStateChangelogs

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Test
public void shouldCompactTopicsForStateChangelogs() throws Exception {
    //
    // Step 1: Configure and start a simple word count topology
    //
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Long> longSerde = Serdes.Long();

    final Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "compact-topics-integration-test");
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    final KStreamBuilder builder = new KStreamBuilder();

    final KStream<String, String> textLines = builder.stream(DEFAULT_INPUT_TOPIC);

    final KStream<String, Long> wordCounts = textLines
            .flatMapValues(new ValueMapper<String, Iterable<String>>() {
                @Override
                public Iterable<String> apply(final String value) {
                    return Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+"));
                }
            }).groupBy(MockKeyValueMapper.<String, String>SelectValueMapper())
            .count("Counts").toStream();

    wordCounts.to(stringSerde, longSerde, DEFAULT_OUTPUT_TOPIC);

    // Remove any state from previous test runs
    IntegrationTestUtils.purgeLocalStreamsState(streamsConfiguration);

    final KafkaStreams streams = new KafkaStreams(builder, streamsConfiguration);
    streams.start();

    //
    // Step 2: Produce some input data to the input topic.
    //
    produceData(Arrays.asList("hello", "world", "world", "hello world"));

    //
    // Step 3: Verify the state changelog topics are compact
    //
    streams.close();
    final Properties properties = getTopicConfigProperties(ProcessorStateManager.storeChangelogTopic(applicationId, "Counts"));
    assertEquals(LogConfig.Compact(), properties.getProperty(LogConfig.CleanupPolicyProp()));
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 49, Source: InternalTopicIntegrationTest.java

Example 10: shouldFetchExactKeys

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Test
public void shouldFetchExactKeys() throws Exception {
    final RocksDBSegmentedBytesStore bytesStore =
            new RocksDBSegmentedBytesStore("session-store", 0x7a00000000000000L, 2, new SessionKeySchema());

    sessionStore = new RocksDBSessionStore<>(bytesStore,
                                             Serdes.String(),
                                             Serdes.Long());

    sessionStore.init(context, sessionStore);

    sessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 1L);
    sessionStore.put(new Windowed<>("aa", new SessionWindow(0, 0)), 2L);
    sessionStore.put(new Windowed<>("a", new SessionWindow(10, 20)), 3L);
    sessionStore.put(new Windowed<>("aa", new SessionWindow(10, 20)), 4L);
    sessionStore.put(new Windowed<>("a", new SessionWindow(0x7a00000000000000L - 2, 0x7a00000000000000L - 1)), 5L);

    KeyValueIterator<Windowed<String>, Long> iterator = sessionStore.findSessions("a", 0, Long.MAX_VALUE);
    List<Long> results = new ArrayList<>();
    while (iterator.hasNext()) {
        results.add(iterator.next().value);
    }

    assertThat(results, equalTo(Arrays.asList(1L, 3L, 5L)));


    iterator = sessionStore.findSessions("aa", 0, Long.MAX_VALUE);
    results = new ArrayList<>();
    while (iterator.hasNext()) {
        results.add(iterator.next().value);
    }

    assertThat(results, equalTo(Arrays.asList(2L, 4L)));


    final KeyValueIterator<Windowed<String>, Long> rangeIterator = sessionStore.findSessions("a", "aa", 0, Long.MAX_VALUE);
    final List<Long> rangeResults = new ArrayList<>();
    while (rangeIterator.hasNext()) {
        rangeResults.add(rangeIterator.next().value);
    }
    assertThat(rangeResults, equalTo(Arrays.asList(1L, 3L, 2L, 4L, 5L)));
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 43, Source: RocksDBSessionStoreTest.java

Example 11: successfulScalarQuery

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Test
public void successfulScalarQuery() throws Exception {

    SpecificBlockingKiqrClient<String, Long> client = new SpecificBlockingRestKiqrClientImpl<>("localhost", 44321, "kv", String.class, Long.class, Serdes.String(), Serdes.Long());

    Optional<Long> resultKey1 = client.getScalarKeyValue("key1");
    assertTrue(resultKey1.isPresent());
    assertThat(resultKey1.get(), is(equalTo(3L)));

    Optional<Long> resultKey2 = client.getScalarKeyValue("key3");
    assertTrue(resultKey2.isPresent());
    assertThat(resultKey2.get(), is(equalTo(9L)));
}
 
Developer: ftrossbach, Project: kiqr, Lines: 17, Source: SpecificClientIntegrationITCase.java

Example 12: wrongStoreTypeRangeQuery

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Test(expected = QueryExecutionException.class)
public void wrongStoreTypeRangeQuery() throws Exception {

    SpecificBlockingKiqrClient<String, Long> client = new SpecificBlockingRestKiqrClientImpl<>("localhost", 44321, "window", String.class, Long.class, Serdes.String(), Serdes.Long());

    // "window" is not a key-value store, so this query is expected to throw.
    client.getRangeKeyValues("key1", "key2");
}
 
Developer: ftrossbach, Project: kiqr, Lines: 11, Source: SpecificClientIntegrationITCase.java

Example 13: emptyRangeQuery

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Test
public void emptyRangeQuery() throws Exception {

    SpecificBlockingKiqrClient<String, Long> client = new SpecificBlockingRestKiqrClientImpl<>("localhost", 44321, "kv", String.class, Long.class, Serdes.String(), Serdes.Long());

    Map<String, Long> result = client.getRangeKeyValues("key6", "key7");
    assertThat(result.entrySet(), is(empty()));
}
 
Developer: ftrossbach, Project: kiqr, Lines: 11, Source: SpecificClientIntegrationITCase.java

Example 14: noSuchStoreRangeQuery

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Test
public void noSuchStoreRangeQuery() throws Exception {

    SpecificBlockingKiqrClient<String, Long> client = new SpecificBlockingRestKiqrClientImpl<>("localhost", 44321, "idontexist", String.class, Long.class, Serdes.String(), Serdes.Long());

    Map<String, Long> result = client.getRangeKeyValues("key1", "key2");
    assertTrue(result.isEmpty());
}
 
Developer: ftrossbach, Project: kiqr, Lines: 11, Source: SpecificClientIntegrationITCase.java

Example 15: invertedRangeQuery

import org.apache.kafka.common.serialization.Serdes; // import the package/class this method depends on
@Test(expected = QueryExecutionException.class)
public void invertedRangeQuery() throws Exception {

    SpecificBlockingKiqrClient<String, Long> client = new SpecificBlockingRestKiqrClientImpl<>("localhost", 44321, "kv", String.class, Long.class, Serdes.String(), Serdes.Long());

    // from > to: the inverted range is expected to throw rather than return results
    Map<String, Long> result = client.getRangeKeyValues("key3", "key1");
    assertThat(result.entrySet(), is(empty()));
}
 
Developer: ftrossbach, Project: kiqr, Lines: 11, Source: SpecificClientIntegrationITCase.java


Note: The org.apache.kafka.common.serialization.Serdes.Long method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to its original authors. Consult each project's license before distributing or using the code; do not reproduce without permission.