Java KeyValue Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.streams.KeyValue. If you have been wondering what exactly the KeyValue class does, how to use it, or what working code looks like, the curated examples below should help.


The KeyValue class belongs to the org.apache.kafka.streams package. A total of 15 KeyValue code examples are shown below, sorted by popularity.
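
Before diving into the examples, here is a minimal sketch (not taken from any of the projects below; the key and value are made up) showing the two common ways to create a KeyValue and how its public key/value fields are read:

import org.apache.kafka.streams.KeyValue;

public class KeyValueBasics {
    public static void main(String[] args) {
        // Two equivalent ways to create an immutable key-value pair
        KeyValue<String, Long> viaConstructor = new KeyValue<>("page-hits", 42L);
        KeyValue<String, Long> viaFactory = KeyValue.pair("page-hits", 42L);

        // key and value are public final fields rather than getters
        System.out.println(viaConstructor.key + " -> " + viaConstructor.value);

        // equals() compares both key and value
        System.out.println(viaConstructor.equals(viaFactory)); // prints: true
    }
}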

Example 1: start

import org.apache.kafka.streams.KeyValue; // import the required package/class
@Override
public void start() throws Exception {

    execute(Config.ALL_KEY_VALUE_QUERY_ADDRESS_PREFIX, (query, keySerde, valueSerde) -> {

        ReadOnlyKeyValueStore<Object, Object> kvStore = streams.store(query.getStoreName(), QueryableStoreTypes.keyValueStore());
        try (KeyValueIterator<Object, Object> result = kvStore.all()) {
            if (result.hasNext()) {
                Map<String, String> results = new HashMap<>();
                while (result.hasNext()) {
                    KeyValue<Object, Object> kvEntry = result.next();

                    results.put(base64Encode(keySerde, kvEntry.key), base64Encode(valueSerde, kvEntry.value));
                }
                return new MultiValuedKeyValueQueryResponse(results);
            } else {
                return new MultiValuedKeyValueQueryResponse(Collections.emptyMap());
            }
        }
    });

}
 
Developer: ftrossbach, Project: kiqr, Lines: 24, Source: AllKeyValuesQueryVerticle.java

Example 2: shouldFindValueForKeyWhenMultiStores

import org.apache.kafka.streams.KeyValue; // import the required package/class
@Test
public void shouldFindValueForKeyWhenMultiStores() throws Exception {
    final ReadOnlyWindowStoreStub<String, String> secondUnderlying = new
        ReadOnlyWindowStoreStub<>(WINDOW_SIZE);
    stubProviderTwo.addStore(storeName, secondUnderlying);

    underlyingWindowStore.put("key-one", "value-one", 0L);
    secondUnderlying.put("key-two", "value-two", 10L);

    final List<KeyValue<Long, String>> keyOneResults = StreamsTestUtils.toList(windowStore.fetch("key-one", 0L,
                                                                                                 1L));
    final List<KeyValue<Long, String>> keyTwoResults = StreamsTestUtils.toList(windowStore.fetch("key-two", 10L,
                                                                                                 11L));

    assertEquals(Collections.singletonList(KeyValue.pair(0L, "value-one")), keyOneResults);
    assertEquals(Collections.singletonList(KeyValue.pair(10L, "value-two")), keyTwoResults);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 18, Source: CompositeReadOnlyWindowStoreTest.java

Example 3: flatMap

import org.apache.kafka.streams.KeyValue; // import the required package/class
@Override
public Iterable<KeyValue<StatEventKey, StatAggregate>> flatMap(String statUuid, StatisticWrapper statisticWrapper) {
    Preconditions.checkNotNull(statUuid);
    Preconditions.checkNotNull(statisticWrapper);

    int maxEventIds = stroomPropertyService.getIntProperty(StatAggregate.PROP_KEY_MAX_AGGREGATED_EVENT_IDS, Integer.MAX_VALUE);
    Statistics.Statistic statistic = Preconditions.checkNotNull(statisticWrapper.getStatistic());

    List<MultiPartIdentifier> eventIds = convertEventIds(statistic, maxEventIds);

    //convert stat value
    ValueAggregate statAggregate = new ValueAggregate(eventIds, maxEventIds, statistic.getValue());

    List<KeyValue<StatEventKey, StatAggregate>> keyValues = buildKeyValues(statUuid, statisticWrapper, statAggregate);

    LOGGER.trace(() -> String.format("Flat mapping event into %s events", keyValues.size()));
    return keyValues;
}
 
Developer: gchq, Project: stroom-stats, Lines: 19, Source: ValueStatToAggregateFlatMapper.java

Example 4: shouldHaveMultipleSessionsForSameIdWhenTimestampApartBySessionGap

import org.apache.kafka.streams.KeyValue; // import the required package/class
@Test
public void shouldHaveMultipleSessionsForSameIdWhenTimestampApartBySessionGap() throws Exception {
    final String sessionId = "mel";
    long time = 0;
    context.setTime(time);
    processor.process(sessionId, "first");
    context.setTime(time += GAP_MS + 1);
    processor.process(sessionId, "second");
    processor.process(sessionId, "second");
    context.setTime(time += GAP_MS + 1);
    processor.process(sessionId, "third");
    processor.process(sessionId, "third");
    processor.process(sessionId, "third");

    sessionStore.flush();
    assertEquals(Arrays.asList(
            KeyValue.pair(new Windowed<>(sessionId, new SessionWindow(0, 0)), new Change<>(1L, null)),
            KeyValue.pair(new Windowed<>(sessionId, new SessionWindow(GAP_MS + 1, GAP_MS + 1)), new Change<>(2L, null)),
            KeyValue.pair(new Windowed<>(sessionId, new SessionWindow(time, time)), new Change<>(3L, null))

    ), results);

}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 24, Source: KStreamSessionWindowAggregateProcessorTest.java

Example 5: getAggSum

import org.apache.kafka.streams.KeyValue; // import the required package/class
/**
 * Get the sum of all count aggregate values passed to the {@link StatisticsService} so far, ensuring the
 * statKey is for our statName
 */
private long getAggSum(UID statNameUid) {

    Mockito.verify(mockStatisticsService, Mockito.atLeast(0)).putAggregatedEvents(statTypeCaptor.capture(),
            intervalCaptor.capture(),
            aggregatesMapCaptor.capture());

    aggregatesMapCaptor.getAllValues().forEach(map ->
            map.forEach((k, v) -> LOGGER.debug("{} - {} ", k, v)));

    return aggregatesMapCaptor.getAllValues().stream()
            .flatMap(map -> map.entrySet().stream().map(entry -> new KeyValue<>(entry.getKey(), entry.getValue())))
            .filter(kv -> kv.key.getStatUuid().equals(statNameUid))
            .mapToLong(kv -> ((CountAggregate) kv.value).getAggregatedCount())
            .sum();
}
 
Developer: gchq, Project: stroom-stats, Lines: 21, Source: StatisticsAggregationServiceIT.java

Example 6: assertOnKeyValue

import org.apache.kafka.streams.KeyValue; // import the required package/class
private void assertOnKeyValue(KeyValue<StatEventKey, StatAggregate> keyValue, Statistics.Statistic statistic, StatisticConfiguration statisticConfiguration) {
    StatEventKey statEventKey = keyValue.key;
    StatAggregate statAggregate = keyValue.value;
    assertThat(statEventKey).isNotNull();
    assertThat(statEventKey.getTagValues()).hasSize(statistic.getTags().getTag().size());
    //make sure the tags match
    assertThat(statEventKey.getTagValues().stream()
            .map(tagValue -> uniqueIdCache.getName(tagValue.getTag()))
            .collect(Collectors.toList()))
            .isEqualTo(statistic.getTags().getTag().stream()
                    .map(TagType::getName)
                    .collect(Collectors.toList()));
    assertThat(statEventKey.getInterval()).isEqualTo(statisticConfiguration.getPrecision());

    assertThat(statAggregate).isNotNull();
    assertThat(statAggregate).isExactlyInstanceOf(CountAggregate.class);

    CountAggregate countAggregate = (CountAggregate) statAggregate;
    assertThat(countAggregate.getAggregatedCount()).isEqualTo(statistic.getCount());
    assertThat(countAggregate.getEventIds()).hasSize(2);

    MultiPartIdentifier id1 = countAggregate.getEventIds().get(0);
    assertThat(id1.getValue()).contains(id1part1, id1part2);
    MultiPartIdentifier id2 = countAggregate.getEventIds().get(1);
    assertThat(id2.getValue()).contains(id2part1, id2part2);
}
 
Developer: gchq, Project: stroom-stats, Lines: 27, Source: TestCountStatToAggregateFlatMapper.java

Example 7: init

import org.apache.kafka.streams.KeyValue; // import the required package/class
@PostConstruct
public void init() {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-streams-processor");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:9092");
    props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "zookeeper:2181");
    props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, SpecificAvroSerde.class);
    props.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "http://schema-registry:8081");

    KStreamBuilder builder = new KStreamBuilder();

    builder.stream("tweets")
            .map((k, v) -> {
                Tweet tweet = (Tweet) SpecificData.get().deepCopy(Tweet.getClassSchema(), v);
                return new KeyValue<>(tweet.getId(), tweet.getText().toString());
            })
            .to(Serdes.Long(), Serdes.String(), "processed-tweets");

    KafkaStreams streams = new KafkaStreams(builder, props);
    streams.start();
}
 
Developer: jeqo, Project: talk-kafka-messaging-logs, Lines: 22, Source: KafkaTweetProcessor.java
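
Note that Example 7 targets the old 0.10.x-era API: KStreamBuilder and ZOOKEEPER_CONNECT_CONFIG have both since been removed. For comparison, here is a minimal sketch of the same re-keying pattern with the newer StreamsBuilder API (Kafka 2.x and later); the topic names, serdes, and the toUpperCase transformation are illustrative assumptions, not code from the original project:

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public class ModernTweetProcessor {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-streams-processor");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:9092"); // no ZooKeeper setting in modern clients

        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("tweets", Consumed.with(Serdes.Long(), Serdes.String()))
                // re-shape each record by returning a new KeyValue from map()
                .map((id, text) -> new KeyValue<>(id, text.toUpperCase()))
                .to("processed-tweets", Produced.with(Serdes.Long(), Serdes.String()));

        KafkaStreams streams = new KafkaStreams(builder.build(), props);
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}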

Example 8: view

import org.apache.kafka.streams.KeyValue; // import the required package/class
@Override
public KTableValueGetterSupplier<K, KeyValue<K1, V1>> view() {
    final KTableValueGetterSupplier<K, V> parentValueGetterSupplier = parent.valueGetterSupplier();

    return new KTableValueGetterSupplier<K, KeyValue<K1, V1>>() {

        @Override
        public KTableValueGetter<K, KeyValue<K1, V1>> get() {
            return new KTableMapValueGetter(parentValueGetterSupplier.get());
        }

        @Override
        public String[] storeNames() {
            throw new StreamsException("Underlying state store not accessible due to repartitioning.");
        }
    };
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 17, Source: KTableRepartitionMap.java

Example 9: process

import org.apache.kafka.streams.KeyValue; // import the required package/class
/**
 * @throws StreamsException if key is null
 */
@Override
public void process(K key, Change<V> change) {
    // the original key should never be null
    if (key == null)
        throw new StreamsException("Record key for the grouping KTable should not be null.");

    // if the value is null, we do not need to forward its selected key-value further
    KeyValue<? extends K1, ? extends V1> newPair = change.newValue == null ? null : mapper.apply(key, change.newValue);
    KeyValue<? extends K1, ? extends V1> oldPair = change.oldValue == null ? null : mapper.apply(key, change.oldValue);

    // if the selected repartition key or value is null, skip
    // forward oldPair first, to be consistent with reduce and aggregate
    if (oldPair != null && oldPair.key != null && oldPair.value != null) {
        context().forward(oldPair.key, new Change<>(null, oldPair.value));
    }

    if (newPair != null && newPair.key != null && newPair.value != null) {
        context().forward(newPair.key, new Change<>(newPair.value, null));
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 25, Source: KTableRepartitionMap.java

Example 10: verifyKTableKTableJoin

import org.apache.kafka.streams.KeyValue; // import the required package/class
private void verifyKTableKTableJoin(final JoinType joinType1,
                                    final JoinType joinType2,
                                    final List<KeyValue<String, String>> expectedResult,
                                    boolean verifyQueryableState) throws Exception {
    final String queryableName = verifyQueryableState ? joinType1 + "-" + joinType2 + "-ktable-ktable-join-query" : null;
    streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, joinType1 + "-" + joinType2 + "-ktable-ktable-join" + queryableName);

    streams = prepareTopology(joinType1, joinType2, queryableName);
    streams.start();

    final List<KeyValue<String, String>> result = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(
            CONSUMER_CONFIG,
            OUTPUT,
            expectedResult.size());

    assertThat(result, equalTo(expectedResult));

    if (verifyQueryableState) {
        verifyKTableKTableJoinQueryableState(joinType1, joinType2, expectedResult);
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 22, Source: KTableKTableJoinIntegrationTest.java

Example 11: shouldSupportAllAcrossMultipleStores

import org.apache.kafka.streams.KeyValue; // import the required package/class
@Test
public void shouldSupportAllAcrossMultipleStores() throws Exception {
    final KeyValueStore<String, String> cache = newStoreInstance();
    stubProviderTwo.addStore(storeName, cache);

    stubOneUnderlying.put("a", "a");
    stubOneUnderlying.put("b", "b");
    stubOneUnderlying.put("z", "z");

    cache.put("c", "c");
    cache.put("d", "d");
    cache.put("x", "x");

    final List<KeyValue<String, String>> results = toList(theStore.all());
    assertTrue(results.contains(new KeyValue<>("a", "a")));
    assertTrue(results.contains(new KeyValue<>("b", "b")));
    assertTrue(results.contains(new KeyValue<>("c", "c")));
    assertTrue(results.contains(new KeyValue<>("d", "d")));
    assertTrue(results.contains(new KeyValue<>("x", "x")));
    assertTrue(results.contains(new KeyValue<>("z", "z")));
    assertEquals(6, results.size());
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 23, Source: CompositeReadOnlyKeyValueStoreTest.java

Example 12: shouldUpdateValuesForExistingKeysOnPutAll

import org.apache.kafka.streams.KeyValue; // import the required package/class
@Test
public void shouldUpdateValuesForExistingKeysOnPutAll() {
    final List<KeyValue<Integer, String>> kvPairs = Arrays.asList(KeyValue.pair(1, "1"),
            KeyValue.pair(2, "2"),
            KeyValue.pair(3, "3"));

    store.putAll(kvPairs);

    final List<KeyValue<Integer, String>> updatedKvPairs = Arrays.asList(KeyValue.pair(1, "ONE"),
            KeyValue.pair(2, "TWO"),
            KeyValue.pair(3, "THREE"));

    store.putAll(updatedKvPairs);

    assertThat(store.approximateNumEntries(), equalTo(3L));

    for (KeyValue<Integer, String> kvPair : updatedKvPairs) {
        assertThat(store.get(kvPair.key), equalTo(kvPair.value));
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 22, Source: InMemoryLRUCacheStoreTest.java

Example 13: groupBy

import org.apache.kafka.streams.KeyValue; // import the required package/class
@Override
public <K1, V1> KGroupedTable<K1, V1> groupBy(KeyValueMapper<? super K, ? super V, KeyValue<K1, V1>> selector,
                                              Serde<K1> keySerde,
                                              Serde<V1> valueSerde) {

    Objects.requireNonNull(selector, "selector can't be null");
    String selectName = topology.newName(SELECT_NAME);

    KTableProcessorSupplier<K, V, KeyValue<K1, V1>> selectSupplier = new KTableRepartitionMap<K, V, K1, V1>(this, selector);

    // select the aggregate key and values (old and new), it would require parent to send old values
    topology.addProcessor(selectName, selectSupplier, this.name);
    this.enableSendingOldValues();

    return new KGroupedTableImpl<>(topology, selectName, this.name, keySerde, valueSerde);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 17, Source: KTableImpl.java

Example 14: shouldMergeSessions

import org.apache.kafka.streams.KeyValue; // import the required package/class
@Test
public void shouldMergeSessions() throws Exception {
    context.setTime(0);
    final String sessionId = "mel";
    processor.process(sessionId, "first");
    assertTrue(sessionStore.findSessions(sessionId, 0, 0).hasNext());

    // move time beyond gap
    context.setTime(GAP_MS + 1);
    processor.process(sessionId, "second");
    assertTrue(sessionStore.findSessions(sessionId, GAP_MS + 1, GAP_MS + 1).hasNext());
    // should still exist as not within gap
    assertTrue(sessionStore.findSessions(sessionId, 0, 0).hasNext());
    // move time back
    context.setTime(GAP_MS / 2);
    processor.process(sessionId, "third");

    final KeyValueIterator<Windowed<String>, Long> iterator = sessionStore.findSessions(sessionId, 0, GAP_MS + 1);
    final KeyValue<Windowed<String>, Long> kv = iterator.next();

    assertEquals(Long.valueOf(3), kv.value);
    assertFalse(iterator.hasNext());
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 24, Source: KStreamSessionWindowAggregateProcessorTest.java

Example 15: assertThatOldestWindowContains

import org.apache.kafka.streams.KeyValue; // import the required package/class
/**
 * Asserts that the oldest available window in the window store contains the expected content.
 *
 * @param store    the store to be validated
 * @param expected the expected contents of the store
 * @param <K>      the store's key type
 * @param <V>      the store's value type
 */
public static <K, V> void assertThatOldestWindowContains(ReadOnlyWindowStore<K, V> store, Map<K, V> expected) {
  long fromBeginningOfTimeMs = 0;
  long toNowInProcessingTimeMs = System.currentTimeMillis();
  for (K key : expected.keySet()) {
    long windowCounter = 0;
    // For each key, `ReadOnlyWindowStore#fetch()` guarantees that the iterator iterates through
    // the windows in ascending-time order; that is, the first window (if any) is the oldest
    // available window for that key.
    try (WindowStoreIterator<V> iterator = store.fetch(key, fromBeginningOfTimeMs, toNowInProcessingTimeMs)) {
      while (iterator.hasNext() && windowCounter <= 1) {
        windowCounter++;
        KeyValue<Long, V> next = iterator.next();
        V actualValue = next.value;
        assertThat(actualValue).isEqualTo(expected.get(key));
      }
    }
  }
}
 
Developer: kaiwaehner, Project: kafka-streams-machine-learning-examples, Lines: 27, Source: IntegrationTestUtils.java
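
A hypothetical call site for the helper above, assuming a running KafkaStreams instance named streams with a window store called "hourly-counts" (both the handle and the store name are made up for illustration):

// obtain a read-only handle on the window store by name
ReadOnlyWindowStore<String, Long> store =
        streams.store("hourly-counts", QueryableStoreTypes.windowStore());

// the oldest window for each key is expected to hold these values
Map<String, Long> expected = new HashMap<>();
expected.put("user-42", 3L);

IntegrationTestUtils.assertThatOldestWindowContains(store, expected);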


Note: The org.apache.kafka.streams.KeyValue class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright in the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.