

Java Bytes Class Code Examples

This article collects and summarizes typical usage examples of the Java class org.apache.kafka.common.utils.Bytes. If you have been wondering what the Bytes class does, how to use it, or what it looks like in real code, the curated class code examples below should help.


The Bytes class belongs to the org.apache.kafka.common.utils package. A total of 15 code examples of the Bytes class are shown below, sorted by popularity by default.
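Before the examples, here is a minimal sketch of what Bytes itself provides: it wraps a byte[] so the array gets value-based equals, hashCode, and compareTo semantics and can be used as a map key. The wrap(), get(), and compareTo() calls are the same ones that appear throughout the examples below; the class name BytesSketch and the main method are only illustrative scaffolding, not part of any example.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.utils.Bytes;

public class BytesSketch {
    public static void main(String[] args) {
        // wrap raw byte arrays so they gain value-based equals/hashCode/compareTo
        Bytes k1 = Bytes.wrap("K1".getBytes());
        Bytes k2 = Bytes.wrap("K2".getBytes());

        // unlike a plain byte[], Bytes works as a hash-map key
        Map<Bytes, String> store = new HashMap<>();
        store.put(k1, "V1");
        store.put(k2, "V2");
        System.out.println(store.get(Bytes.wrap("K1".getBytes()))); // V1

        // get() returns the underlying byte[]; compareTo() is lexicographic
        System.out.println(new String(k1.get()));  // K1
        System.out.println(k1.compareTo(k2) < 0);  // true
    }
}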

Example 1: shouldKeepTrackOfMostRecentlyAndLeastRecentlyUsed

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
@Test
public void shouldKeepTrackOfMostRecentlyAndLeastRecentlyUsed() throws IOException {
    List<KeyValue<String, String>> toInsert = Arrays.asList(
            new KeyValue<>("K1", "V1"),
            new KeyValue<>("K2", "V2"),
            new KeyValue<>("K3", "V3"),
            new KeyValue<>("K4", "V4"),
            new KeyValue<>("K5", "V5"));
    for (int i = 0; i < toInsert.size(); i++) {
        byte[] key = toInsert.get(i).key.getBytes();
        byte[] value = toInsert.get(i).value.getBytes();
        cache.put(Bytes.wrap(key), new LRUCacheEntry(value, true, 1, 1, 1, ""));
        LRUCacheEntry head = cache.first();
        LRUCacheEntry tail = cache.last();
        assertEquals(new String(head.value), toInsert.get(i).value);
        assertEquals(new String(tail.value), toInsert.get(0).value);
        assertEquals(cache.flushes(), 0);
        assertEquals(cache.hits(), 0);
        assertEquals(cache.misses(), 0);
        assertEquals(cache.overwrites(), 0);
    }
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 23, Source file: NamedCacheTest.java

Example 2: testUpperBoundWithKeyBytesLargerThanFirstTimestampByte

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
@Test
public void testUpperBoundWithKeyBytesLargerThanFirstTimestampByte() throws Exception {
    Bytes upper = sessionKeySchema.upperRange(Bytes.wrap(new byte[]{0xA, (byte) 0x8F, (byte) 0x9F}), Long.MAX_VALUE);

    assertThat(
        "shorter key with max timestamp should be in range",
        upper.compareTo(
            SessionKeySerde.bytesToBinary(
                new Windowed<>(
                    Bytes.wrap(new byte[]{0xA, (byte) 0x8F}),
                    new SessionWindow(Long.MAX_VALUE, Long.MAX_VALUE))
            )
        ) >= 0
    );

    assertThat(upper, equalTo(SessionKeySerde.bytesToBinary(
        new Windowed<>(Bytes.wrap(new byte[]{0xA, (byte) 0x8F, (byte) 0x9F}), new SessionWindow(Long.MAX_VALUE, Long.MAX_VALUE))))
    );
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 20, Source file: SessionKeySchemaTest.java

Example 3: setUp

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
@Before
public void setUp() throws Exception {
    keySchema = new WindowKeySchema();
    final int retention = 30000;
    final int numSegments = 3;
    underlying = new RocksDBSegmentedBytesStore("test", retention, numSegments, keySchema);
    final RocksDBWindowStore<Bytes, byte[]> windowStore = new RocksDBWindowStore<>(underlying, Serdes.Bytes(), Serdes.ByteArray(), false, WINDOW_SIZE);
    cacheListener = new CachingKeyValueStoreTest.CacheFlushListenerStub<>();
    cachingStore = new CachingWindowStore<>(windowStore,
                                            Serdes.String(),
                                            Serdes.String(),
                                            WINDOW_SIZE,
                                            Segments.segmentInterval(retention, numSegments));
    cachingStore.setFlushListener(cacheListener);
    cache = new ThreadCache("testCache", MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics()));
    topic = "topic";
    context = new MockProcessorContext(TestUtils.tempDirectory(), null, null, (RecordCollector) null, cache);
    context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, topic));
    cachingStore.init(context, cachingStore);
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 21, Source file: CachingWindowStoreTest.java

Example 4: shouldIterateOverAllSegments

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
@Test
public void shouldIterateOverAllSegments() throws Exception {
    iterator = new SegmentIterator(Arrays.asList(segmentOne, segmentTwo).iterator(),
            hasNextCondition,
            Bytes.wrap("a".getBytes()),
            Bytes.wrap("z".getBytes()));

    assertTrue(iterator.hasNext());
    assertEquals("a", new String(iterator.peekNextKey().get()));
    assertEquals(KeyValue.pair("a", "1"), toStringKeyValue(iterator.next()));

    assertTrue(iterator.hasNext());
    assertEquals("b", new String(iterator.peekNextKey().get()));
    assertEquals(KeyValue.pair("b", "2"), toStringKeyValue(iterator.next()));

    assertTrue(iterator.hasNext());
    assertEquals("c", new String(iterator.peekNextKey().get()));
    assertEquals(KeyValue.pair("c", "3"), toStringKeyValue(iterator.next()));

    assertTrue(iterator.hasNext());
    assertEquals("d", new String(iterator.peekNextKey().get()));
    assertEquals(KeyValue.pair("d", "4"), toStringKeyValue(iterator.next()));

    assertFalse(iterator.hasNext());
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 26, Source file: SegmentIteratorTest.java

Example 5: hasNextCondition

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
@Override
public HasNextCondition hasNextCondition(final Bytes binaryKeyFrom, final Bytes binaryKeyTo, final long from, final long to) {
    return new HasNextCondition() {
        @Override
        public boolean hasNext(final KeyValueIterator<Bytes, ?> iterator) {
            while (iterator.hasNext()) {
                final Bytes bytes = iterator.peekNextKey();
                final Windowed<Bytes> windowedKey = SessionKeySerde.fromBytes(bytes);
                if (windowedKey.key().compareTo(binaryKeyFrom) >= 0
                    && windowedKey.key().compareTo(binaryKeyTo) <= 0
                    && windowedKey.window().end() >= from
                    && windowedKey.window().start() <= to) {
                    return true;
                }
                iterator.next();
            }
            return false;
        }
    };
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 21, Source file: SessionKeySchema.java

Example 6: get

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
private V get(final byte[] rawKey) {
    final Bytes key = Bytes.wrap(rawKey);
    final LRUCacheEntry entry = cache.get(cacheName, key);
    if (entry == null) {
        final byte[] rawValue = underlying.get(key);
        if (rawValue == null) {
            return null;
        }
        // only update the cache if this call is on the streamThread
        // as we don't want other threads to trigger an eviction/flush
        if (Thread.currentThread().equals(streamThread)) {
            cache.put(cacheName, key, new LRUCacheEntry(rawValue));
        }
        return serdes.valueFrom(rawValue);
    }

    if (entry.value == null) {
        return null;
    }

    return serdes.valueFrom(entry.value);
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 23, Source file: CachingKeyValueStore.java

Example 7: main

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-wordcount");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    final StreamsBuilder builder = new StreamsBuilder();

    builder.<String, String>stream("streams-plaintext-input")
           .flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+")))
           .groupBy((key, value) -> value)
           .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"))
           .toStream()
           .to("streams-wordcount-output", Produced.with(Serdes.String(), Serdes.Long()));

    final Topology topology = builder.build();
    final KafkaStreams streams = new KafkaStreams(topology, props);
    final CountDownLatch latch = new CountDownLatch(1);

    // attach shutdown handler to catch control-c
    Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
        @Override
        public void run() {
            streams.close();
            latch.countDown();
        }
    });

    try {
        streams.start();
        latch.await();
    } catch (Throwable e) {
        System.exit(1);
    }
    System.exit(0);
}
 
Developer ID: smarcu, Project: datastreaming-presentation, Lines of code: 38, Source file: WordCount.java

Example 8: shouldPutIfAbsent

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
@Test
public void shouldPutIfAbsent() throws Exception {
    final ThreadCache cache = new ThreadCache("testCache", 100000, new MockStreamsMetrics(new Metrics()));
    final Bytes key = Bytes.wrap(new byte[]{10});
    final byte[] value = {30};
    assertNull(cache.putIfAbsent("n", key, dirtyEntry(value)));
    assertArrayEquals(value, cache.putIfAbsent("n", key, dirtyEntry(new byte[]{8})).value);
    assertArrayEquals(value, cache.get("n", key).value);
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 10, Source file: ThreadCacheTest.java

Example 9: put

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
@Override
public void put(final Bytes key, final byte[] value) {
    long startNs = time.nanoseconds();
    try {
        this.inner.put(key, value);
    } finally {
        this.metrics.recordLatency(this.putTime, startNs, time.nanoseconds());
    }
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 10, Source file: MeteredSegmentedBytesStore.java

Example 10: putAll

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
@Override
public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
    inner.putAll(entries);
    for (KeyValue<Bytes, byte[]> entry : entries) {
        changeLogger.logChange(entry.key, entry.value);
    }
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 8, Source file: ChangeLoggingKeyValueBytesStore.java

Example 11: toList

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
private List<KeyValue<Windowed<String>, Long>> toList(final KeyValueIterator<Bytes, byte[]> iterator) {
    final List<KeyValue<Windowed<String>, Long>> results = new ArrayList<>();
    while (iterator.hasNext()) {
        final KeyValue<Bytes, byte[]> next = iterator.next();
        final KeyValue<Windowed<String>, Long> deserialized
                = KeyValue.pair(SessionKeySerde.from(next.key.get(), Serdes.String().deserializer(), "dummy"), Serdes.Long().deserializer().deserialize("", next.value));
        results.add(deserialized);
    }
    return results;
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 11, Source file: RocksDBSegmentedBytesStoreTest.java

Example 12: fetchPrevious

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
private AGG fetchPrevious(final Bytes rawKey, final Window window) {
    try (final KeyValueIterator<Windowed<Bytes>, byte[]> iterator = bytesStore
            .findSessions(rawKey, window.start(), window.end())) {
        if (!iterator.hasNext()) {
            return null;
        }
        return serdes.valueFrom(iterator.next().value);
    }
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 10, Source file: CachingSessionStore.java

Example 13: put

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
@Override
public void put(final Bytes key, final byte[] value) {
    if (key != null) {
        bytesStore.put(key, value);
        changeLogger.logChange(key, value);
    }
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 8, Source file: ChangeLoggingSegmentedBytesStore.java

Example 14: shouldPeekNextKey

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
@Test
public void shouldPeekNextKey() throws Exception {
    while (allIterator.hasNext()) {
        final Bytes nextKey = allIterator.peekNextKey();
        final KeyValue<Bytes, LRUCacheEntry> next = allIterator.next();
        assertThat(next.key, equalTo(nextKey));
    }
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 9, Source file: FilteredCacheIteratorTest.java

Example 15: shouldPutAll

import org.apache.kafka.common.utils.Bytes; // import the package/class this example depends on
@Test
public void shouldPutAll() throws Exception {
    cache.putAll(Arrays.asList(KeyValue.pair(new byte[] {0}, new LRUCacheEntry(new byte[]{0})),
                               KeyValue.pair(new byte[] {1}, new LRUCacheEntry(new byte[]{1})),
                               KeyValue.pair(new byte[] {2}, new LRUCacheEntry(new byte[]{2}))));

    assertArrayEquals(new byte[]{0}, cache.get(Bytes.wrap(new byte[]{0})).value);
    assertArrayEquals(new byte[]{1}, cache.get(Bytes.wrap(new byte[]{1})).value);
    assertArrayEquals(new byte[]{2}, cache.get(Bytes.wrap(new byte[]{2})).value);
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 11, Source file: NamedCacheTest.java


Note: The org.apache.kafka.common.utils.Bytes class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use should follow the License of the corresponding project. Do not reproduce without permission.