

Java Serializer.serialize Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.common.serialization.Serializer.serialize. If you are wondering how the Java Serializer.serialize method is used in practice, how to call it, or what real examples of it look like, the curated code examples here may help. You can also explore further usage examples of the class this method belongs to, org.apache.kafka.common.serialization.Serializer.


The sections below present 15 code examples of the Serializer.serialize method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
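Before the project examples, here is a minimal, self-contained sketch of the basic call pattern. It is not taken from any of the projects below; the topic name "example-topic" and the String payload are illustrative placeholders. The core idea is that serialize(topic, data) turns a typed value into the byte array that a Kafka producer actually sends.

import java.util.Collections;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringSerializer;

public class SerializerSerializeSketch {
  public static void main(String[] args) {
    // StringSerializer is one of the built-in Serializer<T> implementations in kafka-clients.
    Serializer<String> serializer = new StringSerializer();
    // The Kafka client normally calls configure() itself; calling it directly here just applies an empty config.
    serializer.configure(Collections.emptyMap(), false /* isKey */);

    // serialize(topic, data) returns the raw bytes that would be written to the topic.
    byte[] bytes = serializer.serialize("example-topic", "hello kafka");
    System.out.println("serialized " + bytes.length + " bytes");

    serializer.close();
  }
}

Every example that follows uses this same shape: construct a Serializer, call serialize(topic, value), and then hand the resulting byte[] to a ProducerRecord, a segment wrapper, or a byte buffer.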

Example 1: testSerde

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
@Test
public void testSerde() {
  Serializer<String> stringSerializer = new StringSerializer();
  Deserializer<String> stringDeserializer = new StringDeserializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  Deserializer<LargeMessageSegment> segmentDeserializer = new DefaultSegmentDeserializer();

  String s = LiKafkaClientsTestUtils.getRandomString(100);
  assertEquals(s.length(), 100);
  byte[] stringBytes = stringSerializer.serialize("topic", s);
  assertEquals(stringBytes.length, 100);
  LargeMessageSegment segment =
      new LargeMessageSegment(LiKafkaClientsUtils.randomUUID(), 0, 2, stringBytes.length, ByteBuffer.wrap(stringBytes));
  // String bytes + segment header
  byte[] serializedSegment = segmentSerializer.serialize("topic", segment);
  assertEquals(serializedSegment.length, 1 + stringBytes.length + LargeMessageSegment.SEGMENT_INFO_OVERHEAD + 4);

  LargeMessageSegment deserializedSegment = segmentDeserializer.deserialize("topic", serializedSegment);
  assertEquals(deserializedSegment.messageId, segment.messageId);
  assertEquals(deserializedSegment.messageSizeInBytes, segment.messageSizeInBytes);
  assertEquals(deserializedSegment.numberOfSegments, segment.numberOfSegments);
  assertEquals(deserializedSegment.sequenceNumber, segment.sequenceNumber);
  assertEquals(deserializedSegment.payload.limit(), 100);
  String deserializedString = stringDeserializer.deserialize("topic", deserializedSegment.payloadArray());
  assertEquals(deserializedString.length(), s.length());
}
 
Developer: linkedin, Project: li-apache-kafka-clients, Lines of code: 27, Source: SerializerDeserializerTest.java

Example 2: toBinary

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
public static <K> Bytes toBinary(final Windowed<K> sessionKey, final Serializer<K> serializer, final String topic) {
    final byte[] bytes = serializer.serialize(topic, sessionKey.key());
    ByteBuffer buf = ByteBuffer.allocate(bytes.length + 2 * TIMESTAMP_SIZE);
    buf.put(bytes);
    buf.putLong(sessionKey.window().end());
    buf.putLong(sessionKey.window().start());
    return new Bytes(buf.array());
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 9, Source: SessionKeySerde.java

Example 3: testSerde

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
@Test
public void testSerde() {
  Serializer<String> stringSerializer = new StringSerializer();
  Deserializer<String> stringDeserializer = new StringDeserializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  Deserializer<LargeMessageSegment> segmentDeserializer = new DefaultSegmentDeserializer();

  String s = TestUtils.getRandomString(100);
  assertEquals(s.length(), 100);
  byte[] stringBytes = stringSerializer.serialize("topic", s);
  assertEquals(stringBytes.length, 100);
  LargeMessageSegment segment =
      new LargeMessageSegment(UUID.randomUUID(), 0, 2, stringBytes.length, ByteBuffer.wrap(stringBytes));
  // String bytes + segment header
  byte[] serializedSegment = segmentSerializer.serialize("topic", segment);
  assertEquals(serializedSegment.length, 1 + stringBytes.length + LargeMessageSegment.SEGMENT_INFO_OVERHEAD + 4);

  LargeMessageSegment deserializedSegment = segmentDeserializer.deserialize("topic", serializedSegment);
  assertEquals(deserializedSegment.messageId, segment.messageId);
  assertEquals(deserializedSegment.messageSizeInBytes, segment.messageSizeInBytes);
  assertEquals(deserializedSegment.numberOfSegments, segment.numberOfSegments);
  assertEquals(deserializedSegment.sequenceNumber, segment.sequenceNumber);
  assertEquals(deserializedSegment.payload.limit(), 100);
  String deserializedString = stringDeserializer.deserialize("topic", deserializedSegment.payloadArray());
  assertEquals(deserializedString.length(), s.length());

}
 
Developer: becketqin, Project: likafka-clients, Lines of code: 28, Source: SerializerDeserializerTest.java

Example 4: testSplit

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
@Test
public void testSplit() {
  TopicPartition tp = new TopicPartition("topic", 0);
  UUID id = UUID.randomUUID();
  String message = TestUtils.getRandomString(1000);
  Serializer<String> stringSerializer = new StringSerializer();
  Deserializer<String> stringDeserializer = new StringDeserializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  Deserializer<LargeMessageSegment> segmentDeserializer = new DefaultSegmentDeserializer();
  MessageSplitter splitter = new MessageSplitterImpl(200, segmentSerializer);

  byte[] serializedMessage = stringSerializer.serialize("topic", message);
  List<ProducerRecord<byte[], byte[]>> records = splitter.split("topic", id, serializedMessage);
  assertEquals(records.size(), 5, "Should have 5 segments.");
  MessageAssembler assembler = new MessageAssemblerImpl(10000, 10000, true, segmentDeserializer);
  String assembledMessage = null;
  UUID uuid = null;
  for (int i = 0; i < records.size(); i++) {
    ProducerRecord<byte[], byte[]> record = records.get(i);
    LargeMessageSegment segment = segmentDeserializer.deserialize("topic", record.value());
    if (uuid == null) {
      uuid = segment.messageId;
    } else {
      assertEquals(segment.messageId, uuid, "messageId should match.");
    }
    assertEquals(segment.numberOfSegments, 5, "segment number should be 5");
    assertEquals(segment.messageSizeInBytes, serializedMessage.length, "message size should be the same");
    assertEquals(segment.sequenceNumber, i, "SequenceNumber should match");

    assembledMessage = stringDeserializer.deserialize(null, assembler.assemble(tp, i, record.value()).messageBytes());
  }
  assertEquals(assembledMessage, message, "messages should match.");
}
 
Developer: becketqin, Project: likafka-clients, Lines of code: 34, Source: MessageSplitterTest.java

Example 5: testFilter

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
@Test
public void testFilter() throws Exception {
  // Create consumer record processor
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  String message0 = "message0";
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(),
                           stringSerializer.serialize("topic", message0));

  // Let consumer record 1 be a large message.
  byte[] message1Bytes =
      segmentSerializer.serialize("topic",
                                  TestUtils.createLargeMessageSegment(UUID.randomUUID(), 0, 2, 20, 10));
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message1Bytes);

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  ConsumerRecords<String, String> filteredRecords = consumerRecordsProcessor.process(records);
  ConsumerRecord<String, String> consumerRecord = filteredRecords.iterator().next();
  assertEquals(filteredRecords.count(), 1, "Only one record should be there after filtering.");
  assertEquals(consumerRecord0.topic(), consumerRecord.topic(), "Topic should match");
  assertEquals(consumerRecord0.partition(), consumerRecord.partition(), "partition should match");
  assertTrue(Arrays.equals(consumerRecord0.key(), consumerRecord.key().getBytes()), "key should match");
  assertEquals(consumerRecord0.offset(), consumerRecord.offset(), "Offset should match");
  assertEquals(consumerRecord.value(), "message0", "\"message0\" should be the value");
}
 
Developer: becketqin, Project: likafka-clients, Lines of code: 38, Source: ConsumerRecordsProcessorTest.java

Example 6: write

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
private <T> void write(String topic, Serializer<T> ser, T value, Output output) {
    if (value != null) {
        byte[] valueBytes = ser.serialize(topic, value);
        output.writeInt(valueBytes.length);
        output.write(valueBytes);
    } else {
        output.writeInt(0);
    }
}
 
Developer: fhussonnois, Project: kafkastreams-cep, Lines of code: 10, Source: ComputationStageSerDe.java

Example 7: testSplit

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
@Test
public void testSplit() {
  TopicPartition tp = new TopicPartition("topic", 0);
  UUID id = LiKafkaClientsUtils.randomUUID();
  String message = LiKafkaClientsTestUtils.getRandomString(1000);
  Serializer<String> stringSerializer = new StringSerializer();
  Deserializer<String> stringDeserializer = new StringDeserializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  Deserializer<LargeMessageSegment> segmentDeserializer = new DefaultSegmentDeserializer();
  MessageSplitter splitter = new MessageSplitterImpl(200, segmentSerializer, new UUIDFactory.DefaultUUIDFactory<>());

  byte[] serializedMessage = stringSerializer.serialize("topic", message);
  List<ProducerRecord<byte[], byte[]>> records = splitter.split("topic", id, serializedMessage);
  assertEquals(records.size(), 5, "Should have 5 segments.");
  MessageAssembler assembler = new MessageAssemblerImpl(10000, 10000, true, segmentDeserializer);
  String assembledMessage = null;
  UUID uuid = null;
  for (int i = 0; i < records.size(); i++) {
    ProducerRecord<byte[], byte[]> record = records.get(i);
    LargeMessageSegment segment = segmentDeserializer.deserialize("topic", record.value());
    if (uuid == null) {
      uuid = segment.messageId;
    } else {
      assertEquals(segment.messageId, uuid, "messageId should match.");
    }
    assertEquals(segment.numberOfSegments, 5, "segment number should be 5");
    assertEquals(segment.messageSizeInBytes, serializedMessage.length, "message size should be the same");
    assertEquals(segment.sequenceNumber, i, "SequenceNumber should match");

    assembledMessage = stringDeserializer.deserialize(null, assembler.assemble(tp, i, record.value()).messageBytes());
  }
  assertEquals(assembledMessage, message, "messages should match.");
}
 
Developer: linkedin, Project: li-apache-kafka-clients, Lines of code: 34, Source: MessageSplitterTest.java

Example 8: testFilter

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
@Test
public void testFilter() throws Exception {
  // Create consumer record processor
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  String message0 = "message0";
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(),
                           stringSerializer.serialize("topic", message0));

  // Let consumer record 1 be a large message.
  byte[] message1Bytes =
      segmentSerializer.serialize("topic",
                                  LiKafkaClientsTestUtils.createLargeMessageSegment(LiKafkaClientsUtils.randomUUID(), 0, 2, 20, 10));
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message1Bytes);

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  ConsumerRecords<String, String> filteredRecords = consumerRecordsProcessor.process(records).consumerRecords();
  ConsumerRecord<String, String> consumerRecord = filteredRecords.iterator().next();
  assertEquals(filteredRecords.count(), 1, "Only one record should be there after filtering.");
  assertEquals(consumerRecord0.topic(), consumerRecord.topic(), "Topic should match");
  assertEquals(consumerRecord0.partition(), consumerRecord.partition(), "partition should match");
  assertTrue(Arrays.equals(consumerRecord0.key(), consumerRecord.key().getBytes()), "key should match");
  assertEquals(consumerRecord0.offset(), consumerRecord.offset(), "Offset should match");
  assertEquals(consumerRecord.value(), "message0", "\"message0\" should be the value");
}
 
Developer: linkedin, Project: li-apache-kafka-clients, Lines of code: 38, Source: ConsumerRecordsProcessorTest.java

Example 9: testStartingOffsetWithNormalMessages

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
@Test(expectedExceptions = OffsetNotTrackedException.class)
public void testStartingOffsetWithNormalMessages() throws IOException {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  byte[] message0Bytes = stringSerializer.serialize("topic", "message0");
  byte[] message0WrappedBytes = wrapMessageBytes(segmentSerializer, message0Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 100L, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message0WrappedBytes);

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  consumerRecordsProcessor.process(records).consumerRecords();

  TopicPartition tp = new TopicPartition("topic", 0);
  assertEquals(consumerRecordsProcessor.startingOffset(tp, 100L), 100, "Should return 100 because there are no " +
      "large messages in the partition.");

  // Should throw exception when an offset cannot be found by the offset tracker.
  consumerRecordsProcessor.startingOffset(tp, 0L);
}
 
Developer: linkedin, Project: li-apache-kafka-clients, Lines of code: 29, Source: ConsumerRecordsProcessorTest.java

Example 10: send

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
@Override
public <K, V> void send(final String topic,
                         final K key,
                         final V value,
                         final Integer partition,
                         final Long timestamp,
                         final Serializer<K> keySerializer,
                         final Serializer<V> valueSerializer) {
    checkForException();
    final byte[] keyBytes = keySerializer.serialize(topic, key);
    final byte[] valBytes = valueSerializer.serialize(topic, value);

    final ProducerRecord<byte[], byte[]> serializedRecord =
            new ProducerRecord<>(topic, partition, timestamp, keyBytes, valBytes);

    // counting from 1 to make check further down more natural
    // -> `if (attempt == MAX_SEND_ATTEMPTS)`
    for (int attempt = 1; attempt <= MAX_SEND_ATTEMPTS; ++attempt) {
        try {
            producer.send(serializedRecord, new Callback() {
                @Override
                public void onCompletion(final RecordMetadata metadata, final Exception exception) {
                    if (exception == null) {
                        if (sendException != null) {
                            return;
                        }
                        final TopicPartition tp = new TopicPartition(metadata.topic(), metadata.partition());
                        offsets.put(tp, metadata.offset());
                    } else {
                        if (sendException == null) {
                            sendException = exception;
                            if (sendException instanceof ProducerFencedException) {
                                log.error("{} Error sending record to topic {}. No more offsets will be recorded for this task and it will be closed as it is a zombie.", logPrefix, topic, exception);
                            } else {
                                log.error("{} Error sending record to topic {}. No more offsets will be recorded for this task and the exception will eventually be thrown", logPrefix, topic, exception);
                            }
                        }
                    }
                }
            });
            return;
        } catch (final TimeoutException e) {
            if (attempt == MAX_SEND_ATTEMPTS) {
                throw new StreamsException(String.format("%s Failed to send record to topic %s after %d attempts", logPrefix, topic, attempt));
            }
            log.warn("{} Timeout exception caught when sending record to topic {} attempt {}", logPrefix, topic, attempt);
            Utils.sleep(SEND_RETRY_BACKOFF);
        }

    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 52, Source: RecordCollectorImpl.java

Example 11: wrapMessageBytes

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
private byte[] wrapMessageBytes(Serializer<LargeMessageSegment> segmentSerializer, byte[] messageBytes) {
  return segmentSerializer.serialize("topic",
      new LargeMessageSegment(UUID.randomUUID(), 0, 1, messageBytes.length, ByteBuffer.wrap(messageBytes)));
}
 
Developer: becketqin, Project: likafka-clients, Lines of code: 5, Source: MessageAssemblerTest.java

Example 12: testSafeOffsetWithoutLargeMessage

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
@Test
public void testSafeOffsetWithoutLargeMessage() throws IOException {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  byte[] message0Bytes = stringSerializer.serialize("topic", "message0");
  byte[] message0WrappedBytes = wrapMessageBytes(segmentSerializer, message0Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message0WrappedBytes);

  // Let consumer record 1 be a normal message.
  byte[] message1Bytes = stringSerializer.serialize("topic", "message1");
  byte[] message1WrappedBytes = wrapMessageBytes(segmentSerializer, message1Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message1WrappedBytes);

  // Construct the consumer records.
  TopicPartition tp = new TopicPartition("topic", 0);
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap =
      new HashMap<>();
  recordsMap.put(tp, recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  consumerRecordsProcessor.process(records);
  Map<TopicPartition, OffsetAndMetadata> safeOffsets = consumerRecordsProcessor.safeOffsetsToCommit();
  assertEquals(safeOffsets.size(), 1, "Safe offsets should contain one entry");
  assertEquals(safeOffsets.get(tp).offset(), 2, "Safe offset of topic partition 0 should be 2");
  assertEquals(consumerRecordsProcessor.safeOffset(tp, 0L).longValue(), 1, "safe offset should be 1");
  assertEquals(consumerRecordsProcessor.safeOffset(tp, 1L).longValue(), 2, "safe offset should be 2");

  Map<TopicPartition, OffsetAndMetadata> offsetMap = new HashMap<>();
  offsetMap.put(tp, new OffsetAndMetadata(1L));
  safeOffsets = consumerRecordsProcessor.safeOffsetsToCommit(offsetMap, false);
  assertEquals(safeOffsets.get(tp).offset(), 1L, "Safe offset of topic partition 0 should be 1");

  offsetMap.put(tp, new OffsetAndMetadata(2L));
  safeOffsets = consumerRecordsProcessor.safeOffsetsToCommit(offsetMap, false);
  assertEquals(safeOffsets.get(tp).offset(), 2L, "Safe offset of topic partition 0 should be 2");
}
 
Developer: becketqin, Project: likafka-clients, Lines of code: 46, Source: ConsumerRecordsProcessorTest.java

Example 13: testEviction

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
@Test
public void testEviction() {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  // Create a splitter for large messages.
  MessageSplitter splitter = new MessageSplitterImpl(500, segmentSerializer);

  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();
  consumerRecordsProcessor.process(getConsumerRecords());
  // The offset tracker now has 2, 4, 5 in it.
  TopicPartition tp = new TopicPartition("topic", 0);

  UUID largeMessageId = UUID.randomUUID();
  byte[] largeMessage1Bytes = stringSerializer.serialize("topic", TestUtils.getRandomString(600));
  List<ProducerRecord<byte[], byte[]>> splitLargeMessage =
      splitter.split("topic", largeMessageId, largeMessage1Bytes);

  // Test evict
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<ConsumerRecord<byte[], byte[]>>();
  // Let consumer record 6 be a large message segment.
  ConsumerRecord<byte[], byte[]> consumerRecord6 =
      new ConsumerRecord<>("topic", 0, 6, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage.get(0).value());
  // Let consumer record 7 be a normal record.
  ConsumerRecord<byte[], byte[]> consumerRecord7 =
      new ConsumerRecord<>("topic", 0, 7, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(),
                           stringSerializer.serialize("topic", "message7"));
  // Let consumer record 8 complete consumer record 6.
  ConsumerRecord<byte[], byte[]> consumerRecord8 =
      new ConsumerRecord<>("topic", 0, 8, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage.get(1).value());

  recordList.add(consumerRecord6);
  recordList.add(consumerRecord7);
  recordList.add(consumerRecord8);

  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);
  consumerRecordsProcessor.process(records);
  // Now the offset tracker should have 4, 5, 6, 8 inside it.
  assertEquals(consumerRecordsProcessor.safeOffset(tp, 7L).longValue(), 6, "safe offset should be 6");

  try {
    consumerRecordsProcessor.safeOffset(tp, 2L);
    fail("Should throw exception because offset for message 2 should have been evicted.");
  } catch (OffsetNotTrackedException onte) {
    assertTrue(onte.getMessage().startsWith("Offset 2 for partition"));
  }
}
 
Developer: becketqin, Project: likafka-clients, Lines of code: 49, Source: ConsumerRecordsProcessorTest.java

Example 14: getConsumerRecords

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
private ConsumerRecords<byte[], byte[]> getConsumerRecords() {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  // Create two large messages.
  MessageSplitter splitter = new MessageSplitterImpl(500, segmentSerializer);

  UUID largeMessageId1 = UUID.randomUUID();
  byte[] largeMessage1Bytes = stringSerializer.serialize("topic", TestUtils.getRandomString(600));
  List<ProducerRecord<byte[], byte[]>> splitLargeMessage1 =
      splitter.split("topic", largeMessageId1, largeMessage1Bytes);

  UUID largeMessageId2 = UUID.randomUUID();
  byte[] largeMessage2Bytes = stringSerializer.serialize("topic", TestUtils.getRandomString(600));
  List<ProducerRecord<byte[], byte[]>> splitLargeMessage2 =
      splitter.split("topic", largeMessageId2, largeMessage2Bytes);

  // Let consumer record 0 be a normal record.
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), stringSerializer.serialize("topic", "message0"));
  // Let consumer record 1 be a large message segment
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage1.get(0).value());
  // Let consumer record 2 be a normal message
  ConsumerRecord<byte[], byte[]> consumerRecord2 =
      new ConsumerRecord<>("topic", 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), stringSerializer.serialize("topic", "message1"));
  // Let record 3 be a new large message segment
  ConsumerRecord<byte[], byte[]> consumerRecord3 =
      new ConsumerRecord<>("topic", 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage2.get(0).value());
  // Let record 4 complete record 3
  ConsumerRecord<byte[], byte[]> consumerRecord4 =
      new ConsumerRecord<>("topic", 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage2.get(1).value());
  // Let record 5 complete record 1
  ConsumerRecord<byte[], byte[]> consumerRecord5 =
      new ConsumerRecord<>("topic", 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage1.get(1).value());

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  recordList.add(consumerRecord2);
  recordList.add(consumerRecord3);
  recordList.add(consumerRecord4);
  recordList.add(consumerRecord5);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap =
      new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  return new ConsumerRecords<>(recordsMap);
}
 
Developer: becketqin, Project: likafka-clients, Lines of code: 49, Source: ConsumerRecordsProcessorTest.java

Example 15: wrapMessageBytes

import org.apache.kafka.common.serialization.Serializer; // import the package/class this method depends on
private byte[] wrapMessageBytes(Serializer<LargeMessageSegment> segmentSerializer, byte[] messageBytes) {
  return segmentSerializer.serialize("topic",
                                     new LargeMessageSegment(UUID.randomUUID(), 0, 1, messageBytes.length,
                                                             ByteBuffer.wrap(messageBytes)));
}
 
Developer: becketqin, Project: likafka-clients, Lines of code: 6, Source: ConsumerRecordsProcessorTest.java


Note: The org.apache.kafka.common.serialization.Serializer.serialize method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce without permission.