

Java TimestampType.CREATE_TIME Property Code Examples

This article collects typical usage examples of the org.apache.kafka.common.record.TimestampType.CREATE_TIME property in Java. If you are wondering what TimestampType.CREATE_TIME is, what it is used for, or how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.kafka.common.record.TimestampType.


The following presents 15 code examples of the TimestampType.CREATE_TIME property, sorted by popularity by default.
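Before turning to the examples, a minimal sketch of what the property means: TimestampType.CREATE_TIME indicates that a record's timestamp was assigned by the producer when the record was created, as opposed to TimestampType.LOG_APPEND_TIME, where the broker assigns the timestamp when the record is appended to the log. The snippet below builds a ConsumerRecord carrying this timestamp type, using the same Kafka 0.11-era constructor that appears throughout the examples; the topic name, offset, checksum, and size arguments are hypothetical values chosen only for illustration.

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.record.TimestampType;

public class CreateTimeExample {
    public static void main(String[] args) {
        // The producer-assigned creation timestamp.
        long createTime = System.currentTimeMillis();

        // Arguments: topic, partition, offset, timestamp, timestampType,
        // checksum, serializedKeySize, serializedValueSize, key, value.
        ConsumerRecord<String, String> record = new ConsumerRecord<>(
                "demo-topic", 0, 0L, createTime, TimestampType.CREATE_TIME,
                0L, 3, 6, "key", "value1");

        // Prints the timestamp type alongside the raw timestamp value.
        System.out.println(record.timestampType() + " @ " + record.timestamp());
    }
}
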

Example 1: testTimestampPropagation

@Test
public void testTimestampPropagation() throws Exception {
    final Long timestamp = System.currentTimeMillis();
    final TimestampType timestampType = TimestampType.CREATE_TIME;

    expectInitializeTask();
    expectConsumerPoll(1, timestamp, timestampType);
    expectConversionAndTransformation(1);

    Capture<Collection<SinkRecord>> records = EasyMock.newCapture(CaptureType.ALL);

    sinkTask.put(EasyMock.capture(records));

    PowerMock.replayAll();

    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    workerTask.iteration();

    SinkRecord record = records.getValue().iterator().next();

    assertEquals(timestamp, record.timestamp());
    assertEquals(timestampType, record.timestampType());

    PowerMock.verifyAll();
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 26, Source: WorkerSinkTaskTest.java

Example 2: testSimpleMock

@Test
public void testSimpleMock() {
    consumer.subscribe(Arrays.asList("test"), new NoOpConsumerRebalanceListener());
    assertEquals(0, consumer.poll(1000).count());
    consumer.rebalance(Arrays.asList(new TopicPartition("test", 0), new TopicPartition("test", 1)));
    // Mock consumers need to seek manually since they cannot automatically reset offsets
    HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(new TopicPartition("test", 0), 0L);
    beginningOffsets.put(new TopicPartition("test", 1), 0L);
    consumer.updateBeginningOffsets(beginningOffsets);
    consumer.seek(new TopicPartition("test", 0), 0);
    ConsumerRecord<String, String> rec1 = new ConsumerRecord<String, String>("test", 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, "key1", "value1");
    ConsumerRecord<String, String> rec2 = new ConsumerRecord<String, String>("test", 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, "key2", "value2");
    consumer.addRecord(rec1);
    consumer.addRecord(rec2);
    ConsumerRecords<String, String> recs = consumer.poll(1);
    Iterator<ConsumerRecord<String, String>> iter = recs.iterator();
    assertEquals(rec1, iter.next());
    assertEquals(rec2, iter.next());
    assertFalse(iter.hasNext());
    assertEquals(2L, consumer.position(new TopicPartition("test", 0)));
    consumer.commitSync();
    assertEquals(2L, consumer.committed(new TopicPartition("test", 0)).offset());
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 24, Source: MockConsumerTest.java

Example 3: iterator

@Test
public void iterator() throws Exception {

    Map<TopicPartition, List<ConsumerRecord<Integer, String>>> records = new LinkedHashMap<>();

    String topic = "topic";
    records.put(new TopicPartition(topic, 0), new ArrayList<ConsumerRecord<Integer, String>>());
    ConsumerRecord<Integer, String> record1 = new ConsumerRecord<>(topic, 1, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 1, "value1");
    ConsumerRecord<Integer, String> record2 = new ConsumerRecord<>(topic, 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 2, "value2");
    records.put(new TopicPartition(topic, 1), Arrays.asList(record1, record2));
    records.put(new TopicPartition(topic, 2), new ArrayList<ConsumerRecord<Integer, String>>());

    ConsumerRecords<Integer, String> consumerRecords = new ConsumerRecords<>(records);
    Iterator<ConsumerRecord<Integer, String>> iter = consumerRecords.iterator();

    int c = 0;
    for (; iter.hasNext(); c++) {
        ConsumerRecord<Integer, String> record = iter.next();
        assertEquals(1, record.partition());
        assertEquals(topic, record.topic());
        assertEquals(c, record.offset());
    }
    assertEquals(2, c);
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 24, Source: ConsumerRecordsTest.java

Example 4: map

static MapTestCase map() {
  MapTestCase testCase = new MapTestCase();

  testCase.map = ImmutableMap.of(
      "firstName", "example",
      "lastName", "user",
      "email", "[email protected]",
      "age", 27
  );
  testCase.record = new SinkRecord(
      "testing",
      1,
      null,
      null,
      null,
      testCase.map,
      1L,
      1484897702123L,
      TimestampType.CREATE_TIME
  );

  return testCase;
}
 
Developer ID: jcustenborder, Project: kafka-connect-solr, Lines: 23, Source: Records.java

Example 5: testInvalidDefaultRecordBatch

@Test
public void testInvalidDefaultRecordBatch() {
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    ByteBufferOutputStream out = new ByteBufferOutputStream(buffer);

    MemoryRecordsBuilder builder = new MemoryRecordsBuilder(out,
                                                            DefaultRecordBatch.CURRENT_MAGIC_VALUE,
                                                            CompressionType.NONE,
                                                            TimestampType.CREATE_TIME,
                                                            0L, 10L, 0L, (short) 0, 0, false, false, 0, 1024);
    builder.append(10L, "key".getBytes(), "value".getBytes());
    builder.close();
    buffer.flip();

    // Garble the CRC
    buffer.position(17);
    buffer.put("beef".getBytes());
    buffer.position(0);

    subscriptions.assignFromUser(singleton(tp1));
    subscriptions.seek(tp1, 0);

    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fetchResponse(tp1, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(0);

    // fetchedRecords() should always throw an exception due to the bad batch.
    for (int i = 0; i < 2; i++) {
        try {
            fetcher.fetchedRecords();
            fail("fetchedRecords should have raised KafkaException");
        } catch (KafkaException e) {
            assertEquals(0, subscriptions.position(tp1).longValue());
        }
    }
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 37, Source: FetcherTest.java

Example 6: testNullChecksumInConstructor

@Test
@SuppressWarnings("deprecation")
public void testNullChecksumInConstructor() {
    String key = "key";
    String value = "value";
    long timestamp = 242341324L;
    ConsumerRecord<String, String> record = new ConsumerRecord<>("topic", 0, 23L, timestamp,
            TimestampType.CREATE_TIME, null, key.length(), value.length(), key, value, new RecordHeaders());
    assertEquals(DefaultRecord.computePartialChecksum(timestamp, key.length(), value.length()), record.checksum());
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 10, Source: ConsumerRecordTest.java

Example 7: testFilter

@Test
public void testFilter() throws Exception {
  // Create consumer record processor
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  String message0 = "message0";
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(),
                           stringSerializer.serialize("topic", message0));

  // Let consumer record 1 be a large message.
  byte[] message1Bytes =
      segmentSerializer.serialize("topic",
                                  TestUtils.createLargeMessageSegment(UUID.randomUUID(), 0, 2, 20, 10));
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message1Bytes);

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  ConsumerRecords<String, String> filteredRecords = consumerRecordsProcessor.process(records);
  ConsumerRecord<String, String> consumerRecord = filteredRecords.iterator().next();
  assertEquals(filteredRecords.count(), 1, "Only one record should be there after filtering.");
  assertEquals(consumerRecord0.topic(), consumerRecord.topic(), "Topic should match");
  assertEquals(consumerRecord0.partition(), consumerRecord.partition(), "partition should match");
  assertTrue(Arrays.equals(consumerRecord0.key(), consumerRecord.key().getBytes()), "key should match");
  assertEquals(consumerRecord0.offset(), consumerRecord.offset(), "Offset should match");
  assertEquals(consumerRecord.value(), "message0", "\"message0\" should be the value");
}
 
Developer ID: becketqin, Project: likafka-clients, Lines: 37, Source: ConsumerRecordsProcessorTest.java

Example 8: testStartingOffsetWithNormalMessages

@Test(expectedExceptions = OffsetNotTrackedException.class)
public void testStartingOffsetWithNormalMessages() throws IOException {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  byte[] message0Bytes = stringSerializer.serialize("topic", "message0");
  byte[] message0WrappedBytes = wrapMessageBytes(segmentSerializer, message0Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 100L, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message0WrappedBytes);

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  consumerRecordsProcessor.process(records);

  TopicPartition tp = new TopicPartition("topic", 0);
  assertEquals(consumerRecordsProcessor.startingOffset(tp, 100L), 100, "Should return 100 because there are no " +
      "large messages in the partition.");

  // Should throw exception when an offset cannot be found by the offset tracker.
  consumerRecordsProcessor.startingOffset(tp, 0L);
}
 
Developer ID: becketqin, Project: likafka-clients, Lines: 28, Source: ConsumerRecordsProcessorTest.java

Example 9: testFilter

@Test
public void testFilter() throws Exception {
  // Create consumer record processor
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  String message0 = "message0";
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(),
                           stringSerializer.serialize("topic", message0));

  // Let consumer record 1 be a large message.
  byte[] message1Bytes =
      segmentSerializer.serialize("topic",
                                  LiKafkaClientsTestUtils.createLargeMessageSegment(LiKafkaClientsUtils.randomUUID(), 0, 2, 20, 10));
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message1Bytes);

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  ConsumerRecords<String, String> filteredRecords = consumerRecordsProcessor.process(records).consumerRecords();
  ConsumerRecord<String, String> consumerRecord = filteredRecords.iterator().next();
  assertEquals(filteredRecords.count(), 1, "Only one record should be there after filtering.");
  assertEquals(consumerRecord0.topic(), consumerRecord.topic(), "Topic should match");
  assertEquals(consumerRecord0.partition(), consumerRecord.partition(), "partition should match");
  assertTrue(Arrays.equals(consumerRecord0.key(), consumerRecord.key().getBytes()), "key should match");
  assertEquals(consumerRecord0.offset(), consumerRecord.offset(), "Offset should match");
  assertEquals(consumerRecord.value(), "message0", "\"message0\" should be the value");
}
 
Developer ID: linkedin, Project: li-apache-kafka-clients, Lines: 37, Source: ConsumerRecordsProcessorTest.java

Example 10: testStartingOffsetWithNormalMessages

@Test(expectedExceptions = OffsetNotTrackedException.class)
public void testStartingOffsetWithNormalMessages() throws IOException {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  byte[] message0Bytes = stringSerializer.serialize("topic", "message0");
  byte[] message0WrappedBytes = wrapMessageBytes(segmentSerializer, message0Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 100L, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message0WrappedBytes);

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  consumerRecordsProcessor.process(records).consumerRecords();

  TopicPartition tp = new TopicPartition("topic", 0);
  assertEquals(consumerRecordsProcessor.startingOffset(tp, 100L), 100, "Should return 100 because there are no " +
      "large messages in the partition.");

  // Should throw exception when an offset cannot be found by the offset tracker.
  consumerRecordsProcessor.startingOffset(tp, 0L);
}
 
Developer ID: linkedin, Project: li-apache-kafka-clients, Lines: 28, Source: ConsumerRecordsProcessorTest.java

Example 11: testNullValue

@Test
public void testNullValue() {
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();
  ConsumerRecord<byte[], byte[]> consumerRecord =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), null);
  ConsumerRecords<byte[], byte[]> consumerRecords =
      new ConsumerRecords<>(Collections.singletonMap(new TopicPartition("topic", 0), Collections.singletonList(consumerRecord)));
  ConsumerRecords<String, String> processedRecords = consumerRecordsProcessor.process(consumerRecords).consumerRecords();
  assertNull(processedRecords.iterator().next().value());
}
 
Developer ID: linkedin, Project: li-apache-kafka-clients, Lines: 10, Source: ConsumerRecordsProcessorTest.java

Example 12: struct

static StructTestCase struct() {
  StructTestCase testCase = new StructTestCase();

  Schema schema = SchemaBuilder.struct()
      .name("Testing")
      .field("firstName", Schema.OPTIONAL_STRING_SCHEMA)
      .field("lastName", Schema.OPTIONAL_STRING_SCHEMA)
      .field("email", Schema.OPTIONAL_STRING_SCHEMA)
      .field("age", Schema.OPTIONAL_INT32_SCHEMA)
      .build();
  testCase.struct = new Struct(schema)
      .put("firstName", "example")
      .put("lastName", "user")
      .put("email", "[email protected]")
      .put("age", 27);
  testCase.record = new SinkRecord(
      "testing",
      1,
      null,
      null,
      null,
      testCase.struct,
      2L,
      1484897702123L,
      TimestampType.CREATE_TIME
  );

  return testCase;
}
 
Developer ID: jcustenborder, Project: kafka-connect-solr, Lines: 29, Source: Records.java

Example 13: consumerRecord

private static ConsumerRecord<String, byte[]> consumerRecord(long offset, String key, byte[] value) {
    return new ConsumerRecord<>(STATUS_TOPIC, 0, offset, System.currentTimeMillis(),
            TimestampType.CREATE_TIME, 0L, 0, 0, key, value);
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 4, Source: KafkaStatusBackingStoreTest.java

Example 14: testSafeOffsetWithoutLargeMessage

@Test
public void testSafeOffsetWithoutLargeMessage() throws IOException {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer records 0 and 1 be normal records.
  byte[] message0Bytes = stringSerializer.serialize("topic", "message0");
  byte[] message0WrappedBytes = wrapMessageBytes(segmentSerializer, message0Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message0WrappedBytes);

  // Let consumer record 1 be a normal message.
  byte[] message1Bytes = stringSerializer.serialize("topic", "message1");
  byte[] message1WrappedBytes = wrapMessageBytes(segmentSerializer, message1Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message1WrappedBytes);

  // Construct the consumer records.
  TopicPartition tp = new TopicPartition("topic", 0);
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap =
      new HashMap<>();
  recordsMap.put(tp, recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  consumerRecordsProcessor.process(records);
  Map<TopicPartition, OffsetAndMetadata> safeOffsets = consumerRecordsProcessor.safeOffsetsToCommit();
  assertEquals(safeOffsets.size(), 1, "Safe offsets should contain one entry");
  assertEquals(safeOffsets.get(tp).offset(), 2, "Safe offset of topic partition 0 should be 2");
  assertEquals(consumerRecordsProcessor.safeOffset(tp, 0L).longValue(), 1, "safe offset should be 1");
  assertEquals(consumerRecordsProcessor.safeOffset(tp, 1L).longValue(), 2, "safe offset should be 2");

  Map<TopicPartition, OffsetAndMetadata> offsetMap = new HashMap<>();
  offsetMap.put(tp, new OffsetAndMetadata(1L));
  safeOffsets = consumerRecordsProcessor.safeOffsetsToCommit(offsetMap, false);
  assertEquals(safeOffsets.get(tp).offset(), 1L, "Safe offset of topic partition 0 should be 1");

  offsetMap.put(tp, new OffsetAndMetadata(2L));
  safeOffsets = consumerRecordsProcessor.safeOffsetsToCommit(offsetMap, false);
  assertEquals(safeOffsets.get(tp).offset(), 2L, "Safe offset of topic partition 0 should be 2");
}
 
Developer ID: becketqin, Project: likafka-clients, Lines: 45, Source: ConsumerRecordsProcessorTest.java

Example 15: testEviction

@Test
public void testEviction() {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  // Create two large messages.
  MessageSplitter splitter = new MessageSplitterImpl(500, segmentSerializer);

  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();
  consumerRecordsProcessor.process(getConsumerRecords());
  // The offset tracker now has 2, 4, 5 in it.
  TopicPartition tp = new TopicPartition("topic", 0);

  UUID largeMessageId = UUID.randomUUID();
  byte[] largeMessage1Bytes = stringSerializer.serialize("topic", TestUtils.getRandomString(600));
  List<ProducerRecord<byte[], byte[]>> splitLargeMessage =
      splitter.split("topic", largeMessageId, largeMessage1Bytes);

  // Test evict
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<ConsumerRecord<byte[], byte[]>>();
  // Let consumer record 6 be a large message segment.
  ConsumerRecord<byte[], byte[]> consumerRecord6 =
      new ConsumerRecord<>("topic", 0, 6, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage.get(0).value());
  // Let consumer record 7 be a normal record.
  ConsumerRecord<byte[], byte[]> consumerRecord7 =
      new ConsumerRecord<>("topic", 0, 7, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(),
                           stringSerializer.serialize("topic", "message7"));
  // Let consumer record 8 complete consumer record 6.
  ConsumerRecord<byte[], byte[]> consumerRecord8 =
      new ConsumerRecord<>("topic", 0, 8, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage.get(1).value());

  recordList.add(consumerRecord6);
  recordList.add(consumerRecord7);
  recordList.add(consumerRecord8);

  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);
  consumerRecordsProcessor.process(records);
  // Now the offset tracker should have 4, 5, 6, 8 inside it.
  assertEquals(consumerRecordsProcessor.safeOffset(tp, 7L).longValue(), 6, "safe offset should be 6");

  try {
    consumerRecordsProcessor.safeOffset(tp, 2L);
    fail("Should throw exception because offset for message 2 should have been evicted.");
  } catch (OffsetNotTrackedException onte) {
    assertTrue(onte.getMessage().startsWith("Offset 2 for partition"));
  }
}
 
Developer ID: becketqin, Project: likafka-clients, Lines: 48, Source: ConsumerRecordsProcessorTest.java


Note: The org.apache.kafka.common.record.TimestampType.CREATE_TIME examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use should follow each project's license. Do not reproduce without permission.