This article collects typical usage examples of the Java method org.apache.kafka.test.TestUtils.toList. If you are asking yourself: what exactly does TestUtils.toList do, how is it used, and where can I find examples of it? Then the curated code examples below should help. You can also explore further usage of its enclosing class, org.apache.kafka.test.TestUtils.
Below are 12 code examples of TestUtils.toList, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
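All of the examples share one pattern: TestUtils.toList drains a lazy Iterable (such as Records.batches() or a RecordBatch) into a List so the test can index and count its elements. Here is a minimal sketch of what the helper presumably looks like, reconstructed from its call sites below rather than copied from Kafka's source:

import java.util.ArrayList;
import java.util.List;

// Hypothetical reconstruction of TestUtils.toList, inferred from usage.
public static <T> List<T> toList(Iterable<? extends T> iterable) {
    // materialize the iterable so tests can call size() and get(i)
    List<T> list = new ArrayList<>();
    for (T item : iterable)
        list.add(item);
    return list;
}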
Example 1: testFilterToPreservesPartitionLeaderEpoch
import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testFilterToPreservesPartitionLeaderEpoch() {
    if (magic >= RecordBatch.MAGIC_VALUE_V2) {
        int partitionLeaderEpoch = 67;

        ByteBuffer buffer = ByteBuffer.allocate(2048);
        MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic, compression, TimestampType.CREATE_TIME,
                0L, RecordBatch.NO_TIMESTAMP, partitionLeaderEpoch);
        builder.append(10L, null, "a".getBytes());
        builder.append(11L, "1".getBytes(), "b".getBytes());
        builder.append(12L, null, "c".getBytes());

        ByteBuffer filtered = ByteBuffer.allocate(2048);
        builder.build().filterTo(new TopicPartition("foo", 0), new RetainNonNullKeysFilter(), filtered,
                Integer.MAX_VALUE, BufferSupplier.NO_CACHING);

        filtered.flip();
        MemoryRecords filteredRecords = MemoryRecords.readableRecords(filtered);

        List<MutableRecordBatch> batches = TestUtils.toList(filteredRecords.batches());
        assertEquals(1, batches.size());

        MutableRecordBatch firstBatch = batches.get(0);
        assertEquals(partitionLeaderEpoch, firstBatch.partitionLeaderEpoch());
    }
}
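Examples 1, 11, and 12 pass a RetainNonNullKeysFilter that this article never shows. A plausible sketch, assuming it implements the same two-method MemoryRecords.RecordFilter contract that Example 10 overrides inline:

// Hypothetical filter: keep only records that carry a key.
private static class RetainNonNullKeysFilter extends MemoryRecords.RecordFilter {
    @Override
    protected boolean shouldDiscard(RecordBatch batch) {
        return false; // never drop a whole batch up front
    }

    @Override
    protected boolean shouldRetain(RecordBatch batch, Record record) {
        return record.hasKey(); // keep only records with a non-null key
    }
}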
Example 2: produceRequestMatcher
import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
private MockClient.RequestMatcher produceRequestMatcher(final TopicPartition tp,
                                                        final ProducerIdAndEpoch producerIdAndEpoch,
                                                        final int sequence,
                                                        final boolean isTransactional) {
    return new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            if (!(body instanceof ProduceRequest))
                return false;

            ProduceRequest request = (ProduceRequest) body;
            Map<TopicPartition, MemoryRecords> recordsMap = request.partitionRecordsOrFail();
            MemoryRecords records = recordsMap.get(tp);
            if (records == null)
                return false;

            List<MutableRecordBatch> batches = TestUtils.toList(records.batches());
            if (batches.isEmpty() || batches.size() > 1)
                return false;

            MutableRecordBatch batch = batches.get(0);
            return batch.baseOffset() == 0L &&
                    batch.baseSequence() == sequence &&
                    batch.producerId() == producerIdAndEpoch.producerId &&
                    batch.producerEpoch() == producerIdAndEpoch.epoch &&
                    batch.isTransactional() == isTransactional;
        }
    };
}
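A hypothetical way to wire this matcher up; the client field, the tp0 partition, and the produceResponse(...) helper are assumed to exist elsewhere in the test class and are not part of the original example:

// MockClient only answers the request if the matcher accepts it, so the test
// fails fast when the produce request does not carry the expected single batch.
client.prepareResponse(produceRequestMatcher(tp0, producerIdAndEpoch, 0, true),
        produceResponse(tp0, 0L, Errors.NONE, 0));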
Example 3: buildDefaultRecordBatchWithSequenceWrapAround
import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void buildDefaultRecordBatchWithSequenceWrapAround() {
    long pid = 23423L;
    short epoch = 145;
    int baseSequence = Integer.MAX_VALUE - 1;

    ByteBuffer buffer = ByteBuffer.allocate(2048);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE,
            TimestampType.CREATE_TIME, 1234567L, RecordBatch.NO_TIMESTAMP, pid, epoch, baseSequence);
    builder.appendWithOffset(1234567, 1L, "a".getBytes(), "v".getBytes());
    builder.appendWithOffset(1234568, 2L, "b".getBytes(), "v".getBytes());
    builder.appendWithOffset(1234569, 3L, "c".getBytes(), "v".getBytes());

    MemoryRecords records = builder.build();
    List<MutableRecordBatch> batches = TestUtils.toList(records.batches());
    assertEquals(1, batches.size());
    RecordBatch batch = batches.get(0);

    assertEquals(pid, batch.producerId());
    assertEquals(epoch, batch.producerEpoch());
    assertEquals(baseSequence, batch.baseSequence());
    assertEquals(0, batch.lastSequence());

    List<Record> allRecords = TestUtils.toList(batch);
    assertEquals(3, allRecords.size());
    assertEquals(Integer.MAX_VALUE - 1, allRecords.get(0).sequence());
    assertEquals(Integer.MAX_VALUE, allRecords.get(1).sequence());
    assertEquals(0, allRecords.get(2).sequence());
}
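The assertions (Integer.MAX_VALUE - 1, Integer.MAX_VALUE, 0) imply that sequence numbers span the full non-negative int range and wrap back to 0 after Integer.MAX_VALUE. A minimal sketch of that increment rule, offered as an assumption about the batch internals rather than a quote of Kafka's code:

static int incrementSequence(int sequence, int increment) {
    // wrap from Integer.MAX_VALUE back to 0 instead of overflowing to a negative value
    if (sequence > Integer.MAX_VALUE - increment)
        return increment - (Integer.MAX_VALUE - sequence) - 1;
    return sequence + increment;
}

With baseSequence = Integer.MAX_VALUE - 1, increments of 1 and 2 yield Integer.MAX_VALUE and 0, matching the three sequences the test expects.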
Example 4: testReadAndWriteControlBatch
import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testReadAndWriteControlBatch() {
    long producerId = 1L;
    short producerEpoch = 0;
    int coordinatorEpoch = 15;

    ByteBuffer buffer = ByteBuffer.allocate(128);
    MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE,
            CompressionType.NONE, TimestampType.CREATE_TIME, 0L, RecordBatch.NO_TIMESTAMP, producerId,
            producerEpoch, RecordBatch.NO_SEQUENCE, true, true, RecordBatch.NO_PARTITION_LEADER_EPOCH,
            buffer.remaining());

    EndTransactionMarker marker = new EndTransactionMarker(ControlRecordType.COMMIT, coordinatorEpoch);
    builder.appendEndTxnMarker(System.currentTimeMillis(), marker);
    MemoryRecords records = builder.build();

    List<MutableRecordBatch> batches = TestUtils.toList(records.batches());
    assertEquals(1, batches.size());

    MutableRecordBatch batch = batches.get(0);
    assertTrue(batch.isControlBatch());

    List<Record> logRecords = TestUtils.toList(records.records());
    assertEquals(1, logRecords.size());
    Record commitRecord = logRecords.get(0);
    assertEquals(marker, EndTransactionMarker.deserialize(commitRecord));
}
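As a follow-up check one could also parse the control type straight from the record key; the ControlRecordType.parse call is assumed here from the class's public API, not taken from the original test:

// a control record encodes its type in the record key
assertEquals(ControlRecordType.COMMIT, ControlRecordType.parse(commitRecord.key()));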
Example 5: assertGenericRecordBatchData
import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
private void assertGenericRecordBatchData(RecordBatch batch, long baseOffset, long maxTimestamp,
                                          SimpleRecord... records) {
    assertEquals(magic, batch.magic());
    assertEquals(compression, batch.compressionType());

    if (magic == MAGIC_VALUE_V0) {
        assertEquals(NO_TIMESTAMP_TYPE, batch.timestampType());
    } else {
        assertEquals(CREATE_TIME, batch.timestampType());
        assertEquals(maxTimestamp, batch.maxTimestamp());
    }

    assertEquals(baseOffset + records.length - 1, batch.lastOffset());
    if (magic >= MAGIC_VALUE_V2)
        assertEquals(Integer.valueOf(records.length), batch.countOrNull());

    assertEquals(baseOffset, batch.baseOffset());
    assertTrue(batch.isValid());

    List<Record> batchRecords = TestUtils.toList(batch);
    for (int i = 0; i < records.length; i++) {
        assertEquals(baseOffset + i, batchRecords.get(i).offset());
        assertEquals(records[i].key(), batchRecords.get(i).key());
        assertEquals(records[i].value(), batchRecords.get(i).value());
        if (magic == MAGIC_VALUE_V0)
            assertEquals(NO_TIMESTAMP, batchRecords.get(i).timestamp());
        else
            assertEquals(records[i].timestamp(), batchRecords.get(i).timestamp());
    }
}
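A hypothetical call site for the helper above; the SimpleRecord values and the batch are illustrative only, not taken from the original test:

SimpleRecord[] expected = new SimpleRecord[] {
        new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
        new SimpleRecord(2L, "b".getBytes(), "2".getBytes())
};
// batch built elsewhere with base offset 0 and max timestamp 2
assertGenericRecordBatchData(batch, 0L, 2L, expected);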
Example 6: testBuildEndTxnMarker
import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testBuildEndTxnMarker() {
    if (magic >= RecordBatch.MAGIC_VALUE_V2) {
        long producerId = 73;
        short producerEpoch = 13;
        long initialOffset = 983L;
        int coordinatorEpoch = 347;
        int partitionLeaderEpoch = 29;

        EndTransactionMarker marker = new EndTransactionMarker(ControlRecordType.COMMIT, coordinatorEpoch);
        MemoryRecords records = MemoryRecords.withEndTransactionMarker(initialOffset, System.currentTimeMillis(),
                partitionLeaderEpoch, producerId, producerEpoch, marker);

        // verify that buffer allocation was precise
        assertEquals(records.buffer().remaining(), records.buffer().capacity());

        List<MutableRecordBatch> batches = TestUtils.toList(records.batches());
        assertEquals(1, batches.size());

        RecordBatch batch = batches.get(0);
        assertTrue(batch.isControlBatch());
        assertEquals(producerId, batch.producerId());
        assertEquals(producerEpoch, batch.producerEpoch());
        assertEquals(initialOffset, batch.baseOffset());
        assertEquals(partitionLeaderEpoch, batch.partitionLeaderEpoch());
        assertTrue(batch.isValid());

        List<Record> createdRecords = TestUtils.toList(batch);
        assertEquals(1, createdRecords.size());

        Record record = createdRecords.get(0);
        assertTrue(record.isValid());
        EndTransactionMarker deserializedMarker = EndTransactionMarker.deserialize(record);
        assertEquals(ControlRecordType.COMMIT, deserializedMarker.controlType());
        assertEquals(coordinatorEpoch, deserializedMarker.coordinatorEpoch());
    }
}
Example 7: testAppendedChecksumConsistency
import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testAppendedChecksumConsistency() {
    ByteBuffer buffer = ByteBuffer.allocate(512);
    for (byte magic : Arrays.asList(RecordBatch.MAGIC_VALUE_V0, RecordBatch.MAGIC_VALUE_V1, RecordBatch.MAGIC_VALUE_V2)) {
        MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, magic, compressionType,
                TimestampType.CREATE_TIME, 0L, LegacyRecord.NO_TIMESTAMP, RecordBatch.NO_PRODUCER_ID,
                RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false, false,
                RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.capacity());

        Long checksumOrNull = builder.append(1L, "key".getBytes(), "value".getBytes());
        MemoryRecords memoryRecords = builder.build();
        List<Record> records = TestUtils.toList(memoryRecords.records());
        assertEquals(1, records.size());
        assertEquals(checksumOrNull, records.get(0).checksumOrNull());
    }
}
Example 8: testSmallWriteLimit
import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testSmallWriteLimit() {
    // with a small write limit, we always allow at least one record to be added
    byte[] key = "foo".getBytes();
    byte[] value = "bar".getBytes();
    int writeLimit = 0;

    ByteBuffer buffer = ByteBuffer.allocate(512);
    MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType,
            TimestampType.CREATE_TIME, 0L, LegacyRecord.NO_TIMESTAMP, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH,
            RecordBatch.NO_SEQUENCE, false, false, RecordBatch.NO_PARTITION_LEADER_EPOCH, writeLimit);

    assertFalse(builder.isFull());
    assertTrue(builder.hasRoomFor(0L, key, value, Record.EMPTY_HEADERS));
    builder.append(0L, key, value);

    assertTrue(builder.isFull());
    assertFalse(builder.hasRoomFor(0L, key, value, Record.EMPTY_HEADERS));

    MemoryRecords memRecords = builder.build();
    List<Record> records = TestUtils.toList(memRecords.records());
    assertEquals(1, records.size());

    Record record = records.get(0);
    assertEquals(ByteBuffer.wrap(key), record.key());
    assertEquals(ByteBuffer.wrap(value), record.value());
}
Example 9: batches
import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
private static List<RecordBatch> batches(Records buffer) {
    return TestUtils.toList(buffer.batches());
}
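A hypothetical usage of this tiny wrapper; the buffer and the expected count are illustrative only:

List<RecordBatch> all = batches(MemoryRecords.readableRecords(buffer));
assertEquals(1, all.size()); // one batch was written into the buffer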
Example 10: testFilterToBatchDiscard
import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testFilterToBatchDiscard() {
    if (compression != CompressionType.NONE || magic >= RecordBatch.MAGIC_VALUE_V2) {
        ByteBuffer buffer = ByteBuffer.allocate(2048);
        MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic, compression, TimestampType.CREATE_TIME, 0L);
        builder.append(10L, "1".getBytes(), "a".getBytes());
        builder.close();

        builder = MemoryRecords.builder(buffer, magic, compression, TimestampType.CREATE_TIME, 1L);
        builder.append(11L, "2".getBytes(), "b".getBytes());
        builder.append(12L, "3".getBytes(), "c".getBytes());
        builder.close();

        builder = MemoryRecords.builder(buffer, magic, compression, TimestampType.CREATE_TIME, 3L);
        builder.append(13L, "4".getBytes(), "d".getBytes());
        builder.append(20L, "5".getBytes(), "e".getBytes());
        builder.append(15L, "6".getBytes(), "f".getBytes());
        builder.close();

        builder = MemoryRecords.builder(buffer, magic, compression, TimestampType.CREATE_TIME, 6L);
        builder.append(16L, "7".getBytes(), "g".getBytes());
        builder.close();

        buffer.flip();

        ByteBuffer filtered = ByteBuffer.allocate(2048);
        MemoryRecords.readableRecords(buffer).filterTo(new TopicPartition("foo", 0), new MemoryRecords.RecordFilter() {
            @Override
            protected boolean shouldDiscard(RecordBatch batch) {
                // discard the second and fourth batches
                return batch.lastOffset() == 2L || batch.lastOffset() == 6L;
            }

            @Override
            protected boolean shouldRetain(RecordBatch recordBatch, Record record) {
                return true;
            }
        }, filtered, Integer.MAX_VALUE, BufferSupplier.NO_CACHING);

        filtered.flip();
        MemoryRecords filteredRecords = MemoryRecords.readableRecords(filtered);

        List<MutableRecordBatch> batches = TestUtils.toList(filteredRecords.batches());
        assertEquals(2, batches.size());
        assertEquals(0L, batches.get(0).lastOffset());
        assertEquals(5L, batches.get(1).lastOffset());
    }
}
Example 11: testFilterToAlreadyCompactedLog
import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testFilterToAlreadyCompactedLog() {
    ByteBuffer buffer = ByteBuffer.allocate(2048);

    // create a batch with some offset gaps to simulate a compacted batch
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic, compression,
            TimestampType.CREATE_TIME, 0L);
    builder.appendWithOffset(5L, 10L, null, "a".getBytes());
    builder.appendWithOffset(8L, 11L, "1".getBytes(), "b".getBytes());
    builder.appendWithOffset(10L, 12L, null, "c".getBytes());

    builder.close();
    buffer.flip();

    ByteBuffer filtered = ByteBuffer.allocate(2048);
    MemoryRecords.readableRecords(buffer).filterTo(new TopicPartition("foo", 0), new RetainNonNullKeysFilter(),
            filtered, Integer.MAX_VALUE, BufferSupplier.NO_CACHING);
    filtered.flip();
    MemoryRecords filteredRecords = MemoryRecords.readableRecords(filtered);

    List<MutableRecordBatch> batches = TestUtils.toList(filteredRecords.batches());
    assertEquals(1, batches.size());

    MutableRecordBatch batch = batches.get(0);
    List<Record> records = TestUtils.toList(batch);
    assertEquals(1, records.size());
    assertEquals(8L, records.get(0).offset());

    if (magic >= RecordBatch.MAGIC_VALUE_V1)
        assertEquals(new SimpleRecord(11L, "1".getBytes(), "b".getBytes()), new SimpleRecord(records.get(0)));
    else
        assertEquals(new SimpleRecord(RecordBatch.NO_TIMESTAMP, "1".getBytes(), "b".getBytes()),
                new SimpleRecord(records.get(0)));

    if (magic >= RecordBatch.MAGIC_VALUE_V2) {
        // the new format preserves first and last offsets from the original batch
        assertEquals(0L, batch.baseOffset());
        assertEquals(10L, batch.lastOffset());
    } else {
        assertEquals(8L, batch.baseOffset());
        assertEquals(8L, batch.lastOffset());
    }
}
Example 12: testFilterToPreservesLogAppendTime
import org.apache.kafka.test.TestUtils; // import the package/class the method depends on
@Test
public void testFilterToPreservesLogAppendTime() {
    long logAppendTime = System.currentTimeMillis();

    ByteBuffer buffer = ByteBuffer.allocate(2048);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic, compression,
            TimestampType.LOG_APPEND_TIME, 0L, logAppendTime, pid, epoch, firstSequence);
    builder.append(10L, null, "a".getBytes());
    builder.close();

    builder = MemoryRecords.builder(buffer, magic, compression, TimestampType.LOG_APPEND_TIME, 1L, logAppendTime,
            pid, epoch, firstSequence);
    builder.append(11L, "1".getBytes(), "b".getBytes());
    builder.append(12L, null, "c".getBytes());
    builder.close();

    builder = MemoryRecords.builder(buffer, magic, compression, TimestampType.LOG_APPEND_TIME, 3L, logAppendTime,
            pid, epoch, firstSequence);
    builder.append(13L, null, "d".getBytes());
    builder.append(14L, "4".getBytes(), "e".getBytes());
    builder.append(15L, "5".getBytes(), "f".getBytes());
    builder.close();

    buffer.flip();

    ByteBuffer filtered = ByteBuffer.allocate(2048);
    MemoryRecords.readableRecords(buffer).filterTo(new TopicPartition("foo", 0), new RetainNonNullKeysFilter(),
            filtered, Integer.MAX_VALUE, BufferSupplier.NO_CACHING);

    filtered.flip();
    MemoryRecords filteredRecords = MemoryRecords.readableRecords(filtered);

    List<MutableRecordBatch> batches = TestUtils.toList(filteredRecords.batches());
    assertEquals(magic < RecordBatch.MAGIC_VALUE_V2 && compression == CompressionType.NONE ? 3 : 2, batches.size());

    for (RecordBatch batch : batches) {
        assertEquals(compression, batch.compressionType());
        if (magic > RecordBatch.MAGIC_VALUE_V0) {
            assertEquals(TimestampType.LOG_APPEND_TIME, batch.timestampType());
            assertEquals(logAppendTime, batch.maxTimestamp());
        }
    }
}