This article collects typical usage examples of the Java class org.apache.kafka.clients.consumer.MockConsumer. If you are wondering what exactly the MockConsumer class does, how to use it, or where to find usage examples, the curated code samples here may help.
The MockConsumer class belongs to the org.apache.kafka.clients.consumer package. A total of 15 code examples of the MockConsumer class are shown below, sorted by popularity by default.
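Before the examples, here is a minimal, self-contained sketch of the usual MockConsumer workflow: construct it with an OffsetResetStrategy, assign a partition, seed the beginning offsets, hand-feed records, and poll. This is only a sketch against the 0.10/0.11-era consumer API that the examples below target; the topic name my_topic and the class name MockConsumerSketch are placeholders, not taken from any example.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerSketch {
    public static void main(String[] args) {
        // In-memory stand-in for KafkaConsumer; no broker is involved.
        MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);

        // Manually assign a partition and tell the mock where that partition begins,
        // so the EARLIEST reset strategy can resolve a position on the first poll.
        TopicPartition tp = new TopicPartition("my_topic", 0); // placeholder topic
        consumer.assign(Collections.singletonList(tp));
        Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
        beginningOffsets.put(tp, 0L);
        consumer.updateBeginningOffsets(beginningOffsets);

        // Queue a record by hand; the next poll() returns it.
        consumer.addRecord(new ConsumerRecord<>("my_topic", 0, 0L, "some-key", "some-value"));

        ConsumerRecords<String, String> records = consumer.poll(1000L);
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.key() + " -> " + record.value());
        }
    }
}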
Example 1: shouldFallbackToPartitionsForIfPartitionNotInAllPartitionsList
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@SuppressWarnings("unchecked")
@Test
public void shouldFallbackToPartitionsForIfPartitionNotInAllPartitionsList() throws Exception {
    // The bare mock's listTopics() knows nothing, so the changelog reader
    // must fall back to the overridden per-topic partitionsFor() lookup.
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) {
        @Override
        public List<PartitionInfo> partitionsFor(final String topic) {
            return Collections.singletonList(partitionInfo);
        }
    };
    final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 10);
    changelogReader.validatePartitionExists(topicPartition, "store");
}
Example 2: shouldThrowStreamsExceptionIfTimeoutOccursDuringPartitionsFor
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@SuppressWarnings("unchecked")
@Test
public void shouldThrowStreamsExceptionIfTimeoutOccursDuringPartitionsFor() throws Exception {
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) {
        @Override
        public List<PartitionInfo> partitionsFor(final String topic) {
            throw new TimeoutException("KABOOM!");
        }
    };
    final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 5);
    try {
        changelogReader.validatePartitionExists(topicPartition, "store");
        fail("Should have thrown streams exception");
    } catch (final StreamsException e) {
        // pass
    }
}
Example 3: shouldRequestPartitionInfoIfItDoesntExist
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@SuppressWarnings("unchecked")
@Test
public void shouldRequestPartitionInfoIfItDoesntExist() throws Exception {
    // listTopics() reports nothing, but updatePartitions() seeds the info
    // that partitionsFor() will return on the fallback lookup.
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<String, List<PartitionInfo>> listTopics() {
            return Collections.emptyMap();
        }
    };
    consumer.updatePartitions(topicPartition.topic(), Collections.singletonList(partitionInfo));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, Time.SYSTEM, 5000);
    changelogReader.validatePartitionExists(topicPartition, "store");
}
Example 4: before
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@Before
public void before() throws IOException {
    final Map<String, String> storeToTopic = new HashMap<>();
    storeToTopic.put("t1-store", "t1");
    storeToTopic.put("t2-store", "t2");

    final Map<StateStore, ProcessorNode> storeToProcessorNode = new HashMap<>();
    store1 = new NoOpReadOnlyStore<>("t1-store");
    storeToProcessorNode.put(store1, new MockProcessorNode(-1));
    store2 = new NoOpReadOnlyStore("t2-store");
    storeToProcessorNode.put(store2, new MockProcessorNode(-1));

    topology = new ProcessorTopology(Collections.<ProcessorNode>emptyList(),
                                     Collections.<String, SourceNode>emptyMap(),
                                     Collections.<String, SinkNode>emptyMap(),
                                     Collections.<StateStore>emptyList(),
                                     storeToTopic,
                                     Arrays.<StateStore>asList(store1, store2));

    context = new NoOpProcessorContext();
    stateDirPath = TestUtils.tempDirectory().getPath();
    stateDirectory = new StateDirectory("appId", stateDirPath, time);
    consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    stateManager = new GlobalStateManagerImpl(topology, consumer, stateDirectory);
    checkpointFile = new File(stateManager.baseDir(), ProcessorStateManager.CHECKPOINT_FILE_NAME);
}
Example 5: shouldThrowStreamsExceptionOnStartupIfExceptionOccurred
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@SuppressWarnings("unchecked")
@Test
public void shouldThrowStreamsExceptionOnStartupIfExceptionOccurred() throws Exception {
    final MockConsumer<byte[], byte[]> mockConsumer = new MockConsumer(OffsetResetStrategy.EARLIEST) {
        @Override
        public List<PartitionInfo> partitionsFor(final String topic) {
            throw new RuntimeException("KABOOM!");
        }
    };
    globalStreamThread = new GlobalStreamThread(builder.buildGlobalStateTopology(),
                                                config,
                                                mockConsumer,
                                                new StateDirectory("appId", TestUtils.tempDirectory().getPath(), time),
                                                new Metrics(),
                                                new MockTime(),
                                                "clientId");
    try {
        globalStreamThread.start();
        fail("Should have thrown StreamsException if start up failed");
    } catch (StreamsException e) {
        assertThat(e.getCause(), instanceOf(RuntimeException.class));
        assertThat(e.getCause().getMessage(), equalTo("KABOOM!"));
    }
    assertFalse(globalStreamThread.stillRunning());
}
Example 6: testConsume
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@Test
public void testConsume(TestContext ctx) throws Exception {
    MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    KafkaReadStream<String, String> consumer = createConsumer(vertx, mock);
    Async doneLatch = ctx.async();
    consumer.handler(record -> {
        ctx.assertEquals("the_topic", record.topic());
        ctx.assertEquals(0, record.partition());
        ctx.assertEquals("abc", record.key());
        ctx.assertEquals("def", record.value());
        consumer.close(v -> doneLatch.complete());
    });
    consumer.subscribe(Collections.singleton("the_topic"), v -> {
        // schedulePollTask() runs the setup on the consumer's polling thread.
        mock.schedulePollTask(() -> {
            mock.rebalance(Collections.singletonList(new TopicPartition("the_topic", 0)));
            mock.addRecord(new ConsumerRecord<>("the_topic", 0, 0L, "abc", "def"));
            mock.seek(new TopicPartition("the_topic", 0), 0L);
        });
    });
}
Example 7: createPublisher
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@Override public Publisher<ConsumerRecord<Long, Double>> createPublisher(final long l) {
    long nRecords = 100;
    mockConsumer = new MockConsumer<Long, Double>(OffsetResetStrategy.LATEST);
    mockConsumer.assign(Arrays.asList(topicPartition));
    final HashMap<TopicPartition, Long> topicPartitionLongHashMap = new HashMap<>();
    topicPartitionLongHashMap.put(topicPartition, 0L);
    mockConsumer.updateBeginningOffsets(topicPartitionLongHashMap);
    topicPartitionLongHashMap.put(topicPartition, nRecords - 1);
    mockConsumer.updateEndOffsets(topicPartitionLongHashMap);
    final Random random = new Random();
    for (int i = 0; i < nRecords; i++) {
        mockConsumer.addRecord(
            new ConsumerRecord<Long, Double>(
                topicPartition.topic(),
                topicPartition.partition(),
                i,
                random.nextLong(),
                random.nextDouble()));
    }
    return new KafkaPublisher<Long, Double>(mockConsumer, 100, Executors.newSingleThreadExecutor());
}
Example 8: testConsume
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@Test
public void testConsume() throws Exception {
    Config testConfig = ConfigFactory.parseMap(ImmutableMap.of(ConfigurationKeys.KAFKA_BROKERS, "test"));
    MockConsumer<String, String> consumer = new MockConsumer<String, String>(OffsetResetStrategy.NONE);
    consumer.assign(Arrays.asList(new TopicPartition("test_topic", 0)));
    HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(new TopicPartition("test_topic", 0), 0L);
    consumer.updateBeginningOffsets(beginningOffsets);
    ConsumerRecord<String, String> record0 = new ConsumerRecord<>("test_topic", 0, 0L, "key", "value0");
    ConsumerRecord<String, String> record1 = new ConsumerRecord<>("test_topic", 0, 1L, "key", "value1");
    ConsumerRecord<String, String> record2 = new ConsumerRecord<>("test_topic", 0, 2L, "key", "value2");
    consumer.addRecord(record0);
    consumer.addRecord(record1);
    consumer.addRecord(record2);
    try (Kafka09ConsumerClient<String, String> kafka09Client = new Kafka09ConsumerClient<>(testConfig, consumer)) {
        // Consume from offset 0
        Set<KafkaConsumerRecord> consumedRecords =
            Sets.newHashSet(kafka09Client.consume(new KafkaPartition.Builder().withId(0).withTopicName("test_topic")
                .build(), 0L, 100L));
        Set<Kafka09ConsumerRecord<String, String>> expected =
            ImmutableSet.<Kafka09ConsumerRecord<String, String>>of(new Kafka09ConsumerRecord<>(record0),
                new Kafka09ConsumerRecord<>(record1), new Kafka09ConsumerRecord<>(record2));
        Assert.assertEquals(consumedRecords, expected);
    }
}
Example 9: setUp
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@Before
public void setUp() throws Exception {
    store = PowerMock.createPartialMock(KafkaBasedLog.class, new String[]{"createConsumer", "createProducer"},
                                        TOPIC, PRODUCER_PROPS, CONSUMER_PROPS, consumedCallback, time, initializer);
    consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    consumer.updatePartitions(TOPIC, Arrays.asList(TPINFO0, TPINFO1));
    Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(TP0, 0L);
    beginningOffsets.put(TP1, 0L);
    consumer.updateBeginningOffsets(beginningOffsets);
}
Example 10: shouldThrowStreamsExceptionWhenTimeoutExceptionThrown
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@SuppressWarnings("unchecked")
@Test
public void shouldThrowStreamsExceptionWhenTimeoutExceptionThrown() throws Exception {
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<String, List<PartitionInfo>> listTopics() {
            throw new TimeoutException("KABOOM!");
        }
    };
    final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 0);
    try {
        changelogReader.validatePartitionExists(topicPartition, "store");
        fail("Should have thrown streams exception");
    } catch (final StreamsException e) {
        // pass
    }
}
Example 11: mockConsumer
import org.apache.kafka.clients.consumer.MockConsumer; // required import
private Consumer mockConsumer(final RuntimeException toThrow) {
    return new MockConsumer(OffsetResetStrategy.EARLIEST) {
        @Override
        public OffsetAndMetadata committed(final TopicPartition partition) {
            throw toThrow;
        }
    };
}
Example 12: testBatch
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@Test
public void testBatch(TestContext ctx) throws Exception {
    int num = 50;
    MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    KafkaReadStream<String, String> consumer = createConsumer(vertx, mock);
    Async doneLatch = ctx.async();
    AtomicInteger count = new AtomicInteger();
    consumer.handler(record -> {
        int val = count.getAndIncrement();
        if (val < num) {
            ctx.assertEquals("the_topic", record.topic());
            ctx.assertEquals(0, record.partition());
            ctx.assertEquals("key-" + val, record.key());
            ctx.assertEquals("value-" + val, record.value());
            if (val == num - 1) {
                consumer.close(v -> doneLatch.complete());
            }
        }
    });
    consumer.subscribe(Collections.singleton("the_topic"), v -> {
        mock.schedulePollTask(() -> {
            mock.rebalance(Collections.singletonList(new TopicPartition("the_topic", 0)));
            mock.seek(new TopicPartition("the_topic", 0), 0);
            for (int i = 0; i < num; i++) {
                mock.addRecord(new ConsumerRecord<>("the_topic", 0, i, "key-" + i, "value-" + i));
            }
        });
    });
}
Example 13: shouldInitializeRestoreConsumerWithOffsetsFromStandbyTasks
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@Test
public void shouldInitializeRestoreConsumerWithOffsetsFromStandbyTasks() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream("t1").groupByKey().count("count-one");
    builder.stream("t2").groupByKey().count("count-two");
    final StreamThread thread = new StreamThread(
        builder,
        config,
        clientSupplier,
        applicationId,
        clientId,
        processId,
        metrics,
        mockTime,
        new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
        0);
    final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer;
    restoreConsumer.updatePartitions("stream-thread-test-count-one-changelog",
        Collections.singletonList(new PartitionInfo("stream-thread-test-count-one-changelog",
                                                    0,
                                                    null,
                                                    new Node[0],
                                                    new Node[0])));
    restoreConsumer.updatePartitions("stream-thread-test-count-two-changelog",
        Collections.singletonList(new PartitionInfo("stream-thread-test-count-two-changelog",
                                                    0,
                                                    null,
                                                    new Node[0],
                                                    new Node[0])));
    final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    final TopicPartition t1 = new TopicPartition("t1", 0);
    standbyTasks.put(new TaskId(0, 0), Utils.mkSet(t1));
    thread.setPartitionAssignor(new StreamPartitionAssignor() {
        @Override
        Map<TaskId, Set<TopicPartition>> standbyTasks() {
            return standbyTasks;
        }
    });
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(Collections.<TopicPartition>emptyList());
    assertThat(restoreConsumer.assignment(), equalTo(Utils.mkSet(new TopicPartition("stream-thread-test-count-one-changelog", 0))));
    // assign an existing standby plus a new one
    standbyTasks.put(new TaskId(1, 0), Utils.mkSet(new TopicPartition("t2", 0)));
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(Collections.<TopicPartition>emptyList());
    assertThat(restoreConsumer.assignment(), equalTo(Utils.mkSet(new TopicPartition("stream-thread-test-count-one-changelog", 0),
                                                                 new TopicPartition("stream-thread-test-count-two-changelog", 0))));
}
Example 14: shouldCloseSuspendedTasksThatAreNoLongerAssignedToThisStreamThreadBeforeCreatingNewTasks
import org.apache.kafka.clients.consumer.MockConsumer; // required import
@Test
public void shouldCloseSuspendedTasksThatAreNoLongerAssignedToThisStreamThreadBeforeCreatingNewTasks() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream("t1").groupByKey().count("count-one");
    builder.stream("t2").groupByKey().count("count-two");
    final StreamThread thread = new StreamThread(
        builder,
        config,
        clientSupplier,
        applicationId,
        clientId,
        processId,
        metrics,
        mockTime,
        new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
        0);
    final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer;
    restoreConsumer.updatePartitions("stream-thread-test-count-one-changelog",
        Collections.singletonList(new PartitionInfo("stream-thread-test-count-one-changelog",
                                                    0,
                                                    null,
                                                    new Node[0],
                                                    new Node[0])));
    restoreConsumer.updatePartitions("stream-thread-test-count-two-changelog",
        Collections.singletonList(new PartitionInfo("stream-thread-test-count-two-changelog",
                                                    0,
                                                    null,
                                                    new Node[0],
                                                    new Node[0])));
    final HashMap<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(new TopicPartition("stream-thread-test-count-one-changelog", 0), 0L);
    offsets.put(new TopicPartition("stream-thread-test-count-two-changelog", 0), 0L);
    restoreConsumer.updateEndOffsets(offsets);
    restoreConsumer.updateBeginningOffsets(offsets);
    final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    final TopicPartition t1 = new TopicPartition("t1", 0);
    standbyTasks.put(new TaskId(0, 0), Utils.mkSet(t1));
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final TopicPartition t2 = new TopicPartition("t2", 0);
    activeTasks.put(new TaskId(1, 0), Utils.mkSet(t2));
    thread.setPartitionAssignor(new StreamPartitionAssignor() {
        @Override
        Map<TaskId, Set<TopicPartition>> standbyTasks() {
            return standbyTasks;
        }

        @Override
        Map<TaskId, Set<TopicPartition>> activeTasks() {
            return activeTasks;
        }
    });
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(Utils.mkSet(t2));
    // swap the assignment around and make sure we don't get any exceptions
    standbyTasks.clear();
    activeTasks.clear();
    standbyTasks.put(new TaskId(1, 0), Utils.mkSet(t2));
    activeTasks.put(new TaskId(0, 0), Utils.mkSet(t1));
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(Utils.mkSet(t1));
}
Example 15: MockInternalTopicManager
import org.apache.kafka.clients.consumer.MockConsumer; // required import
public MockInternalTopicManager(StreamsConfig streamsConfig, MockConsumer<byte[], byte[]> restoreConsumer) {
    super(new StreamsKafkaClient(streamsConfig), 0, 0, new MockTime());
    this.restoreConsumer = restoreConsumer;
}