This article collects typical usage examples of the Java method org.apache.kafka.clients.producer.Callback.onCompletion. If you are unsure how Callback.onCompletion is used in practice, the curated code examples below may help. You can also read further about the enclosing class, org.apache.kafka.clients.producer.Callback.
The following shows 9 code examples of Callback.onCompletion, sorted by popularity by default.
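Before the extracted examples, here is a minimal standalone sketch of the pattern they all exercise: KafkaProducer.send(record, callback) returns immediately, and the producer's I/O thread later invokes Callback.onCompletion(metadata, exception), where a non-null exception indicates the send failed. The topic name, broker address, and serializer choices below are illustrative assumptions, not taken from any of the examples.

import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class CallbackOnCompletionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // "demo-topic" is a placeholder topic name
            ProducerRecord<String, String> record = new ProducerRecord<>("demo-topic", "key", "value");

            // send() is asynchronous; the producer's I/O thread invokes onCompletion later
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception != null) {
                        // a non-null exception means this record could not be delivered
                        exception.printStackTrace();
                    } else {
                        System.out.printf("sent to %s-%d at offset %d%n",
                                metadata.topic(), metadata.partition(), metadata.offset());
                    }
                }
            });

            producer.flush(); // block until outstanding sends (and their callbacks) complete
        }
    }
}

Note that the test examples below never go through a real broker: they either capture the Callback passed to a mocked producer and invoke it directly, or override a MockProducer's send method to call onCompletion immediately with a null metadata and a synthetic exception.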
Example 1: testCallbackWillNotTriggerOnFailedDeliveryOnNoException
import org.apache.kafka.clients.producer.Callback; // import the package/class the method depends on
@Test
public void testCallbackWillNotTriggerOnFailedDeliveryOnNoException() {
    final ProducerRecord<String, String> record = new ProducerRecord<String, String>("topic", 0, null, "msg");
    unit.send(producer, record, "msg", failedDeliveryCallback);

    // capture the callback handed to the producer and complete it without an exception
    final ArgumentCaptor<Callback> callbackCaptor = ArgumentCaptor.forClass(Callback.class);
    verify(producer).send(Mockito.refEq(record), callbackCaptor.capture());
    final Callback callback = callbackCaptor.getValue();
    callback.onCompletion(recordMetadata, null);

    verify(failedDeliveryCallback, never()).onFailedDelivery(anyString(), any(Throwable.class));
}
Example 2: testCallbackWillTriggerOnFailedDeliveryOnException
import org.apache.kafka.clients.producer.Callback; // import the package/class the method depends on
@Test
public void testCallbackWillTriggerOnFailedDeliveryOnException() {
    final IOException exception = new IOException("KABOOM");
    final ProducerRecord<String, String> record = new ProducerRecord<String, String>("topic", 0, null, "msg");
    unit.send(producer, record, "msg", failedDeliveryCallback);

    // capture the callback handed to the producer and complete it with the exception
    final ArgumentCaptor<Callback> callbackCaptor = ArgumentCaptor.forClass(Callback.class);
    verify(producer).send(Mockito.refEq(record), callbackCaptor.capture());
    final Callback callback = callbackCaptor.getValue();
    callback.onCompletion(recordMetadata, exception);

    verify(failedDeliveryCallback).onFailedDelivery("msg", exception);
}
Example 3: shouldThrowStreamsExceptionOnSubsequentCallIfASendFails
import org.apache.kafka.clients.producer.Callback; // import the package/class the method depends on
@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionOnSubsequentCallIfASendFails() throws Exception {
    final RecordCollector collector = new RecordCollectorImpl(
            new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
                @Override
                public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
                    // fail every send by completing the callback with an exception
                    callback.onCompletion(null, new Exception());
                    return null;
                }
            },
            "test");

    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
}
Example 4: shouldThrowStreamsExceptionOnFlushIfASendFailed
import org.apache.kafka.clients.producer.Callback; // import the package/class the method depends on
@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionOnFlushIfASendFailed() throws Exception {
    final RecordCollector collector = new RecordCollectorImpl(
            new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
                @Override
                public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
                    // fail every send by completing the callback with an exception
                    callback.onCompletion(null, new Exception());
                    return null;
                }
            },
            "test");

    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    collector.flush();
}
Example 5: shouldThrowStreamsExceptionOnCloseIfASendFailed
import org.apache.kafka.clients.producer.Callback; // import the package/class the method depends on
@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionOnCloseIfASendFailed() throws Exception {
    final RecordCollector collector = new RecordCollectorImpl(
            new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
                @Override
                public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
                    // fail every send by completing the callback with an exception
                    callback.onCompletion(null, new Exception());
                    return null;
                }
            },
            "test");

    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    collector.close();
}
Example 6: send
import org.apache.kafka.clients.producer.Callback; // import the package/class the method depends on
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record, final Callback callback) {
    // ask the error manager once whether this record should fail
    boolean error = errorManager.nextError(record.value());
    if (error) {
        // simulate a failed delivery: complete the callback with an exception
        final Exception e = new Exception();
        callback.onCompletion(null, e);
        return nullFuture;
    } else {
        return super.send(record, callback);
    }
}
Example 7: send
import org.apache.kafka.clients.producer.Callback; // import the package/class the method depends on
@Override
public synchronized Future<RecordMetadata> send(ProducerRecord<String, String> record, Callback callback) {
    // fail every send, then delegate so the mock still records the attempt
    callback.onCompletion(null, new SimulatedWriteException());
    return super.send(record, callback);
}
Example 8: runTest
import org.apache.kafka.clients.producer.Callback; // import the package/class the method depends on
private void runTest(boolean flushOnCheckpoint) throws Throwable {
    Properties props = new Properties();
    final AtomicBoolean snapshottingFinished = new AtomicBoolean(false);
    final TestingKafkaProducer<String> producer = new TestingKafkaProducer<>(
            "someTopic", new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), props,
            snapshottingFinished);
    producer.setFlushOnCheckpoint(flushOnCheckpoint);

    OneInputStreamOperatorTestHarness<String, Object> testHarness =
            new OneInputStreamOperatorTestHarness<>(new StreamSink(producer));
    testHarness.open();

    for (int i = 0; i < 100; i++) {
        testHarness.processElement(new StreamRecord<>("msg-" + i));
    }

    // start a thread confirming all pending records
    final Tuple1<Throwable> runnableError = new Tuple1<>(null);
    final Thread threadA = Thread.currentThread();

    Runnable confirmer = new Runnable() {
        @Override
        public void run() {
            try {
                MockProducer mp = producer.getProducerInstance();
                List<Callback> pending = mp.getPending();

                // we cannot check directly whether snapshot() blocks forever, but if it
                // were running to completion it would already be removing elements
                // from the pending list, so give it some time first
                synchronized (threadA) {
                    threadA.wait(500L);
                }

                // check that no records have been confirmed yet
                Assert.assertEquals(100, pending.size());
                Assert.assertFalse("Snapshot method returned before all records were confirmed",
                        snapshottingFinished.get());

                // now confirm all pending records
                for (Callback c : pending) {
                    c.onCompletion(null, null);
                }
                pending.clear();
            } catch (Throwable t) {
                runnableError.f0 = t;
            }
        }
    };
    Thread threadB = new Thread(confirmer);
    threadB.start();

    // this should block:
    testHarness.snapshot(0, 0);
    synchronized (threadA) {
        threadA.notifyAll(); // just in case, to let the test fail faster
    }
    Assert.assertEquals(0, producer.getProducerInstance().getPending().size());

    Deadline deadline = FiniteDuration.apply(5, "s").fromNow();
    while (deadline.hasTimeLeft() && threadB.isAlive()) {
        threadB.join(500);
    }
    Assert.assertFalse("Thread B is expected to be finished at this point. If not, the test is prone to fail",
            threadB.isAlive());

    if (runnableError.f0 != null) {
        throw runnableError.f0;
    }

    testHarness.close();
}
Example 9: runAtLeastOnceTest
import org.apache.kafka.clients.producer.Callback; // import the package/class the method depends on
private void runAtLeastOnceTest(boolean flushOnCheckpoint) throws Throwable {
    final AtomicBoolean snapshottingFinished = new AtomicBoolean(false);
    final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
            FakeStandardProducerConfig.get(), null, snapshottingFinished);
    producer.setFlushOnCheckpoint(flushOnCheckpoint);

    OneInputStreamOperatorTestHarness<String, Object> testHarness =
            new OneInputStreamOperatorTestHarness<>(new StreamSink(producer));
    testHarness.open();

    for (int i = 0; i < 100; i++) {
        testHarness.processElement(new StreamRecord<>("msg-" + i));
    }

    // start a thread confirming all pending records
    final Tuple1<Throwable> runnableError = new Tuple1<>(null);
    final Thread threadA = Thread.currentThread();

    Runnable confirmer = new Runnable() {
        @Override
        public void run() {
            try {
                MockProducer mp = producer.getProducerInstance();
                List<Callback> pending = mp.getPending();

                // we cannot check directly whether snapshot() blocks forever, but if it
                // were running to completion it would already be removing elements
                // from the pending list, so give it some time first
                synchronized (threadA) {
                    threadA.wait(500L);
                }

                // check that no records have been confirmed yet
                Assert.assertEquals(100, pending.size());
                Assert.assertFalse("Snapshot method returned before all records were confirmed",
                        snapshottingFinished.get());

                // now confirm all pending records
                for (Callback c : pending) {
                    c.onCompletion(null, null);
                }
                pending.clear();
            } catch (Throwable t) {
                runnableError.f0 = t;
            }
        }
    };
    Thread threadB = new Thread(confirmer);
    threadB.start();

    // this should block:
    testHarness.snapshot(0, 0);
    synchronized (threadA) {
        threadA.notifyAll(); // just in case, to let the test fail faster
    }
    Assert.assertEquals(0, producer.getProducerInstance().getPending().size());

    Deadline deadline = FiniteDuration.apply(5, "s").fromNow();
    while (deadline.hasTimeLeft() && threadB.isAlive()) {
        threadB.join(500);
    }
    Assert.assertFalse("Thread B is expected to be finished at this point. If not, the test is prone to fail",
            threadB.isAlive());

    if (runnableError.f0 != null) {
        throw runnableError.f0;
    }

    testHarness.close();
}