This article collects typical usage examples of the Java method org.apache.kafka.clients.producer.Callback.onCompletion. If you are unsure what Callback.onCompletion does or how to use it, the curated method examples below may help. You can also explore further usage examples of the enclosing class, org.apache.kafka.clients.producer.Callback.
Nine code examples of Callback.onCompletion are shown below, ordered by popularity by default.
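Before walking through the examples, here is a minimal, self-contained sketch of how Callback.onCompletion is normally wired up: Callback is a single-method interface, the producer invokes onCompletion(metadata, exception) once the send is acknowledged or fails, and exception is null on success. The broker address and topic name below are placeholders chosen for illustration.
import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
public class CallbackSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            ProducerRecord<String, String> record = new ProducerRecord<>("some-topic", "key", "value");
            // send() is asynchronous; the Callback runs when the broker acks the record or the send fails
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception == null) {
                        System.out.println("delivered to " + metadata.topic()
                                + "-" + metadata.partition() + "@" + metadata.offset());
                    } else {
                        exception.printStackTrace(); // delivery failed
                    }
                }
            });
        }
    }
}
Because send() is asynchronous, the callback may run on the producer's I/O thread; the examples below therefore either capture the callback with Mockito or invoke it directly from a mock producer.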
Example 1: testCallbackWillNotTriggerOnFailedDeliveryOnNoException
import org.apache.kafka.clients.producer.Callback; // import the package/class this method depends on
@Test
public void testCallbackWillNotTriggerOnFailedDeliveryOnNoException() {
final ProducerRecord<String,String> record = new ProducerRecord<String,String>("topic", 0, null, "msg");
unit.send(producer, record, "msg", failedDeliveryCallback);
final ArgumentCaptor<Callback> callbackCaptor = ArgumentCaptor.forClass(Callback.class);
verify(producer).send(Mockito.refEq(record), callbackCaptor.capture());
final Callback callback = callbackCaptor.getValue();
callback.onCompletion(recordMetadata, null);
verify(failedDeliveryCallback, never()).onFailedDelivery(anyString(), any(Throwable.class));
}
Example 2: testCallbackWillTriggerOnFailedDeliveryOnException
import org.apache.kafka.clients.producer.Callback; // import the package/class this method depends on
@Test
public void testCallbackWillTriggerOnFailedDeliveryOnException() {
final IOException exception = new IOException("KABOOM");
final ProducerRecord<String,String> record = new ProducerRecord<String,String>("topic", 0, null, "msg");
unit.send(producer, record, "msg", failedDeliveryCallback);
final ArgumentCaptor<Callback> callbackCaptor = ArgumentCaptor.forClass(Callback.class);
verify(producer).send(Mockito.refEq(record), callbackCaptor.capture());
final Callback callback = callbackCaptor.getValue();
callback.onCompletion(recordMetadata, exception);
verify(failedDeliveryCallback).onFailedDelivery("msg", exception);
}
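Examples 1 and 2 capture the Callback that the class under test (unit) hands to producer.send and invoke it by hand to simulate success and failure. The production code they exercise is roughly glue of the following shape; FailedDeliveryCallback and its onFailedDelivery signature are assumptions reconstructed from the test code above, not the project's actual API.
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
// hypothetical interface modeled on the failedDeliveryCallback mock used in the tests above
interface FailedDeliveryCallback<E> {
    void onFailedDelivery(E event, Throwable throwable);
}
class AsynchronousDeliveryStrategySketch {
    // sends the record and reports a failed delivery only when the broker callback carries an exception
    public <K, V, E> void send(Producer<K, V> producer, ProducerRecord<K, V> record,
                               final E event, final FailedDeliveryCallback<E> failedDeliveryCallback) {
        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    failedDeliveryCallback.onFailedDelivery(event, exception);
                }
            }
        });
    }
}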
Example 3: shouldThrowStreamsExceptionOnSubsequentCallIfASendFails
import org.apache.kafka.clients.producer.Callback; // import the package/class this method depends on
@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionOnSubsequentCallIfASendFails() throws Exception {
final RecordCollector collector = new RecordCollectorImpl(
new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
@Override
public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
callback.onCompletion(null, new Exception());
return null;
}
},
"test");
collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
}
Example 4: shouldThrowStreamsExceptionOnFlushIfASendFailed
import org.apache.kafka.clients.producer.Callback; // import the package/class this method depends on
@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionOnFlushIfASendFailed() throws Exception {
final RecordCollector collector = new RecordCollectorImpl(
new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
@Override
public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
callback.onCompletion(null, new Exception());
return null;
}
},
"test");
collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.flush();
}
Example 5: shouldThrowStreamsExceptionOnCloseIfASendFailed
import org.apache.kafka.clients.producer.Callback; // import the package/class this method depends on
@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionOnCloseIfASendFailed() throws Exception {
final RecordCollector collector = new RecordCollectorImpl(
new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
@Override
public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
callback.onCompletion(null, new Exception());
return null;
}
},
"test");
collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.close();
}
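Examples 3 through 5 all depend on the same behaviour of RecordCollectorImpl: the exception handed to onCompletion is remembered and surfaces (wrapped in a StreamsException) on the next send, flush, or close. A minimal sketch of that latch-the-first-error pattern, not the actual Kafka Streams implementation, could look like this:
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
class ErrorLatchingCollectorSketch<K, V> {
    private final Producer<K, V> producer;
    private volatile Exception sendException; // first asynchronous failure, if any
    ErrorLatchingCollectorSketch(Producer<K, V> producer) {
        this.producer = producer;
    }
    void send(ProducerRecord<K, V> record) {
        checkForException(); // a previous failure surfaces on the next call
        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null && sendException == null) {
                    sendException = exception; // latch only the first error
                }
            }
        });
    }
    void flush() {
        producer.flush();
        checkForException();
    }
    void close() {
        producer.close();
        checkForException();
    }
    private void checkForException() {
        if (sendException != null) {
            // the real RecordCollectorImpl wraps this in org.apache.kafka.streams.errors.StreamsException
            throw new RuntimeException("error sending record", sendException);
        }
    }
}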
Example 6: send
import org.apache.kafka.clients.producer.Callback; // import the package/class this method depends on
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record, final Callback callback) {
// ask the error manager only once; calling nextError() twice would consume two entries from the error sequence
boolean error = errorManager.nextError(record.value());
if (error) {
final Exception e = new Exception();
callback.onCompletion(null, e);
return nullFuture;
}
else {
return super.send(record, callback);
}
}
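The nullFuture returned on the failure branch is a field of the surrounding test class that is not shown here; one plausible way to build such an already-completed Future<RecordMetadata> (an assumption, since the original definition is not part of the snippet) is:
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.RecordMetadata;
class NullFutureSketch {
    // an already-completed Future whose value is null, usable as the result of the simulated failed send
    static final Future<RecordMetadata> NULL_FUTURE =
            CompletableFuture.<RecordMetadata>completedFuture(null);
}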
Example 7: send
import org.apache.kafka.clients.producer.Callback; // import the package/class this method depends on
@Override
public synchronized Future<RecordMetadata> send(ProducerRecord<String, String> record, Callback callback) {
// report a simulated broker-side write failure to the callback before delegating to the underlying MockProducer
callback.onCompletion(null, new SimulatedWriteException());
return super.send(record, callback);
}
Example 8: runTest
import org.apache.kafka.clients.producer.Callback; // import the package/class this method depends on
private void runTest(boolean flushOnCheckpoint) throws Throwable {
Properties props = new Properties();
final AtomicBoolean snapshottingFinished = new AtomicBoolean(false);
final TestingKafkaProducer<String> producer = new TestingKafkaProducer<>("someTopic", new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), props,
snapshottingFinished);
producer.setFlushOnCheckpoint(flushOnCheckpoint);
OneInputStreamOperatorTestHarness<String, Object> testHarness =
new OneInputStreamOperatorTestHarness<>(new StreamSink(producer));
testHarness.open();
for (int i = 0; i < 100; i++) {
testHarness.processElement(new StreamRecord<>("msg-" + i));
}
// start a thread confirming all pending records
final Tuple1<Throwable> runnableError = new Tuple1<>(null);
final Thread threadA = Thread.currentThread();
Runnable confirmer = new Runnable() {
@Override
public void run() {
try {
MockProducer mp = producer.getProducerInstance();
List<Callback> pending = mp.getPending();
// we would like to verify that snapshot() blocks until every pending record is confirmed.
// "Blocks forever" cannot be observed directly: once snapshot() is running it starts
// removing elements from the pending list, so we wait briefly and then check that
// nothing has been confirmed yet.
synchronized (threadA) {
threadA.wait(500L);
}
// we now check that no records have been confirmed yet
Assert.assertEquals(100, pending.size());
Assert.assertFalse("Snapshot method returned before all records were confirmed",
snapshottingFinished.get());
// now confirm all checkpoints
for (Callback c: pending) {
c.onCompletion(null, null);
}
pending.clear();
} catch(Throwable t) {
runnableError.f0 = t;
}
}
};
Thread threadB = new Thread(confirmer);
threadB.start();
// this should block:
testHarness.snapshot(0, 0);
synchronized (threadA) {
threadA.notifyAll(); // just in case, to let the test fail faster
}
Assert.assertEquals(0, producer.getProducerInstance().getPending().size());
Deadline deadline = FiniteDuration.apply(5, "s").fromNow();
while (deadline.hasTimeLeft() && threadB.isAlive()) {
threadB.join(500);
}
Assert.assertFalse("Thread A is expected to be finished at this point. If not, the test is prone to fail", threadB.isAlive());
if (runnableError.f0 != null) {
throw runnableError.f0;
}
testHarness.close();
}
Example 9: runAtLeastOnceTest
import org.apache.kafka.clients.producer.Callback; // import the package/class this method depends on
private void runAtLeastOnceTest(boolean flushOnCheckpoint) throws Throwable {
final AtomicBoolean snapshottingFinished = new AtomicBoolean(false);
final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
FakeStandardProducerConfig.get(), null, snapshottingFinished);
producer.setFlushOnCheckpoint(flushOnCheckpoint);
OneInputStreamOperatorTestHarness<String, Object> testHarness =
new OneInputStreamOperatorTestHarness<>(new StreamSink(producer));
testHarness.open();
for (int i = 0; i < 100; i++) {
testHarness.processElement(new StreamRecord<>("msg-" + i));
}
// start a thread confirming all pending records
final Tuple1<Throwable> runnableError = new Tuple1<>(null);
final Thread threadA = Thread.currentThread();
Runnable confirmer = new Runnable() {
@Override
public void run() {
try {
MockProducer mp = producer.getProducerInstance();
List<Callback> pending = mp.getPending();
// we would like to verify that snapshot() blocks until every pending record is confirmed.
// "Blocks forever" cannot be observed directly: once snapshot() is running it starts
// removing elements from the pending list, so we wait briefly and then check that
// nothing has been confirmed yet.
synchronized (threadA) {
threadA.wait(500L);
}
// we now check that no records have been confirmed yet
Assert.assertEquals(100, pending.size());
Assert.assertFalse("Snapshot method returned before all records were confirmed",
snapshottingFinished.get());
// now confirm all checkpoints
for (Callback c: pending) {
c.onCompletion(null, null);
}
pending.clear();
} catch(Throwable t) {
runnableError.f0 = t;
}
}
};
Thread threadB = new Thread(confirmer);
threadB.start();
// this should block:
testHarness.snapshot(0, 0);
synchronized (threadA) {
threadA.notifyAll(); // just in case, to let the test fail faster
}
Assert.assertEquals(0, producer.getProducerInstance().getPending().size());
Deadline deadline = FiniteDuration.apply(5, "s").fromNow();
while (deadline.hasTimeLeft() && threadB.isAlive()) {
threadB.join(500);
}
Assert.assertFalse("Thread A is expected to be finished at this point. If not, the test is prone to fail", threadB.isAlive());
if (runnableError.f0 != null) {
throw runnableError.f0;
}
testHarness.close();
}