This article collects typical usage examples of the Java class org.apache.kafka.clients.producer.Callback. If you are wondering what the Callback class does and how to use it, the curated examples below should help.
The Callback class belongs to the org.apache.kafka.clients.producer package. Fifteen code examples are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
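Before the examples, here is a minimal, self-contained sketch of the interface itself. Callback declares a single method, onCompletion(RecordMetadata metadata, Exception exception), which the producer's I/O thread invokes exactly once per record: on success metadata is non-null and exception is null, and on failure the reverse. The broker address, topic name, and serializers below are illustrative assumptions, not part of the examples that follow.

import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class CallbackSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value"), new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    // Exactly one of metadata / exception is non-null.
                    if (exception != null) {
                        exception.printStackTrace();
                    } else {
                        System.out.printf("acked at %s-%d@%d%n",
                                metadata.topic(), metadata.partition(), metadata.offset());
                    }
                }
            });
        }
    }
}

Because Callback has a single abstract method, a lambda such as producer.send(record, (metadata, exception) -> { ... }) works equally well on Java 8+, which is why many of the anonymous classes below could be shortened.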
Example 1: sendDataToKafka
import org.apache.kafka.clients.producer.Callback; // import the required package/class
private void sendDataToKafka(long batchId, byte[] data, Tuple input) {
@SuppressWarnings("rawtypes")
ProducerRecord record = new ProducerRecord<>(outputTopic, "", data);
producer.send(record, new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception e) {
synchronized (collector) {
if (e != null) {
collector.fail(input);
logger.error("Kafka ack failed for the message with batchId " + batchId, e);
} else {
collector.ack(input);
logger.debug("Kafka acked the message with batchId " + batchId);
}
}
}
});
}
Example 2: send
import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
/*
// Create wrappedRecord because headers become read-only in the original record
// (if the same record is sent a second time)
ProducerRecord<K, V> wrappedRecord = new ProducerRecord<>(record.topic(),
record.partition(),
record.timestamp(),
record.key(),
record.value(),
record.headers());
*/
try (Scope scope = buildAndInjectSpan(record)) {
Callback wrappedCallback = new TracingCallback(callback, scope);
return producer.send(record, wrappedCallback);
}
}
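Example 2 depends on a TracingCallback that completes the tracing span once the broker acknowledges the record; the class itself is not shown. The following is a rough sketch of what such a wrapper plausibly looks like, assuming the OpenTracing 0.31-era API in which a Scope still exposes its Span; the field names and error-tagging details are assumptions, not the library's exact code.

import io.opentracing.Scope;
import io.opentracing.tag.Tags;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;

class TracingCallback implements Callback {
    private final Callback wrapped; // the user's callback, may be null
    private final Scope scope;      // scope created by buildAndInjectSpan

    TracingCallback(Callback wrapped, Scope scope) {
        this.wrapped = wrapped;
        this.scope = scope;
    }

    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        try {
            if (exception != null) {
                // Mark the span as failed before finishing it.
                Tags.ERROR.set(scope.span(), Boolean.TRUE);
            }
            if (wrapped != null) {
                wrapped.onCompletion(metadata, exception);
            }
        } finally {
            scope.span().finish();
        }
    }
}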
Example 3: test
import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Test
public void test() throws Exception {
Producer<Integer, String> producer = createProducer();
// Send 1
producer.send(new ProducerRecord<>("messages", 1, "test"));
// Send 2
producer.send(new ProducerRecord<>("messages", 1, "test"), new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
assertEquals("messages", metadata.topic());
}
});
final CountDownLatch latch = new CountDownLatch(2);
createConsumer(latch, 1);
producer.close();
List<MockSpan> mockSpans = mockTracer.finishedSpans();
assertEquals(4, mockSpans.size());
checkSpans(mockSpans);
assertNull(mockTracer.activeSpan());
}
Example 4: sendCruiseControlMetric
import org.apache.kafka.clients.producer.Callback; // import the required package/class
/**
* Send a CruiseControlMetric to the Kafka topic.
* @param ccm the Cruise Control metric to send.
*/
public void sendCruiseControlMetric(CruiseControlMetric ccm) {
// Use topic name as key if existing so that the same sampler will be able to collect all the information
// of a topic.
String key = ccm.metricClassId() == CruiseControlMetric.MetricClassId.TOPIC_METRIC ?
((TopicMetric) ccm).topic() : Integer.toString(ccm.brokerId());
ProducerRecord<String, CruiseControlMetric> producerRecord =
new ProducerRecord<>(_cruiseControlMetricsTopic, null, ccm.time(), key, ccm);
LOG.debug("Sending Cruise Control metric {}.", ccm);
_producer.send(producerRecord, new Callback() {
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
if (e != null) {
LOG.warn("Failed to send Cruise Control metric {}.", ccm, e);
_numMetricSendFailure++;
}
}
});
}
Example 5: setUp
import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Before
public void setUp() {
super.setUp();
Properties props = new Properties();
props.setProperty(ProducerConfig.ACKS_CONFIG, "-1");
AtomicInteger failed = new AtomicInteger(0);
try (Producer<String, String> producer = createProducer(props)) {
for (int i = 0; i < 10; i++) {
producer.send(new ProducerRecord<>("TestTopic", Integer.toString(i)), new Callback() {
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
if (e != null) {
failed.incrementAndGet();
}
}
});
}
}
assertEquals(0, failed.get());
}
Example 6: sendTableStatInfo
import org.apache.kafka.clients.producer.Callback; // import the required package/class
/**
 * Send stat info to the statistics topic; we do not care whether the send succeeds.
 * @param message the stat message to send
 */
private void sendTableStatInfo(StatMessage message) {
String key = String.format("%s.%s.%s.%s.%s", message.getDsName(), message.getSchemaName(), message.getTableName(),
message.getType(), message.getTxTimeMS());
String value = message.toJSONString();
Callback callback = new Callback() {
@Override
public void onCompletion(RecordMetadata ignored, Exception e) {
if (e != null) {
logger.error(String.format("Send statistic FAIL: toTopic=%s, key=%s", statTopic, key), e);
} else {
logger.info(String.format("Send statistic successful: toTopic=%s, key=%s", statTopic, key));
}
}
};
producer.send(new ProducerRecord<>(statTopic, key, value), callback);
}
Example 7: sendMessageToKafka
import org.apache.kafka.clients.producer.Callback; // import the required package/class
@SuppressWarnings("unchecked")
private void sendMessageToKafka(String key, DbusMessage dbusMessage, AtomicLong sendCnt, AtomicLong recvCnt, AtomicBoolean isError) throws Exception {
if (stringProducer == null) {
throw new Exception("producer is null, can't send to kafka!");
}
ProducerRecord record = new ProducerRecord<>(resultTopic, key, dbusMessage.toString());
sendCnt.getAndIncrement();
stringProducer.send(record, new Callback() {
public void onCompletion(RecordMetadata metadata, Exception e) {
if (e != null) {
e.printStackTrace();
isError.set(true);
} else {
recvCnt.getAndIncrement();
}
}
});
}
Example 8: run
import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Override
public void run() {
System.out.println("Producing to topic " + topic);
String numPartitions = System.getenv().getOrDefault("NUM_PARTITIONS", "1");
System.out.println("Total Partitions " + numPartitions);
while (true) {
try {
producer.send(new ProducerRecord<>(topic, "key-" + rnd.nextInt(10), "val-" + rnd.nextInt(10)),
new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
if (exception != null) {
// On failure metadata is null, so report the error instead of dereferencing it.
exception.printStackTrace();
} else {
System.out.println("Sent data to Offset " + metadata.offset()
+ " in Partition " + metadata.partition());
}
}
});
Thread.sleep(Long.valueOf(producerPause));
} catch (Exception ex) {
Logger.getLogger(Producer.class.getName()).log(Level.SEVERE, null, ex);
}
}
}
Example 9: send
import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Override
public <K, V, E> boolean send(Producer<K, V> producer, ProducerRecord<K, V> record, final E event,
final FailedDeliveryCallback<E> failedDeliveryCallback) {
try {
producer.send(record, new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
if (exception != null) {
failedDeliveryCallback.onFailedDelivery(event, exception);
}
}
});
return true;
} catch (BufferExhaustedException e) {
failedDeliveryCallback.onFailedDelivery(event, e);
return false;
}
}
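Example 9 is taken from a logging appender: rather than letting a logging call fail, delivery problems are routed to a FailedDeliveryCallback so the caller can fall back (for instance, to a secondary appender). The snippet does not include that interface; a minimal sketch consistent with how it is used above would be:

// Assumed shape of the callback interface used in Example 9.
interface FailedDeliveryCallback<E> {
    void onFailedDelivery(E event, Throwable throwable);
}

Note the two failure paths: BufferExhaustedException is thrown synchronously when the producer's local buffer is full, while broker-side failures arrive asynchronously through the Kafka Callback.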
Example 10: send
import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Override
public Future<RecordMetadata> send(final ProducerRecord<K, V> record, final Callback callback) {
return Retries.tryMe(new IgniteClosure<RetryCallableAsyncOnCallback, Future<RecordMetadata>>() {
@Override
public Future<RecordMetadata> apply(final RetryCallableAsyncOnCallback retryCallableAsyncOnCallback) {
return inner.send(record, new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
callback.onCompletion(metadata, exception);
if (exception != null) {
retryCallableAsyncOnCallback.retry(exception);
}
}
});
}
});
}
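Example 10 decorates the user's callback so that a failed send can be retried asynchronously; the Retries and RetryCallableAsyncOnCallback helpers belong to the surrounding project and are not shown. As a rough, self-contained illustration of the same decorator idea without those helpers, a retrying wrapper might look like this (the retry limit and resend-from-the-callback policy are assumptions for the sketch):

import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

final class RetryingSender<K, V> {
    private final Producer<K, V> inner;
    private final int maxRetries;

    RetryingSender(Producer<K, V> inner, int maxRetries) {
        this.inner = inner;
        this.maxRetries = maxRetries;
    }

    Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback userCallback) {
        AtomicInteger attempts = new AtomicInteger();
        return inner.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null && attempts.incrementAndGet() <= maxRetries) {
                    inner.send(record, this); // resend with this same callback
                } else if (userCallback != null) {
                    userCallback.onCompletion(metadata, exception);
                }
            }
        });
    }
}

One caveat of this simplified version: the returned Future tracks only the first attempt, which is one reason production implementations lean on dedicated retry utilities instead.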
Example 11: putSafeWithNoPreviousValueIsPropagated
import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Test
public void putSafeWithNoPreviousValueIsPropagated() {
final Converter converter = mock(Converter.class);
final KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
final KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
final byte[] value = new byte[0];
final Capture<Struct> statusValueStruct = newCapture();
converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), capture(statusValueStruct));
EasyMock.expectLastCall().andReturn(value);
kafkaBasedLog.send(eq("status-connector-" + CONNECTOR), eq(value), anyObject(Callback.class));
expectLastCall();
replayAll();
final ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.FAILED, WORKER_ID, 0);
store.putSafe(status);
verifyAll();
assertEquals(status.state().toString(), statusValueStruct.getValue().get(KafkaStatusBackingStore.STATE_KEY_NAME));
assertEquals(status.workerId(), statusValueStruct.getValue().get(KafkaStatusBackingStore.WORKER_ID_KEY_NAME));
assertEquals(status.generation(), statusValueStruct.getValue().get(KafkaStatusBackingStore.GENERATION_KEY_NAME));
}
Example 12: tryAppend
import org.apache.kafka.clients.producer.Callback; // import the required package/class
/**
* Try to append to a ProducerBatch.
*
* If it is full, we return null and a new batch is created. We also close the batch for record appends to free up
* resources like compression buffers. The batch will be fully closed (i.e. the record batch headers will be written
* and memory records built) in one of the following cases (whichever comes first): right before send,
* if it is expired, or when the producer is closed.
*/
// Find the last ProducerBatch in the deque that the batches map keeps for this partition
private RecordAppendResult tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, Deque<ProducerBatch> deque) {
// Take the last batch in the queue
ProducerBatch last = deque.peekLast();
if (last != null) {
// Call ProducerBatch.tryAppend, which returns a FutureRecordMetadata if the MemoryRecordsBuilder still has room
FutureRecordMetadata future = last.tryAppend(timestamp, key, value, headers, callback, time.milliseconds());
if (future == null)
last.closeForRecordAppends();
else
return new RecordAppendResult(future, deque.size() > 1 || last.isFull(), false);
}
return null;
}
Example 13: send
import org.apache.kafka.clients.producer.Callback; // import the required package/class
public Future<RecordMetadata> send(Object value, Callback callback) {
DocumentMetadata documentMetadata =
Metadata.getMetadata().getMetadataMap().get(value.getClass());
String topic = documentMetadata.getTopic();
Method method = documentMetadata.getIdMetadata().getMethod();
try {
String key = String.valueOf(method.invoke(value));
return producer.send(new ProducerRecord<>(topic, key, value), callback);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new JkesException(
String.format("Can't invoke method[%s] on object[%s] of class[%s]", method, value, value.getClass()),
e);
}
}
Example 14: ErrorLoggingCallback
import org.apache.kafka.clients.producer.Callback; // import the required package/class
public ErrorLoggingCallback(UUID messageId,
K key,
V value,
String topic,
Long timestamp,
Integer serializedSize,
Auditor<K, V> auditor,
Callback userCallback) {
_messageId = messageId;
_value = value;
_key = key;
_topic = topic;
_timestamp = timestamp;
_serializedSize = serializedSize;
_auditor = auditor;
_userCallback = userCallback;
}
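Example 14 shows only the constructor of an error-logging callback that also carries auditing state. The class evidently implements Callback as well; here is a plausible sketch of its onCompletion, based purely on the fields stored above (the LOG field, the log wording, and the auditing step are assumptions, not the project's actual code):

@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
    if (e != null) {
        // Log enough context to identify the failed message (assumed LOG field).
        LOG.error("Failed to send message " + _messageId + " to topic " + _topic, e);
        // An auditing step via _auditor would plausibly record the failure here.
    }
    // Always propagate the result to the user's callback, if one was provided.
    if (_userCallback != null) {
        _userCallback.onCompletion(recordMetadata, e);
    }
}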
Example 15: run
import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Override
public void run() {
final Set<String> ackedMessages = new HashSet<>();
for (int i = 0; i < MESSAGE_COUNT; i++) {
// The message size ranges from 100 to 1123 bytes, so most of the messages should be large messages
// while some are still of ordinary size.
int messageSize = 100 + _random.nextInt(1024); // nextInt() % 1024 could be negative, so use the bounded variant
final String messageId = UUID.randomUUID().toString().replace("-", "");
final String message = messageId + TestUtils.getRandomString(messageSize);
_producer.send(new ProducerRecord<String, String>(_topic, null, (long) i, null, message),
new Callback() {
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
// The callback should have been invoked only once.
assertFalse(ackedMessages.contains(messageId));
if (e == null) {
ackedMessages.add(messageId);
}
_messages.put(recordMetadata.topic() + "-" + recordMetadata.partition() + "-" + recordMetadata.offset(), message);
}
});
}
}