

Java Callback Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.clients.producer.Callback. If you are wondering what the Callback class does, how to use it, or where to find concrete examples, the curated code samples below may help.


The Callback class belongs to the org.apache.kafka.clients.producer package. Below are 15 code examples of the Callback class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
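For readers new to the class: a Callback is passed as the second argument of KafkaProducer.send(record, callback), and its single method onCompletion(RecordMetadata metadata, Exception exception) is invoked exactly once per record, after the broker acknowledges the send or the send fails (exception is null on success; on failure, metadata may be null depending on the client version). Before diving into the collected examples, here is a minimal self-contained usage sketch (the bootstrap address and topic name are placeholders, not taken from any example below):

import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class CallbackQuickStart {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value"), new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception != null) {
                        exception.printStackTrace(); // the send failed
                    } else {
                        System.out.printf("Sent to %s-%d@%d%n",
                                metadata.topic(), metadata.partition(), metadata.offset());
                    }
                }
            });
        } // close() flushes pending records, so the callback fires before exit
    }
}

Since Callback declares a single method, it can also be written as a lambda on Java 8 and later.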

Example 1: sendDataToKafka

import org.apache.kafka.clients.producer.Callback; // import the required package/class
private void sendDataToKafka(long batchId, byte[] data, Tuple input) {
    @SuppressWarnings("rawtypes")
    ProducerRecord record = new ProducerRecord<>(outputTopic, "", data);

    producer.send(record, new Callback() {
        public void onCompletion(RecordMetadata metadata, Exception e) {
            synchronized (collector) {
                if (e != null) {
                    collector.fail(input);
                    logger.error("Kafka failed to ack the message with batchId " + batchId, e);
                } else {
                    collector.ack(input);
                    // e is always null on success, so it is not passed to the logger here
                    logger.debug("Kafka acked the message with batchId " + batchId);
                }
            }
        }
    });
}
 
Author: BriData, Project: DBus, Lines: 18, Source: KafkaProducerBolt.java

Example 2: send

import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
  /*
  // Create a wrappedRecord because the headers of a record become read-only once it has been sent
  // (relevant if the same record is sent a second time)
  ProducerRecord<K, V> wrappedRecord = new ProducerRecord<>(record.topic(),
      record.partition(),
      record.timestamp(),
      record.key(),
      record.value(),
      record.headers());
  */

  try (Scope scope = buildAndInjectSpan(record)) {
    Callback wrappedCallback = new TracingCallback(callback, scope);
    return producer.send(record, wrappedCallback);
  }
}
 
Author: opentracing-contrib, Project: java-kafka-client, Lines: 18, Source: TracingKafkaProducer.java

Example 3: test

import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Test
public void test() throws Exception {
  Producer<Integer, String> producer = createProducer();

  // Send 1
  producer.send(new ProducerRecord<>("messages", 1, "test"));

  // Send 2
  producer.send(new ProducerRecord<>("messages", 1, "test"), new Callback() {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
      assertEquals("messages", metadata.topic());
    }
  });

  final CountDownLatch latch = new CountDownLatch(2);
  createConsumer(latch, 1);

  producer.close();

  List<MockSpan> mockSpans = mockTracer.finishedSpans();
  assertEquals(4, mockSpans.size());
  checkSpans(mockSpans);
  assertNull(mockTracer.activeSpan());
}
 
Author: opentracing-contrib, Project: java-kafka-client, Lines: 26, Source: TracingKafkaTest.java

Example 4: sendCruiseControlMetric

import org.apache.kafka.clients.producer.Callback; // import the required package/class
/**
 * Send a CruiseControlMetric to the Kafka topic.
 * @param ccm the Cruise Control metric to send.
 */
public void sendCruiseControlMetric(CruiseControlMetric ccm) {
  // Use topic name as key if existing so that the same sampler will be able to collect all the information
  // of a topic.
  String key = ccm.metricClassId() == CruiseControlMetric.MetricClassId.TOPIC_METRIC ?
      ((TopicMetric) ccm).topic() : Integer.toString(ccm.brokerId());
  ProducerRecord<String, CruiseControlMetric> producerRecord =
      new ProducerRecord<>(_cruiseControlMetricsTopic, null, ccm.time(), key, ccm);
  LOG.debug("Sending Cruise Control metric {}.", ccm);
  _producer.send(producerRecord, new Callback() {
    @Override
    public void onCompletion(RecordMetadata recordMetadata, Exception e) {
      if (e != null) {
        LOG.warn("Failed to send Cruise Control metric {}", ccm);
        _numMetricSendFailure++;
      }
    }
  });
}
 
Author: linkedin, Project: cruise-control, Lines: 23, Source: CruiseControlMetricsReporter.java

Example 5: setUp

import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Before
public void setUp() {
  super.setUp();
  Properties props = new Properties();
  props.setProperty(ProducerConfig.ACKS_CONFIG, "-1");
  AtomicInteger failed = new AtomicInteger(0);
  try (Producer<String, String> producer = createProducer(props)) {
    for (int i = 0; i < 10; i++) {
      producer.send(new ProducerRecord<>("TestTopic", Integer.toString(i)), new Callback() {
        @Override
        public void onCompletion(RecordMetadata recordMetadata, Exception e) {
          if (e != null) {
            failed.incrementAndGet();
          }
        }
      });
    }
  }
  assertEquals(0, failed.get());
}
 
Author: linkedin, Project: cruise-control, Lines: 21, Source: CruiseControlMetricsReporterTest.java

Example 6: sendTableStatInfo

import org.apache.kafka.clients.producer.Callback; // import the required package/class
/**
 * Send stat info to the statistics topic; delivery success is intentionally not tracked.
 * @param message the statistics message to send
 */
private void sendTableStatInfo(StatMessage message) {

    String key = String.format("%s.%s.%s.%s.%s", message.getDsName(), message.getSchemaName(), message.getTableName(),
            message.getType(), message.getTxTimeMS());
    String value = message.toJSONString();

    Callback callback = new Callback() {
        @Override
        public void onCompletion(RecordMetadata ignored, Exception e) {
            if (e != null) {
                logger.error(String.format("Send statistic FAIL: toTopic=%s, key=%s", statTopic, key));
            } else {
                logger.info(String.format("  Send statistic successful: toTopic=%s, key=(%s)", statTopic, key));
            }
        }
    };

    Future<RecordMetadata> result = producer.send(new ProducerRecord<>(statTopic, key, value), callback);
}
 
Author: BriData, Project: DBus, Lines: 24, Source: MessageProcessor.java

Example 7: sendMessageToKafka

import org.apache.kafka.clients.producer.Callback; // import the required package/class
@SuppressWarnings("unchecked")
private void sendMessageToKafka(String key, DbusMessage dbusMessage, AtomicLong sendCnt, AtomicLong recvCnt, AtomicBoolean isError) throws Exception{
    if(stringProducer == null) {
        throw new Exception("producer is null, can't send to kafka!");
    }

    ProducerRecord record = new ProducerRecord<>(resultTopic, key, dbusMessage.toString());
    sendCnt.getAndIncrement();
    stringProducer.send(record, new Callback() {
        public void onCompletion(RecordMetadata metadata, Exception e) {
            if (e != null) {
                e.printStackTrace();
                isError.set(true);
            } else {
                recvCnt.getAndIncrement();
            }
        }
    });
}
 
Author: BriData, Project: DBus, Lines: 20, Source: PagedBatchDataFetchingBolt.java

Example 8: run

import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Override
public void run() {
    System.out.println("Producing to topic " + topic);
    String numPartitions = System.getenv().getOrDefault("NUM_PARTITIONS", "1");
    System.out.println("Total Partitions " + numPartitions);

    while (true) {
        try {
            producer.send(new ProducerRecord<>(topic, "key-" + rnd.nextInt(10), "val-" + rnd.nextInt(10)),
            new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception != null) {
                        // metadata is null when the send fails, so check the exception first
                        exception.printStackTrace();
                    } else {
                        System.out.println("Sent data to Offset " + metadata.offset()
                                + " in Partition " + metadata.partition());
                    }
                }
            });

            Thread.sleep(Long.valueOf(producerPause));
        } catch (Exception ex) {
            Logger.getLogger(Producer.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

}
 
Author: abhirockzz, Project: kafka-javaee-concurrency-utilities, Lines: 25, Source: Producer.java

Example 9: send

import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Override
public <K, V, E> boolean send(Producer<K, V> producer, ProducerRecord<K, V> record, final E event,
                              final FailedDeliveryCallback<E> failedDeliveryCallback) {
    try {
        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    failedDeliveryCallback.onFailedDelivery(event, exception);
                }
            }
        });
        return true;
    } catch (BufferExhaustedException e) {
        failedDeliveryCallback.onFailedDelivery(event, e);
        return false;
    }
}
 
Author: wngn123, Project: wngn-jms-kafka, Lines: 19, Source: AsynchronousDeliveryStrategy.java

Example 10: send

import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Override
public Future<RecordMetadata> send(final ProducerRecord<K, V> record, final Callback callback) {
    return Retries.tryMe(new IgniteClosure<RetryCallableAsyncOnCallback, Future<RecordMetadata>>() {
        @Override
        public Future<RecordMetadata> apply(final RetryCallableAsyncOnCallback retryCallableAsyncOnCallback) {
            return inner.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    callback.onCompletion(metadata, exception);
                    if (exception != null) {
                        retryCallableAsyncOnCallback.retry(exception);
                    }
                }
            });
        }
    });
}
 
Author: epam, Project: Lagerta, Lines: 18, Source: ProducerProxyRetry.java

Example 11: putSafeWithNoPreviousValueIsPropagated

import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Test
public void putSafeWithNoPreviousValueIsPropagated() {
    final Converter converter = mock(Converter.class);
    final KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    final KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);

    final byte[] value = new byte[0];

    final Capture<Struct> statusValueStruct = newCapture();
    converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), capture(statusValueStruct));
    EasyMock.expectLastCall().andReturn(value);

    kafkaBasedLog.send(eq("status-connector-" + CONNECTOR), eq(value), anyObject(Callback.class));
    expectLastCall();

    replayAll();

    final ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.FAILED, WORKER_ID, 0);
    store.putSafe(status);

    verifyAll();

    assertEquals(status.state().toString(), statusValueStruct.getValue().get(KafkaStatusBackingStore.STATE_KEY_NAME));
    assertEquals(status.workerId(), statusValueStruct.getValue().get(KafkaStatusBackingStore.WORKER_ID_KEY_NAME));
    assertEquals(status.generation(), statusValueStruct.getValue().get(KafkaStatusBackingStore.GENERATION_KEY_NAME));
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 27, Source: KafkaStatusBackingStoreTest.java

Example 12: tryAppend

import org.apache.kafka.clients.producer.Callback; // import the required package/class
/**
 *  Try to append to a ProducerBatch.
 *
 *  If it is full, we return null and a new batch is created. We also close the batch for record appends to free up
 *  resources like compression buffers. The batch will be fully closed (i.e. the record batch headers will be written
 *  and memory records built) in one of the following cases (whichever comes first): right before send,
 *  if it is expired, or when the producer is closed.
 */
// Look up the last ProducerBatch in the queue that the batches map keeps for this partition
private RecordAppendResult tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, Deque<ProducerBatch> deque) {
    // Take the last batch in the queue
    ProducerBatch last = deque.peekLast();
    if (last != null) {
        // ProducerBatch.tryAppend returns a FutureRecordMetadata when the MemoryRecordsBuilder still has room
        FutureRecordMetadata future = last.tryAppend(timestamp, key, value, headers, callback, time.milliseconds());
        if (future == null)
            last.closeForRecordAppends();
        else
            return new RecordAppendResult(future, deque.size() > 1 || last.isFull(), false);
    }
    return null;
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 24, Source: RecordAccumulator.java
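The tryAppend logic above is easier to picture with a toy model. The following is a simplified sketch of per-partition batching (my own illustration, not Kafka's actual RecordAccumulator: the class names, the integer capacity, and the counting are all invented for clarity); it shows why only the last batch in the deque is ever appended to:

import java.util.ArrayDeque;
import java.util.Deque;

// Toy model of per-partition batching; illustrative only, not Kafka's real code.
class BatchingSketch {
    static final int BATCH_CAPACITY = 3; // invented capacity for the illustration

    static class Batch {
        int records = 0;

        // Returns false when the batch is full, mirroring tryAppend returning null.
        boolean tryAppend() {
            if (records >= BATCH_CAPACITY) {
                return false;
            }
            records++;
            return true;
        }
    }

    private final Deque<Batch> deque = new ArrayDeque<>();

    void append() {
        Batch last = deque.peekLast();
        if (last == null || !last.tryAppend()) {
            // The last batch is missing or full: start a fresh batch at the tail,
            // which is what the producer does when tryAppend returns null.
            Batch fresh = new Batch();
            fresh.tryAppend();
            deque.addLast(fresh);
        }
    }
}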

Example 13: send

import org.apache.kafka.clients.producer.Callback; // import the required package/class
public Future<RecordMetadata> send(Object value, Callback callback) {
    DocumentMetadata documentMetadata =
            Metadata.getMetadata().getMetadataMap().get(value.getClass());

    String topic = documentMetadata.getTopic();

    Method method = documentMetadata.getIdMetadata().getMethod();
    try {
        String key = String.valueOf(method.invoke(value));
        return producer.send(new ProducerRecord<>(topic, key, value), callback);
    } catch (IllegalAccessException | InvocationTargetException e) {
        throw new JkesException(
                String.format("Can't invoke method[%s] on object[%s] of class[%s]", method, value, value.getClass()),
                e);
    }
}
 
Author: chaokunyang, Project: jkes, Lines: 17, Source: JkesKafkaProducer.java

Example 14: ErrorLoggingCallback

import org.apache.kafka.clients.producer.Callback; // import the required package/class
public ErrorLoggingCallback(UUID messageId,
                            K key,
                            V value,
                            String topic,
                            Long timestamp,
                            Integer serializedSize,
                            Auditor<K, V> auditor,
                            Callback userCallback) {
  _messageId = messageId;
  _value = value;
  _key = key;
  _topic = topic;
  _timestamp = timestamp;
  _serializedSize = serializedSize;
  _auditor = auditor;
  _userCallback = userCallback;
}
 
Author: becketqin, Project: likafka-clients, Lines: 18, Source: LiKafkaProducerImpl.java
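The constructor above only captures state; the actual error handling happens later in onCompletion, which is not shown in this snippet. As a rough illustration of the wrapping pattern (a simplified sketch with invented names and logging, not the actual LiKafkaProducerImpl code), a delegating error-logging callback can look like this:

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;

// Simplified sketch of a delegating, error-logging callback (illustrative only).
public class LoggingCallbackSketch implements Callback {
    private final String topic;
    private final Callback userCallback; // may be null

    public LoggingCallbackSketch(String topic, Callback userCallback) {
        this.topic = topic;
        this.userCallback = userCallback;
    }

    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            System.err.println("Failed to send record to topic " + topic + ": " + exception);
        }
        // Always propagate the result to the wrapped user callback.
        if (userCallback != null) {
            userCallback.onCompletion(metadata, exception);
        }
    }
}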

Example 15: run

import org.apache.kafka.clients.producer.Callback; // import the required package/class
@Override
public void run() {
  final Set<String> ackedMessages = new HashSet<>();
  for (int i = 0; i < MESSAGE_COUNT; i++) {
    // The message size ranges from 100 to 1123 bytes, so most of the messages are large messages
    // while some are still of ordinary size.
    int messageSize = 100 + _random.nextInt(1024);
    final String messageId = UUID.randomUUID().toString().replace("-", "");
    final String message = messageId + TestUtils.getRandomString(messageSize);

    _producer.send(new ProducerRecord<String, String>(_topic, null, (long) i, null, message),
                   new Callback() {
                     @Override
                     public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                       // The callback should have been invoked only once.
                       assertFalse(ackedMessages.contains(messageId));
                       if (e == null) {
                         ackedMessages.add(messageId);
                       }
                       _messages.put(recordMetadata.topic() + "-" + recordMetadata.partition() + "-" + recordMetadata.offset(), message);
                     }
                   });
  }
}
 
Author: becketqin, Project: likafka-clients, Lines: 25, Source: LiKafkaConsumerIntegrationTest.java


Note: The org.apache.kafka.clients.producer.Callback class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Please consult each project's License before using or redistributing the code, and do not repost without permission.