This article collects typical usage examples of the Producer.send method from the Java class org.apache.kafka.clients.producer.Producer. If you are unsure what Producer.send does, how to call it, or what idiomatic usage looks like, the curated method examples below may help. You can also explore further usage examples of the containing class org.apache.kafka.clients.producer.Producer.
Below are 15 code examples of Producer.send, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
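Before turning to the examples, note that Producer.send is asynchronous: it appends the record to an in-memory buffer and immediately returns a Future<RecordMetadata>. The minimal sketch below (written for this article, not drawn from the examples; the topic "demo" and the localhost broker address are placeholder assumptions) shows the three usual calling patterns: fire-and-forget, synchronous via Future.get(), and asynchronous with a Callback.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class SendPatterns {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        Producer<String, String> producer = new KafkaProducer<>(props);

        // 1. Fire-and-forget: ignore the returned Future; failures surface only in client logs.
        producer.send(new ProducerRecord<>("demo", "k", "v1"));

        // 2. Synchronous: block on the Future until the broker acknowledges (or the send fails).
        RecordMetadata meta = producer.send(new ProducerRecord<>("demo", "k", "v2")).get();
        System.out.println("partition=" + meta.partition() + " offset=" + meta.offset());

        // 3. Asynchronous: a Callback is invoked once the send completes or fails.
        producer.send(new ProducerRecord<>("demo", "k", "v3"), (metadata, exception) -> {
            if (exception != null) exception.printStackTrace();
        });

        producer.close();
    }
}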
Example 1: main
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {
    Properties props = new Properties();
    props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ACKS_CONFIG, "all");
    props.put(RETRIES_CONFIG, 0);
    props.put(BATCH_SIZE_CONFIG, 32000);
    props.put(LINGER_MS_CONFIG, 100);
    props.put(BUFFER_MEMORY_CONFIG, 33554432);
    props.put(KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.LongSerializer");
    props.put(VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.LongSerializer");
    Producer<Long, Long> producer = new KafkaProducer<>(props);
    long t1 = System.currentTimeMillis();
    long i = 0;
    for (; i < 1000000; i++) {
        producer.send(new ProducerRecord<>("produktion", i, i));
    }
    producer.send(new ProducerRecord<Long, Long>("produktion", (long) -1, (long) -1));
    System.out.println("fertig " + i + " Nachrichten in " + (System.currentTimeMillis() - t1) + " ms");
    producer.close();
}
Example 2: test
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
@Test
public void test() throws Exception {
    Producer<Integer, String> producer = createProducer();
    // Send 1
    producer.send(new ProducerRecord<>("messages", 1, "test"));
    // Send 2
    producer.send(new ProducerRecord<>("messages", 1, "test"), new Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            assertEquals("messages", metadata.topic());
        }
    });
    final CountDownLatch latch = new CountDownLatch(2);
    createConsumer(latch, 1);
    producer.close();
    List<MockSpan> mockSpans = mockTracer.finishedSpans();
    assertEquals(4, mockSpans.size());
    checkSpans(mockSpans);
    assertNull(mockTracer.activeSpan());
}
Example 3: nullKey
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
@Test
public void nullKey() throws Exception {
    Producer<Integer, String> producer = createProducer();
    ProducerRecord<Integer, String> record = new ProducerRecord<>("messages", "test");
    producer.send(record);
    final Map<String, Object> consumerProps = KafkaTestUtils
            .consumerProps("sampleRawConsumer", "false", embeddedKafka);
    consumerProps.put("auto.offset.reset", "earliest");
    final CountDownLatch latch = new CountDownLatch(1);
    createConsumer(latch, null);
    producer.close();
}
Example 4: sendAckInfoToCtrlTopic
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
private static void sendAckInfoToCtrlTopic(String dataSourceInfo, String completedTime, String pullStatus) {
    try {
        // Based on the original dataSourceInfo, update the full-pull related fields, then send it back to the src topic.
        JSONObject jsonObj = JSONObject.parseObject(dataSourceInfo);
        jsonObj.put(DataPullConstants.FullPullInterfaceJson.FROM_KEY, DataPullConstants.FullPullInterfaceJson.FROM_VALUE);
        jsonObj.put(DataPullConstants.FullPullInterfaceJson.TYPE_KEY, DataPullConstants.FullPullInterfaceJson.TYPE_VALUE);
        // notifyFullPullRequestor
        JSONObject payloadObj = jsonObj.getJSONObject(DataPullConstants.FullPullInterfaceJson.PAYLOAD_KEY);
        // completion time
        payloadObj.put(DataPullConstants.FullPullInterfaceJson.COMPLETE_TIME_KEY, completedTime);
        // flag indicating whether the pull succeeded
        payloadObj.put(DataPullConstants.FullPullInterfaceJson.DATA_STATUS_KEY, pullStatus);
        jsonObj.put(DataPullConstants.FullPullInterfaceJson.PAYLOAD_KEY, payloadObj);
        String ctrlTopic = getFullPullProperties(Constants.ZkTopoConfForFullPull.COMMON_CONFIG, true)
                .getProperty(Constants.ZkTopoConfForFullPull.FULL_PULL_SRC_TOPIC);
        Producer producer = DbusHelper
                .getProducer(getFullPullProperties(Constants.ZkTopoConfForFullPull.BYTE_PRODUCER_CONFIG, true));
        ProducerRecord record = new ProducerRecord<>(ctrlTopic, DataPullConstants.FullPullInterfaceJson.TYPE_VALUE, jsonObj.toString().getBytes());
        Future<RecordMetadata> future = producer.send(record);
        // block until the broker acknowledges the record
        RecordMetadata meta = future.get();
    } catch (Exception e) {
        Log.error("Error occurred when report full data pulling status.", e);
        throw new RuntimeException(e);
    }
}
Example 5: main
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "192.168.77.7:9094,192.168.77.7:9093,192.168.77.7:9092");
    props.put("retries", 0);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    Producer<String, String> producer = new KafkaProducer<>(props);
    for (int i = 0; i < 10; i++) {
        producer.send(new ProducerRecord<>("test", Long.toString(System.currentTimeMillis()), Integer.toString(i)));
        System.out.println("Sent message: " + i);
    }
    producer.close();
}
Example 6: publishDataToKafka
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
/**
 * Publish 'numMessages' arbitrary events from live users with the provided delay, to a
 * Kafka topic.
 */
public static void publishDataToKafka(int numMessages, int delayInMillis)
        throws IOException {
    Producer<String, String> producer = new KafkaProducer<>(kafkaProps);
    for (int i = 0; i < Math.max(1, numMessages); i++) {
        Long currTime = System.currentTimeMillis();
        String message = generateEvent(currTime, delayInMillis);
        producer.send(new ProducerRecord<String, String>("game", null, message)); // TODO(fjp): Generalize
        // TODO(fjp): How do we get late data working?
        // if (delayInMillis != 0) {
        //   System.out.println(pubsubMessage.getAttributes());
        //   System.out.println("late data for: " + message);
        // }
        // pubsubMessages.add(pubsubMessage);
    }
    producer.close();
}
Example 7: send
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
@Override
public <K, V, E> boolean send(Producer<K, V> producer, ProducerRecord<K, V> record, final E event,
                              final FailedDeliveryCallback<E> failedDeliveryCallback) {
    try {
        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    failedDeliveryCallback.onFailedDelivery(event, exception);
                }
            }
        });
        return true;
    } catch (BufferExhaustedException e) {
        failedDeliveryCallback.onFailedDelivery(event, e);
        return false;
    }
}
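This strategy reports send failures to a caller-supplied handler instead of throwing, so a logging pipeline can keep running when the broker is unavailable. Below is a hedged usage sketch; the DeliveryStrategy name and the single-method shape of FailedDeliveryCallback are inferred from the code above and are assumptions for illustration, not a confirmed library API.

import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

// Assumed callback shape, inferred from the calls in Example 7:
interface FailedDeliveryCallback<E> {
    void onFailedDelivery(E event, Exception cause);
}

// Assumed owner interface of the send method shown above:
interface DeliveryStrategy {
    <K, V, E> boolean send(Producer<K, V> producer, ProducerRecord<K, V> record, E event,
                           FailedDeliveryCallback<E> failedDeliveryCallback);
}

class DeliveryStrategyUsage {
    static <K, V> void deliver(DeliveryStrategy strategy, Producer<K, V> producer,
                               ProducerRecord<K, V> record, String logEvent) {
        // Failed events are reported to stderr rather than lost silently.
        boolean queued = strategy.send(producer, record, logEvent,
                (event, cause) -> System.err.println("delivery failed for " + event + ": " + cause));
        if (!queued) {
            System.err.println("producer buffer exhausted; event was not queued");
        }
    }
}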
Example 8: sendData
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
public void sendData(String data) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("acks", "all");
    props.put("retries", 0);
    props.put("batch.size", 16384);
    props.put("linger.ms", 1);
    props.put("buffer.memory", 33554432);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    Producer<String, String> producer = new KafkaProducer<>(props);
    Map<MetricName, ? extends Metric> metrics = producer.metrics();
    System.out.println(metrics);
    for (int i = 0; i < 100; i++) {
        producer.send(new ProducerRecord<String, String>("video_view", data));
    }
    producer.close();
}
Example 9: sendWrapperMessage
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
public static void sendWrapperMessage() throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", servers);
    props.put("acks", "all");
    props.put("retries", 0);
    props.put("batch.size", 16384);
    props.put("linger.ms", 1);
    props.put("buffer.memory", 33554432);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "com.gochinatv.spark.kafka.SerializedMessage");
    Producer<String, WrapperAppMessage> producer = new org.apache.kafka.clients.producer.KafkaProducer<>(props);
    // case 1:
    // no explicit partitioning; the default is a single partition; send the messages
    int i = 0;
    while (i < 1000) {
        Thread.sleep(1000L);
        WrapperAppMessage message = new WrapperAppMessage();
        message.setAgreeId((i + 1) % 5);
        message.setCityId((i + 1) % 3);
        message.setConnectType((i + 1) % 4);
        message.setCount((i + 100) % 10);
        message.setInstanceId((i + 1) % 6);
        message.setProvinceId((i + 1) % 4);
        message.setTimestamp(System.currentTimeMillis());
        message.setValue((float) ((i + 200) % 4));
        producer.send(new ProducerRecord<>("NL_U_APP_ALARM_APP", message));
        System.out.println(message.toString());
        i++;
        producer.flush();
    }
    producer.close();
}
Example 10: test
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
@Test
public void test() throws Exception {
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    Properties config = new Properties();
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-app");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, senderProps.get("bootstrap.servers"));
    config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
    config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    Producer<Integer, String> producer = createProducer();
    ProducerRecord<Integer, String> record = new ProducerRecord<>("stream-test", 1, "test");
    producer.send(record);
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Integer> intSerde = Serdes.Integer();
    KStreamBuilder builder = new KStreamBuilder();
    KStream<Integer, String> kStream = builder
            .stream(intSerde, stringSerde, "stream-test");
    kStream.map((key, value) -> new KeyValue<>(key, value + "map")).to("stream-out");
    KafkaStreams streams = new KafkaStreams(builder, new StreamsConfig(config),
            new TracingKafkaClientSupplier(mockTracer));
    streams.start();
    await().atMost(15, TimeUnit.SECONDS).until(reportedSpansSize(), equalTo(3));
    streams.close();
    producer.close();
    List<MockSpan> spans = mockTracer.finishedSpans();
    assertEquals(3, spans.size());
    checkSpans(spans);
    assertNull(mockTracer.activeSpan());
}
Example 11: produceKeyValuesSynchronously
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
/**
 * @param topic          Kafka topic to write the data records to
 * @param records        Data records to write to Kafka
 * @param producerConfig Kafka producer configuration
 * @param <K>            Key type of the data records
 * @param <V>            Value type of the data records
 */
public static <K, V> void produceKeyValuesSynchronously(
        String topic, Collection<KeyValue<K, V>> records, Properties producerConfig)
        throws ExecutionException, InterruptedException {
    Producer<K, V> producer = new KafkaProducer<>(producerConfig);
    for (KeyValue<K, V> record : records) {
        Future<RecordMetadata> f = producer.send(
                new ProducerRecord<>(topic, record.key, record.value));
        f.get(); // block until each record is acknowledged, making the produce synchronous
    }
    producer.flush();
    producer.close();
}
Author: kaiwaehner; Project: kafka-streams-machine-learning-examples; Lines: 20; Source file: IntegrationTestUtils.java
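A possible call site for this helper is sketched below; the topic name, records, and broker address are illustrative assumptions, and KeyValue is the org.apache.kafka.streams.KeyValue pair type from the signature above.

import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.streams.KeyValue;

public class ProduceSyncUsage {
    public static void main(String[] args) throws Exception {
        Properties producerConfig = new Properties();
        producerConfig.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        producerConfig.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producerConfig.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Each record is sent and awaited in order; the call returns only
        // after every record has been acknowledged by the broker.
        IntegrationTestUtils.produceKeyValuesSynchronously(
                "input-topic",
                Arrays.asList(new KeyValue<>("k1", "hello"), new KeyValue<>("k2", "kafka")),
                producerConfig);
    }
}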
Example 12: main
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
public static void main(String[] args) throws IOException, ParseException {
    // Kafka part
    Properties properties = new Properties();
    // set the Kafka bootstrap server
    properties.setProperty("bootstrap.servers", KafkaProperties.KAFKA_SERVER_URL);
    // tell the client whether the key and value are strings or something else
    properties.setProperty("key.serializer", StringSerializer.class.getName());
    properties.setProperty("value.serializer", StringSerializer.class.getName());
    // set the producer acknowledgement level to -1, 0, or 1
    properties.setProperty("acks", "1");
    // how many times the client retries when the connection fails before giving up
    properties.setProperty("retries", "3");
    // batch for a millisecond before sending; otherwise use producer.flush() below where marked
    properties.setProperty("linger.ms", "1");
    // use a truststore and https
    properties.setProperty("security.protocol", KafkaProperties.SECURITY_PROTOCOL);
    properties.setProperty("ssl.truststore.location", KafkaProperties.TRUSTSTORE_LOCATION);
    properties.setProperty("ssl.truststore.password", KafkaProperties.TRUSTSTORE_PASSWORD);
    properties.setProperty("ssl.endpoint.identification.algorithm", KafkaProperties.ENDPOINT_ALGORITHM);
    Producer<String, String> producer = new org.apache.kafka.clients.producer.KafkaProducer<String, String>(properties);
    // simple single-message producer instead of the for loop:
    // ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>("foobar", "2", "Huh!");
    for (int key = 0; key < 10; key++) {
        // change the topic here
        ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(KafkaProperties.TOPIC, Integer.toString(key), "My new keys are here: " + Integer.toString(key));
        producer.send(producerRecord);
    }
    // here you could also use producer.flush() to send the messages immediately
    producer.close();
}
Example 13: main
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {
    Properties props = new Properties();
    props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ACKS_CONFIG, "all");
    props.put(RETRIES_CONFIG, 0);
    props.put(BATCH_SIZE_CONFIG, 16000);
    props.put(LINGER_MS_CONFIG, 100);
    props.put(BUFFER_MEMORY_CONFIG, 33554432);
    props.put(KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    props.put(VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    Producer<String, String> producer = new KafkaProducer<>(props);
    long t1 = System.currentTimeMillis();
    int i = 0;
    for (; i < 10; i++) {
        String key = String.valueOf(round(random() * 1000));
        double value = new Double(round(random() * 10000000L)).intValue() / 1000.0;
        JsonObject json = Json.createObjectBuilder()
                .add("windrad", key)
                .add("kw", value)
                .build();
        producer.send(new ProducerRecord<>("produktion", key, json.toString()));
    }
    System.out.println("fertig " + i + " Nachrichten in " + (System.currentTimeMillis() - t1) + " ms");
    producer.close();
}
Example 14: main
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {
    Properties props = new Properties();
    props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    // props.put(ACKS_CONFIG, "all");
    // props.put(RETRIES_CONFIG, 0);
    // props.put(BATCH_SIZE_CONFIG, 32000);
    // props.put(LINGER_MS_CONFIG, 100);
    // props.put(BUFFER_MEMORY_CONFIG, 33554432);
    props.put(KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    props.put(VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    Producer<String, String> producer = new KafkaProducer<>(props);
    JsonObject json = Json.createObjectBuilder()
            .add("windrad", 6)
            .add("kw/h", 33)
            .build();
    String msg = json.toString();
    for (int i = 1; i <= 10; i++) {
        String key = String.valueOf(round(random() * 1000));
        double value = new Double(round(random() * 10000000L)).intValue() / 1000.0; // computed but unused in this variant
        producer.send(new ProducerRecord<>("produktion", key, msg));
    }
    System.out.println("fertig!");
    producer.close();
}
Example 15: main
import org.apache.kafka.clients.producer.Producer; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {
    Properties props = new Properties();
    props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ACKS_CONFIG, "all");
    props.put(RETRIES_CONFIG, 0);
    props.put(BATCH_SIZE_CONFIG, 16000);
    props.put(LINGER_MS_CONFIG, 100);
    props.put(BUFFER_MEMORY_CONFIG, 33554432);
    props.put(KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    props.put(VALUE_SERIALIZER_CLASS_CONFIG, JsonPOJOSerializer.class.getName());
    Producer<String, Messung> producer = new KafkaProducer<>(props);
    long t1 = System.currentTimeMillis();
    String[] types = {"Windrad", "Biogas", "Biogas", "Solar", "Windrad", "Windrad", "Solar", "Biogas"};
    JsonPOJOSerializer<Messung> serializer = new JsonPOJOSerializer<>(); // declared but unused here
    int i = 0;
    for (; i < 3000; i++) {
        int key = (int) round(random() * 7);
        double value = new Double(round(random() * 10000000L)).intValue() / 1000.0;
        Messung messung = new Messung();
        messung.anlage = "" + key;
        messung.type = types[key];
        messung.kw = value;
        producer.send(new ProducerRecord<String, Messung>("produktion", "" + key, messung));
        Thread.sleep(1000);
    }
    System.out.println("fertig " + i + " Nachrichten in " + (System.currentTimeMillis() - t1) + " ms");
    producer.close();
}