

Java KafkaProducer.send Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.producer.KafkaProducer.send, drawn from open-source projects. If you have been wondering what KafkaProducer.send does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.kafka.clients.producer.KafkaProducer.


The sections below present 15 code examples of KafkaProducer.send, sorted by popularity by default.
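Before the individual examples, a quick orientation: KafkaProducer.send is asynchronous. It appends the record to an internal buffer and immediately returns a Future<RecordMetadata>, which gives rise to three common calling patterns: fire-and-forget, synchronous (blocking on the Future), and asynchronous with a Callback. The minimal sketch below illustrates all three; the broker address localhost:9092 and the topic name demo-topic are placeholder assumptions.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class SendPatterns {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        // KafkaProducer implements Closeable, so try-with-resources closes it for us.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            ProducerRecord<String, String> record =
                    new ProducerRecord<>("demo-topic", "key", "value"); // placeholder topic

            // 1. Fire-and-forget: enqueue the record and move on; failures only
            //    surface through producer retries and logging.
            producer.send(record);

            // 2. Synchronous: block on the returned Future until the broker
            //    acknowledges the record, then inspect the metadata.
            RecordMetadata meta = producer.send(record).get();
            System.out.printf("partition=%d offset=%d%n", meta.partition(), meta.offset());

            // 3. Asynchronous with a Callback: onCompletion runs when the send
            //    succeeds (exception == null) or fails (metadata may be null).
            producer.send(record, (metadata, exception) -> {
                if (exception != null) {
                    exception.printStackTrace();
                }
            });

            producer.flush(); // drain any buffered records before close
        }
    }
}

Most of the examples below use the fire-and-forget form; Examples 5 and 13 block on the Future, and Example 12 registers a Callback.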

Example 1: main

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.LongSerializer");
    props.put("linger.ms", 0);

    KafkaProducer<String, Long> producer = new KafkaProducer<>(props);

    for (int i = 0; i < 10000; i++) {
        String ip = "127.0.0." + i % 10;
        System.out.println(ip);
        producer.send(new ProducerRecord<>("visits", ip, System.currentTimeMillis() + i));
    }

    producer.close();

}
 
Developer ID: ftrossbach, Project: kiqr, Lines: 19, Source: TestDriver.java

Example 2: publishDummyData

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
public void publishDummyData() {
    final String topic = "TestTopic";

    // Create publisher
    final Map<String, Object> config = new HashMap<>();
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

    final KafkaProducer<String, String> producer = new KafkaProducer<>(config);
    for (int charCode = 65; charCode < 91; charCode++) {
        final char[] key = new char[1];
        key[0] = (char) charCode;

        producer.send(new ProducerRecord<>(topic, new String(key), new String(key)));
    }
    producer.flush();
    producer.close();
}
 
Developer ID: SourceLabOrg, Project: kafka-webview, Lines: 20, Source: WebKafkaConsumerTest.java

Example 3: publishDummyDataNumbers

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
public void publishDummyDataNumbers() {
    final String topic = "NumbersTopic";

    // Create publisher
    final Map<String, Object> config = new HashMap<>();
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

    final KafkaProducer<Integer, Integer> producer = new KafkaProducer<>(config);
    for (int value = 0; value < 10000; value++) {
        producer.send(new ProducerRecord<>(topic, value, value));
    }
    producer.flush();
    producer.close();
}
 
Developer ID: SourceLabOrg, Project: kafka-webview, Lines: 17, Source: WebKafkaConsumerTest.java

Example 4: main

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
public static void main(final String[] args) {
    IPLogProducer ipLogProducer = new IPLogProducer();
    Properties producerProps = new Properties();

    // replace localhost with the address of your Kafka broker
    producerProps.put("bootstrap.servers", "localhost:9092");
    producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    // note: auto.create.topics.enable is a broker-side setting; the producer ignores it
    producerProps.put("auto.create.topics.enable", "true");

    KafkaProducer<String, String> ipProducer = new KafkaProducer<String, String>(producerProps);

    try (Scanner scanner = new Scanner(ipLogProducer.readfile())) {
        while (scanner.hasNextLine()) {
            String line = scanner.nextLine();
            ProducerRecord<String, String> ipData = new ProducerRecord<>("iplog", line);
            Future<RecordMetadata> recordMetadata = ipProducer.send(ipData);
        }

    } catch (IOException e) {
        e.printStackTrace();
    }
    ipProducer.close();
}
 
Developer ID: PacktPublishing, Project: Building-Data-Streaming-Applications-with-Apache-Kafka, Lines: 26, Source: IPLogProducer.java

Example 5: main

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String zkUrl = commandLine.getOptionValue(ZOOKEEPER);
  String topic = commandLine.getOptionValue(TOPIC);
  int numMessages = Integer.parseInt(commandLine.getOptionValue(NUM_MESSAGES));

  Random random = new Random();
  Properties props = OperatorUtil.createKafkaProducerProperties(zkUrl);
  KafkaProducer<byte[], byte[]> kafkaProducer = new KafkaProducer<>(props);

  byte[] key = new byte[16];
  byte[] data = new byte[1024];
  for (int i = 0; i < numMessages; i++) {
    for (int j = 0; j < data.length; j++) {
      data[j] = (byte)random.nextInt();
    }
    ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(
        topic, 0, System.currentTimeMillis(), key, data);
    Future<RecordMetadata> future = kafkaProducer.send(producerRecord);
    future.get();
    if (i % 100 == 0) {
      System.out.println("Have wrote " + i + " messages to kafka");
    }
  }
}
 
Developer ID: pinterest, Project: doctorkafka, Lines: 26, Source: KafkaWriter.java

Example 6: main

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
/**
 * @param args
 */
public static void main(String[] args) {

    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092,localhost:9093");
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

    KafkaProducer<String, String> sampleProducer = new KafkaProducer<>(props);

    for (int i = 0; i < 10; i++) {
        sampleProducer.send(new ProducerRecord<>("demo-topic1", "Data:" + Integer.toString(i)));
    }
    sampleProducer.close();
}
 
Developer ID: sarojrout, Project: spring-tutorial, Lines: 19, Source: SampleProducer.java

Example 7: run

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
@Override
public void run() {
    PropertyReader propertyReader = new PropertyReader();

    Properties producerProps = new Properties();
    producerProps.put("bootstrap.servers", propertyReader.getPropertyValue("broker.list"));
    producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("auto.create.topics.enable", "true");

    KafkaProducer<String, String> ipProducer = new KafkaProducer<String, String>(producerProps);

    BufferedReader br = readFile();
    String oldLine = "";
    try {
        while ((oldLine = br.readLine()) != null) {
            String line = getNewRecordWithRandomIP(oldLine).replace("[", "").replace("]", "");
            ProducerRecord<String, String> ipData = new ProducerRecord<>(propertyReader.getPropertyValue("topic"), line);
            Future<RecordMetadata> recordMetadata = ipProducer.send(ipData);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
    ipProducer.close();
}
 
Developer ID: PacktPublishing, Project: Building-Data-Streaming-Applications-with-Apache-Kafka, Lines: 26, Source: IPLogProducer.java

Example 8: main

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
public static void main(String args[]) {
	Properties properties = new Properties();
	 
	properties.put("bootstrap.servers", "localhost:9092");
	properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
	properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
	properties.put("acks", "1");
	 
	// generic types must match the configured serializers (both String here)
	KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
	int counter = 0;
	int nbrOfEventsRequired = Integer.parseInt(args[0]);
	while (counter < nbrOfEventsRequired) {
		StringBuffer stream = new StringBuffer();
		
		long phoneNumber = ThreadLocalRandom.current().nextLong(9999999950L,
				9999999999L);
		int bin = ThreadLocalRandom.current().nextInt(100000, 9999999);
		int bout = ThreadLocalRandom.current().nextInt(100000, 9999999);
		
		stream.append(phoneNumber);
		stream.append(",");
		stream.append(bin);
		stream.append(",");
		stream.append(bout);
		stream.append(",");
		stream.append(System.currentTimeMillis());

		System.out.println(stream.toString());
		ProducerRecord<String, String> data = new ProducerRecord<>(
				"device-data", stream.toString());
		producer.send(data);
		counter++;
	}
	
	producer.close();
}
 
Developer ID: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 37, Source: DataGenerator.java

Example 9: main

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
public static void main(String args[]) {
	Properties properties = new Properties();
	 
	properties.put("bootstrap.servers", "localhost:9092");
	properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
	properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
	properties.put("acks", "1");
	 
	// generic types must match the configured serializers (both String here)
	KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
	int counter = 0;
	int nbrOfEventsRequired = Integer.parseInt(args[0]);
	while (counter < nbrOfEventsRequired) {
		StringBuffer stream = new StringBuffer();
		
		long phoneNumber = ThreadLocalRandom.current().nextLong(9999999950L,
				9999999960L);
		int bin = ThreadLocalRandom.current().nextInt(1000, 9999);
		int bout = ThreadLocalRandom.current().nextInt(1000, 9999);
		
		stream.append(phoneNumber);
		stream.append(",");
		stream.append(bin);
		stream.append(",");
		stream.append(bout);
		stream.append(",");
		stream.append(new Date(ThreadLocalRandom.current().nextLong()));

		System.out.println(stream.toString());
		ProducerRecord<String, String> data = new ProducerRecord<>(
				"storm-trident-diy", stream.toString());
		producer.send(data);
		counter++;
	}
	
	producer.close();
}
 
Developer ID: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 37, Source: DataGenerator.java

Example 10: main

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
/**
 *
 * @param args
 * @throws InterruptedException
 */
public static void main(String[] args) throws InterruptedException {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");

    Schema.Parser parser = new Schema.Parser();
    Schema schema = parser.parse(USER_SCHEMA);
    Injection<GenericRecord, byte[]> recordInjection = GenericAvroCodecs.toBinary(schema);

    KafkaProducer<String, byte[]> producer = new KafkaProducer<>(props);
    SplittableRandom random = new SplittableRandom();

    while (true) {
        GenericData.Record avroRecord = new GenericData.Record(schema);
        avroRecord.put("str1", "Str 1-" + random.nextInt(10));
        avroRecord.put("str2", "Str 2-" + random.nextInt(1000));
        avroRecord.put("int1", random.nextInt(10000));

        byte[] bytes = recordInjection.apply(avroRecord);

        ProducerRecord<String, byte[]> record = new ProducerRecord<>("mytopic", bytes);
        producer.send(record);
        Thread.sleep(100);
    }

}
 
Developer ID: Neuw84, Project: structured-streaming-avro-demo, Lines: 33, Source: GeneratorDemo.java

Example 11: sendMessage

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
public void sendMessage(String msg) {
    KafkaProducer<String,String> producer = new KafkaProducer<String, String>(properties);
    ProducerRecord<String,String> record = new ProducerRecord<String, String>(properties.getProperty("topic"),msg);
    producer.send(record);
    producer.close();
}
 
Developer ID: wanghan0501, Project: WiFiProbeAnalysis, Lines: 7, Source: KafkaProducers.java

Example 12: generate

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
static void generate(final String kafka) throws Exception {

        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                isRunning = false;
            }
        });

        final Properties producerProps = new Properties();
        producerProps.put(ProducerConfig.CLIENT_ID_CONFIG, "SmokeTest");
        producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
        producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
        producerProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);

        final KafkaProducer<String, Integer> producer = new KafkaProducer<>(producerProps);

        final Random rand = new Random(System.currentTimeMillis());

        int numRecordsProduced = 0;
        while (isRunning) {
            final String key = "" + rand.nextInt(MAX_NUMBER_OF_KEYS);
            final int value = rand.nextInt(10000);

            final ProducerRecord<String, Integer> record = new ProducerRecord<>("data", key, value);

            producer.send(record, new Callback() {
                @Override
                public void onCompletion(final RecordMetadata metadata, final Exception exception) {
                    if (exception != null) {
                        exception.printStackTrace();
                        Exit.exit(1);
                    }
                }
            });

            numRecordsProduced++;
            if (numRecordsProduced % 1000 == 0) {
                System.out.println(numRecordsProduced + " records produced");
            }
            Utils.sleep(rand.nextInt(50));
        }
        producer.close();
        System.out.println(numRecordsProduced + " records produced");
    }
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 47, Source: EosTestDriver.java

Example 13: main

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    // acks determines when a send counts as successful. "all" blocks until
    // every replica has acknowledged the record -- the slowest but most
    // reliable setting.
    configs.put("acks", "all");
    // retries sets how many times a failed request is retried automatically.
    // Enabling retries introduces the possibility of duplicate messages.
    configs.put("retries", 0);
    // batch.size sets the per-partition buffer size; the producer buffers
    // unsent records for each partition.
    configs.put("batch.size", 16384);
    // linger.ms tells the producer to wait a little before sending, so more
    // records can fill the batch. By default a batch may be sent immediately,
    // even if it is not full; setting linger.ms above zero adds up to that
    // much delay in exchange for fewer, more efficient requests. Under high
    // load records tend to batch together anyway, even with linger.ms=0.
    configs.put("linger.ms", 1);
    // buffer.memory caps the total memory available to the producer for
    // buffering. If records are produced faster than they can be shipped to
    // the server, the buffer fills up; further send() calls then block for up
    // to max.block.ms before throwing a TimeoutException.
    configs.put("buffer.memory", 33554432);
    // The serializers convert the key and value of each ProducerRecord to bytes.
    configs.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    configs.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    KafkaProducer<String, String> producer = new KafkaProducer<String, String>(configs);
    ProducerRecord<String, String> record = null;

    long index = 0L;
    boolean controller = true;
    while (controller) {
        index++;
        System.out.println(index + "------------");
        record = new ProducerRecord<String, String>(KAFKA_TOPIC, "record-" + index);
        // send() is asynchronous: it adds the record to a buffer of pending
        // sends and returns immediately, so many records can be sent in
        // parallel without blocking on each response. To reduce the number of
        // requests, the producer groups individual records into batches.
        Future<RecordMetadata> future = producer.send(record);
        try {
            RecordMetadata recordMetadata = future.get();
            System.out.format("PARTITION: %d OFFSET: %d\n", recordMetadata.partition(), recordMetadata.offset());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    producer.close();
}
 
Developer ID: wngn123, Project: wngn-jms-kafka, Lines: 44, Source: ProducerDemo.java

Example 14: main

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
public static void main(final String[] args) throws Exception {
    System.out.println("StreamsTest instance started");

    final String kafka = args.length > 0 ? args[0] : "localhost:9092";
    final String stateDirStr = args.length > 1 ? args[1] : TestUtils.tempDirectory().getAbsolutePath();
    final boolean eosEnabled = args.length > 2 ? Boolean.parseBoolean(args[2]) : false;

    final File stateDir = new File(stateDirStr);
    stateDir.mkdir();

    final Properties streamsProperties = new Properties();
    streamsProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-streams-system-test-broker-compatibility");
    streamsProperties.put(StreamsConfig.STATE_DIR_CONFIG, stateDir.toString());
    streamsProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    streamsProperties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsProperties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsProperties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100);
    if (eosEnabled) {
        streamsProperties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
    }
    final int timeout = 6000;
    streamsProperties.put(StreamsConfig.consumerPrefix(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), timeout);
    streamsProperties.put(StreamsConfig.consumerPrefix(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG), timeout);
    streamsProperties.put(StreamsConfig.REQUEST_TIMEOUT_MS_CONFIG, timeout + 1);


    final KStreamBuilder builder = new KStreamBuilder();
    builder.stream(SOURCE_TOPIC).to(SINK_TOPIC);

    final KafkaStreams streams = new KafkaStreams(builder, streamsProperties);
    streams.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(final Thread t, final Throwable e) {
            System.out.println("FATAL: An unexpected exception is encountered on thread " + t + ": " + e);

            streams.close(30, TimeUnit.SECONDS);
        }
    });
    System.out.println("start Kafka Streams");
    streams.start();


    System.out.println("send data");
    final Properties producerProperties = new Properties();
    producerProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    producerProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    producerProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

    final KafkaProducer<String, String> producer = new KafkaProducer<>(producerProperties);
    producer.send(new ProducerRecord<>(SOURCE_TOPIC, "key", "value"));


    System.out.println("wait for result");
    loopUntilRecordReceived(kafka, eosEnabled);


    System.out.println("close Kafka Streams");
    streams.close();
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 61, Source: BrokerCompatibilityTest.java

Example 15: main

import org.apache.kafka.clients.producer.KafkaProducer; //import the package/class this method depends on
public static void main(String[] args) throws IOException {
    // set up the producer
    KafkaProducer<String, String> producer;
    try (InputStream props = Resources.getResource("producer.props").openStream()) {
        Properties properties = new Properties();
        properties.load(props);
        producer = new KafkaProducer<>(properties);
    }

    try {
        int i = 0;
        File file = new File("/home/leiming/DataFlow/imply-2.2.3/quickstart/wikiticker-2016-06-27-sampled.json");
        try (BufferedReader br = new BufferedReader(new FileReader(file))) {
            String st;
            while ((st = br.readLine()) != null) {
                // send each line of the sample file as its own message
                producer.send(new ProducerRecord<String, String>("leidaxia", st));
                producer.flush();
                System.out.println("Sent msg num " + i);
                System.out.println("Sent msg " + st);
                i = i + 1;
            }
        }
    } catch (Throwable throwable) {
        throwable.printStackTrace();
    } finally {
        producer.close();
    }

}
 
Developer ID: leidaxia, Project: kafka-stream-druid, Lines: 45, Source: Producer.java


Note: The org.apache.kafka.clients.producer.KafkaProducer.send examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright in the source code remains with those authors; consult each project's License before using or redistributing the code. Please do not reproduce this article without permission.