

Java KafkaConsumer.poll Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.KafkaConsumer.poll, drawn from open-source projects. If you are wondering what KafkaConsumer.poll does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.kafka.clients.consumer.KafkaConsumer.


The following 15 code examples of the KafkaConsumer.poll method are sorted by popularity by default.
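A quick note before the examples: all of them call the legacy poll(long timeoutMs) overload, which Kafka 2.0 deprecated in favor of poll(Duration) (KIP-266). As a baseline, here is a minimal sketch of the modern form; the broker address, group id, and topic name are placeholder assumptions, not taken from any example below.

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class MinimalPollSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
        props.put("group.id", "poll-demo");               // hypothetical group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // try-with-resources works because KafkaConsumer implements Closeable
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Arrays.asList("demo-topic")); // hypothetical topic
            while (true) {
                // poll(Duration) is the non-deprecated replacement for poll(long)
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s%n",
                            record.offset(), record.key(), record.value());
                }
            }
        }
    }
}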

Example 1: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) throws IOException, InterruptedException{
    Properties properties = PropertiesUtils.getProps("consumer.properties");
    properties.setProperty("client.id","whtestconsumer");
    properties.setProperty("group.id","whtestconsumer");
    properties.setProperty("bootstrap.servers", "localhost:9092");
    //properties.setProperty("auto.offset.reset", "earliest");


    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
    String topic = "uav-test.monitor.result";
    TopicPartition topicPartition = new TopicPartition(topic, 0);
    List<TopicPartition> topics = Arrays.asList(topicPartition);
    consumer.assign(topics);
    consumer.seekToEnd(topics);
    long current = consumer.position(topicPartition);
    // rewind 1,000 records, clamping at 0 so seek() never receives a negative offset (which would throw)
    consumer.seek(topicPartition, Math.max(0, current - 1000));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
        Thread.sleep(1);
    }
}
 
Developer ID: BriData, Project: DBus, Lines of code: 26, Source: Kafka.java

Example 2: receive

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public String receive() {
    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
    consumer.subscribe(Arrays.asList(properties.getProperty("topic")));
    final int minBatchSize = 200;
    List<ConsumerRecord<String, String>> buffer = new ArrayList<ConsumerRecord<String, String>>();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);

        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
            System.err.println(buffer.size() + "----->" + record);

        }
        if (buffer.size() >= minBatchSize) {
            writeFileToHadoop(buffer); // write the buffered records out to Hadoop first
            consumer.commitSync();
            buffer.clear();
        }
    }
}
 
Developer ID: wanghan0501, Project: WiFiProbeAnalysis, Lines of code: 21, Source: KafkaConsumerForHive.java

Example 3: readKafkaTopic

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
@GET
@Path("/readKafkaTopic")
public Response readKafkaTopic(Map<String, Object > map) {
    try {
        Properties properties = PropertiesUtils.getProps("consumer.properties");
        properties.setProperty("client.id","readKafkaTopic");
        properties.setProperty("group.id","readKafkaTopic");
        //properties.setProperty("bootstrap.servers", "localhost:9092");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        String topic = map.get("topic").toString();
        //System.out.println("topic="+topic);
        TopicPartition topicPartition = new TopicPartition(topic, 0);
        List<TopicPartition> topics = Arrays.asList(topicPartition);
        consumer.assign(topics);
        consumer.seekToEnd(topics);
        long current = consumer.position(topicPartition);
        long end = current;
        current -= 1000;
        if(current < 0) current = 0;
        consumer.seek(topicPartition, current);
        List<String> result = new ArrayList<>();
        while (current < end) {
            //System.out.println("topic position = "+current);
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                result.add(record.value());
                //System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
            }
            current = consumer.position(topicPartition);
        }
        consumer.close();
        return Response.ok().entity(result).build();
    } catch (Exception e) {
        logger.error("Error encountered while readKafkaTopic with parameter:{}", JSON.toJSONString(map), e);
        return Response.status(204).entity(new Result(-1, e.getMessage())).build();
    }
}
 
Developer ID: BriData, Project: DBus, Lines of code: 38, Source: DataTableResource.java

Example 4: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("group.id", "test");
    props.put("enable.auto.commit", "true");
    props.put("auto.commit.interval.ms", "1000");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
    consumer.subscribe(Arrays.asList("demo-topic1"));
    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.toString());
            }
        }
    } catch (Exception e) {
        LOGGER.error("Exception occurred while consuming messages", e);
    } finally {
        consumer.close();
    }
}
 
Developer ID: sarojrout, Project: spring-tutorial, Lines of code: 25, Source: SampleConsumer.java
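Example 4 closes its consumer in a finally block, but the while (true) loop can only exit through an exception. A common clean-shutdown pattern, shown here as a sketch under our own names (GracefulShutdownSketch, runUntilShutdown are not from the original project), is to call consumer.wakeup() from another thread, which makes a blocked poll() throw WakeupException:

import java.time.Duration;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

public class GracefulShutdownSketch {
    // Sketch: assumes the consumer is already configured and subscribed.
    public static void runUntilShutdown(KafkaConsumer<String, String> consumer) {
        // wakeup() is the one KafkaConsumer method that is safe to call from another thread
        Runtime.getRuntime().addShutdownHook(new Thread(consumer::wakeup));
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                records.forEach(record -> System.out.println(record.value()));
            }
        } catch (WakeupException e) {
            // expected on shutdown; fall through to close()
        } finally {
            consumer.close();
        }
    }
}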

Example 5: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {

        KafkaConsumer<String, String> consumer = KafkaConsumerUtil.createConsumer();
        consumer.subscribe(Arrays.asList(TOPIC));
        boolean flag = true;

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);

            if (flag) {
                Set<TopicPartition> assignments = consumer.assignment();
                assignments.forEach(topicPartition ->
                        consumer.seekToBeginning(
                                Arrays.asList(topicPartition)));
                flag = false;
            }

            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
    }
 
Developer ID: jeqo, Project: post-kafka-rewind-consumer-offset, Lines of code: 22, Source: KafkaConsumerFromBeginning.java

Example 6: retrieveOneMessage

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
private static ConsumerRecord<byte[], byte[]> retrieveOneMessage(KafkaConsumer<byte[], byte[]> kafkaConsumer,
                                                                 TopicPartition topicPartition,
                                                                 long offset) {
  kafkaConsumer.seek(topicPartition, offset);
  ConsumerRecords<byte[], byte[]> records;
  ConsumerRecord<byte[], byte[]> record = null;
  while (record == null) { // keep polling until the seek position yields at least one record
    records = kafkaConsumer.poll(100);
    if (!records.isEmpty()) {
      LOG.debug("records.count() = {}", records.count());
      List<ConsumerRecord<byte[], byte[]>> reclist = records.records(topicPartition);
      if (reclist != null && !reclist.isEmpty()) {
        record = reclist.get(0);
        break;
      } else {
        LOG.info("recList is null or empty");
      }
    }
  }
  return record;
}
 
Developer ID: pinterest, Project: doctorkafka, Lines of code: 22, Source: ReplicaStatsManager.java

Example 7: retrieveRecordsFromPartitions

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
/**
 * Return a map containing one List of records per partition.
 * This internally creates a Kafka Consumer using the provided consumer properties.
 *
 * @param topic the topic to read from
 * @param numPtns the number of partitions to read
 * @param consumerProperties the properties used to construct each KafkaConsumer
 * @return A Map of partitions (Integer) and the resulting List of messages (byte[]) retrieved
 */
public static Map<Integer, List<byte[]>> retrieveRecordsFromPartitions(String topic, int numPtns,
                                                                 Properties consumerProperties) {

  Map<Integer, List<byte[]>> resultsMap = new HashMap<Integer, List<byte[]>>();
  for (int i = 0; i < numPtns; i++) {
    List<byte[]> partitionResults = new ArrayList<byte[]>();
    resultsMap.put(i, partitionResults);
    KafkaConsumer<String, byte[]> consumer =
        new KafkaConsumer<String, byte[]>(consumerProperties);

    TopicPartition partition = new TopicPartition(topic, i);

    consumer.assign(Arrays.asList(partition));

    ConsumerRecords<String, byte[]> records = consumer.poll(1000);
    for (ConsumerRecord<String, byte[]> record : records) {
      partitionResults.add(record.value());
    }
    consumer.close();
  }
  return resultsMap;
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines of code: 31, Source: KafkaPartitionTestUtil.java
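One caveat with Example 7: a single poll(1000) returns at most one batch, so a busy or large partition may come back only partially read. A variant that drains each partition up to its current end offset can be sketched as follows (class and method names are ours, not from the original project):

import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class PartitionDrainSketch {
    public static List<byte[]> drainPartition(String topic, int partition, Properties consumerProperties) {
        List<byte[]> results = new ArrayList<>();
        try (KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProperties)) {
            TopicPartition tp = new TopicPartition(topic, partition);
            consumer.assign(Arrays.asList(tp));
            consumer.seekToBeginning(Arrays.asList(tp));
            // endOffsets() returns the offset *after* the last record in each partition
            long end = consumer.endOffsets(Arrays.asList(tp)).get(tp);
            while (consumer.position(tp) < end) {
                ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<String, byte[]> record : records) {
                    results.add(record.value());
                }
            }
        }
        return results;
    }
}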

Example 8: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {

		ArrayList<String> topicsList = new ArrayList<String>();

		HashMap<String, Object> kafkaProperties = new HashMap<String, Object>();

		topicsList.add("proteus-realtime");
		kafkaProperties.put("bootstrap.servers", "192.168.4.246:6667,192.168.4.247:6667,192.168.4.248:6667");
		kafkaProperties.put("key.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
		kafkaProperties.put("value.deserializer", ProteusSerializer.class.getName());
		kafkaProperties.put("group.id", "proteus");

		KafkaConsumer<Integer, Measurement> kafkaConsumer;

		kafkaConsumer = new KafkaConsumer<Integer, Measurement>(kafkaProperties, new IntegerDeserializer(),
				new ProteusSerializer());
		kafkaConsumer.subscribe(topicsList);

		try {
			while (true) {
				ConsumerRecords<Integer, Measurement> records = kafkaConsumer.poll(1);
				for (ConsumerRecord<Integer, Measurement> record : records) {
					System.out.println("record realtime: " + record.toString());
				}

			}
		} finally {
			kafkaConsumer.close();
		}

	}
 
Developer ID: proteus-h2020, Project: proteus-consumer-couchbase, Lines of code: 32, Source: ExampleRealtime.java

Example 9: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers can list just one or a few brokers; the client automatically discovers the rest of the cluster.
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("group.id", "kafka-test");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "false");
    // interval between automatic offset commits (unused here since auto-commit is off)
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    String topic = "kafka-test";
    TopicPartition partition0 = new TopicPartition(topic, 0);
    TopicPartition partition1 = new TopicPartition(topic, 1);
    consumer.assign(Arrays.asList(partition0, partition1));
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println(record.offset() + ": " + record.value());
            }
            /* synchronously commit a specific offset for this partition */
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}
 
Developer ID: wngn123, Project: wngn-jms-kafka, Lines of code: 33, Source: ComsumerDemo4.java

Example 10: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {

        Properties props = new Properties();
        props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(GROUP_ID_CONFIG, "a");
        props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
        props.put(SESSION_TIMEOUT_MS_CONFIG, 30000);
        props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        consumer.subscribe(Arrays.asList("produktion"), new SeekToBeginningRebalanceListener(consumer));

        int num = 0;
        int numOld = -1;
        while (num != numOld) { // stops after the first poll that adds no new records
            ConsumerRecords<String, String> records = consumer.poll(1000);

            numOld = num;
            num += records.count();

            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("Key: %s Offset: %s%n", record.key(), record.offset());
            }

            System.out.println("Messages read: " + num);

        }

        consumer.close();

    }
 
Developer ID: predic8, Project: apache-kafka-demos, Lines of code: 35, Source: RetentionCompactConsumer.java

Example 11: run

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
@Override
public TaskReport run(TaskSource taskSource, Schema schema, int taskIndex, PageOutput output) {
    PluginTask task = taskSource.loadTask(PluginTask.class);

    BufferAllocator allocator = task.getBufferAllocator();
    PageBuilder builder = new PageBuilder(allocator, schema, output);
    KafkaInputColumns columns = new KafkaInputColumns(task);

    KafkaProperties props = new KafkaProperties(task);
    KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(task.getTopics());
    setOffsetPosition(consumer, task);

    long readRecords = 0;
    long showReadRecords = 500;
    while(true) {
        ConsumerRecords<?,?> records = consumer.poll(task.getPollTimeoutSec() * 1000);
        if(records.count() == 0) {
            break;
        }
        readRecords += records.count();
        columns.setOutputRecords(builder, records);
        builder.flush();
        if(readRecords >= showReadRecords) {
            logger.info(String.format("Read %d record(s) in task-%d", readRecords, taskIndex));
            showReadRecords *= 2;
        }
    }
    builder.finish();
    builder.close();
    logger.info(String.format("Finishing task-%d.Total %d record(s) read in this task", taskIndex, readRecords));
    consumer.close();

    return Exec.newTaskReport();
}
 
Developer ID: sasakitoa, Project: embulk-input-kafka, Lines of code: 36, Source: KafkaInputPlugin.java

Example 12: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {

        Properties props = new Properties();
        props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(GROUP_ID_CONFIG, "a");
        props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(SESSION_TIMEOUT_MS_CONFIG, "30000");
        props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        consumer.subscribe(Arrays.asList("produktion"), new OffsetBeginningRebalanceListener(consumer, "produktion"));

        while(true) {

            ConsumerRecords<String, String> records = consumer.poll(1000);
            if (records.count() == 0)
                continue;

            System.out.println(" Count: " + records.count());

            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset= %d, key= %s, value= %s\n", record.offset(), record.key(), record.value());

        }
    }
 
Developer ID: predic8, Project: apache-kafka-demos, Lines of code: 29, Source: OffsetConsumer.java

Example 13: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = createConsumer();
    consumer.subscribe(Arrays.asList(TOPIC));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);

        for (ConsumerRecord<String, String> record : records)
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
    }
}
 
Developer ID: jeqo, Project: post-kafka-rewind-consumer-offset, Lines of code: 12, Source: KafkaSimpleConsumer.java

Example 14: consoleLoadGenTest

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
@Test
public void consoleLoadGenTest() throws IOException {
    File schemaFile = File.createTempFile("json", ".schema");
    schemaFile.deleteOnExit();
    FileWriter schemaWriter = new FileWriter(schemaFile);
    schemaWriter.write(TestInputUtils.testSchema);
    schemaWriter.close();

    File producerFile = File.createTempFile("producer", ".properties");
    producerFile.deleteOnExit();
    FileWriter producerPropsWriter = new FileWriter(producerFile);
    producerPropsWriter.write(String.format(TestInputUtils.producerProps, BROKERHOST, BROKERPORT, ZKHOST, zkServer.port()));
    producerPropsWriter.close();

    String[] vargs = new String[]{"--schema-file", schemaFile.getAbsolutePath(), "--producer-config-file", producerFile.getAbsolutePath(), "--throughput-per-producer", "10", "--test-duration", "1", "--num-producers", "1"};
    PepperBoxLoadGenerator.main(vargs);

    Properties consumerProps = new Properties();
    consumerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
    consumerProps.setProperty("group.id", "group");
    consumerProps.setProperty("key.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.put("auto.offset.reset", "earliest");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps);
    consumer.subscribe(Arrays.asList(TOPIC));
    ConsumerRecords<String, String> records = consumer.poll(30000);
    Assert.assertTrue("PepperBoxLoadGenerator validation failed", records.count() > 0);

}
 
Developer ID: GSLabDev, Project: pepper-box, Lines of code: 30, Source: PepperBoxLoadGenTest.java

Example 15: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {

        Properties props = new Properties();
        props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(GROUP_ID_CONFIG, "k");
        props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.LongDeserializer");
        props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");


        try {
            KafkaConsumer<Long, String> consumer = new KafkaConsumer<>(props);

            consumer.subscribe(Arrays.asList("produktion"), new LogRebalanceListener());

            while (true) {

                ConsumerRecords<Long, String> records = consumer.poll(1000);
                if (records.count() == 0)
                    continue;

                System.out.print("Partitions: " + records.partitions());
                System.out.println(" Count: " + records.count());

                for (ConsumerRecord<Long, String> record : records)
                    System.out.printf("partition=%d, offset= %d, key= %s, value= %s\n", record.partition(), record.offset(), record.key(), record.value());

            }
        } catch (RuntimeException e) {
            System.out.println("e = " + e);
        } finally {
            System.out.println("Closing!");
        }
    }
 
Developer ID: predic8, Project: apache-kafka-demos, Lines of code: 36, Source: SimpleConsumer.java


Note: The org.apache.kafka.clients.consumer.KafkaConsumer.poll method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Refer to each project's license before distributing or using the code; do not republish without permission.