

Java KafkaConsumer.poll Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.KafkaConsumer.poll. If you are wondering what KafkaConsumer.poll does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse more usage examples of the enclosing class, org.apache.kafka.clients.consumer.KafkaConsumer.


Below are 15 code examples of the KafkaConsumer.poll method, sorted by popularity by default.
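A note before the examples: all of them call the legacy poll(long timeoutMs) overload, which still works but has been deprecated since Kafka 2.0 (KIP-266) in favor of poll(Duration), which also bounds the time spent blocking on metadata. For reference, a minimal modern poll loop looks like the sketch below; the broker address, group id, and topic name are placeholders rather than values taken from any example on this page.

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class MinimalPollLoop {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "example-group");           // placeholder group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // try-with-resources closes the consumer even if the loop throws
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Arrays.asList("my-topic")); // placeholder topic
            while (true) {
                // poll(Duration) is the non-deprecated replacement for poll(long)
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records)
                    System.out.printf("offset = %d, key = %s, value = %s%n",
                            record.offset(), record.key(), record.value());
            }
        }
    }
}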

Example 1: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) throws IOException, InterruptedException{
    Properties properties = PropertiesUtils.getProps("consumer.properties");
    properties.setProperty("client.id","whtestconsumer");
    properties.setProperty("group.id","whtestconsumer");
    properties.setProperty("bootstrap.servers", "localhost:9092");
    //properties.setProperty("auto.offset.reset", "earliest");


    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
    String topic = "uav-test.monitor.result";
    TopicPartition topicPartition = new TopicPartition(topic, 0);
    List<TopicPartition> topics = Arrays.asList(topicPartition);
    consumer.assign(topics);
    consumer.seekToEnd(topics);
    long current = consumer.position(topicPartition);
    consumer.seek(topicPartition, current-1000);

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
        Thread.sleep(1);
    }
}
 
Author: BriData, Project: DBus, Lines: 26, Source: Kafka.java
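A caveat on Example 1: it rewinds with consumer.seek(topicPartition, current - 1000) without checking whether the result is negative, and seek() rejects negative offsets with an IllegalArgumentException whenever the partition holds fewer than 1000 records. Example 3 below adds that guard; factored into a helper it might look like this sketch (the method name rewind is mine, not from either example):

// Rewind a partition by up to `count` records, clamping at the start of the log.
// Assumes the partition has already been assigned to this consumer.
private static void rewind(KafkaConsumer<?, ?> consumer, TopicPartition tp, long count) {
    consumer.seekToEnd(Arrays.asList(tp));
    long target = consumer.position(tp) - count;
    consumer.seek(tp, Math.max(target, 0L)); // seek() throws on negative offsets
}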

Example 2: receive

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public String receive() {
    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
    consumer.subscribe(Arrays.asList(properties.getProperty("topic")));
    final int minBatchSize = 200;
    List<ConsumerRecord<String, String>> buffer = new ArrayList<ConsumerRecord<String, String>>();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);

        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
            System.err.println(buffer.size() + "----->" + record);

        }
        if (buffer.size() >= minBatchSize) {
            writeFileToHadoop(buffer); // first write the buffer out to a file
            consumer.commitSync();
            buffer.clear();
        }
    }
}
 
Author: wanghan0501, Project: WiFiProbeAnalysis, Lines: 21, Source: KafkaConsumerForHive.java

Example 3: readKafkaTopic

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
@GET
@Path("/readKafkaTopic")
public Response readKafkaTopic(Map<String, Object > map) {
    try {
        Properties properties = PropertiesUtils.getProps("consumer.properties");
        properties.setProperty("client.id","readKafkaTopic");
        properties.setProperty("group.id","readKafkaTopic");
        //properties.setProperty("bootstrap.servers", "localhost:9092");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        String topic = map.get("topic").toString();
        //System.out.println("topic="+topic);
        TopicPartition topicPartition = new TopicPartition(topic, 0);
        List<TopicPartition> topics = Arrays.asList(topicPartition);
        consumer.assign(topics);
        consumer.seekToEnd(topics);
        long current = consumer.position(topicPartition);
        long end = current;
        current -= 1000;
        if(current < 0) current = 0;
        consumer.seek(topicPartition, current);
        List<String> result = new ArrayList<>();
        while (current < end) {
            //System.out.println("topic position = "+current);
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                result.add(record.value());
                //System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
            }
            current = consumer.position(topicPartition);
        }
        consumer.close();
        return Response.ok().entity(result).build();
    } catch (Exception e) {
        logger.error("Error encountered while readKafkaTopic with parameter:{}", JSON.toJSONString(map), e);
        return Response.status(204).entity(new Result(-1, e.getMessage())).build();
    }
}
 
Author: BriData, Project: DBus, Lines: 38, Source: DataTableResource.java

Example 4: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("group.id", "test");
    props.put("enable.auto.commit", "true");
    props.put("auto.commit.interval.ms", "1000");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
    consumer.subscribe(Arrays.asList("demo-topic1"));
    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records)
                System.out.println(record.toString());
        }
    } catch (Exception e) {
        LOGGER.error("Exception occurred while consuming messages", e);
    } finally {
        consumer.close();
    }
}
 
Author: sarojrout, Project: spring-tutorial, Lines: 25, Source: SampleConsumer.java

Example 5: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = KafkaConsumerUtil.createConsumer();
    consumer.subscribe(Arrays.asList(TOPIC));
    boolean flag = true;

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);

        if (flag) {
            // on the first poll, rewind every assigned partition to the beginning
            // (assignment() is only populated once the group has rebalanced)
            Set<TopicPartition> assignments = consumer.assignment();
            assignments.forEach(topicPartition ->
                    consumer.seekToBeginning(Arrays.asList(topicPartition)));
            flag = false;
        }

        for (ConsumerRecord<String, String> record : records)
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
    }
}
 
Author: jeqo, Project: post-kafka-rewind-consumer-offset, Lines: 22, Source: KafkaConsumerFromBeginning.java

Example 6: retrieveOneMessage

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
private static ConsumerRecord<byte[], byte[]> retrieveOneMessage(KafkaConsumer<byte[], byte[]> kafkaConsumer,
                                                                 TopicPartition topicPartition,
                                                                 long offset) {
  kafkaConsumer.seek(topicPartition, offset);
  ConsumerRecords<byte[], byte[]> records;
  ConsumerRecord<byte[], byte[]> record = null;
  while (record == null) {
    records = kafkaConsumer.poll(100);
    if (!records.isEmpty()) {
      LOG.debug("records.count() = {}", records.count());
      List<ConsumerRecord<byte[], byte[]>> reclist = records.records(topicPartition);
      if (reclist != null && !reclist.isEmpty()) {
        record = reclist.get(0);
        break;
      } else {
        LOG.info("recList is null or empty");
      }
    }
  }
  return record;
}
 
Author: pinterest, Project: doctorkafka, Lines: 22, Source: ReplicaStatsManager.java

Example 7: retrieveRecordsFromPartitions

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
/**
 * Return a map containing one List of records per partition.
 * This internally creates a Kafka Consumer using the provided consumer properties.
 *
 * @param topic the topic to read from
 * @param numPtns the number of partitions to read
 * @param consumerProperties the properties used to build the internal consumer
 * @return A Map of Partitions(Integer) and the resulting List of messages (byte[]) retrieved
 */
public static Map<Integer, List<byte[]>> retrieveRecordsFromPartitions(String topic, int numPtns,
                                                                 Properties consumerProperties) {

  Map<Integer, List<byte[]>> resultsMap = new HashMap<Integer, List<byte[]>>();
  for (int i = 0; i < numPtns; i++) {
    List<byte[]> partitionResults = new ArrayList<byte[]>();
    resultsMap.put(i, partitionResults);
    KafkaConsumer<String, byte[]> consumer =
        new KafkaConsumer<String, byte[]>(consumerProperties);

    TopicPartition partition = new TopicPartition(topic, i);

    consumer.assign(Arrays.asList(partition));

    ConsumerRecords<String, byte[]> records = consumer.poll(1000);
    for (ConsumerRecord<String, byte[]> record : records) {
      partitionResults.add(record.value());
    }
    consumer.close();
  }
  return resultsMap;
}
 
Author: moueimei, Project: flume-release-1.7.0, Lines: 31, Source: KafkaPartitionTestUtil.java
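A caveat on Example 7: a single poll() call makes no guarantee of returning everything available in a partition, so on a well-filled topic the per-partition lists may come back incomplete. Test utilities usually keep polling until a batch comes back empty, along these lines (a sketch of the inner read loop only; a bounded retry count is safer than stopping at the first empty poll if the broker is slow):

// Drain the assigned partition: poll until a batch comes back empty.
ConsumerRecords<String, byte[]> records;
do {
    records = consumer.poll(1000);
    for (ConsumerRecord<String, byte[]> record : records) {
        partitionResults.add(record.value());
    }
} while (!records.isEmpty());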

Example 8: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    ArrayList<String> topicsList = new ArrayList<String>();
    HashMap<String, Object> kafkaProperties = new HashMap<String, Object>();

    topicsList.add("proteus-realtime");
    kafkaProperties.put("bootstrap.servers", "192.168.4.246:6667,192.168.4.247:6667,192.168.4.248:6667");
    kafkaProperties.put("key.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
    kafkaProperties.put("value.deserializer", ProteusSerializer.class.getName());
    kafkaProperties.put("group.id", "proteus");

    KafkaConsumer<Integer, Measurement> kafkaConsumer = new KafkaConsumer<Integer, Measurement>(
            kafkaProperties, new IntegerDeserializer(), new ProteusSerializer());
    kafkaConsumer.subscribe(topicsList);

    try {
        while (true) {
            ConsumerRecords<Integer, Measurement> records = kafkaConsumer.poll(1);
            for (ConsumerRecord<Integer, Measurement> record : records) {
                System.out.println("record realtime: " + record.toString());
            }
        }
    } finally {
        kafkaConsumer.close();
    }
}
 
Author: proteus-h2020, Project: proteus-consumer-couchbase, Lines: 32, Source: ExampleRealtime.java

Example 9: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers lists one or more brokers; the full broker list is not required,
    // the client discovers the rest of the cluster automatically
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("group.id", "kafka-test");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "false");
    // interval between automatic offset commits
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    String topic = "kafka-test";
    TopicPartition partition0 = new TopicPartition(topic, 0);
    TopicPartition partition1 = new TopicPartition(topic, 1);
    consumer.assign(Arrays.asList(partition0, partition1));
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println(record.offset() + ": " + record.value());
            }
            /* synchronously commit the offset for this specific partition */
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}
 
Author: wngn123, Project: wngn-jms-kafka, Lines: 33, Source: ComsumerDemo4.java
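Two details in Example 9 deserve a note. First, the lastOffset + 1 passed to commitSync is deliberate: the committed offset is the position of the next record to consume, so committing lastOffset itself would redeliver the last record after a restart. Second, like several examples on this page, the loop runs forever and never closes the consumer; the idiomatic shutdown is to call consumer.wakeup() from another thread, which makes the blocked poll() throw a WakeupException. A sketch of that pattern (not part of the original example):

// Graceful shutdown: a shutdown hook wakes the consumer out of its blocking poll.
Runtime.getRuntime().addShutdownHook(new Thread(consumer::wakeup));
try {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        // ... process records and commitSync as in Example 9 ...
    }
} catch (org.apache.kafka.common.errors.WakeupException e) {
    // expected during shutdown; fall through to close()
} finally {
    consumer.close();
}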

Example 10: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {
    Properties props = new Properties();
    props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(GROUP_ID_CONFIG, "a");
    props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
    props.put(SESSION_TIMEOUT_MS_CONFIG, 30000);
    props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

    consumer.subscribe(Arrays.asList("produktion"), new SeekToBeginningRebalanceListener(consumer));

    int num = 0;
    int numOld = -1;
    while (num != numOld) {
        ConsumerRecords<String, String> records = consumer.poll(1000);

        numOld = num;
        num += records.count();

        for (ConsumerRecord record : records) {
            System.out.printf("Key: %s Offset: %s\n", record.key(), record.offset());
        }

        System.out.println("Gelesene Nachrichten:" + num);
    }

    consumer.close();
}
 
Author: predic8, Project: apache-kafka-demos, Lines: 35, Source: RetentionCompactConsumer.java

Example 11: run

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
@Override
public TaskReport run(TaskSource taskSource, Schema schema, int taskIndex, PageOutput output) {
    PluginTask task = taskSource.loadTask(PluginTask.class);

    BufferAllocator allocator = task.getBufferAllocator();
    PageBuilder builder = new PageBuilder(allocator, schema, output);
    KafkaInputColumns columns = new KafkaInputColumns(task);

    KafkaProperties props = new KafkaProperties(task);
    KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(task.getTopics());
    setOffsetPosition(consumer, task);

    long readRecords = 0;
    long showReadRecords = 500;
    while(true) {
        ConsumerRecords<?,?> records = consumer.poll(task.getPollTimeoutSec() * 1000);
        if(records.count() == 0) {
            break;
        }
        readRecords += records.count();
        columns.setOutputRecords(builder, records);
        builder.flush();
        if(readRecords >= showReadRecords) {
            logger.info(String.format("Read %d record(s) in task-%d", readRecords, taskIndex));
            showReadRecords *= 2;
        }
    }
    builder.finish();
    builder.close();
    logger.info(String.format("Finishing task-%d.Total %d record(s) read in this task", taskIndex, readRecords));
    consumer.close();

    return Exec.newTaskReport();
}
 
Author: sasakitoa, Project: embulk-input-kafka, Lines: 36, Source: KafkaInputPlugin.java

Example 12: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {
    Properties props = new Properties();
    props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(GROUP_ID_CONFIG, "a");
    props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    props.put(SESSION_TIMEOUT_MS_CONFIG, "30000");
    props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

    consumer.subscribe(Arrays.asList("produktion"), new OffsetBeginningRebalanceListener(consumer, "produktion"));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(1000);
        if (records.count() == 0)
            continue;

        System.out.println(" Count: " + records.count());

        for (ConsumerRecord<String, String> record : records)
            System.out.printf("offset= %d, key= %s, value= %s\n", record.offset(), record.key(), record.value());
    }
}
 
Author: predic8, Project: apache-kafka-demos, Lines: 29, Source: OffsetConsumer.java

Example 13: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = createConsumer();
    consumer.subscribe(Arrays.asList(TOPIC));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);

        for (ConsumerRecord<String, String> record : records)
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
    }
}
 
Author: jeqo, Project: post-kafka-rewind-consumer-offset, Lines: 12, Source: KafkaSimpleConsumer.java

Example 14: consoleLoadGenTest

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
@Test
public void consoleLoadGenTest() throws IOException {
    File schemaFile = File.createTempFile("json", ".schema");
    schemaFile.deleteOnExit();
    FileWriter schemaWriter = new FileWriter(schemaFile);
    schemaWriter.write(TestInputUtils.testSchema);
    schemaWriter.close();

    File producerFile = File.createTempFile("producer", ".properties");
    producerFile.deleteOnExit();
    FileWriter producerPropsWriter = new FileWriter(producerFile);
    producerPropsWriter.write(String.format(TestInputUtils.producerProps, BROKERHOST, BROKERPORT, ZKHOST, zkServer.port()));
    producerPropsWriter.close();

    String[] vargs = new String[]{"--schema-file", schemaFile.getAbsolutePath(), "--producer-config-file", producerFile.getAbsolutePath(), "--throughput-per-producer", "10", "--test-duration", "1", "--num-producers", "1"};
    PepperBoxLoadGenerator.main(vargs);

    Properties consumerProps = new Properties();
    consumerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
    consumerProps.setProperty("group.id", "group");
    consumerProps.setProperty("key.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.put("auto.offset.reset", "earliest");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps);
    consumer.subscribe(Arrays.asList(TOPIC));
    ConsumerRecords<String, String> records = consumer.poll(30000);
    Assert.assertTrue("PepperBoxLoadGenerator validation failed", records.count() > 0);

}
 
Author: GSLabDev, Project: pepper-box, Lines: 30, Source: PepperBoxLoadGenTest.java

Example 15: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) throws InterruptedException {
    Properties props = new Properties();
    props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(GROUP_ID_CONFIG, "k");
    props.put(ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    props.put(KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.LongDeserializer");
    props.put(VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

    try {
        KafkaConsumer<Long, String> consumer = new KafkaConsumer<>(props);

        consumer.subscribe(Arrays.asList("produktion"), new LogRebalanceListener());

        while (true) {
            ConsumerRecords<Long, String> records = consumer.poll(1000);
            if (records.count() == 0)
                continue;

            System.out.print("Partitions: " + records.partitions());
            System.out.println(" Count: " + records.count());

            for (ConsumerRecord<Long, String> record : records)
                System.out.printf("partition=%d, offset= %d, key= %s, value= %s\n", record.partition(), record.offset(), record.key(), record.value());
        }
    } catch (RuntimeException e) {
        System.out.println("e = " + e);
    } finally {
        System.out.println("Closing!");
    }
}
 
Author: predic8, Project: apache-kafka-demos, Lines: 36, Source: SimpleConsumer.java


Note: The org.apache.kafka.clients.consumer.KafkaConsumer.poll examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not republish without permission.