当前位置: 首页>>代码示例>>Java>>正文


Java ConsumerIterator.next方法代码示例

本文整理汇总了Java中kafka.consumer.ConsumerIterator.next方法的典型用法代码示例。如果您正苦于以下问题:Java ConsumerIterator.next方法的具体用法?Java ConsumerIterator.next怎么用?Java ConsumerIterator.next使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在kafka.consumer.ConsumerIterator的用法示例。


在下文中一共展示了ConsumerIterator.next方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: run

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
/**
 * When an object implementing interface <code>Runnable</code> is used
 * to create a thread, starting the thread causes the object's
 * <code>run</code> method to be called in that separately executing
 * thread.
 * <p>
 * The general contract of the method <code>run</code> is that it may
 * take any action whatsoever.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    // Tails Kafka's internal offsets topic and caches every committed
    // (group, topic, partition) -> offset entry into kafkaConsumerOffsets.
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    // Autoboxing instead of the deprecated new Integer(1).
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, 1);
    KafkaStream<byte[], byte[]> offsetMsgStream = consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0);

    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {

        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
        // Guard against a null key: ByteBuffer.wrap(null) would throw NPE
        // and kill this polling thread.
        if (offsetMsg.key() == null) {
            continue;
        }
        // NOTE(review): only key-schema versions < 2 are parsed here — presumably
        // the plain offset-commit format; confirm against the broker version.
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    // A null message is an offset tombstone — nothing to cache.
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue = readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                // Swallow per-record failures so the loop keeps consuming;
                // NOTE(review): prefer a logger over stderr if one is available.
                e.printStackTrace();
            }
        }
    }
}
 
开发者ID:dubin555,项目名称:Kafka-Insight,代码行数:37,代码来源:KafkaOffsetGetter.java

示例2: getNextMessage

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
/**
 * Fetches the next message from the single stream registered for the topic,
 * or {@code null} when no message arrives before the consumer timeout.
 *
 * @param topic the topic whose stream is polled
 * @return the next message, or {@code null} if none is available
 */
public MessageAndMetadata getNextMessage(String topic) {
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
  // It has only a single stream, because there is only one consumer.
  KafkaStream stream = streams.get(0);
  final ConsumerIterator<byte[], byte[]> it = stream.iterator();
  // (removed unused local "int counter = 0;")
  try {
    return it.hasNext() ? it.next() : null;
  } catch (ConsumerTimeoutException e) {
    logger.error("0 messages available to fetch for the topic " + topic);
    return null;
  }
}
 
开发者ID:moueimei,项目名称:flume-release-1.7.0,代码行数:18,代码来源:KafkaConsumer.java

示例3: shouldWriteThenRead

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
@Test
public void shouldWriteThenRead() throws Exception {

    // Subscribe the consumer first so it is ready before anything is produced.
    ConsumerIterator<String, String> consumerIterator = buildConsumer(Original.topic);

    // Then set up the producer.
    producer = new KafkaProducer<>(producerProps());

    // Publish one record and block until the broker acknowledges it.
    producer.send(new ProducerRecord<>(Original.topic, "message")).get();

    // Consume it back and check the payload round-tripped unchanged.
    String received = consumerIterator.next().message();
    assertThat(received, is("message"));
}
 
开发者ID:telstra,项目名称:open-kilda,代码行数:18,代码来源:Original.java

示例4: shouldWriteThenRead

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
@Test
public void shouldWriteThenRead() throws Exception {

    // Subscribe the consumer first so it is ready before anything is produced.
    ConsumerIterator<String, String> consumerIterator = buildConsumer(SimpleKafkaTest.topic);

    // Then set up the producer.
    producer = new KafkaProducer<>(producerProps());

    // Publish one record and block until the broker acknowledges it.
    producer.send(new ProducerRecord<>(SimpleKafkaTest.topic, "message")).get();

    // Consume it back and check the payload round-tripped unchanged.
    String received = consumerIterator.next().message();
    assertThat(received, is("message"));
}
 
开发者ID:telstra,项目名称:open-kilda,代码行数:18,代码来源:SimpleKafkaTest.java

示例5: run

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
@Override
public void run() {
    // Parses each consumed record as JSON and hands it to the storage
    // provider configured for that log record's storage type.
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> mam = it.next();
        // Pre-declared so the catch block can include the raw payload.
        String jsonStr = "";
        try {
            // NOTE(review): uses the platform default charset; confirm the
            // producers write UTF-8 before pinning StandardCharsets.UTF_8.
            jsonStr = new String(mam.message());
            JSONObject jsonObject = JSONObject.parseObject(jsonStr);
            LogcenterConfig config = LogConfigCache.getLogConfigCache(jsonObject);
            IStorageApi iStorageApi = ServiceRegister.getInstance().getProvider(config.getStorageType());
            iStorageApi.save(jsonObject);
        } catch (Exception e) {
            // Log with full context and keep consuming — one bad record must
            // not kill the thread. (printStackTrace was redundant with
            // logger.error(..., e); the trailing "continue" was a no-op.)
            logger.error("partition[" + mam.partition() + "]," + "offset[" + mam.offset() + "], " + jsonStr, e);
        }
    }
}
 
开发者ID:geeker-lait,项目名称:tasfe-framework,代码行数:20,代码来源:KafkaConsumerThread.java

示例6: run

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
public void run() {
    // Forwards every Kafka record on this stream into the IoTDB database,
    // echoing each payload to stdout with its source partition.
    ConsumerIterator<String, String> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<String, String> consumerIterator = it.next();
        String uploadMessage = consumerIterator.message();
        // Fixed typo in output: "partiton" -> "partition".
        System.out.println(Thread.currentThread().getName()
                + " from partition[" + consumerIterator.partition() + "]: "
                + uploadMessage);
        try {
            sendDataToIotdb.writeData(uploadMessage); // upload data to the IoTDB database
        } catch (Exception ex) {
            // The catch handles any Exception, not just SQLException, so the
            // previous "SQLException:" label was misleading.
            System.out.println("Exception: " + ex.getMessage());
        }
    }
}
 
开发者ID:thulab,项目名称:iotdb-jdbc,代码行数:17,代码来源:KafkaConsumer.java

示例7: collectMq

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
/**
 * Consumes the configured topic with a single stream and feeds each record's
 * key/value pair to the timer-based MQ text parser.
 */
public void collectMq() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    // Autoboxing instead of the deprecated new Integer(1).
    topicCountMap.put(Constants.kfTopic, 1);

    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);

    KafkaStream<String, String> stream = consumerMap.get(Constants.kfTopic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    MessageAndMetadata<String, String> msgMeta;
    while (it.hasNext()) {
        msgMeta = it.next();
        msgMeta = msgMeta; // no-op removed below; kept loop body minimal
        super.mqTimer.parseMqText(msgMeta.key(), msgMeta.message());
    }
}
 
开发者ID:lrtdc,项目名称:light_drtc,代码行数:20,代码来源:KafkaMqCollect.java

示例8: run

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
public void run() {
    // Logs every Avro-encoded Iote2eRequest consumed from this thread's stream.
    Iote2eRequestReuseItem reuseItem = new Iote2eRequestReuseItem();
    ConsumerIterator<byte[], byte[]> iterator = kafkaStream.iterator();
    while (iterator.hasNext()) {
        MessageAndMetadata<byte[], byte[]> record = iterator.next();
        String key = new String(record.key());
        try {
            String summary = "Thread " + threadNumber
                    + ", topic=" + record.topic()
                    + ", partition=" + record.partition()
                    + ", key=" + key
                    + ", offset=" + record.offset()
                    + ", timestamp=" + record.timestamp()
                    + ", timestampType=" + record.timestampType()
                    + ", iote2eRequest=" + reuseItem.fromByteArray(record.message()).toString();
            logger.info(">>> Consumed: " + summary);
        } catch (Exception e) {
            // Deserialization failures are logged and the loop continues.
            logger.error(e.getMessage(), e);
        }
    }
    logger.info(">>> Shutting down Thread: " + threadNumber);
}
 
开发者ID:petezybrick,项目名称:iote2e,代码行数:24,代码来源:KafkaAvroDemo.java

示例9: run

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
public void run() {
    // Logs each consumed record's metadata plus its key and payload as text.
    ConsumerIterator<byte[], byte[]> iterator = kafkaStream.iterator();
    while (iterator.hasNext()) {
        MessageAndMetadata<byte[], byte[]> record = iterator.next();
        String key = new String(record.key());
        String message = new String(record.message());
        String summary = "Thread " + threadNumber
                + ", topic=" + record.topic()
                + ", partition=" + record.partition()
                + ", key=" + key
                + ", message=" + message
                + ", offset=" + record.offset()
                + ", timestamp=" + record.timestamp()
                + ", timestampType=" + record.timestampType();
        logger.info(">>> Consumed: " + summary);
    }
    logger.info(">>> Shutting down Thread: " + threadNumber);
}
 
开发者ID:petezybrick,项目名称:iote2e,代码行数:20,代码来源:KafkaStringDemo.java

示例10: run

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
public void run() {
    // Decodes each Avro-serialized User record from the stream and prints
    // a one-line summary; any failure terminates the loop with a trace.
    try {
        ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
        Injection<GenericRecord, byte[]> recordInjection = GenericAvroCodecs.toBinary(User.getClassSchema());

        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
            String key = new String(messageAndMetadata.key());
            User user = genericRecordToUser(recordInjection.invert(messageAndMetadata.message()).get());
            String summary = "Thread " + m_threadNumber + ", topic=" + messageAndMetadata.topic() + ", partition="
                    + messageAndMetadata.partition() + ", key=" + key + ", user=" + user.toString() + ", offset="
                    + messageAndMetadata.offset() + ", timestamp=" + messageAndMetadata.timestamp()
                    + ", timestampType=" + messageAndMetadata.timestampType();
            System.out.println(summary);
        }
        System.out.println("Shutting down Thread: " + m_threadNumber);
    } catch (Exception e) {
        // printStackTrace already prints the exception, so the extra
        // System.out.println(e) was redundant; commented-out code removed.
        System.out.println("Exception in thread " + m_threadNumber);
        e.printStackTrace();
    }
}
 
开发者ID:petezybrick,项目名称:iote2e,代码行数:25,代码来源:AvroConsumerThread.java

示例11: run

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
public void run() {
    // Prints each consumed record's metadata plus its key and payload as text.
    ConsumerIterator<byte[], byte[]> iterator = kafkaStream.iterator();
    while (iterator.hasNext()) {
        MessageAndMetadata<byte[], byte[]> record = iterator.next();
        String key = new String(record.key());
        String message = new String(record.message());
        String summary = "Thread " + threadNumber
                + ", topic=" + record.topic()
                + ", partition=" + record.partition()
                + ", key=" + key
                + ", message=" + message
                + ", offset=" + record.offset()
                + ", timestamp=" + record.timestamp()
                + ", timestampType=" + record.timestampType();
        System.out.println(summary);
    }
    System.out.println("Shutting down Thread: " + threadNumber);
}
 
开发者ID:petezybrick,项目名称:iote2e,代码行数:20,代码来源:ConsumerDemoThread.java

示例12: recv

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
/**
 * Subscribes a single stream to the topic and prints every received
 * key/message pair to stdout until the stream ends.
 */
public void recv() {
    consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());

    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    // Autoboxing instead of the deprecated new Integer(1).
    topicMap.put(topic, 1);
    Map<String, List<KafkaStream<String, String>>> streamMap = consumer.createMessageStreams(topicMap, new StringDecoder(null), new StringDecoder(null));

    KafkaStream<String, String> stream = streamMap.get(topic).get(0);

    ConsumerIterator<String, String> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<String, String> mm = it.next();
        System.out.println("<<< Got new message");
        System.out.println("<<< key:" + mm.key());
        System.out.println("<<< m: " + mm.message());
    }
}
 
开发者ID:cloudinsight,项目名称:cloudinsight-platform-docker,代码行数:19,代码来源:CollectorTest.java

示例13: run

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
public void run() {
  logger.info("KafkaChannel {} has stream", this.threadNumber);

  final ConsumerIterator<byte[], byte[]> streamIterator = stream.iterator();

  running = true;

  // Keep polling until an external caller clears the running flag.
  while (running) {
    try {
      if (!streamIterator.hasNext()) {
        continue;
      }
      MessageAndMetadata<byte[], byte[]> record = streamIterator.next();
      consumeMessage(record.key(), record.message());
    } catch (ConsumerTimeoutException cte) {
      // A timeout is expected when no messages arrive; report it as a
      // health event and resume polling.
      logger.debug("Timed out when consuming from Kafka", cte);

      KafkaHealthCheck.getInstance().heartAttack(cte.getMessage());
    }
  }
}
 
开发者ID:icclab,项目名称:watchtower-workflow,代码行数:25,代码来源:KafkaConsumerRunnable.java

示例14: run

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
@Override
public void run() {
    // Feeds every consumed CSV line into the Volt bulk loader, tagging each
    // row with the Kafka offset it came from.
    ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> md = it.next();
        // Java-style array declaration (was C-style "byte msg[]").
        byte[] msg = md.message();
        long offset = md.offset();
        String smsg = new String(msg);
        try {
            m_loader.insertRow(new RowWithMetaData(smsg, offset), m_csvParser.parseLine(smsg));
        } catch (Exception ex) {
            // A bad row is fatal for this loader: log and terminate the process.
            m_log.error("Consumer stopped", ex);
            System.exit(1);
        }
    }
}
 
开发者ID:anhnv-3991,项目名称:VoltDB,代码行数:17,代码来源:KafkaLoader.java

示例15: getRecordsInTarget

import kafka.consumer.ConsumerIterator; //导入方法依赖的package包/类
@Override
protected int getRecordsInTarget() {
  // Counts every record remaining across all Kafka streams; a consumer
  // timeout signals that a stream has been fully drained.
  int total = 0;
  for (KafkaStream<byte[], byte[]> stream : kafkaStreams) {
    ConsumerIterator<byte[], byte[]> iterator = stream.iterator();
    try {
      while (iterator.hasNext()) {
        iterator.next();
        total++;
      }
    } catch (kafka.consumer.ConsumerTimeoutException e) {
      // Expected: no more messages arrived within the timeout.
    }
  }
  return total;
}
 
开发者ID:streamsets,项目名称:datacollector,代码行数:17,代码来源:KafkaDestinationSinglePartitionPipelineRunIT.java


注:本文中的kafka.consumer.ConsumerIterator.next方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。