

Java KafkaStream.iterator Method Code Examples

This article collects typical usage examples of the kafka.consumer.KafkaStream.iterator method in Java, gathered from open-source code. If you are wondering what KafkaStream.iterator does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing class, kafka.consumer.KafkaStream.


The following presents 15 code examples of the KafkaStream.iterator method, sorted by popularity by default.
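
Before the individual examples, here is a minimal, self-contained sketch of the typical call pattern around KafkaStream.iterator() with the old high-level consumer API (Kafka 0.8.x style). The class name, ZooKeeper address, group id, and topic name are placeholder assumptions, not values taken from any of the projects cited below:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class KafkaIteratorSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder ZooKeeper address
        props.put("group.id", "example-group");           // placeholder consumer group

        ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // ask for one stream for the topic
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put("example-topic", 1);            // placeholder topic name

        Map<String, List<KafkaStream<byte[], byte[]>>> streams = consumer.createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream = streams.get("example-topic").get(0);

        // iterator() returns a ConsumerIterator; hasNext()/next() block until a message arrives
        // (or throw ConsumerTimeoutException if consumer.timeout.ms is configured)
        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        while (it.hasNext()) {
            byte[] payload = it.next().message();
            // process payload ...
        }
    }
}

Most of the examples below follow this same shape: create a consumer connector, request one or more streams per topic, call iterator() on a stream, and loop over it on a dedicated thread.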

Example 1: run

import kafka.consumer.KafkaStream; // import the package/class this method depends on
/**
 * When an object implementing interface <code>Runnable</code> is used
 * to create a thread, starting the thread causes the object's
 * <code>run</code> method to be called in that separately executing
 * thread.
 * <p>
 * The general contract of the method <code>run</code> is that it may
 * take any action whatsoever.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, new Integer(1));
    KafkaStream<byte[], byte[]> offsetMsgStream = consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0);

    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {

        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue = readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
 
Developer: dubin555, Project: Kafka-Insight, Code lines: 37, Source: KafkaOffsetGetter.java

Example 2: getNextMessage

import kafka.consumer.KafkaStream; // import the package/class this method depends on
public MessageAndMetadata getNextMessage(String topic) {
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
  // it has only a single stream, because there is only one consumer
  KafkaStream stream = streams.get(0);
  final ConsumerIterator<byte[], byte[]> it = stream.iterator();
  int counter = 0;
  try {
    if (it.hasNext()) {
      return it.next();
    } else {
      return null;
    }
  } catch (ConsumerTimeoutException e) {
    logger.error("0 messages available to fetch for the topic " + topic);
    return null;
  }
}
 
Developer: moueimei, Project: flume-release-1.7.0, Code lines: 18, Source: KafkaConsumer.java
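
A note on the ConsumerTimeoutException caught in the example above: with the old high-level consumer it is only thrown when consumer.timeout.ms is set to a finite value; with the default of -1, hasNext() blocks until a message arrives. A minimal configuration sketch, where the addresses and the 100 ms value are illustrative assumptions:

Properties props = new Properties();
props.put("zookeeper.connect", "localhost:2181"); // placeholder ZooKeeper address
props.put("group.id", "example-group");           // placeholder consumer group
props.put("consumer.timeout.ms", "100");          // hasNext()/next() throw ConsumerTimeoutException if no message arrives within 100 ms
ConsumerConfig config = new ConsumerConfig(props);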

Example 3: processStreamsByTopic

import kafka.consumer.KafkaStream; // import the package/class this method depends on
private void processStreamsByTopic(String topicKeys, List<KafkaStream<byte[], byte[]>> streamList) {
    // init stream thread pool
    ExecutorService streamPool = Executors.newFixedThreadPool(partitions);
    String[] topics = StringUtils.split(topicKeys, ",");
    if (log.isDebugEnabled())
        log.debug("准备处理消息流集合 KafkaStreamList,topic count={},topics={}, partitions/topic={}", topics.length, topicKeys, partitions);

    // iterate over each stream
    AtomicInteger index = new AtomicInteger(0);
    for (KafkaStream<byte[], byte[]> stream : streamList) {
        Thread streamThread = new Thread() {

            @Override
            public void run() {
                int i = index.getAndAdd(1);
                if (log.isDebugEnabled())
                    log.debug("处理消息流KafkaStream -- No.={}, partitions={}", i, partitions + ":" + i);

                ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();

                processStreamByConsumer(topicKeys, consumerIterator);
            }
        };
        streamPool.execute(streamThread);
    }
}
 
Developer: KoperGroup, Project: koper, Code lines: 27, Source: KafkaReceiver.java

Example 4: testLogging

import kafka.consumer.KafkaStream; // import the package/class this method depends on
@Test
public void testLogging() throws InterruptedException {

    for (int i = 0; i<1000; ++i) {
        logger.info("message"+i);
    }

    final KafkaStream<byte[], byte[]> log = kafka.createClient().createMessageStreamsByFilter(new Whitelist("logs"),1).get(0);
    final ConsumerIterator<byte[], byte[]> iterator = log.iterator();

    for (int i=0; i<1000; ++i) {
        final String messageFromKafka = new String(iterator.next().message(), UTF8);
        assertThat(messageFromKafka, Matchers.equalTo("message"+i));
    }

}
 
Developer: wngn123, Project: wngn-jms-kafka, Code lines: 17, Source: LogbackIntegrationIT.java

Example 5: run

import kafka.consumer.KafkaStream; // import the package/class this method depends on
@Override
public void run() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(transducer_topic, new Integer(1));

    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap,keyDecoder,valueDecoder);
    KafkaStream<String, String> stream = consumerMap.get(transducer_topic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    while (it.hasNext() && bStartConsume){
        transducerDataProcessor.newData(it.next().message());

        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
 
Developer: unrealinux, Project: DataProcessPlatformKafkaJavaSDK, Code lines: 23, Source: KafkaConsumerTransducer.java

Example 6: KafkaDataProvider

import kafka.consumer.KafkaStream; // import the package/class this method depends on
public KafkaDataProvider(String zookeeper, String topic, String groupId) {
  super(MessageAndMetadata.class);
  Properties props = new Properties();
  props.put("zookeeper.connect", zookeeper);
  props.put("group.id", groupId);
  props.put("zookeeper.session.timeout.ms", "30000");
  props.put("auto.commit.interval.ms", "1000");
  props.put("fetch.message.max.bytes", "4194304");
  consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
  Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
  topicCountMap.put(topic, 1);
  Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
  KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);

  iter = stream.iterator();
}
 
Developer: XiaoMi, Project: linden, Code lines: 17, Source: KafkaDataProvider.java

Example 7: collectMq

import kafka.consumer.KafkaStream; // import the package/class this method depends on
public void collectMq() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(Constants.kfTopic, new Integer(1));

    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);

    KafkaStream<String, String> stream = consumerMap.get(Constants.kfTopic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    MessageAndMetadata<String, String> msgMeta;
    while (it.hasNext()) {
        msgMeta = it.next();
        super.mqTimer.parseMqText(msgMeta.key(), msgMeta.message());
        //System.out.println(msgMeta.key() + "\t" + msgMeta.message());
    }
}
 
Developer: lrtdc, Project: light_drtc, Code lines: 20, Source: KafkaMqCollect.java

Example 8: consumeMessages

import kafka.consumer.KafkaStream; // import the package/class this method depends on
private void consumeMessages() {
    final Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC, 1);
    final StringDecoder decoder =
            new StringDecoder(new VerifiableProperties());
    final Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, decoder, decoder);
    final KafkaStream<String, String> stream =
            consumerMap.get(TOPIC).get(0);
    final ConsumerIterator<String, String> iterator = stream.iterator();

    Thread kafkaMessageReceiverThread = new Thread(
            () -> {
                while (iterator.hasNext()) {
                    String msg = iterator.next().message();
                    msg = msg == null ? "<null>" : msg;
                    System.out.println("got message: " + msg);
                    messagesReceived.add(msg);
                }
            },
            "kafkaMessageReceiverThread"
    );
    kafkaMessageReceiverThread.start();

}
 
Developer: hubrick, Project: vertx-kafka-service, Code lines: 26, Source: KafkaProducerServiceIntegrationTest.java

Example 9: open

import kafka.consumer.KafkaStream; // import the package/class this method depends on
public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
    _collector = spoutOutputCollector;
    Properties props = new Properties();
    props.put("zookeeper.connect", conf.get(OSMIngest.ZOOKEEPERS));
    props.put("group.id", groupId);
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap, new StringDecoder(new VerifiableProperties()), new StringDecoder(new VerifiableProperties()));
    List<KafkaStream<String, String>> streams = consumerMap.get(topic);
    KafkaStream<String, String> stream = null;
    if (streams.size() == 1) {
        stream = streams.get(0);
    } else {
        log.error("Streams should be of size 1");
    }
    kafkaIterator = stream.iterator();
}
 
Developer: geomesa, Project: geomesa-tutorials, Code lines: 22, Source: OSMKafkaSpout.java

Example 10: recv

import kafka.consumer.KafkaStream; // import the package/class this method depends on
public void recv() {
    consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());

    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    topicMap.put(topic, new Integer(1));
    Map<String, List<KafkaStream<String, String>>> streamMap = consumer.createMessageStreams(topicMap, new StringDecoder(null), new StringDecoder(null));

    KafkaStream<String, String> stream = streamMap.get(topic).get(0);

    ConsumerIterator<String, String> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<String, String> mm = it.next();
        System.out.println("<<< Got new message");
        System.out.println("<<< key:" + mm.key());
        System.out.println("<<< m: " + mm.message());

    }
}
 
Developer: cloudinsight, Project: cloudinsight-platform-docker, Code lines: 19, Source: CollectorTest.java

Example 11: getStreamIterator

import kafka.consumer.KafkaStream; // import the package/class this method depends on
/**
 * Modified example from kafka site with some defensive checks added.
 */
private ConsumerIterator<String, String> getStreamIterator() {
    Map<String, Integer> topicCountMap = ImmutableMap.of(topic, TOPIC_COUNT);
    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, keyDecoder, msgDecoder);
    List<KafkaStream<String, String>> streams = consumerMap.get(topic);
    Preconditions.checkNotNull(streams, "There is no topic named : " + topic);
    //copy in case of live list returned. Needed for index check below.
    ImmutableList<KafkaStream<String, String>> streamsCopy = ImmutableList.copyOf(streams);

    Preconditions.checkElementIndex(FIRST_ELEMENT_INDEX, streamsCopy.size(),
            "Failed to find any KafkaStreams related to topic : " + topic);
    KafkaStream<String, String> stream = streamsCopy.get(FIRST_ELEMENT_INDEX);

    Preconditions.checkNotNull(stream, "Returned kafka stream is null");

    ConsumerIterator<String, String> iterator = stream.iterator();
    Preconditions.checkNotNull(iterator, "Returned kafka iterator is null");
    return iterator;
}
 
Developer: trustedanalytics, Project: data-acquisition, Code lines: 23, Source: KafkaRequestIdQueue.java

Example 12: test_producer

import kafka.consumer.KafkaStream; // import the package/class this method depends on
@Test
public void test_producer() throws Exception {

    String topic = "test";
    ProducerProperties properties = new ProducerProperties();
    properties.override(ProducerProperties.NETTY_DEBUG_PIPELINE, true);
    createTopic(topic);

    KafkaProducer producer = new KafkaProducer("localhost", START_PORT, topic, properties);
    producer.connect().sync();
    KafkaTopic kafkaTopic = producer.topic();

    kafkaTopic.send(null, freeLaterBuffer((TEST_MESSAGE + "01").getBytes()));
    kafkaTopic.send(null, freeLaterBuffer((TEST_MESSAGE + "02").getBytes()));
    kafkaTopic.send(null, freeLaterBuffer((TEST_MESSAGE + "03").getBytes()));

    final KafkaStream<byte[], byte[]> stream = consume(topic).get(0);
    final ConsumerIterator<byte[], byte[]> messages = stream.iterator();

    Assert.assertThat(new String(messages.next().message()), is(TEST_MESSAGE + "01"));
    Assert.assertThat(new String(messages.next().message()), is(TEST_MESSAGE + "02"));
    Assert.assertThat(new String(messages.next().message()), is(TEST_MESSAGE + "03"));
    producer.disconnect().sync();
}
 
Developer: milenkovicm, Project: netty-kafka-producer, Code lines: 25, Source: KafkaTopicSingleBrokerTest.java

Example 13: activate

import kafka.consumer.KafkaStream; // import the package/class this method depends on
public void activate() {
    consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    topicMap.put(topic, 1);

    System.out.println("*********Results******** topic:" + topic);

    Map<String, List<KafkaStream<byte[], byte[]>>> streamMap = consumer.createMessageStreams(topicMap);
    KafkaStream<byte[], byte[]> stream = streamMap.get(topic).get(0);
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
        String value = new String(it.next().message());
        SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss SSS");
        Date curDate = new Date(System.currentTimeMillis());
        String str = formatter.format(curDate);
        System.out.println("Storm received message from Kafka ---> " + value);
        collector.emit(new Values(value, 1, str), value);
    }
}
 
Developer: coodoing, Project: LogRTA, Code lines: 20, Source: KafkaSpoutTest.java

Example 14: consume

import kafka.consumer.KafkaStream; // import the package/class this method depends on
/** Consume messages from the specified topic.
 *
 * @param topicName topic (queue) name
 * @param groupId consumer group name
 * @return a message iterator
 */
static MsgIterator consume(String topicName, String groupId) {
	ConsumerConnector consumerConnector = KafkaHelper.getConsumer(groupId);

	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();	// (topic, #streams) pairs
	topicCountMap.put(topicName, new Integer(1));

	// TODO: could consume multiple topics
	Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumerConnector.createMessageStreams(topicCountMap);	// using the default decoder
	List<KafkaStream<byte[], byte[]>> streamList = consumerMap.get(topicName);	// the list contains #streams entries; each stream supports an iterator over message/metadata pairs
	KafkaStream<byte[], byte[]> stream = streamList.get(0);

	// KafkaStream<K, V>: K is the type of the partition key, V is the type of the message value
	ConsumerIterator<byte[], byte[]> it = stream.iterator();
	MsgIterator iter = new MsgIterator(it);
	return iter;
}
 
Developer: linzhaoming, Project: easyframe-msg, Code lines: 23, Source: KafkaHelper.java

Example 15: start

import kafka.consumer.KafkaStream; // import the package/class this method depends on
public synchronized void start() {
  log.info("Starting {}...", this);

  try {
    this.consumer = KafkaSourceUtil.getConsumer(this.kafkaProps);
  } catch (Exception var6) {
    throw new FlumeException("Unable to create consumer. Check whether the ZooKeeper server is up and that the Flume agent can connect to it.", var6);
  }

  HashMap topicCountMap = new HashMap();
  topicCountMap.put(this.topic, Integer.valueOf(1));

  try {
    Map e = this.consumer.createMessageStreams(topicCountMap);
    List topicList = (List)e.get(this.topic);
    KafkaStream stream = (KafkaStream)topicList.get(0);
    this.it = stream.iterator();
  } catch (Exception var5) {
    throw new FlumeException("Unable to get message iterator from Kafka", var5);
  }

  log.info("Kafka source {} started.", this.getName());
  this.counter.start();
  super.start();
}
 
Developer: hadooparchitecturebook, Project: fraud-detection-tutorial, Code lines: 26, Source: FastKafkaSource.java


Note: The kafka.consumer.KafkaStream.iterator examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their original authors, and the copyright of the source code belongs to those authors; please consult each project's license before using or redistributing the code. Do not reproduce this article without permission.