当前位置: 首页>>代码示例>>Java>>正文


Java KafkaStream类代码示例

本文整理汇总了Java中kafka.consumer.KafkaStream的典型用法代码示例。如果您正苦于以下问题:Java KafkaStream类的具体用法?Java KafkaStream怎么用?Java KafkaStream使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


KafkaStream类属于kafka.consumer包,在下文中一共展示了KafkaStream类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: run

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * When an object implementing interface <code>Runnable</code> is used
 * to create a thread, starting the thread causes the object's
 * <code>run</code> method to be called in that separately executing
 * thread.
 * <p>
 * Continuously consumes the internal consumer-offsets topic and caches
 * every decoded (group, topic, partition) -&gt; offset entry in
 * {@code kafkaConsumerOffsets}. Runs until the thread is killed.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    // A single stream suffices: offsets are read sequentially by this thread.
    // Autoboxing replaces the deprecated new Integer(1) constructor.
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, 1);
    KafkaStream<byte[], byte[]> offsetMsgStream = consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0);

    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {

        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
        // First short of the key is the schema version; presumably versions < 2
        // are offset-commit records while >= 2 is group metadata — TODO confirm.
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    // Null payload is a tombstone (deleted offset entry): skip it.
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue = readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                // NOTE(review): prefer a logger here; none is visible in this snippet.
                e.printStackTrace();
            }
        }
    }
}
 
开发者ID:dubin555,项目名称:Kafka-Insight,代码行数:37,代码来源:KafkaOffsetGetter.java

示例2: getNextMessage

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Fetches the next message from the single stream registered for the topic.
 *
 * @param topic the topic whose stream should be polled
 * @return the next message, or {@code null} when no message arrives before
 *         the consumer timeout elapses
 */
public MessageAndMetadata getNextMessage(String topic) {
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
  // it has only a single stream, because there is only one consumer
  KafkaStream<byte[], byte[]> stream = streams.get(0);
  final ConsumerIterator<byte[], byte[]> it = stream.iterator();
  // (dead local "int counter = 0" removed — it was never read)
  try {
    if (it.hasNext()) {
      return it.next();
    } else {
      return null;
    }
  } catch (ConsumerTimeoutException e) {
    logger.error("0 messages available to fetch for the topic " + topic);
    return null;
  }
}
 
开发者ID:moueimei,项目名称:flume-release-1.7.0,代码行数:18,代码来源:KafkaConsumer.java

示例3: nextTuple

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Pulls messages from Kafka and emits each one as a tuple, tracking pending
 * messages by a random UUID for later ack/replay handling.
 */
public void nextTuple() {
	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(TopologyConfig.kafkaTopic, 1);//one executor - one thread
	Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = conn.createMessageStreams(topicCountMap);
	// Bug fix: look the streams up under the SAME key used to register them;
	// the original read consumerMap.get(kafkaTopic), risking a null list.
	List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(TopologyConfig.kafkaTopic);
	ConsumerIterator<byte[], byte[]> iter = streams.get(0).iterator();
	while(true){
		while(iter.hasNext()){

			String s = new String(iter.next().message());
			collector.emit(new Values(s));

			// remember the emitted tuple until it is acknowledged
			UUID msgId = UUID.randomUUID();
			this.pending.put(msgId, new Values(s));
		}
		try {
			Thread.sleep(1000L);
		} catch (InterruptedException e) {
			logger.error("Spout : sleep wrong \n", e);
			// preserve the interrupt status for the caller
			Thread.currentThread().interrupt();
		}
	}
}
 
开发者ID:zhai3516,项目名称:storm-demos,代码行数:23,代码来源:KafkaDataSpout.java

示例4: processStreamsByTopic

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Dispatches each {@code KafkaStream} in {@code streamList} to its own worker
 * in a fixed-size pool and consumes it via {@code processStreamByConsumer}.
 *
 * @param topicKeys  comma-separated list of topic names the streams belong to
 * @param streamList message streams returned by the consumer connector
 */
private void processStreamsByTopic(String topicKeys, List<KafkaStream<byte[], byte[]>> streamList) {
    // init stream thread pool (sized by configured partitions per topic)
    ExecutorService streamPool = Executors.newFixedThreadPool(partitions);
    String[] topics = StringUtils.split(topicKeys, ",");
    if (log.isDebugEnabled())
        log.debug("准备处理消息流集合 KafkaStreamList,topic count={},topics={}, partitions/topic={}", topics.length, topicKeys, partitions);

    // hand every stream to its own worker; the index is only used for logging
    AtomicInteger index = new AtomicInteger(0);
    for (KafkaStream<byte[], byte[]> stream : streamList) {
        // The original submitted a Thread subclass to the executor as a plain
        // Runnable; a lambda expresses the intent without the misleading type.
        streamPool.execute(() -> {
            int i = index.getAndAdd(1);
            if (log.isDebugEnabled())
                log.debug("处理消息流KafkaStream -- No.={}, partitions={}", i, partitions + ":" + i);

            ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();

            processStreamByConsumer(topicKeys, consumerIterator);
        });
    }
    // No further tasks will be submitted; running consumers keep going.
    // Fixes the executor leak in the original (pool was never shut down).
    streamPool.shutdown();
}
 
开发者ID:KoperGroup,项目名称:koper,代码行数:27,代码来源:KafkaReceiver.java

示例5: testLogging

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Logs 1000 messages through the logback appender and verifies that they all
 * arrive, in order, on the "logs" topic.
 */
@Test
public void testLogging() throws InterruptedException {

    final int messageCount = 1000;
    for (int n = 0; n < messageCount; n++) {
        logger.info("message" + n);
    }

    final KafkaStream<byte[], byte[]> log = kafka.createClient().createMessageStreamsByFilter(new Whitelist("logs"),1).get(0);
    final ConsumerIterator<byte[], byte[]> iterator = log.iterator();

    for (int n = 0; n < messageCount; n++) {
        final String messageFromKafka = new String(iterator.next().message(), UTF8);
        assertThat(messageFromKafka, Matchers.equalTo("message" + n));
    }

}
 
开发者ID:wngn123,项目名称:wngn-jms-kafka,代码行数:17,代码来源:LogbackIntegrationIT.java

示例6: run

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Consumes string messages from {@code transducer_topic} and forwards each
 * one to {@code transducerDataProcessor} until {@code bStartConsume} is
 * cleared or the stream ends.
 */
@Override
public void run() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    // single stream — messages are handled sequentially in this thread
    // (autoboxing replaces the deprecated new Integer(1) constructor)
    topicCountMap.put(transducer_topic, 1);

    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap,keyDecoder,valueDecoder);
    KafkaStream<String, String> stream = consumerMap.get(transducer_topic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    while (it.hasNext() && bStartConsume){
        transducerDataProcessor.newData(it.next().message());

        try {
            // throttle: at most one message per second
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // Fix: restore the interrupt status instead of swallowing it
            // with printStackTrace, so the loop condition / caller can react.
            Thread.currentThread().interrupt();
        }
    }
}
 
开发者ID:unrealinux,项目名称:DataProcessPlatformKafkaJavaSDK,代码行数:23,代码来源:KafkaConsumerTransducer.java

示例7: consume

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Starts {@code threadsNum} consumer workers, one per Kafka stream, for
 * {@code KafkaProducer.TOPIC}.
 *
 * @throws Exception if creating the message streams fails
 */
void consume() throws Exception {
	// specify the number of consumer threads
	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	// Integer.valueOf replaces the deprecated new Integer(...) constructor
	topicCountMap.put(KafkaProducer.TOPIC, Integer.valueOf(threadsNum));

	// specify data decoders; the three Strings are TOPIC, key and value types
	StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
	StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

	Map<String, List<KafkaStream<String, String>>> consumerMap = consumer
			.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);

	// acquire data
	List<KafkaStream<String, String>> streams = consumerMap.get(KafkaProducer.TOPIC);

	// multi-threaded consume: one worker per stream
	executor = Executors.newFixedThreadPool(threadsNum);    //create a thread pool
	for (final KafkaStream<String, String> stream : streams) {
		executor.submit(new ConsumerThread(stream));        // run thread
	}
}
 
开发者ID:thulab,项目名称:iotdb-jdbc,代码行数:22,代码来源:KafkaConsumer.java

示例8: KafkaDataProvider

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Builds a data provider backed by Kafka's high-level consumer: connects via
 * ZooKeeper, opens a single stream on {@code topic} for the given consumer
 * group and keeps that stream's iterator for subsequent reads.
 *
 * @param zookeeper ZooKeeper connect string
 * @param topic     topic to consume
 * @param groupId   consumer group id
 */
public KafkaDataProvider(String zookeeper, String topic, String groupId) {
  super(MessageAndMetadata.class);
  Properties consumerProps = new Properties();
  consumerProps.put("zookeeper.connect", zookeeper);
  consumerProps.put("group.id", groupId);
  consumerProps.put("zookeeper.session.timeout.ms", "30000");
  consumerProps.put("auto.commit.interval.ms", "1000");
  consumerProps.put("fetch.message.max.bytes", "4194304");
  consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps));

  // exactly one stream for this topic; grab its iterator directly
  Map<String, Integer> threadsPerTopic = new HashMap<String, Integer>();
  threadsPerTopic.put(topic, 1);
  iter = consumer.createMessageStreams(threadsPerTopic).get(topic).get(0).iterator();
}
 
开发者ID:XiaoMi,项目名称:linden,代码行数:17,代码来源:KafkaDataProvider.java

示例9: start

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * 启动 MessageReceiver,开始监听topic消息
 * <p>
 * Lazily initialises the consumer connector, subscribes to all configured
 * topics through a whitelist filter and starts processing the streams.
 */
@Override
public void start() {

    if (consumer == null) {
        //sync init — serialise concurrent first calls
        synchronized (lock) {
            init();
        }
    }

    String topicString = buildTopicsString();

    Whitelist topicFilter = new Whitelist(topicString);
    List<KafkaStream<byte[], byte[]>> streamList = consumer.createMessageStreamsByFilter(topicFilter, partitions);

    // Braces added: the original unbraced if/try was easy to misread.
    if (org.apache.commons.collections.CollectionUtils.isEmpty(streamList)) {
        try {
            // NOTE(review): processing still proceeds with the empty list
            // after this 1 ms pause — confirm that is intended.
            TimeUnit.MILLISECONDS.sleep(1);
        } catch (InterruptedException e) {
            log.warn(e.getMessage(), e);
            // preserve the interrupt status for the caller
            Thread.currentThread().interrupt();
        }
    }
    processStreamsByTopic(topicString, streamList);

}
 
开发者ID:KoperGroup,项目名称:koper,代码行数:28,代码来源:KafkaReceiver.java

示例10: KafkaConsumer

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Constructs a consumer bound to a single topic, records creation/touch
 * timestamps, and opens exactly one Kafka stream for that topic.
 *
 * @param topic topic to consume
 * @param group consumer group name
 * @param id    client identifier within the group
 * @param cc    connector used to create the message stream
 */

public KafkaConsumer(String topic, String group, String id, ConsumerConnector cc) {
	fTopic = topic;
	fGroup = group;
	fId = id;
	fConnector = cc;

	fCreateTimeMs = System.currentTimeMillis();
	fLastTouch = fCreateTimeMs;

	fLogTag = fGroup + "(" + fId + ")/" + fTopic;
	offset = 0;

	state = KafkaConsumer.State.OPENED;

	// request exactly one stream for this topic and keep a handle to it
	final Map<String, Integer> threadsPerTopic = new HashMap<String, Integer>();
	threadsPerTopic.put(fTopic, 1);
	final List<KafkaStream<byte[], byte[]>> topicStreams =
			fConnector.createMessageStreams(threadsPerTopic).get(fTopic);
	fStream = topicStreams.iterator().next();
}
 
开发者ID:att,项目名称:dmaap-framework,代码行数:31,代码来源:KafkaConsumer.java

示例11: collectMq

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Consumes string messages from {@code Constants.kfTopic} and feeds every
 * key/value pair into {@code mqTimer} for parsing. Blocks until the stream
 * is shut down.
 */
public void collectMq(){
	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	// autoboxing replaces the deprecated new Integer(1) constructor
	topicCountMap.put(Constants.kfTopic, 1);

	StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
	StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

	Map<String, List<KafkaStream<String, String>>> consumerMap =
			consumer.createMessageStreams(topicCountMap,keyDecoder,valueDecoder);

	KafkaStream<String, String> stream = consumerMap.get(Constants.kfTopic).get(0);
	ConsumerIterator<String, String> it = stream.iterator();
	MessageAndMetadata<String, String> msgMeta;
	while (it.hasNext()){
		msgMeta = it.next();
		super.mqTimer.parseMqText(msgMeta.key(), msgMeta.message());
	}
}
 
开发者ID:lrtdc,项目名称:light_drtc,代码行数:20,代码来源:KafkaMqCollect.java

示例12: run

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Launches {@code a_numThreads} consumer workers for {@code topic}, one per
 * Kafka stream.
 *
 * @param a_numThreads number of streams (and worker threads) to create
 */
public void run(int a_numThreads) {
    Map<String, Integer> topicCountMap = new HashMap<>();
    // autoboxing replaces the deprecated new Integer(...) constructor
    topicCountMap.put(topic, a_numThreads);

    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

    // now launch all the threads
    //
    executor = Executors.newFixedThreadPool(a_numThreads);

    // now create an object to consume the messages
    //
    int threadNumber = 0;
    // raw KafkaStream replaced with the parameterised type used elsewhere
    for (final KafkaStream<byte[], byte[]> stream : streams) {
        executor.submit(new ConsumerThread(consumer, stream, threadNumber));
        threadNumber++;
    }
}
 
开发者ID:bingoohuang,项目名称:javacode-demo,代码行数:20,代码来源:ConsumerGroupExample.java

示例13: run

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Starts {@code threadNum} worker threads, each consuming one Kafka stream
 * decoded with the supplied key/value decoders.
 *
 * @param keyDecoder   decoder applied to message keys
 * @param valueDecoder decoder applied to message values
 */
public void run(Decoder<K> keyDecoder, Decoder<V> valueDecoder){
	Map<String, Integer> streamsPerTopic = new HashMap<String, Integer>();
	streamsPerTopic.put(topic, threadNum);
	Map<String, List<KafkaStream<K, V>>> streamsByTopic =
			consumer.createMessageStreams(streamsPerTopic, keyDecoder, valueDecoder);

	List<KafkaStream<K, V>> streams = streamsByTopic.get(topic);

	executor = Executors.newFixedThreadPool(threadNum);

	// one worker per stream, numbered for identification
	int workerId = 0;
	for (final KafkaStream<K, V> stream : streams) {
		executor.submit(new ConsumerWorker<K, V>(stream, workerId));
		workerId++;
	}
}
 
开发者ID:sn3009,项目名称:EasyMessage,代码行数:17,代码来源:ConsumerEngine.java

示例14: consumeMessages

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Opens a single string stream on {@code TOPIC} and drains it on a dedicated
 * background thread, printing and recording every message received.
 */
private void consumeMessages() {
    final Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC, 1);
    final StringDecoder decoder =
            new StringDecoder(new VerifiableProperties());
    final Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, decoder, decoder);
    final KafkaStream<String, String> stream =
            consumerMap.get(TOPIC).get(0);
    final ConsumerIterator<String, String> iterator = stream.iterator();

    // drain the stream off the caller's thread
    final Runnable receiverLoop = () -> {
        while (iterator.hasNext()) {
            final String raw = iterator.next().message();
            final String msg = (raw == null) ? "<null>" : raw;
            System.out.println("got message: " + msg);
            messagesReceived.add(msg);
        }
    };
    new Thread(receiverLoop, "kafkaMessageReceiverThread").start();

}
 
开发者ID:hubrick,项目名称:vertx-kafka-service,代码行数:26,代码来源:KafkaProducerServiceIntegrationTest.java

示例15: startup

import kafka.consumer.KafkaStream; //导入依赖的package包/类
/**
 * Transitions the client from INIT to RUNNING and spawns one message task
 * per stream on the stream thread pool.
 *
 * @throws IllegalStateException if the client was already started
 */
public void startup() {
	if (status != Status.INIT) {
		log.error("The client has been started.");
		throw new IllegalStateException("The client has been started.");
	}

	status = Status.RUNNING;

	log.info("Streams num: " + streams.size());
	tasks = new ArrayList<AbstractMessageTask>();
	for (KafkaStream<String, String> stream : streams) {
		// sequential handling when no fixed thread count is configured,
		// concurrent handling otherwise
		AbstractMessageTask abstractMessageTask;
		if (fixedThreadNum == 0) {
			abstractMessageTask = new SequentialMessageTask(stream, handler);
		} else {
			abstractMessageTask = new ConcurrentMessageTask(stream, handler, fixedThreadNum);
		}
		tasks.add(abstractMessageTask);
		streamThreadPool.execute(abstractMessageTask);
	}
}
 
开发者ID:robertleepeak,项目名称:kclient,代码行数:19,代码来源:KafkaConsumer.java


注:本文中的kafka.consumer.KafkaStream类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。