This article collects typical usage examples of the Java method kafka.consumer.KafkaStream.iterator. If you have been wondering what KafkaStream.iterator does, how to use it, or where to find examples, the curated code samples below may help. You can also explore the enclosing class, kafka.consumer.KafkaStream,
in more depth.
The following presents 15 code examples of KafkaStream.iterator, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system surface better Java code samples.
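Before the examples, here is a minimal, self-contained sketch of the pattern they all share: create a high-level consumer, request a KafkaStream for a topic, and walk it with a ConsumerIterator. The ZooKeeper address, group id, and topic name below are illustrative placeholders, not values from the examples.
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class KafkaStreamIteratorSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // illustrative address
        props.put("group.id", "example-group");           // illustrative group id
        ConsumerConnector consumer =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // Request one stream for the topic, then take its iterator.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put("example-topic", 1);            // illustrative topic
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                consumer.createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream = streams.get("example-topic").get(0);
        ConsumerIterator<byte[], byte[]> it = stream.iterator();

        // hasNext() blocks until a message arrives (unless consumer.timeout.ms is set).
        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> mm = it.next();
            System.out.println("key=" + (mm.key() == null ? "<null>" : new String(mm.key()))
                    + " value=" + new String(mm.message()));
        }
        consumer.shutdown();
    }
}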
Example 1: run
import kafka.consumer.KafkaStream; // import the package/class the method depends on
/**
* When an object implementing interface <code>Runnable</code> is used
* to create a thread, starting the thread causes the object's
* <code>run</code> method to be called in that separately executing
* thread.
* <p>
* The general contract of the method <code>run</code> is that it may
* take any action whatsoever.
*
* @see Thread#run()
*/
@Override
public void run() {
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, new Integer(1));
    KafkaStream<byte[], byte[]> offsetMsgStream = consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0);
    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {
        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue = readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
Example 2: getNextMessage
import kafka.consumer.KafkaStream; // import the package/class the method depends on
public MessageAndMetadata getNextMessage(String topic) {
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    // there is only a single stream, because there is only one consumer
    KafkaStream<byte[], byte[]> stream = streams.get(0);
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();
    try {
        if (it.hasNext()) {
            return it.next();
        } else {
            return null;
        }
    } catch (ConsumerTimeoutException e) {
        logger.error("0 messages available to fetch for the topic " + topic);
        return null;
    }
}
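A note on the ConsumerTimeoutException handled above: in the old high-level consumer it is only thrown when consumer.timeout.ms is set to a positive value; with the default of -1, hasNext() blocks until a message arrives. A minimal configuration sketch follows (the ZooKeeper address, group id, and timeout value are illustrative):
import java.util.Properties;
import kafka.consumer.ConsumerConfig;
import kafka.javaapi.consumer.ConsumerConnector;

Properties props = new Properties();
props.put("zookeeper.connect", "localhost:2181"); // illustrative address
props.put("group.id", "example-group");           // illustrative group id
// Make the iterator throw ConsumerTimeoutException after 5s without messages
props.put("consumer.timeout.ms", "5000");
ConsumerConnector consumer =
        kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(props));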
Example 3: processStreamsByTopic
import kafka.consumer.KafkaStream; // import the package/class the method depends on
private void processStreamsByTopic(String topicKeys, List<KafkaStream<byte[], byte[]>> streamList) {
    // initialize the stream thread pool
    ExecutorService streamPool = Executors.newFixedThreadPool(partitions);
    String[] topics = StringUtils.split(topicKeys, ",");
    if (log.isDebugEnabled())
        log.debug("Preparing to process the KafkaStream list: topic count={}, topics={}, partitions/topic={}", topics.length, topicKeys, partitions);
    // iterate over the streams
    AtomicInteger index = new AtomicInteger(0);
    for (KafkaStream<byte[], byte[]> stream : streamList) {
        Thread streamThread = new Thread() {
            @Override
            public void run() {
                int i = index.getAndAdd(1);
                if (log.isDebugEnabled())
                    log.debug("Processing KafkaStream -- No.={}, partition={}", i, partitions + ":" + i);
                ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();
                processStreamByConsumer(topicKeys, consumerIterator);
            }
        };
        streamPool.execute(streamThread);
    }
}
Example 4: testLogging
import kafka.consumer.KafkaStream; // import the package/class the method depends on
@Test
public void testLogging() throws InterruptedException {
    for (int i = 0; i < 1000; ++i) {
        logger.info("message" + i);
    }
    final KafkaStream<byte[], byte[]> log = kafka.createClient().createMessageStreamsByFilter(new Whitelist("logs"), 1).get(0);
    final ConsumerIterator<byte[], byte[]> iterator = log.iterator();
    for (int i = 0; i < 1000; ++i) {
        final String messageFromKafka = new String(iterator.next().message(), UTF8);
        assertThat(messageFromKafka, Matchers.equalTo("message" + i));
    }
}
Example 5: run
import kafka.consumer.KafkaStream; // import the package/class the method depends on
@Override
public void run() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(transducer_topic, new Integer(1));
    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
    KafkaStream<String, String> stream = consumerMap.get(transducer_topic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    while (it.hasNext() && bStartConsume) {
        transducerDataProcessor.newData(it.next().message());
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
Example 6: KafkaDataProvider
import kafka.consumer.KafkaStream; // import the package/class the method depends on
public KafkaDataProvider(String zookeeper, String topic, String groupId) {
    super(MessageAndMetadata.class);
    Properties props = new Properties();
    props.put("zookeeper.connect", zookeeper);
    props.put("group.id", groupId);
    props.put("zookeeper.session.timeout.ms", "30000");
    props.put("auto.commit.interval.ms", "1000");
    props.put("fetch.message.max.bytes", "4194304");
    consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
    iter = stream.iterator();
}
Example 7: collectMq
import kafka.consumer.KafkaStream; // import the package/class the method depends on
public void collectMq() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(Constants.kfTopic, new Integer(1));
    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
    KafkaStream<String, String> stream = consumerMap.get(Constants.kfTopic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    MessageAndMetadata<String, String> msgMeta;
    while (it.hasNext()) {
        msgMeta = it.next();
        super.mqTimer.parseMqText(msgMeta.key(), msgMeta.message());
        //System.out.println(msgMeta.key() + "\t" + msgMeta.message());
    }
}
Example 8: consumeMessages
import kafka.consumer.KafkaStream; // import the package/class the method depends on
private void consumeMessages() {
    final Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC, 1);
    final StringDecoder decoder =
            new StringDecoder(new VerifiableProperties());
    final Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, decoder, decoder);
    final KafkaStream<String, String> stream =
            consumerMap.get(TOPIC).get(0);
    final ConsumerIterator<String, String> iterator = stream.iterator();
    Thread kafkaMessageReceiverThread = new Thread(
            () -> {
                while (iterator.hasNext()) {
                    String msg = iterator.next().message();
                    msg = msg == null ? "<null>" : msg;
                    System.out.println("got message: " + msg);
                    messagesReceived.add(msg);
                }
            },
            "kafkaMessageReceiverThread"
    );
    kafkaMessageReceiverThread.start();
}
Example 9: open
import kafka.consumer.KafkaStream; // import the package/class the method depends on
public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
    _collector = spoutOutputCollector;
    Properties props = new Properties();
    props.put("zookeeper.connect", conf.get(OSMIngest.ZOOKEEPERS));
    props.put("group.id", groupId);
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap, new StringDecoder(new VerifiableProperties()), new StringDecoder(new VerifiableProperties()));
    List<KafkaStream<String, String>> streams = consumerMap.get(topic);
    if (streams.size() != 1) {
        // fail fast instead of hitting a NullPointerException on stream.iterator() below
        log.error("Streams should be of size 1");
        throw new IllegalStateException("Expected exactly one stream for topic " + topic + ", got " + streams.size());
    }
    KafkaStream<String, String> stream = streams.get(0);
    kafkaIterator = stream.iterator();
}
Example 10: recv
import kafka.consumer.KafkaStream; // import the package/class the method depends on
public void recv() {
    consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    topicMap.put(topic, new Integer(1));
    Map<String, List<KafkaStream<String, String>>> streamMap = consumer.createMessageStreams(topicMap, new StringDecoder(null), new StringDecoder(null));
    KafkaStream<String, String> stream = streamMap.get(topic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<String, String> mm = it.next();
        System.out.println("<<< Got new message");
        System.out.println("<<< key:" + mm.key());
        System.out.println("<<< m: " + mm.message());
    }
}
Example 11: getStreamIterator
import kafka.consumer.KafkaStream; // import the package/class the method depends on
/**
 * Modified example from the Kafka site with some defensive checks added.
 */
private ConsumerIterator<String, String> getStreamIterator() {
    Map<String, Integer> topicCountMap = ImmutableMap.of(topic, TOPIC_COUNT);
    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, keyDecoder, msgDecoder);
    List<KafkaStream<String, String>> streams = consumerMap.get(topic);
    Preconditions.checkNotNull(streams, "There is no topic named : " + topic);
    // Copy in case a live list is returned; needed for the index check below.
    ImmutableList<KafkaStream<String, String>> streamsCopy = ImmutableList.copyOf(streams);
    Preconditions.checkElementIndex(FIRST_ELEMENT_INDEX, streamsCopy.size(),
            "Failed to find any KafkaStreams related to topic : " + topic);
    KafkaStream<String, String> stream = streamsCopy.get(FIRST_ELEMENT_INDEX);
    Preconditions.checkNotNull(stream, "Returned kafka stream is null");
    ConsumerIterator<String, String> iterator = stream.iterator();
    Preconditions.checkNotNull(iterator, "Returned kafka iterator is null");
    return iterator;
}
Example 12: test_producer
import kafka.consumer.KafkaStream; // import the package/class the method depends on
@Test
public void test_producer() throws Exception {
    String topic = "test";
    ProducerProperties properties = new ProducerProperties();
    properties.override(ProducerProperties.NETTY_DEBUG_PIPELINE, true);
    createTopic(topic);
    KafkaProducer producer = new KafkaProducer("localhost", START_PORT, topic, properties);
    producer.connect().sync();
    KafkaTopic kafkaTopic = producer.topic();
    kafkaTopic.send(null, freeLaterBuffer((TEST_MESSAGE + "01").getBytes()));
    kafkaTopic.send(null, freeLaterBuffer((TEST_MESSAGE + "02").getBytes()));
    kafkaTopic.send(null, freeLaterBuffer((TEST_MESSAGE + "03").getBytes()));
    final KafkaStream<byte[], byte[]> stream = consume(topic).get(0);
    final ConsumerIterator<byte[], byte[]> messages = stream.iterator();
    Assert.assertThat(new String(messages.next().message()), is(TEST_MESSAGE + "01"));
    Assert.assertThat(new String(messages.next().message()), is(TEST_MESSAGE + "02"));
    Assert.assertThat(new String(messages.next().message()), is(TEST_MESSAGE + "03"));
    producer.disconnect().sync();
}
Example 13: activate
import kafka.consumer.KafkaStream; // import the package/class the method depends on
public void activate() {
    consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    topicMap.put(topic, 1);
    System.out.println("*********Results********topic:" + topic);
    Map<String, List<KafkaStream<byte[], byte[]>>> streamMap = consumer.createMessageStreams(topicMap);
    KafkaStream<byte[], byte[]> stream = streamMap.get(topic).get(0);
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
        String value = new String(it.next().message());
        SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss SSS");
        Date curDate = new Date(System.currentTimeMillis());
        String str = formatter.format(curDate);
        System.out.println("Storm received message from Kafka ---> " + value);
        collector.emit(new Values(value, 1, str), value);
    }
}
Example 14: consume
import kafka.consumer.KafkaStream; // import the package/class the method depends on
/**
 * Consume messages from the specified topic.
 *
 * @param topicName topic name
 * @param groupId   consumer group name
 * @return an iterator over the consumed messages
 */
static MsgIterator consume(String topicName, String groupId) {
    ConsumerConnector consumerConnector = KafkaHelper.getConsumer(groupId);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>(); // (topic, #streams) pairs
    topicCountMap.put(topicName, new Integer(1));
    // TODO: could consume multiple topics here
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumerConnector.createMessageStreams(topicCountMap); // using the default decoder
    List<KafkaStream<byte[], byte[]>> streamList = consumerMap.get(topicName); // the list holds one entry per stream; each stream supports an iterator over message/metadata pairs
    KafkaStream<byte[], byte[]> stream = streamList.get(0);
    // KafkaStream<K, V>: K is the type of the partition key, V is the type of the message value
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    MsgIterator iter = new MsgIterator(it);
    return iter;
}
Example 15: start
import kafka.consumer.KafkaStream; // import the package/class the method depends on
public synchronized void start() {
    log.info("Starting {}...", this);
    try {
        this.consumer = KafkaSourceUtil.getConsumer(this.kafkaProps);
    } catch (Exception e) {
        throw new FlumeException("Unable to create consumer. Check whether the ZooKeeper server is up and that the Flume agent can connect to it.", e);
    }
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(this.topic, Integer.valueOf(1));
    try {
        Map<String, List<KafkaStream<byte[], byte[]>>> streamMap = this.consumer.createMessageStreams(topicCountMap);
        List<KafkaStream<byte[], byte[]>> topicList = streamMap.get(this.topic);
        KafkaStream<byte[], byte[]> stream = topicList.get(0);
        this.it = stream.iterator();
    } catch (Exception e) {
        throw new FlumeException("Unable to get message iterator from Kafka", e);
    }
    log.info("Kafka source {} started.", this.getName());
    this.counter.start();
    super.start();
}