This article collects typical usage examples of the Java method kafka.consumer.ConsumerIterator.hasNext. If you are wondering what ConsumerIterator.hasNext does, how to use it, or where to find examples, the curated code samples below may help. You can also read further about its enclosing class, kafka.consumer.ConsumerIterator.
The following shows 15 code examples of ConsumerIterator.hasNext, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
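Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them follow: build a high-level consumer connector, request one KafkaStream for a topic, and drive it with ConsumerIterator.hasNext()/next(). It is only an illustration of the API shape; the ZooKeeper address, group id, and topic name are placeholders, not values taken from the examples below.
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class HasNextSketch {
    public static void main(String[] args) {
        // Placeholder connection settings; adjust to your environment.
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181");
        props.put("group.id", "example-group");
        props.put("auto.offset.reset", "smallest");
        ConsumerConnector connector =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // Request a single stream for the (hypothetical) topic "example-topic".
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                connector.createMessageStreams(Collections.singletonMap("example-topic", 1));
        ConsumerIterator<byte[], byte[]> it = streams.get("example-topic").get(0).iterator();

        // hasNext() blocks until a message arrives (or consumer.timeout.ms elapses, if configured).
        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> record = it.next();
            System.out.println("offset=" + record.offset() + ", value=" + new String(record.message()));
        }
        connector.shutdown();
    }
}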
Example 1: getNextMessage
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
public MessageAndMetadata getNextMessage(String topic) {
List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
// it has only a single stream, because there is only one consumer
KafkaStream<byte[], byte[]> stream = streams.get(0);
final ConsumerIterator<byte[], byte[]> it = stream.iterator();
try {
if (it.hasNext()) {
return it.next();
} else {
return null;
}
} catch (ConsumerTimeoutException e) {
logger.error("0 messages available to fetch for the topic " + topic);
return null;
}
}
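Example 1 only reaches the ConsumerTimeoutException branch if the connector was created with a finite consumer.timeout.ms; with the default of -1, hasNext() simply blocks until a message arrives. A sketch of the kind of configuration this assumes (the property values are placeholders, not taken from the original project):
import java.util.Properties;

import kafka.consumer.ConsumerConfig;

public class TimeoutConfigSketch {
    // Hypothetical factory for the consumer settings Example 1 relies on.
    public static ConsumerConfig createConfig() {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181");
        props.put("group.id", "example-group");
        // With a finite timeout, hasNext() throws ConsumerTimeoutException when no
        // message arrives within 5 seconds, instead of blocking indefinitely.
        props.put("consumer.timeout.ms", "5000");
        return new ConsumerConfig(props);
    }
}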
Example 2: nextTuple
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
public void nextTuple() {
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(TopologyConfig.kafkaTopic, 1); // one executor - one thread
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = conn.createMessageStreams(topicCountMap);
List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(kafkaTopic);
ConsumerIterator<byte[], byte[]> iter = streams.get(0).iterator();
while(true){
while(iter.hasNext()){
String s = new String(iter.next().message());
collector.emit(new Values(s));
UUID msgId = UUID.randomUUID();
this.pending.put(msgId, new Values(s));
}
try {
Thread.sleep(1000L);
} catch (InterruptedException e) {
logger.error("Spout: sleep interrupted", e);
}
}
}
Example 3: run
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
public void run() {
try {
while(true){
ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
while (it.hasNext()) {
String m = null;
try {
m = new String(it.next().message(),
this.kafkaInput.encoding);
Map<String, Object> event = this.decoder
.decode(m);
if(zkDistributed==null){
this.kafkaInput.process(event);
}else{
zkDistributed.route(event);
}
} catch (Exception e) {
logger.error("process event:{} failed:{}",m,ExceptionUtil.getErrorMessage(e));
}
}
}
} catch (Exception t) {
logger.error("kafka consumer fetch error:{}", ExceptionUtil.getErrorMessage(t));
}
}
Example 4: run
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
@Override
public void run() {
ConsumerIterator<byte[], byte[]> it = stream.iterator();
while (it.hasNext()) {
MessageAndMetadata<byte[], byte[]> mam = it.next();
String jsonStr = "";
try {
jsonStr = new String(mam.message());
JSONObject jsonObject = JSONObject.parseObject(jsonStr);
LogcenterConfig config = LogConfigCache.getLogConfigCache(jsonObject);
IStorageApi iStorageApi = ServiceRegister.getInstance().getProvider(config.getStorageType());
iStorageApi.save(jsonObject);
} catch (Exception e) {
e.printStackTrace();
logger.error("partition[" + mam.partition() + "]," + "offset[" + mam.offset() + "], " + jsonStr, e);
continue;
}
}
}
Example 5: run
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
@Override
public void run() {
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(transducer_topic, new Integer(1));
StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
Map<String, List<KafkaStream<String, String>>> consumerMap =
consumer.createMessageStreams(topicCountMap,keyDecoder,valueDecoder);
KafkaStream<String, String> stream = consumerMap.get(transducer_topic).get(0);
ConsumerIterator<String, String> it = stream.iterator();
while (it.hasNext() && bStartConsume){
transducerDataProcessor.newData(it.next().message());
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
Example 6: run
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
public void run() {
ConsumerIterator<String, String> it = stream.iterator();
while (it.hasNext()) {
MessageAndMetadata<String, String> consumerIterator = it.next();
String uploadMessage = consumerIterator.message();
System.out.println(Thread.currentThread().getName()
+ " from partiton[" + consumerIterator.partition() + "]: "
+ uploadMessage);
try {
sendDataToIotdb.writeData(uploadMessage); // upload data to the IoTDB database
} catch (Exception ex) {
System.out.println("SQLException: " + ex.getMessage());
}
}
}
Example 7: collectMq
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
public void collectMq(){
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(Constants.kfTopic, new Integer(1));
StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
Map<String, List<KafkaStream<String, String>>> consumerMap =
consumer.createMessageStreams(topicCountMap,keyDecoder,valueDecoder);
KafkaStream<String, String> stream = consumerMap.get(Constants.kfTopic).get(0);
ConsumerIterator<String, String> it = stream.iterator();
MessageAndMetadata<String, String> msgMeta;
while (it.hasNext()){
msgMeta = it.next();
super.mqTimer.parseMqText(msgMeta.key(), msgMeta.message());
//System.out.println(msgMeta.key()+"\t"+msgMeta.message());
}
}
Example 8: run
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
public void run() {
ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
while (it.hasNext()) {
MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
String key = new String( messageAndMetadata.key() );
String message = new String( messageAndMetadata.message() );
String summary =
"Thread " + threadNumber +
", topic=" + messageAndMetadata.topic() +
", partition=" + messageAndMetadata.partition() +
", key=" + key +
", message=" + message +
", offset=" + messageAndMetadata.offset() +
", timestamp=" + messageAndMetadata.timestamp() +
", timestampType=" + messageAndMetadata.timestampType();
System.out.println(summary);
}
System.out.println("Shutting down Thread: " + threadNumber);
}
Example 9: consumeMessages
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
private void consumeMessages() {
final Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(TOPIC, 1);
final StringDecoder decoder =
new StringDecoder(new VerifiableProperties());
final Map<String, List<KafkaStream<String, String>>> consumerMap =
consumer.createMessageStreams(topicCountMap, decoder, decoder);
final KafkaStream<String, String> stream =
consumerMap.get(TOPIC).get(0);
final ConsumerIterator<String, String> iterator = stream.iterator();
Thread kafkaMessageReceiverThread = new Thread(
() -> {
while (iterator.hasNext()) {
String msg = iterator.next().message();
msg = msg == null ? "<null>" : msg;
System.out.println("got message: " + msg);
messagesReceived.add(msg);
}
},
"kafkaMessageReceiverThread"
);
kafkaMessageReceiverThread.start();
}
Example 10: recv
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
public void recv() {
consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());
Map<String, Integer> topicMap = new HashMap<String, Integer>();
topicMap.put(topic, new Integer(1));
Map<String, List<KafkaStream<String, String>>> streamMap = consumer.createMessageStreams(topicMap, new StringDecoder(null), new StringDecoder(null));
KafkaStream<String, String> stream = streamMap.get(topic).get(0);
ConsumerIterator<String, String> it = stream.iterator();
while (it.hasNext()) {
MessageAndMetadata<String, String> mm = it.next();
System.out.println("<<< Got new message");
System.out.println("<<< key:" + mm.key());
System.out.println("<<< m: " + mm.message());
}
}
Example 11: readTopicToList
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
/**
* Read topic to list, only using Kafka code.
*/
private static List<MessageAndMetadata<byte[], byte[]>> readTopicToList(String topicName, ConsumerConfig config, final int stopAfter) {
ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(config);
// we request only one stream per consumer instance. Kafka will make sure that each consumer group
// will see each message only once.
Map<String,Integer> topicCountMap = Collections.singletonMap(topicName, 1);
Map<String, List<KafkaStream<byte[], byte[]>>> streams = consumerConnector.createMessageStreams(topicCountMap);
if (streams.size() != 1) {
throw new RuntimeException("Expected only one message stream but got "+streams.size());
}
List<KafkaStream<byte[], byte[]>> kafkaStreams = streams.get(topicName);
if (kafkaStreams == null) {
throw new RuntimeException("Requested stream not available. Available streams: "+streams.toString());
}
if (kafkaStreams.size() != 1) {
throw new RuntimeException("Requested 1 stream from Kafka, but got "+kafkaStreams.size()+" streams");
}
LOG.info("Opening Consumer instance for topic '{}' on group '{}'", topicName, config.groupId());
ConsumerIterator<byte[], byte[]> iteratorToRead = kafkaStreams.get(0).iterator();
List<MessageAndMetadata<byte[], byte[]>> result = new ArrayList<>();
int read = 0;
while(iteratorToRead.hasNext()) {
read++;
result.add(iteratorToRead.next());
if (read == stopAfter) {
LOG.info("Read "+read+" elements");
return result;
}
}
return result;
}
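A hypothetical call site for this helper, assuming it is invoked from within the same class (the method is private static); the ZooKeeper address, group id, topic name, and message limit below are placeholders:
import java.util.List;
import java.util.Properties;

import kafka.consumer.ConsumerConfig;
import kafka.message.MessageAndMetadata;

// Hypothetical caller; assumed to live in the same class as readTopicToList.
public static void dumpFirstMessages() {
    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:2181");   // placeholder
    props.put("group.id", "read-topic-to-list-group");  // placeholder
    props.put("auto.offset.reset", "smallest");         // start from the earliest available offset

    // hasNext() blocks until messages arrive, so this call returns once
    // 100 messages have been read from the (hypothetical) topic "test-topic".
    List<MessageAndMetadata<byte[], byte[]>> messages =
            readTopicToList("test-topic", new ConsumerConfig(props), 100);
    for (MessageAndMetadata<byte[], byte[]> m : messages) {
        System.out.println("partition=" + m.partition() + ", offset=" + m.offset());
    }
}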
Example 12: run
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
public void run() {
try {
ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
Injection<GenericRecord, byte[]> recordInjection = GenericAvroCodecs.toBinary(User.getClassSchema());
while (it.hasNext()) {
MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
String key = new String(messageAndMetadata.key());
User user = genericRecordToUser(recordInjection.invert(messageAndMetadata.message()).get());
// User user = (User)
// recordInjection.invert(messageAndMetadata.message()).get();
String summary = "Thread " + m_threadNumber + ", topic=" + messageAndMetadata.topic() + ", partition="
+ messageAndMetadata.partition() + ", key=" + key + ", user=" + user.toString() + ", offset="
+ messageAndMetadata.offset() + ", timestamp=" + messageAndMetadata.timestamp()
+ ", timestampType=" + messageAndMetadata.timestampType();
System.out.println(summary);
}
System.out.println("Shutting down Thread: " + m_threadNumber);
} catch (Exception e) {
System.out.println("Exception in thread "+m_threadNumber);
System.out.println(e);
e.printStackTrace();
}
}
Example 13: run
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
public void run() {
ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
while (it.hasNext()) {
MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
String key = new String( messageAndMetadata.key() );
String message = new String( messageAndMetadata.message() );
String summary =
"Thread " + threadNumber +
", topic=" + messageAndMetadata.topic() +
", partition=" + messageAndMetadata.partition() +
", key=" + key +
", message=" + message +
", offset=" + messageAndMetadata.offset() +
", timestamp=" + messageAndMetadata.timestamp() +
", timestampType=" + messageAndMetadata.timestampType();
logger.info(">>> Consumed: " + summary);
}
logger.info(">>> Shutting down Thread: " + threadNumber);
}
Example 14: run
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
public void run() {
logger.info("KafkaChannel {} has stream", this.threadNumber);
final ConsumerIterator<byte[], byte[]> streamIterator = stream.iterator();
running = true;
while (running) {
try {
if (streamIterator.hasNext()) {
MessageAndMetadata<byte[], byte[]> messageAndMetadata = streamIterator.next();
byte[] key = messageAndMetadata.key();
byte[] message = messageAndMetadata.message();
consumeMessage(key, message);
}
} catch (ConsumerTimeoutException cte) {
logger.debug("Timed out when consuming from Kafka", cte);
KafkaHealthCheck.getInstance().heartAttack(cte.getMessage());
}
}
}
Example 15: run
import kafka.consumer.ConsumerIterator; // import the package/class this method depends on
@Override
public void run() {
ConsumerIterator<byte[], byte[]> iter = kafkaStream.iterator();
MessageAndMetadata<byte[], byte[]> msg;
int total = 0, fail = 0, success = 0;
long start = System.currentTimeMillis();
while (iter.hasNext()) {
try {
msg = iter.next();
_log.info("Thread {}: {}", threadNum, new String(msg.message(), "utf-8"));
_log.info("partition: {}, offset: {}", msg.partition(), msg.offset());
success++;
} catch (Exception e) {
_log.error("{}", e);
fail++;
}
_log.info("Count [fail/success/total]: [{}/{}/{}], Time: {}s", fail, success, ++total,
(System.currentTimeMillis() - start) / 1000);
}
}