本文整理匯總了Java中kafka.consumer.KafkaStream類的典型用法代碼示例。如果您正苦於以下問題:Java KafkaStream類的具體用法?Java KafkaStream怎麽用?Java KafkaStream使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
KafkaStream類屬於kafka.consumer包,在下文中一共展示了KafkaStream類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Java代碼示例。
示例1: run
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Continuously consumes the internal consumer-offset topic and caches each
 * parsed commit key/value pair in {@code kafkaConsumerOffsets}.
 *
 * <p>Runs forever; intended to execute on its own thread.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    // Integer.valueOf avoids the deprecated new Integer(int) constructor.
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, Integer.valueOf(1));
    KafkaStream<byte[], byte[]> offsetMsgStream =
            consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0);
    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {
        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
        // The key's leading short is a schema version; presumably versions >= 2
        // are not offset commits and are skipped — TODO confirm against the
        // offset-topic message format of the Kafka version in use.
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    // Tombstone message: nothing to cache.
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue =
                        readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                // Best-effort: skip unparseable messages rather than kill the thread.
                e.printStackTrace();
            }
        }
    }
}
示例2: getNextMessage
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Fetches the next message from the single stream registered for the topic.
 *
 * @param topic the topic whose stream is polled
 * @return the next message, or {@code null} if none is available or the
 *         consumer times out waiting
 */
public MessageAndMetadata getNextMessage(String topic) {
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    // Only a single stream exists because only one consumer was created.
    KafkaStream<byte[], byte[]> stream = streams.get(0);
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();
    try {
        // Removed an unused local counter; hasNext() blocks until a message
        // arrives or consumer.timeout.ms elapses.
        return it.hasNext() ? it.next() : null;
    } catch (ConsumerTimeoutException e) {
        logger.error("0 messages available to fetch for the topic " + topic);
        return null;
    }
}
示例3: nextTuple
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Creates a single-threaded stream for the configured topic and emits every
 * received message as a tuple, tracking each by a random UUID in the pending
 * map. Loops forever, sleeping 1s when the iterator is drained.
 */
public void nextTuple() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TopologyConfig.kafkaTopic, 1); // one executor - one thread
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = conn.createMessageStreams(topicCountMap);
    // NOTE(review): the stream is registered under TopologyConfig.kafkaTopic but
    // looked up via kafkaTopic — confirm both name the same topic, otherwise
    // streams is null and the next line throws NPE.
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(kafkaTopic);
    ConsumerIterator<byte[], byte[]> iter = streams.get(0).iterator();
    while (true) {
        while (iter.hasNext()) {
            // NOTE(review): new String(byte[]) uses the platform default charset —
            // confirm producers encode with the same charset (ideally pass UTF-8).
            String s = new String(iter.next().message());
            collector.emit(new Values(s));
            UUID msgId = UUID.randomUUID();
            this.pending.put(msgId, new Values(s));
        }
        try {
            Thread.sleep(1000L);
        } catch (InterruptedException e) {
            // Restore the interrupt status so the owning framework can observe it.
            Thread.currentThread().interrupt();
            logger.error("Spout : sleep wrong \n", e);
        }
    }
}
示例4: processStreamsByTopic
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Fans out every stream in {@code streamList} to a fixed-size thread pool,
 * one worker per stream, each driving processStreamByConsumer.
 *
 * @param topicKeys comma-separated topic names (for logging and downstream routing)
 * @param streamList the Kafka streams to consume, one per partition thread
 */
private void processStreamsByTopic(String topicKeys, List<KafkaStream<byte[], byte[]>> streamList) {
    // One pooled worker per partition.
    ExecutorService streamPool = Executors.newFixedThreadPool(partitions);
    String[] topics = StringUtils.split(topicKeys, ",");
    if (log.isDebugEnabled())
        log.debug("準備處理消息流集合 KafkaStreamList,topic count={},topics={}, partitions/topic={}", topics.length, topicKeys, partitions);
    // Number each stream worker for log correlation.
    AtomicInteger index = new AtomicInteger(0);
    for (KafkaStream<byte[], byte[]> stream : streamList) {
        // Submit a plain Runnable: the original wrapped the work in a Thread
        // subclass handed to the pool, which never starts as a thread itself
        // and is misleading.
        streamPool.execute(new Runnable() {
            @Override
            public void run() {
                int i = index.getAndAdd(1);
                if (log.isDebugEnabled())
                    log.debug("處理消息流KafkaStream -- No.={}, partitions={}", i, partitions + ":" + i);
                ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();
                processStreamByConsumer(topicKeys, consumerIterator);
            }
        });
    }
}
示例5: testLogging
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Publishes 1000 records through the logger and verifies that each one
 * arrives, in order, on the "logs" topic.
 */
@Test
public void testLogging() throws InterruptedException {
    final int total = 1000;
    for (int n = 0; n < total; ++n) {
        logger.info("message" + n);
    }
    final KafkaStream<byte[], byte[]> stream =
            kafka.createClient().createMessageStreamsByFilter(new Whitelist("logs"), 1).get(0);
    final ConsumerIterator<byte[], byte[]> messages = stream.iterator();
    for (int n = 0; n < total; ++n) {
        final String received = new String(messages.next().message(), UTF8);
        assertThat(received, Matchers.equalTo("message" + n));
    }
}
示例6: run
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Consumes string messages from the transducer topic and feeds each one to
 * the data processor, pausing 1s between messages, until the stream ends or
 * {@code bStartConsume} turns false.
 */
@Override
public void run() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    // Integer.valueOf avoids the deprecated new Integer(int) constructor.
    topicCountMap.put(transducer_topic, Integer.valueOf(1));
    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
    KafkaStream<String, String> stream = consumerMap.get(transducer_topic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    while (it.hasNext() && bStartConsume) {
        transducerDataProcessor.newData(it.next().message());
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // Interrupt means "stop": restore the flag and exit instead of
            // swallowing the exception with printStackTrace and looping on.
            Thread.currentThread().interrupt();
            return;
        }
    }
}
示例7: consume
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Opens {@code threadsNum} string-decoded streams for the producer topic and
 * submits one ConsumerThread per stream to a fixed-size pool.
 *
 * @throws Exception if stream creation fails
 */
void consume() throws Exception {
    // Specify the number of consumer threads.
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    // Integer.valueOf avoids the deprecated new Integer(int) constructor.
    topicCountMap.put(KafkaProducer.TOPIC, Integer.valueOf(threadsNum));
    // Specify data decoders; the three type parameters are TOPIC, key, value.
    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
    Map<String, List<KafkaStream<String, String>>> consumerMap = consumer
            .createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
    // Acquire the streams for the topic.
    List<KafkaStream<String, String>> streams = consumerMap.get(KafkaProducer.TOPIC);
    // Multi-threaded consume: one pooled thread per stream.
    executor = Executors.newFixedThreadPool(threadsNum);
    for (final KafkaStream<String, String> stream : streams) {
        executor.submit(new ConsumerThread(stream));
    }
}
示例8: KafkaDataProvider
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Connects to ZooKeeper with the given group and opens a single consumer
 * stream for the topic, keeping an iterator over its messages.
 */
public KafkaDataProvider(String zookeeper, String topic, String groupId) {
    super(MessageAndMetadata.class);
    Properties config = new Properties();
    config.put("zookeeper.connect", zookeeper);
    config.put("group.id", groupId);
    config.put("zookeeper.session.timeout.ms", "30000");
    config.put("auto.commit.interval.ms", "1000");
    config.put("fetch.message.max.bytes", "4194304");
    consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(config));
    Map<String, Integer> threadsPerTopic = new HashMap<String, Integer>();
    threadsPerTopic.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streamsByTopic =
            consumer.createMessageStreams(threadsPerTopic);
    // Exactly one stream was requested, so take the first.
    iter = streamsByTopic.get(topic).get(0).iterator();
}
示例9: start
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Starts the MessageReceiver and begins listening for topic messages.
 */
@Override
public void start() {
    if (consumer == null) {
        synchronized (lock) {
            // Re-check under the lock: without it, two threads passing the
            // outer null-check would both run init().
            if (consumer == null) {
                init();
            }
        }
    }
    String topicString = buildTopicsString();
    Whitelist topicFilter = new Whitelist(topicString);
    List<KafkaStream<byte[], byte[]>> streamList =
            consumer.createMessageStreamsByFilter(topicFilter, partitions);
    if (org.apache.commons.collections.CollectionUtils.isEmpty(streamList)) {
        // Brief back-off when no streams were created; processing continues
        // afterwards either way, matching the original behavior.
        try {
            TimeUnit.MILLISECONDS.sleep(1);
        } catch (InterruptedException e) {
            // Restore the interrupt status for the caller.
            Thread.currentThread().interrupt();
            log.warn(e.getMessage(), e);
        }
    }
    processStreamsByTopic(topicString, streamList);
}
示例10: KafkaConsumer
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Constructs a KafkaConsumer bound to a single stream of the given topic.
 *
 * @param topic the topic to consume
 * @param group the consumer group id
 * @param id the client id within the group
 * @param cc the connector used to create the message stream
 */
public KafkaConsumer(String topic, String group, String id, ConsumerConnector cc) {
    fTopic = topic;
    fGroup = group;
    fId = id;
    fConnector = cc;
    fCreateTimeMs = System.currentTimeMillis();
    fLastTouch = fCreateTimeMs;
    fLogTag = fGroup + "(" + fId + ")/" + fTopic;
    offset = 0;
    state = KafkaConsumer.State.OPENED;
    final Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(fTopic, 1);
    final Map<String, List<KafkaStream<byte[], byte[]>>> streamsByTopic =
            fConnector.createMessageStreams(topicCountMap);
    // Exactly one stream was requested for the topic, so take the first.
    fStream = streamsByTopic.get(fTopic).get(0);
}
示例11: collectMq
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Opens a single string-decoded stream on the configured topic and forwards
 * every key/value pair to the timer parser. Blocks on the consumer iterator.
 */
public void collectMq() {
    Map<String, Integer> topicThreads = new HashMap<String, Integer>();
    topicThreads.put(Constants.kfTopic, Integer.valueOf(1));
    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
    Map<String, List<KafkaStream<String, String>>> streamsByTopic =
            consumer.createMessageStreams(topicThreads, keyDecoder, valueDecoder);
    ConsumerIterator<String, String> messages =
            streamsByTopic.get(Constants.kfTopic).get(0).iterator();
    while (messages.hasNext()) {
        MessageAndMetadata<String, String> record = messages.next();
        super.mqTimer.parseMqText(record.key(), record.message());
    }
}
示例12: run
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Creates {@code a_numThreads} streams for the topic and launches one
 * ConsumerThread per stream on a fixed-size pool.
 *
 * @param a_numThreads number of consumer threads/streams to create
 */
public void run(int a_numThreads) {
    Map<String, Integer> topicCountMap = new HashMap<>();
    // Integer.valueOf avoids the deprecated new Integer(int) constructor.
    topicCountMap.put(topic, Integer.valueOf(a_numThreads));
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    // Now launch all the threads, one per stream.
    executor = Executors.newFixedThreadPool(a_numThreads);
    int threadNumber = 0;
    // Parameterize the loop variable instead of using the raw KafkaStream type.
    for (final KafkaStream<byte[], byte[]> stream : streams) {
        executor.submit(new ConsumerThread(consumer, stream, threadNumber));
        threadNumber++;
    }
}
示例13: run
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Opens {@code threadNum} streams for the topic, decoded with the supplied
 * decoders, and hands each stream to a dedicated ConsumerWorker on a
 * fixed-size pool.
 *
 * @param keyDecoder decoder applied to message keys
 * @param valueDecoder decoder applied to message values
 */
public void run(Decoder<K> keyDecoder, Decoder<V> valueDecoder) {
    Map<String, Integer> topicThreads = new HashMap<String, Integer>();
    topicThreads.put(topic, threadNum);
    Map<String, List<KafkaStream<K, V>>> streamsByTopic =
            consumer.createMessageStreams(topicThreads, keyDecoder, valueDecoder);
    List<KafkaStream<K, V>> topicStreams = streamsByTopic.get(topic);
    executor = Executors.newFixedThreadPool(threadNum);
    int workerId = 0;
    for (final KafkaStream<K, V> stream : topicStreams) {
        executor.submit(new ConsumerWorker<K, V>(stream, workerId));
        workerId++;
    }
}
示例14: consumeMessages
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Starts a background thread that drains the topic and records every
 * received message (nulls rendered as "&lt;null&gt;") into messagesReceived.
 */
private void consumeMessages() {
    final Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC, 1);
    final StringDecoder decoder =
            new StringDecoder(new VerifiableProperties());
    final Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, decoder, decoder);
    final ConsumerIterator<String, String> iterator =
            consumerMap.get(TOPIC).get(0).iterator();
    final Runnable receiveLoop = () -> {
        while (iterator.hasNext()) {
            final String raw = iterator.next().message();
            final String msg = (raw == null) ? "<null>" : raw;
            System.out.println("got message: " + msg);
            messagesReceived.add(msg);
        }
    };
    new Thread(receiveLoop, "kafkaMessageReceiverThread").start();
}
示例15: startup
import kafka.consumer.KafkaStream; //導入依賴的package包/類
/**
 * Transitions the client from INIT to RUNNING and launches one message task
 * per stream on the stream thread pool (sequential tasks when fixedThreadNum
 * is 0, concurrent tasks otherwise).
 *
 * @throws IllegalStateException if the client has already been started
 */
public void startup() {
    if (status != Status.INIT) {
        log.error("The client has been started.");
        throw new IllegalStateException("The client has been started.");
    }
    status = Status.RUNNING;
    log.info("Streams num: " + streams.size());
    tasks = new ArrayList<AbstractMessageTask>();
    for (KafkaStream<String, String> kafkaStream : streams) {
        AbstractMessageTask task;
        if (fixedThreadNum == 0) {
            task = new SequentialMessageTask(kafkaStream, handler);
        } else {
            task = new ConcurrentMessageTask(kafkaStream, handler, fixedThreadNum);
        }
        tasks.add(task);
        streamThreadPool.execute(task);
    }
}