本文整理匯總了Java中kafka.message.MessageAndMetadata類的典型用法代碼示例。如果您正苦於以下問題:Java MessageAndMetadata類的具體用法?Java MessageAndMetadata怎麽用?Java MessageAndMetadata使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
MessageAndMetadata類屬於kafka.message包,在下文中一共展示了MessageAndMetadata類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: run
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
/**
 * Tails Kafka's internal consumer-offsets topic forever and records every
 * offset commit it observes into {@code kafkaConsumerOffsets}, keyed by
 * (group, topic, partition).
 * <p>
 * Runs until the hosting thread is killed; there is no shutdown hook here.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    // Autoboxing instead of the deprecated new Integer(1).
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, 1);
    KafkaStream<byte[], byte[]> offsetMsgStream =
            consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0);
    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {
        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
        // First short of the key is the schema version; versions < 2 are
        // presumably per-partition offset commits — TODO confirm against the
        // Kafka offsets-topic format for the broker version in use.
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    // Null payload: tombstone for a deleted offset entry; nothing to cache.
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue = readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                // NOTE(review): prefer a logger; kept stderr output since no logger is in scope here.
                e.printStackTrace();
            }
        }
    }
}
示例2: getNextMessage
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
/**
 * Fetches the next available message from the (single) stream registered for
 * the given topic, or {@code null} when the consumer times out with nothing
 * to read.
 *
 * @param topic topic whose stream is looked up in {@code consumerMap}
 * @return the next message-and-metadata, or {@code null} if none is available
 */
public MessageAndMetadata getNextMessage(String topic) {
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    // There is only one consumer, so exactly one stream exists per topic.
    KafkaStream<byte[], byte[]> stream = streams.get(0);
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();
    try {
        if (it.hasNext()) {
            return it.next();
        } else {
            return null;
        }
    } catch (ConsumerTimeoutException e) {
        // hasNext() blocks up to consumer.timeout.ms and then throws; treat as "no message".
        logger.error("0 messages available to fetch for the topic " + topic);
        return null;
    }
}
示例3: shouldWriteThenRead
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
@Test
public void shouldWriteThenRead() throws Exception {
    // Attach a consumer to the topic under test.
    ConsumerIterator<String, String> consumerIterator = buildConsumer(Original.topic);

    // Produce a single record and block until the broker acknowledges it.
    producer = new KafkaProducer<>(producerProps());
    producer.send(new ProducerRecord<>(Original.topic, "message")).get();

    // The consumer must observe exactly that payload.
    MessageAndMetadata<String, String> received = consumerIterator.next();
    assertThat(received.message(), is("message"));
}
示例4: shouldWriteThenRead
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
@Test
public void shouldWriteThenRead() throws Exception {
    // Attach a consumer before producing so the message is not missed.
    ConsumerIterator<String, String> consumerIterator = buildConsumer(SimpleKafkaTest.topic);

    // Send one record synchronously (get() waits for the broker ack).
    producer = new KafkaProducer<>(producerProps());
    producer.send(new ProducerRecord<>(SimpleKafkaTest.topic, "message")).get();

    // Round-trip check: the consumed payload equals what was produced.
    MessageAndMetadata<String, String> received = consumerIterator.next();
    assertThat(received.message(), is("message"));
}
示例5: run
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
/**
 * Drains the assigned Kafka stream: each message is parsed as JSON, routed to
 * the storage provider configured for it, and saved. A failure on one record
 * is logged with its partition/offset and consumption continues.
 */
@Override
public void run() {
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> mam = it.next();
        String jsonStr = "";
        try {
            // NOTE(review): decodes with the platform default charset — presumably UTF-8; confirm.
            jsonStr = new String(mam.message());
            JSONObject jsonObject = JSONObject.parseObject(jsonStr);
            LogcenterConfig config = LogConfigCache.getLogConfigCache(jsonObject);
            IStorageApi iStorageApi = ServiceRegister.getInstance().getProvider(config.getStorageType());
            iStorageApi.save(jsonObject);
        } catch (Exception e) {
            // logger.error already records the stack trace; the former
            // e.printStackTrace() duplicated it on stderr and was removed.
            logger.error("partition[" + mam.partition() + "]," + "offset[" + mam.offset() + "], " + jsonStr, e);
        }
    }
}
示例6: run
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
/**
 * Consumes string messages from the assigned stream, echoes each one with its
 * partition to stdout, and forwards it to IoTDB. Upload failures are reported
 * and consumption continues with the next message.
 */
public void run() {
    ConsumerIterator<String, String> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<String, String> consumerIterator = it.next();
        String uploadMessage = consumerIterator.message();
        System.out.println(Thread.currentThread().getName()
                + " from partiton[" + consumerIterator.partition() + "]: "
                + uploadMessage);
        try {
            sendDataToIotdb.writeData(uploadMessage); // upload data to the IoTDB database
        } catch (Exception ex) {
            // The handler catches Exception, not only SQLException, so the
            // old "SQLException: " label was misleading and has been fixed.
            System.out.println("Exception: " + ex.getMessage());
        }
    }
}
示例7: KafkaDataProvider
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
/**
 * Builds a data provider backed by a single high-level Kafka consumer stream.
 *
 * @param zookeeper ZooKeeper connect string used by the consumer
 * @param topic     topic to consume from (one stream)
 * @param groupId   consumer group id
 */
public KafkaDataProvider(String zookeeper, String topic, String groupId) {
    super(MessageAndMetadata.class);

    // Legacy high-level consumer configuration.
    Properties consumerProps = new Properties();
    consumerProps.put("zookeeper.connect", zookeeper);
    consumerProps.put("group.id", groupId);
    consumerProps.put("zookeeper.session.timeout.ms", "30000");
    consumerProps.put("auto.commit.interval.ms", "1000");
    consumerProps.put("fetch.message.max.bytes", "4194304");
    consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps));

    // Request exactly one stream for the topic and keep its iterator.
    Map<String, Integer> streamCounts = new HashMap<String, Integer>();
    streamCounts.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streamsByTopic = consumer.createMessageStreams(streamCounts);
    iter = streamsByTopic.get(topic).get(0).iterator();
}
示例8: KafkaIndexingManager
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
// Indexing manager that adapts raw Kafka messages into LindenIndexRequest
// objects before handing them to the base indexing pipeline via super().
public KafkaIndexingManager(final LindenConfig lindenConfig, ShardingStrategy shardingStrategy,
LindenCore lindenCore, DataProvider<MessageAndMetadata<byte[], byte[]>> provider) {
super(provider, lindenConfig, lindenCore, new Function<MessageAndMetadata<byte[], byte[]>, LindenIndexRequest>() {
@Override
public LindenIndexRequest apply(MessageAndMetadata<byte[], byte[]> messageAndMetadata) {
LindenIndexRequest indexRequest = null;
// Partition/offset are captured only for the log lines below.
long offset = messageAndMetadata.offset();
long partition = messageAndMetadata.partition();
// NOTE(review): decodes with the platform default charset — presumably UTF-8; confirm.
String message = new String(messageAndMetadata.message());
try {
indexRequest = LindenIndexRequestParser.parse(lindenConfig.getSchema(), message);
LOGGER.info("Parse index request : id={}, route={}, type={}, content({}/{})={}", indexRequest.getId(),
indexRequest.getRouteParam(), indexRequest.getType(), partition, offset, message);
} catch (IOException e) {
LOGGER.error("Parse index request failed : {} - {}", message, Throwables.getStackTraceAsString(e));
}
// On parse failure this returns null — downstream must tolerate null requests.
return indexRequest;
}
}, shardingStrategy);
}
示例9: initIndexingManger
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
/**
 * Creates and starts the IndexingManager matching the gateway's data provider
 * type, or returns null when no gateway plugin is configured.
 *
 * @throws IOException if the provider type is neither String nor MessageAndMetadata
 */
public static IndexingManager initIndexingManger(LindenConfig config, ShardingStrategy shardingStrategy,
                                                 LindenCore lindenCore)
    throws IOException {
    LindenPluginManager pluginManager = config.getPluginManager();
    LindenGateway gateway = pluginManager.getInstance(LindenConfigBuilder.GATEWAY, LindenGateway.class);
    if (gateway == null) {
        return null;
    }
    DataProvider dataProvider = gateway.buildDataProvider();
    if (dataProvider == null) {
        return null;
    }
    // Dispatch on the provider's element type to pick the manager flavor.
    IndexingManager indexingManager;
    if (dataProvider.getType() == String.class) {
        indexingManager = new StringIndexingManager(config, shardingStrategy, lindenCore, dataProvider);
    } else if (dataProvider.getType() == MessageAndMetadata.class) {
        indexingManager = new KafkaIndexingManager(config, shardingStrategy, lindenCore, dataProvider);
    } else {
        throw new IOException("Unsupported data provider type");
    }
    indexingManager.start();
    return indexingManager;
}
示例10: collectMq
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
/**
 * Subscribes a single string-decoded stream to the configured topic and feeds
 * each key/message pair into {@code mqTimer.parseMqText}. Blocks for as long
 * as the iterator has messages.
 */
public void collectMq(){
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    // Autoboxing instead of the deprecated new Integer(1).
    topicCountMap.put(Constants.kfTopic, 1);

    // Decode both key and value as UTF-8 strings.
    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
    KafkaStream<String, String> stream = consumerMap.get(Constants.kfTopic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    MessageAndMetadata<String, String> msgMeta;
    while (it.hasNext()){
        msgMeta = it.next();
        super.mqTimer.parseMqText(msgMeta.key(), msgMeta.message());
    }
}
示例11: accept
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
/**
 * Converts one received Kafka record into a tuple and hands it to the event
 * submitter. Conversion or submission failures are logged (with topic and
 * partition) and swallowed so the consume loop keeps running.
 */
void accept(MessageAndMetadata<byte[],byte[]> rec) {
    try {
        trace.trace("{} received rec for topic:{} partition:{} offset:{}",
                id(), rec.topic(), rec.partition(), rec.offset());
        // A string converter takes precedence; otherwise fall back to the byte converter.
        T tuple = (stringToTupleFn != null)
                ? stringToTupleFn.apply(new StringConsumerRecord(rec))
                : byteToTupleFn.apply(new ByteConsumerRecord(rec));
        eventSubmitter.accept(tuple);
    }
    catch (Exception e) {
        String tp = String.format("[%s,%d]", rec.topic(), rec.partition());
        trace.error("{} failure processing record from {}", id(), tp, e);
    }
}
示例12: readTopicToList
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
/**
 * Read topic to list, only using Kafka code.
 *
 * @param topicName topic to drain
 * @param config    consumer configuration (group id is logged)
 * @param stopAfter stop after reading this many messages; may return fewer if
 *                  the iterator is exhausted first
 * @return the messages read, in consumption order
 */
private static List<MessageAndMetadata<byte[], byte[]>> readTopicToList(String topicName, ConsumerConfig config, final int stopAfter) {
    ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(config);
    // we request only one stream per consumer instance. Kafka will make sure that each consumer group
    // will see each message only once.
    Map<String,Integer> topicCountMap = Collections.singletonMap(topicName, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = consumerConnector.createMessageStreams(topicCountMap);
    if (streams.size() != 1) {
        throw new RuntimeException("Expected only one message stream but got "+streams.size());
    }
    List<KafkaStream<byte[], byte[]>> kafkaStreams = streams.get(topicName);
    if (kafkaStreams == null) {
        throw new RuntimeException("Requested stream not available. Available streams: "+streams.toString());
    }
    if (kafkaStreams.size() != 1) {
        // Fixed typo in the message: "bot got" -> "but got".
        throw new RuntimeException("Requested 1 stream from Kafka, but got "+kafkaStreams.size()+" streams");
    }
    LOG.info("Opening Consumer instance for topic '{}' on group '{}'", topicName, config.groupId());
    ConsumerIterator<byte[], byte[]> iteratorToRead = kafkaStreams.get(0).iterator();
    List<MessageAndMetadata<byte[], byte[]>> result = new ArrayList<>();
    int read = 0;
    while(iteratorToRead.hasNext()) {
        read++;
        result.add(iteratorToRead.next());
        if (read == stopAfter) {
            LOG.info("Read "+read+" elements");
            return result;
        }
    }
    return result;
}
示例13: run
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
/**
 * Consumes records from the assigned stream and logs a one-line summary of
 * each (thread, topic, partition, key, offset, timestamps, decoded request).
 * Per-record decode failures are logged and skipped.
 */
public void run() {
    Iote2eRequestReuseItem iote2eRequestReuseItem = new Iote2eRequestReuseItem();
    ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> record = it.next();
        String recordKey = new String(record.key());
        try {
            StringBuilder summary = new StringBuilder();
            summary.append("Thread ").append(threadNumber)
                   .append(", topic=").append(record.topic())
                   .append(", partition=").append(record.partition())
                   .append(", key=").append(recordKey)
                   .append(", offset=").append(record.offset())
                   .append(", timestamp=").append(record.timestamp())
                   .append(", timestampType=").append(record.timestampType())
                   .append(", iote2eRequest=")
                   .append(iote2eRequestReuseItem.fromByteArray(record.message()).toString());
            logger.info(">>> Consumed: " + summary);
        } catch( Exception e ) {
            logger.error(e.getMessage(), e);
        }
    }
    logger.info(">>> Shutting down Thread: " + threadNumber);
}
示例14: run
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
/**
 * Drains the assigned stream, logging one summary line per record with the
 * thread number, topic, partition, key, payload, offset and timestamps, then
 * logs a shutdown message once the iterator is exhausted.
 */
public void run() {
    ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> record = it.next();
        String recordKey = new String( record.key() );
        String recordValue = new String( record.message() );
        StringBuilder summary = new StringBuilder();
        summary.append("Thread ").append(threadNumber)
               .append(", topic=").append(record.topic())
               .append(", partition=").append(record.partition())
               .append(", key=").append(recordKey)
               .append(", message=").append(recordValue)
               .append(", offset=").append(record.offset())
               .append(", timestamp=").append(record.timestamp())
               .append(", timestampType=").append(record.timestampType());
        logger.info(">>> Consumed: " + summary);
    }
    logger.info(">>> Shutting down Thread: " + threadNumber);
}
示例15: run
import kafka.message.MessageAndMetadata; //導入依賴的package包/類
/**
 * Consumes Avro-encoded User records from the assigned stream, decodes each
 * payload via the binary Injection for the User schema, and prints a summary
 * line per record. Any failure aborts the loop and is reported once.
 */
public void run() {
    try {
        ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
        // Injection is built once per run; it is reused for every record.
        Injection<GenericRecord, byte[]> recordInjection = GenericAvroCodecs.toBinary(User.getClassSchema());
        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
            String key = new String(messageAndMetadata.key());
            User user = genericRecordToUser(recordInjection.invert(messageAndMetadata.message()).get());
            String summary = "Thread " + m_threadNumber + ", topic=" + messageAndMetadata.topic() + ", partition="
                    + messageAndMetadata.partition() + ", key=" + key + ", user=" + user.toString() + ", offset="
                    + messageAndMetadata.offset() + ", timestamp=" + messageAndMetadata.timestamp()
                    + ", timestampType=" + messageAndMetadata.timestampType();
            System.out.println(summary);
        }
        System.out.println("Shutting down Thread: " + m_threadNumber);
    } catch (Exception e) {
        // printStackTrace already dumps the exception; the former extra
        // System.out.println(e) duplicated that output and was removed.
        System.out.println("Exception in thread "+m_threadNumber);
        e.printStackTrace();
    }
}