This article collects typical usage examples of the Java method kafka.message.MessageAndMetadata.message. If you are unsure what MessageAndMetadata.message does, how to call it, or what real-world usage looks like, the curated code samples below should help. You can also explore the enclosing class, kafka.message.MessageAndMetadata, in more depth.
Fifteen code examples of MessageAndMetadata.message are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
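All of the examples use Kafka's old high-level consumer API (the kafka.consumer and kafka.javaapi.consumer packages, removed in Kafka 2.0), in which each consumed record arrives as a MessageAndMetadata and message() returns the decoded value. As orientation before the examples, here is a minimal, self-contained sketch of that scaffolding; the ZooKeeper address, group id, and topic name are placeholders:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class MessageDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder address
        props.put("group.id", "demo-group");              // placeholder group id
        ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // Ask for one stream (one consuming thread) for the topic.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put("demo-topic", 1);
        Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);

        // Each record arrives as a MessageAndMetadata; message() returns the raw value bytes.
        ConsumerIterator<byte[], byte[]> it = streams.get("demo-topic").get(0).iterator();
        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> mam = it.next();
            System.out.println(new String(mam.message()));
        }
    }
}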
Example 1: run
import kafka.message.MessageAndMetadata; // import the dependent package/class
/**
* When an object implementing interface <code>Runnable</code> is used
* to create a thread, starting the thread causes the object's
* <code>run</code> method to be called in that separately executing
* thread.
* <p>
* The general contract of the method <code>run</code> is that it may
* take any action whatsoever.
*
* @see Thread#run()
*/
@Override
public void run() {
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, new Integer(1));
    KafkaStream<byte[], byte[]> offsetMsgStream = consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0);
    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {
        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue = readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
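This snippet tails Kafka's internal __consumer_offsets topic: the leading short in each key is the key schema version, where versions 0 and 1 mark offset-commit records and version 2 marks group metadata, which is why higher versions are skipped. KafkaUtils.createConsumerConnector, readMessageKey, and readMessageValue are helpers from the surrounding project, not Kafka APIs. A plausible sketch of the first helper, assuming it needs only the ZooKeeper address and group id:

// Hypothetical reconstruction of the project's helper; only zookeeper.connect
// and group.id are strictly required by the old high-level consumer.
public static ConsumerConnector createConsumerConnector(String zkAddr, String group) {
    Properties props = new Properties();
    props.put("zookeeper.connect", zkAddr);
    props.put("group.id", group);
    props.put("auto.offset.reset", "smallest"); // assumption: replay from the earliest available offset
    return Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
}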
Example 2: shouldWriteThenRead
import kafka.message.MessageAndMetadata; // import the dependent package/class
@Test
public void shouldWriteThenRead() throws Exception {
    // Create a consumer
    ConsumerIterator<String, String> it = buildConsumer(Original.topic);
    // Create a producer
    producer = new KafkaProducer<>(producerProps());
    // Send a message
    producer.send(new ProducerRecord<>(Original.topic, "message")).get();
    // Read it back
    MessageAndMetadata<String, String> messageAndMetadata = it.next();
    String value = messageAndMetadata.message();
    assertThat(value, is("message"));
}
Example 3: shouldWriteThenRead
import kafka.message.MessageAndMetadata; // import the dependent package/class
@Test
public void shouldWriteThenRead() throws Exception {
    // Create a consumer
    ConsumerIterator<String, String> it = buildConsumer(SimpleKafkaTest.topic);
    // Create a producer
    producer = new KafkaProducer<>(producerProps());
    // Send a message
    producer.send(new ProducerRecord<>(SimpleKafkaTest.topic, "message")).get();
    // Read it back
    MessageAndMetadata<String, String> messageAndMetadata = it.next();
    String value = messageAndMetadata.message();
    assertThat(value, is("message"));
}
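Examples 2 and 3 are near-identical tests from different projects; both rely on a buildConsumer helper that is not shown. A minimal sketch of what such a helper might look like, assuming string keys and values and a local test ZooKeeper:

// Hypothetical helper: returns an iterator over String-decoded messages for one topic.
private ConsumerIterator<String, String> buildConsumer(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:2181"); // placeholder test address
    props.put("group.id", "test-group");              // placeholder group id
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<String, String>>> streams = connector.createMessageStreams(
        topicCountMap, new StringDecoder(null), new StringDecoder(null));
    return streams.get(topic).get(0).iterator();
}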
Example 4: run
import kafka.message.MessageAndMetadata; // import the dependent package/class
@Override
public void run() {
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> mam = it.next();
        String jsonStr = "";
        try {
            jsonStr = new String(mam.message());
            JSONObject jsonObject = JSONObject.parseObject(jsonStr);
            LogcenterConfig config = LogConfigCache.getLogConfigCache(jsonObject);
            IStorageApi iStorageApi = ServiceRegister.getInstance().getProvider(config.getStorageType());
            iStorageApi.save(jsonObject);
        } catch (Exception e) {
            e.printStackTrace();
            logger.error("partition[" + mam.partition() + "]," + "offset[" + mam.offset() + "], " + jsonStr, e);
            continue;
        }
    }
}
Example 5: run
import kafka.message.MessageAndMetadata; // import the dependent package/class
public void run() {
    ConsumerIterator<String, String> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<String, String> consumerIterator = it.next();
        String uploadMessage = consumerIterator.message();
        System.out.println(Thread.currentThread().getName()
            + " from partition[" + consumerIterator.partition() + "]: "
            + uploadMessage);
        try {
            sendDataToIotdb.writeData(uploadMessage); // upload data to the IoTDB database
        } catch (Exception ex) {
            System.out.println("SQLException: " + ex.getMessage());
        }
    }
}
Example 6: KafkaIndexingManager
import kafka.message.MessageAndMetadata; // import the dependent package/class
public KafkaIndexingManager(final LindenConfig lindenConfig, ShardingStrategy shardingStrategy,
                            LindenCore lindenCore, DataProvider<MessageAndMetadata<byte[], byte[]>> provider) {
    super(provider, lindenConfig, lindenCore, new Function<MessageAndMetadata<byte[], byte[]>, LindenIndexRequest>() {
        @Override
        public LindenIndexRequest apply(MessageAndMetadata<byte[], byte[]> messageAndMetadata) {
            LindenIndexRequest indexRequest = null;
            long offset = messageAndMetadata.offset();
            long partition = messageAndMetadata.partition();
            String message = new String(messageAndMetadata.message());
            try {
                indexRequest = LindenIndexRequestParser.parse(lindenConfig.getSchema(), message);
                LOGGER.info("Parse index request : id={}, route={}, type={}, content({}/{})={}", indexRequest.getId(),
                    indexRequest.getRouteParam(), indexRequest.getType(), partition, offset, message);
            } catch (IOException e) {
                LOGGER.error("Parse index request failed : {} - {}", message, Throwables.getStackTraceAsString(e));
            }
            return indexRequest;
        }
    }, shardingStrategy);
}
Example 7: run
import kafka.message.MessageAndMetadata; // import the dependent package/class
public void run() {
    ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
        String key = new String(messageAndMetadata.key());
        String message = new String(messageAndMetadata.message());
        String summary =
            "Thread " + threadNumber +
            ", topic=" + messageAndMetadata.topic() +
            ", partition=" + messageAndMetadata.partition() +
            ", key=" + key +
            ", message=" + message +
            ", offset=" + messageAndMetadata.offset() +
            ", timestamp=" + messageAndMetadata.timestamp() +
            ", timestampType=" + messageAndMetadata.timestampType();
        logger.info(">>> Consumed: " + summary);
    }
    logger.info(">>> Shutting down Thread: " + threadNumber);
}
Example 8: run
import kafka.message.MessageAndMetadata; // import the dependent package/class
public void run() {
    ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
        String key = new String(messageAndMetadata.key());
        String message = new String(messageAndMetadata.message());
        String summary =
            "Thread " + threadNumber +
            ", topic=" + messageAndMetadata.topic() +
            ", partition=" + messageAndMetadata.partition() +
            ", key=" + key +
            ", message=" + message +
            ", offset=" + messageAndMetadata.offset() +
            ", timestamp=" + messageAndMetadata.timestamp() +
            ", timestampType=" + messageAndMetadata.timestampType();
        System.out.println(summary);
    }
    System.out.println("Shutting down Thread: " + threadNumber);
}
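A usage note on examples 7 and 8: timestamp() and timestampType() on MessageAndMetadata only carry meaningful values with Kafka 0.10+ brokers and the 0.10+ message format; for records written in the older format the timestamp typically comes back as -1 with no timestamp type.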
Example 9: run
import kafka.message.MessageAndMetadata; // import the dependent package/class
public void run() {
    logger.info("KafkaChannel {} has stream", this.threadNumber);
    final ConsumerIterator<byte[], byte[]> streamIterator = stream.iterator();
    running = true;
    while (running) {
        try {
            if (streamIterator.hasNext()) {
                MessageAndMetadata<byte[], byte[]> messageAndMetadata = streamIterator.next();
                byte[] key = messageAndMetadata.key();
                byte[] message = messageAndMetadata.message();
                consumeMessage(key, message);
            }
        } catch (ConsumerTimeoutException cte) {
            logger.debug("Timed out when consuming from Kafka", cte);
            KafkaHealthCheck.getInstance().heartAttack(cte.getMessage());
        }
    }
}
Example 10: run
import kafka.message.MessageAndMetadata; // import the dependent package/class
@Override
public void run() {
    ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> md = it.next();
        byte[] msg = md.message();
        long offset = md.offset();
        String smsg = new String(msg);
        try {
            m_loader.insertRow(new RowWithMetaData(smsg, offset), m_csvParser.parseLine(smsg));
        } catch (Exception ex) {
            m_log.error("Consumer stopped", ex);
            System.exit(1);
        }
    }
}
Example 11: read
import kafka.message.MessageAndMetadata; // import the dependent package/class
@Override
public MessageAndOffset read() throws StageException {
    try {
        // hasNext() blocks indefinitely if consumer.timeout.ms is set to -1,
        // but if consumer.timeout.ms is set to a value such as 6000, a
        // ConsumerTimeoutException is thrown when no message is written to the
        // Kafka topic within that time.
        if (consumerIterator.hasNext()) {
            MessageAndMetadata<byte[], byte[]> messageAndMetadata = consumerIterator.next();
            byte[] message = messageAndMetadata.message();
            long offset = messageAndMetadata.offset();
            int partition = messageAndMetadata.partition();
            return new MessageAndOffset(message, offset, partition);
        }
        return null;
    } catch (ConsumerTimeoutException e) {
        /* For the high-level consumer, the fetching logic is handled by a
           background fetcher thread and is hidden from the user. In either case,
           1) the broker is down, or
           2) no message is available,
           the fetcher thread keeps retrying while the user thread waits for it
           to put some data into the buffer, until the timeout. In short, the
           high-level consumer is designed so that users need not worry about
           connect/reconnect issues. */
        return null;
    }
}
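The timeout behavior described in the comments above is driven by a single consumer property. A minimal sketch, with the 6000 ms value mirroring the comment and the addresses as placeholders:

Properties props = new Properties();
props.put("zookeeper.connect", "localhost:2181"); // placeholder
props.put("group.id", "reader-group");            // placeholder
// With -1 (the default), hasNext() blocks forever; with a positive value,
// hasNext() throws ConsumerTimeoutException after that many milliseconds
// without a message, which the read() method above maps to null.
props.put("consumer.timeout.ms", "6000");
ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));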
Example 12: testKafkaLogAppender
import kafka.message.MessageAndMetadata; // import the dependent package/class
@Test
public void testKafkaLogAppender() {
    Properties consumerProps = new Properties();
    consumerProps.put("zookeeper.connect", zookeeper);
    consumerProps.put("group.id", "kafka-log-appender-test");
    consumerProps.put("auto.offset.reset", "smallest");
    consumerProps.put("schema.registry.url", schemaRegistry);
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    topicMap.put(topic, 1);
    ConsumerIterator<String, Object> iterator = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps))
        .createMessageStreams(topicMap, new StringDecoder(null), new KafkaAvroDecoder(new VerifiableProperties(consumerProps)))
        .get(topic).get(0).iterator();
    String testMessage = "I am a test message";
    logger.info(testMessage);
    MessageAndMetadata<String, Object> messageAndMetadata = iterator.next();
    GenericRecord logLine = (GenericRecord) messageAndMetadata.message();
    assertEquals(logLine.get("line").toString(), testMessage);
    assertEquals(logLine.get("logtypeid"), KafkaLogAppender.InfoLogTypeId);
    assertNotNull(logLine.get("source"));
    assertEquals(((Map<CharSequence, Object>) logLine.get("timings")).size(), 1);
    assertEquals(((Map<CharSequence, Object>) logLine.get("tag")).size(), 2);
}
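Two notes on this example: auto.offset.reset=smallest is the old consumer's equivalent of the modern earliest setting, and KafkaAvroDecoder (together with the schema.registry.url property) comes from Confluent's schema-registry serializers rather than from Kafka itself, which is why message() returns an Avro GenericRecord here instead of a String or byte[].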
Example 13: run
import kafka.message.MessageAndMetadata; // import the dependent package/class
public void run() {
    try {
        ConsumerIterator<String, String> it = stream.iterator();
        while (it.hasNext()) {
            MessageAndMetadata<String, String> messageAndMetadata = it.next();
            String message = messageAndMetadata.message();
            messageHandler.handle(message);
            meter.mark();
        }
        messageHandler.flush();
    } catch (ConsumerTimeoutException e) {
        messageHandler.flush();
    }
}
Example 14: processMessage
import kafka.message.MessageAndMetadata; // import the dependent package/class
@Override
protected void processMessage(MessageAndMetadata<byte[], byte[]> message) {
    try {
        Collection<Either<JobSpec, URI>> parsedCollection = parseJobSpec(message.message());
        for (Either<JobSpec, URI> parsedMessage : parsedCollection) {
            if (parsedMessage instanceof Either.Left) {
                this.newSpecs.inc();
                this.jobCatalog.put(((Either.Left<JobSpec, URI>) parsedMessage).getLeft());
            } else if (parsedMessage instanceof Either.Right) {
                this.remmovedSpecs.inc();
                this.jobCatalog.remove(((Either.Right<JobSpec, URI>) parsedMessage).getRight());
            }
        }
    } catch (IOException ioe) {
        String messageStr = new String(message.message(), Charsets.UTF_8);
        log.error(String.format("Failed to parse kafka message with offset %d: %s.", message.offset(), messageStr), ioe);
    }
}
Example 15: receive
import kafka.message.MessageAndMetadata; // import the dependent package/class
@Override
public BaseConsumerRecord receive() {
    if (!_iter.hasNext()) {
        return null;
    }
    MessageAndMetadata<String, String> record = _iter.next();
    return new BaseConsumerRecord(record.topic(), record.partition(), record.offset(), record.key(), record.message());
}