This article collects typical usage examples of the Java method kafka.message.MessageAndMetadata.key. If you have been wondering what MessageAndMetadata.key does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help. You can also explore further usage examples for the enclosing class, kafka.message.MessageAndMetadata.
Fifteen code examples of MessageAndMetadata.key are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
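Most of the snippets below iterate a KafkaStream obtained from Kafka's legacy high-level consumer API, which is where MessageAndMetadata comes from. For orientation, here is a minimal sketch of that setup and of reading key() from each record; treat the ZooKeeper address, group id, and topic name as placeholder assumptions rather than working configuration.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class KeyUsageSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder
        props.put("group.id", "example-group");           // placeholder

        ConsumerConnector consumer =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // Request a single stream for the topic.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put("example-topic", 1);
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                consumer.createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> kafkaStream = streams.get("example-topic").get(0);

        // Every record exposes its key through MessageAndMetadata.key().
        for (MessageAndMetadata<byte[], byte[]> mam : kafkaStream) {
            byte[] key = mam.key(); // null if the message was produced without a key
            System.out.println("key=" + (key == null ? "null" : new String(key)));
        }
        consumer.shutdown();
    }
}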
Example 1: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
Iote2eRequestReuseItem iote2eRequestReuseItem = new Iote2eRequestReuseItem();
ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
while (it.hasNext()) {
MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
String key = new String(messageAndMetadata.key());
try {
String summary =
"Thread " + threadNumber +
", topic=" + messageAndMetadata.topic() +
", partition=" + messageAndMetadata.partition() +
", key=" + key +
", offset=" + messageAndMetadata.offset() +
", timestamp=" + messageAndMetadata.timestamp() +
", timestampType=" + messageAndMetadata.timestampType() +
", iote2eRequest=" + iote2eRequestReuseItem.fromByteArray(messageAndMetadata.message()).toString();
logger.info(">>> Consumed: " + summary);
} catch( Exception e ) {
logger.error(e.getMessage(), e);
}
}
logger.info(">>> Shutting down Thread: " + threadNumber);
}
Example 2: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
while (it.hasNext()) {
MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
String key = new String( messageAndMetadata.key() );
String message = new String( messageAndMetadata.message() );
String summary =
"Thread " + threadNumber +
", topic=" + messageAndMetadata.topic() +
", partition=" + messageAndMetadata.partition() +
", key=" + key +
", message=" + message +
", offset=" + messageAndMetadata.offset() +
", timestamp=" + messageAndMetadata.timestamp() +
", timestampType=" + messageAndMetadata.timestampType();
logger.info(">>> Consumed: " + summary);
}
logger.info(">>> Shutting down Thread: " + threadNumber);
}
Example 3: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
try {
ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
Injection<GenericRecord, byte[]> recordInjection = GenericAvroCodecs.toBinary(User.getClassSchema());
while (it.hasNext()) {
MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
String key = new String(messageAndMetadata.key());
User user = genericRecordToUser(recordInjection.invert(messageAndMetadata.message()).get());
// User user = (User)
// recordInjection.invert(messageAndMetadata.message()).get();
String summary = "Thread " + m_threadNumber + ", topic=" + messageAndMetadata.topic() + ", partition="
+ messageAndMetadata.partition() + ", key=" + key + ", user=" + user.toString() + ", offset="
+ messageAndMetadata.offset() + ", timestamp=" + messageAndMetadata.timestamp()
+ ", timestampType=" + messageAndMetadata.timestampType();
System.out.println(summary);
}
System.out.println("Shutting down Thread: " + m_threadNumber);
} catch (Exception e) {
System.out.println("Exception in thread "+m_threadNumber);
System.out.println(e);
e.printStackTrace();
}
}
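The recordInjection used above comes from Twitter's Bijection library: GenericAvroCodecs.toBinary(schema) builds an Injection<GenericRecord, byte[]> for the given Avro schema, and invert returns a Try, hence the trailing .get(). As a rough sketch of the matching producer-side serialization (the name field is a hypothetical assumption about the User schema):

import com.twitter.bijection.Injection;
import com.twitter.bijection.avro.GenericAvroCodecs;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;

Injection<GenericRecord, byte[]> recordInjection = GenericAvroCodecs.toBinary(User.getClassSchema());
GenericRecord record = new GenericData.Record(User.getClassSchema());
record.put("name", "alice");                     // hypothetical field
byte[] payload = recordInjection.apply(record);  // GenericRecord -> byte[]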
Example 4: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
while (it.hasNext()) {
MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
String key = new String( messageAndMetadata.key() );
String message = new String( messageAndMetadata.message() );
String summary =
"Thread " + threadNumber +
", topic=" + messageAndMetadata.topic() +
", partition=" + messageAndMetadata.partition() +
", key=" + key +
", message=" + message +
", offset=" + messageAndMetadata.offset() +
", timestamp=" + messageAndMetadata.timestamp() +
", timestampType=" + messageAndMetadata.timestampType();
System.out.println(summary);
}
System.out.println("Shutting down Thread: " + threadNumber);
}
Example 5: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
logger.info("KafkaChannel {} has stream", this.threadNumber);
final ConsumerIterator<byte[], byte[]> streamIterator = stream.iterator();
running = true;
while (running) {
try {
if (streamIterator.hasNext()) {
MessageAndMetadata<byte[], byte[]> messageAndMetadata = streamIterator.next();
byte[] key = messageAndMetadata.key();
byte[] message = messageAndMetadata.message();
consumeMessage(key, message);
}
} catch (ConsumerTimeoutException cte) {
logger.debug("Timed out when consuming from Kafka", cte);
KafkaHealthCheck.getInstance().heartAttack(cte.getMessage());
}
}
}
Example 6: receive
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Override
public BaseConsumerRecord receive() {
if (!_iter.hasNext())
return null;
MessageAndMetadata<String, String> record = _iter.next();
return new BaseConsumerRecord(record.topic(), record.partition(), record.offset(), record.key(), record.message());
}
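Unlike the byte[]-typed examples, this consumer iterates MessageAndMetadata<String, String>, which means the stream itself must be created with string decoders. A minimal sketch, assuming the consumer and topicCountMap from the setup shown after the introduction:

import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

// Decode both key and message as UTF-8 strings.
StringDecoder decoder = new StringDecoder(new VerifiableProperties());
Map<String, List<KafkaStream<String, String>>> streams =
        consumer.createMessageStreams(topicCountMap, decoder, decoder);
ConsumerIterator<String, String> _iter = streams.get("example-topic").get(0).iterator();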
Example 7: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
try {
ConsumerIterator<byte[], byte[]> it = stream.iterator();
BinaryDecoder binaryDecoder = null;
Weather weatherRead = null;
DatumReader<Weather> datumReaderWeather = new SpecificDatumReader<Weather>(Weather.getClassSchema());
while (it.hasNext()) {
MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
String key = new String(messageAndMetadata.key());
binaryDecoder = DecoderFactory.get().binaryDecoder(messageAndMetadata.message(), binaryDecoder);
weatherRead = datumReaderWeather.read(weatherRead, binaryDecoder);
String summary = "Thread " + threadNumber + ", topic=" + messageAndMetadata.topic() + ", partition="
+ messageAndMetadata.partition() + ", key=" + key + ", offset="
+ messageAndMetadata.offset() + ", timestamp=" + messageAndMetadata.timestamp()
+ ", timestampType=" + messageAndMetadata.timestampType()
+ ", weatherRead=" + weatherRead.toString();
System.out.println(summary);
}
System.out.println("Shutting down Thread: " + threadNumber);
} catch (Exception e) {
System.out.println("Exception in thread "+threadNumber);
System.out.println(e);
e.printStackTrace();
}
}
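Note the reuse idiom in this example: DecoderFactory.binaryDecoder(byte[], BinaryDecoder) and SpecificDatumReader.read(reuse, decoder) both accept a previously allocated instance and recycle it, so a long-running consumer thread avoids allocating a fresh decoder and record object for every message. Example 8 below applies the same pattern to a Wave record.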
Example 8: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
try {
ConsumerIterator<byte[], byte[]> it = stream.iterator();
BinaryDecoder binaryDecoder = null;
Wave waveRead = null;
DatumReader<Wave> datumReaderWave = new SpecificDatumReader<Wave>(Wave.getClassSchema());
while (it.hasNext()) {
MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
String key = new String(messageAndMetadata.key());
binaryDecoder = DecoderFactory.get().binaryDecoder(messageAndMetadata.message(), binaryDecoder);
waveRead = datumReaderWave.read(waveRead, binaryDecoder);
String summary = ">>> CONSUMER: Thread " + threadNumber + ", topic=" + messageAndMetadata.topic() + ", partition="
+ messageAndMetadata.partition() + ", key=" + key + ", offset="
+ messageAndMetadata.offset() + ", timestamp=" + messageAndMetadata.timestamp()
+ ", timestampType=" + messageAndMetadata.timestampType()
+ ", waveRead=" + waveRead.toString();
System.out.println(summary);
}
System.out.println("Shutting down Thread: " + threadNumber);
} catch (Exception e) {
System.out.println("Exception in thread "+threadNumber);
System.out.println(e);
e.printStackTrace();
}
}
Example 9: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Override
@SuppressWarnings("unchecked")
public void run() {
for (MessageAndMetadata<byte[], byte[]> messageAndMetadata : (Iterable<MessageAndMetadata<byte[], byte[]>>) stream) {
String topic = messageAndMetadata.topic();
byte[] key = messageAndMetadata.key();
byte[] message = messageAndMetadata.message();
sendText(topic, key, message);
}
}
Example 10: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
ConsumerIterator<String, String> iter = stream.iterator();
while (iter.hasNext()) {
MessageAndMetadata<String, String> msg = iter.next();
Envelope message = new Envelope(msg.key(), msg.message(), msg.topic());
task.process(message, producer);
}
}
Example 11: Message
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public Message(MessageAndMetadata<byte[], byte[]> message) {
this.topic = message.topic();
this.key = message.key() != null ? new String(message.key(), Charset.forName("utf-8")) : null;
this.message = new String(message.message(), Charset.forName("utf-8"));
this.partition = message.partition();
this.offset = message.offset();
}
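This constructor is the only example here that guards against a null key. Kafka messages produced without a key return null from MessageAndMetadata.key(), so the unconditional new String(messageAndMetadata.key()) calls in several examples above would throw a NullPointerException on unkeyed topics. A null-safe sketch of the same decoding (StandardCharsets.UTF_8 also avoids the repeated charset lookup):

String key = messageAndMetadata.key() != null
        ? new String(messageAndMetadata.key(), StandardCharsets.UTF_8)
        : null;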
Example 12: process
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public Status process() throws EventDeliveryException {
byte[] kafkaMessage;
byte[] kafkaKey;
Event event;
Map<String, String> headers;
long batchStartTime = System.currentTimeMillis();
long batchEndTime = System.currentTimeMillis() + timeUpperLimit;
try {
boolean iterStatus = false;
long startTime = System.nanoTime();
while (eventList.size() < batchUpperLimit &&
System.currentTimeMillis() < batchEndTime) {
iterStatus = hasNext();
if (iterStatus) {
// get next message
MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
kafkaMessage = messageAndMetadata.message();
kafkaKey = messageAndMetadata.key();
// Add headers to event (topic, timestamp, and key)
headers = new HashMap<String, String>();
headers.put(KafkaSourceConstants.TIMESTAMP,
String.valueOf(System.currentTimeMillis()));
headers.put(KafkaSourceConstants.TOPIC, topic);
if (kafkaKey != null) {
headers.put(KafkaSourceConstants.KEY, new String(kafkaKey));
}
if (log.isDebugEnabled()) {
log.debug("Message: {}", new String(kafkaMessage));
}
event = EventBuilder.withBody(kafkaMessage, headers);
eventList.add(event);
}
if (log.isDebugEnabled()) {
log.debug("Waited: {} ", System.currentTimeMillis() - batchStartTime);
log.debug("Event #: {}", eventList.size());
}
}
long endTime = System.nanoTime();
counter.addToKafkaEventGetTimer((endTime-startTime)/(1000*1000));
counter.addToEventReceivedCount(Long.valueOf(eventList.size()));
// If we have events, send events to channel
// clear the event list
// and commit if Kafka doesn't auto-commit
if (eventList.size() > 0) {
getChannelProcessor().processEventBatch(eventList);
counter.addToEventAcceptedCount(eventList.size());
if (log.isDebugEnabled()) {
log.debug("Wrote {} events to channel", eventList.size());
}
// clear after logging so the logged count reflects the batch size
eventList.clear();
if (!kafkaAutoCommitEnabled) {
// commit the read transactions to Kafka to avoid duplicates
long commitStartTime = System.nanoTime();
consumer.commitOffsets();
long commitEndTime = System.nanoTime();
counter.addToKafkaCommitTimer((commitEndTime-commitStartTime)/(1000*1000));
}
}
if (!iterStatus) {
if (log.isDebugEnabled()) {
counter.incrementKafkaEmptyCount();
log.debug("Returning with backoff. No more data to read");
}
return Status.BACKOFF;
}
return Status.READY;
} catch (Exception e) {
log.error("KafkaSource EXCEPTION, {}", e);
return Status.BACKOFF;
}
}
Example 13: process
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public Status process() throws EventDeliveryException {
  long batchStartTime = System.currentTimeMillis();
  long batchEndTime = System.currentTimeMillis() + this.timeUpperLimit;
  try {
    boolean iterStatus = false;
    long startTime = System.nanoTime();
    while (this.eventList.size() < this.batchUpperLimit && System.currentTimeMillis() < batchEndTime) {
      iterStatus = this.hasNext();
      if (iterStatus) {
        // get next message
        MessageAndMetadata<byte[], byte[]> messageAndMetadata = this.it.next();
        byte[] kafkaMessage = messageAndMetadata.message();
        byte[] kafkaKey = messageAndMetadata.key();
        // Add headers to event (topic, timestamp, and key)
        Map<String, String> headers = new HashMap<String, String>();
        headers.put("timestamp", String.valueOf(System.currentTimeMillis()));
        headers.put("topic", this.topic);
        if (kafkaKey != null) {
          headers.put("key", new String(kafkaKey));
        }
        if (log.isDebugEnabled()) {
          log.debug("Message: {}", new String(kafkaMessage));
        }
        Event event = EventBuilder.withBody(kafkaMessage, headers);
        this.eventList.add(event);
      }
      if (log.isDebugEnabled()) {
        log.debug("Waited: {} ", System.currentTimeMillis() - batchStartTime);
        log.debug("Event #: {}", this.eventList.size());
      }
    }
    long endTime = System.nanoTime();
    this.counter.addToKafkaEventGetTimer((endTime - startTime) / 1000000L);
    this.counter.addToEventReceivedCount(this.eventList.size());
    if (this.eventList.size() > 0) {
      this.getChannelProcessor().processEventBatch(this.eventList);
      this.counter.addToEventAcceptedCount(this.eventList.size());
      if (log.isDebugEnabled()) {
        log.debug("Wrote {} events to channel", this.eventList.size());
      }
      this.eventList.clear();
      if (!this.kafkaAutoCommitEnabled) {
        // commit the read transactions to Kafka to avoid duplicates
        long commitStartTime = System.nanoTime();
        this.consumer.commitOffsets();
        long commitEndTime = System.nanoTime();
        this.counter.addToKafkaCommitTimer((commitEndTime - commitStartTime) / 1000000L);
      }
    }
    if (!iterStatus) {
      if (log.isDebugEnabled()) {
        this.counter.incrementKafkaEmptyCount();
        log.debug("Returning with backoff. No more data to read");
      }
      // Unlike Example 12, this variant stays READY instead of backing off.
      return Status.READY;
    }
    return Status.READY;
  } catch (Exception e) {
    log.error("KafkaSource EXCEPTION", e);
    return Status.BACKOFF;
  }
}
Example 14: call
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Override
public Tuple2<byte[], byte[]> call(MessageAndMetadata<byte[], byte[]> v1) throws Exception {
return new Tuple2<>(v1.key(), v1.message());
}
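A function with this shape is what the messageHandler parameter of KafkaUtils.createDirectStream expects in the spark-streaming-kafka 0.8 integration. As a hedged sketch of the wiring, where jssc, kafkaParams, and fromOffsets are assumed to exist (a JavaStreamingContext, a Map<String, String>, and a Map<TopicAndPartition, Long> respectively):

import kafka.serializer.DefaultDecoder;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;

@SuppressWarnings("unchecked")
JavaInputDStream<Tuple2<byte[], byte[]>> stream = KafkaUtils.createDirectStream(
        jssc,                        // JavaStreamingContext (assumed)
        byte[].class, byte[].class,
        DefaultDecoder.class, DefaultDecoder.class,
        (Class<Tuple2<byte[], byte[]>>) (Class<?>) Tuple2.class,
        kafkaParams,                 // Map<String, String> (assumed)
        fromOffsets,                 // Map<TopicAndPartition, Long> (assumed)
        v1 -> new Tuple2<>(v1.key(), v1.message())); // same body as the call method above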
Example 15: nextMetaMessage
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public IngestionMetaMessage nextMetaMessage() {
MessageAndMetadata<Long, String> meta = it.next();
return new IngestionMetaMessage(meta.key(), meta.message(), meta.topic(), meta.partition(), meta.offset());
}