This page collects typical usage examples of the Java class kafka.message.MessageAndOffset. If you are unsure what MessageAndOffset does or how to use it, the curated examples below should help.

The MessageAndOffset class belongs to the kafka.message package. Fifteen code examples are shown below, ordered by popularity by default; voting for the examples you find useful helps the site recommend better Java samples.
Example 1: main

import kafka.message.MessageAndOffset; // import the required package/class
public static void main(String[] args) throws Exception {
    final String topic = "test2";
    String clientId = "LowLevelConsumerClient1";
    SimpleConsumer simpleConsumer = new SimpleConsumer(
            "192.168.1.186", 9092, 6000000, 64 * 1000000, clientId);
    FetchRequest req = new FetchRequestBuilder().clientId(clientId)
            .addFetch(topic, 0, 0L, 1000000)
            .addFetch(topic, 1, 0L, 1000000)
            .addFetch(topic, 2, 0L, 1000000)
            .build();
    FetchResponse rep = simpleConsumer.fetch(req);
    // Note: although partitions 0-2 are all fetched, only partition 0
    // of the response is consumed here.
    ByteBufferMessageSet messageSet = rep.messageSet(topic, 0);
    for (MessageAndOffset messageAndOffset : messageSet) {
        ByteBuffer payload = messageAndOffset.message().payload();
        long offset = messageAndOffset.offset();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println("Offset : " + offset + ", Payload : " + new String(bytes, "UTF-8"));
    }
}
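The examples on this page skip fetch error handling for brevity. With the 0.8 SimpleConsumer API, a fetch can return normally yet still carry a per-partition error code (for example, when the requested offset has fallen off the log), so production code usually checks the response before iterating. A minimal sketch of that pattern, using the same kafka.javaapi classes as above; the helper name fetchOrFail and the 1000000-byte fetch size are illustrative, not part of the original example:

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.common.ErrorMapping;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;

// Hypothetical helper: fetch one partition and fail loudly on a per-partition error
// instead of silently iterating a failed message set.
static FetchResponse fetchOrFail(SimpleConsumer consumer, String clientId,
                                 String topic, int partition, long offset) {
    FetchRequest req = new FetchRequestBuilder()
            .clientId(clientId)
            .addFetch(topic, partition, offset, 1000000)
            .build();
    FetchResponse response = consumer.fetch(req);
    if (response.hasError()) {
        short code = response.errorCode(topic, partition);
        if (code == ErrorMapping.OffsetOutOfRangeCode()) {
            // The requested offset is outside the log; callers would typically
            // re-resolve the start offset (see the OffsetRequest sketch below) and retry.
            throw new RuntimeException("Offset " + offset + " out of range for "
                    + topic + "-" + partition);
        }
        throw new RuntimeException("Fetch failed for " + topic + "-" + partition
                + ", error code " + code);
    }
    return response;
}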
Example 2: main

import kafka.message.MessageAndOffset; // import the required package/class
public static void main(String[] args) throws Exception {
    final String topic = "topic1";
    String clientID = "DemoLowLevelConsumer1";
    SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
    FetchRequest req = new FetchRequestBuilder().clientId(clientID)
            .addFetch(topic, 0, 0L, 50)
            .addFetch(topic, 1, 0L, 5000)
            .addFetch(topic, 2, 0L, 1000000)
            .build();
    FetchResponse fetchResponse = simpleConsumer.fetch(req);
    // messageSet() already returns a ByteBufferMessageSet, so no cast is needed
    ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, 0);
    for (MessageAndOffset messageAndOffset : messageSet) {
        ByteBuffer payload = messageAndOffset.message().payload();
        long offset = messageAndOffset.offset();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
    }
}
Example 3: fetchLatestRecordPayloadBytes

import kafka.message.MessageAndOffset; // import the required package/class
private byte[] fetchLatestRecordPayloadBytes(SimpleConsumer kafkaConsumer) {
    FetchRequest fetchRequest = new FetchRequestBuilder().addFetch(destinationTopic, 0, 0, 1000000).build();
    FetchResponse response = kafkaConsumer.fetch(fetchRequest);
    Iterator<MessageAndOffset> messageSetItr = response.messageSet(destinationTopic, 0).iterator();
    // Fast forward to the message at the latest offset in the topic
    MessageAndOffset latestMessage = new MessageAndOffset(new Message(new byte[] {}), 0L);
    while (messageSetItr.hasNext()) {
        latestMessage = messageSetItr.next();
    }
    ByteBuffer payload = latestMessage.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    return bytes;
}
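Scanning the fetched buffer to find the newest message, as above, only works for small topics. The usual alternative is to ask the broker for the log-end offset directly with an OffsetRequest and fetch from there. A sketch of the standard helper from the Kafka 0.8 SimpleConsumer documentation (the method name getLastOffset is conventional, not from this page):

import java.util.HashMap;
import java.util.Map;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

// Returns the last offset before `whichTime` for one partition.
// Pass kafka.api.OffsetRequest.LatestTime() to get the log-end offset.
static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
                          long whichTime, String clientName) {
    TopicAndPartition tp = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
            new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(tp, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        throw new RuntimeException("OffsetRequest failed, error code: "
                + response.errorCode(topic, partition));
    }
    return response.offsets(topic, partition)[0];
}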
Example 4: createFetchedMessages

import kafka.message.MessageAndOffset; // import the required package/class
/**
 * Creates an Iterator of FetchedMessage based on the given message set. The iterator
 * also updates the offset while iterating.
 */
private Iterator<FetchedMessage> createFetchedMessages(ByteBufferMessageSet messageSet, final AtomicLong offset) {
    final Iterator<MessageAndOffset> messages = messageSet.iterator();
    return new AbstractIterator<FetchedMessage>() {
        @Override
        protected FetchedMessage computeNext() {
            while (messages.hasNext()) {
                MessageAndOffset message = messages.next();
                long msgOffset = message.offset();
                if (msgOffset < offset.get()) {
                    LOG.trace("Received old offset {}, expecting {} on {}. Message ignored.",
                              msgOffset, offset.get(), topicPart);
                    continue;
                }
                fetchedMessage.setPayload(message.message().payload());
                fetchedMessage.setOffset(message.offset());
                fetchedMessage.setNextOffset(message.nextOffset());
                return fetchedMessage;
            }
            return endOfData();
        }
    };
}
Example 5: run

import kafka.message.MessageAndOffset; // import the required package/class
@Override
public void run()
{
    long offset = 0;
    while (isAlive) {
        // create a fetch request for topic "topic1", partition 1, the current offset, and a fetch size of 1MB
        FetchRequest fetchRequest = new FetchRequestBuilder().clientId("default_client")
                .addFetch("topic1", 1, offset, 1000000).build();
        // get the message set from the consumer and print the messages out
        ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
        Iterator<MessageAndOffset> itr = messages.iterator();
        while (itr.hasNext() && isAlive) {
            MessageAndOffset msg = itr.next();
            logger.debug("consumed: {} offset: {}", byteBufferToString(msg.message().payload()), msg.offset());
            // advance past the consumed message so the next fetch does not
            // re-read it (using msg.offset() here would re-fetch the last message)
            offset = msg.nextOffset();
            receiveCount++;
        }
    }
}
Example 6: fetchNextMessageBuffer

import kafka.message.MessageAndOffset; // import the required package/class
@Override
protected Iterator<MessageAndOffset> fetchNextMessageBuffer(KafkaPartition partition, long nextOffset,
        long maxOffset) {
    if (nextOffset > maxOffset) {
        return null;
    }
    FetchRequest fetchRequest = createFetchRequest(partition, nextOffset);
    try {
        FetchResponse fetchResponse = getFetchResponseForFetchRequest(fetchRequest, partition);
        return getIteratorFromFetchResponse(fetchResponse, partition);
    } catch (Exception e) {
        LOG.warn(String.format(
                "Fetch message buffer for partition %s has failed: %s. Will refresh topic metadata and retry",
                partition, e));
        return refreshTopicMetadataAndRetryFetch(partition, fetchRequest);
    }
}
Example 7: decodeRecord

import kafka.message.MessageAndOffset; // import the required package/class
@Override
protected GenericRecord decodeRecord(MessageAndOffset messageAndOffset) throws SchemaNotFoundException, IOException {
    byte[] payload = getBytes(messageAndOffset.message().payload());
    if (payload[0] != KafkaAvroSchemaRegistry.MAGIC_BYTE) {
        throw new RuntimeException(String.format("Unknown magic byte for partition %s", this.getCurrentPartition()));
    }
    byte[] schemaIdByteArray = Arrays.copyOfRange(payload, 1, 1 + KafkaAvroSchemaRegistry.SCHEMA_ID_LENGTH_BYTE);
    String schemaId = Hex.encodeHexString(schemaIdByteArray);
    Schema schema = this.schemaRegistry.getSchemaById(schemaId);
    reader.get().setSchema(schema);
    Decoder binaryDecoder =
            DecoderFactory.get().binaryDecoder(payload, 1 + KafkaAvroSchemaRegistry.SCHEMA_ID_LENGTH_BYTE,
                    payload.length - 1 - KafkaAvroSchemaRegistry.SCHEMA_ID_LENGTH_BYTE, null);
    try {
        GenericRecord record = reader.get().read(null, binaryDecoder);
        record = AvroUtils.convertRecordSchema(record, this.schema.get());
        return record;
    } catch (IOException e) {
        LOG.error(String.format("Error during decoding record for partition %s: ", this.getCurrentPartition()), e);
        throw e;
    }
}
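For context, the payload this decoder expects is laid out as one magic byte, a fixed-length schema id, and the Avro-encoded record. A hedged sketch of the matching producer-side encoding, assuming a hex-string schema id and the standard Avro and commons-codec APIs (the helper encodeRecord is hypothetical; the constants mirror KafkaAvroSchemaRegistry from the example above):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Hex;

// Sketch: build a payload of [magic byte | schema id bytes | Avro binary record],
// mirroring what decodeRecord() above takes apart.
static byte[] encodeRecord(byte magicByte, String schemaIdHex, Schema schema, GenericRecord record)
        throws IOException, DecoderException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    out.write(magicByte);
    out.write(Hex.decodeHex(schemaIdHex.toCharArray())); // fixed-length schema id
    BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
    new GenericDatumWriter<GenericRecord>(schema).write(record, encoder);
    encoder.flush();
    return out.toByteArray();
}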
Example 8: fetch

import kafka.message.MessageAndOffset; // import the required package/class
/**
 * Returns the consumed messages as a map, fetching from the given offsets.
 * Each key is a topic name; each value is the list of messages returned for that topic.
 *
 * @param topics The topic names
 * @param partitions The partition id for each topic
 * @param offsets The starting offset for each fetch
 * @return a map from topic name to the list of fetched messages
 * @throws Exception
 */
static Map<String, List<String>> fetch(SimpleConsumer simpleConsumer, String[] topics, int[] partitions, long[] offsets) throws Exception {
    FetchRequest fetchRequest = getFetchRequest(simpleConsumer, topics, partitions, offsets);
    FetchResponse fetchResponse = simpleConsumer.fetch(fetchRequest);
    Map<String, List<String>> retMap = new HashMap<String, List<String>>();
    for (int i = 0; i < topics.length; i++) {
        String topic = topics[i];
        List<String> list = new ArrayList<String>();
        retMap.put(topic, list);
        ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitions[i]);
        for (MessageAndOffset messageAndOffset : messageSet) {
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            String msg = new String(bytes, "UTF-8");
            list.add(msg);
        }
    }
    return retMap;
}
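The getFetchRequest helper called above is not shown on this page. A minimal hypothetical reconstruction, consistent with its call site and built with the same FetchRequestBuilder used in the other examples (the client id and the 1000000-byte fetch size are assumptions):

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.consumer.SimpleConsumer;

// Hypothetical reconstruction: one fetch entry per (topic, partition, offset) triple.
static FetchRequest getFetchRequest(SimpleConsumer simpleConsumer, String[] topics,
                                    int[] partitions, long[] offsets) {
    FetchRequestBuilder builder = new FetchRequestBuilder().clientId("fetch-client"); // client id assumed
    for (int i = 0; i < topics.length; i++) {
        builder.addFetch(topics[i], partitions[i], offsets[i], 1000000); // fetch size assumed
    }
    return builder.build();
}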
Example 9: advanceNextPosition

import kafka.message.MessageAndOffset; // import the required package/class
@Override
public boolean advanceNextPosition()
{
    while (true) {
        if (cursorOffset >= split.getEnd()) {
            return endOfData(); // Split end is exclusive.
        }
        // Create a fetch request
        openFetchRequest();
        while (messageAndOffsetIterator.hasNext()) {
            MessageAndOffset currentMessageAndOffset = messageAndOffsetIterator.next();
            long messageOffset = currentMessageAndOffset.offset();
            if (messageOffset >= split.getEnd()) {
                return endOfData(); // Past our split end. Bail.
            }
            if (messageOffset >= cursorOffset) {
                return nextRow(currentMessageAndOffset);
            }
        }
        messageAndOffsetIterator = null;
    }
}
Example 10: mockSimpleConsumerForRead

import kafka.message.MessageAndOffset; // import the required package/class
private void mockSimpleConsumerForRead(SimpleConsumer mockConsumer,
        String topic, int partition, long readOffset, long readSizeInBytes) {
    List<MessageAndOffset> list = new ArrayList<MessageAndOffset>();
    for (int i = 0; i < readSizeInBytes / eachEventInBytes; i++) {
        JetstreamEvent event = new JetstreamEvent();
        byte[] key = serializer.encodeMessage(event);
        byte[] payload = serializer.encodeMessage(event);
        Message msg = mock(Message.class);
        when(msg.key()).thenReturn(ByteBuffer.wrap(key));
        when(msg.payload()).thenReturn(ByteBuffer.wrap(payload));
        MessageAndOffset msgOffset = new MessageAndOffset(msg, readOffset + i);
        list.add(msgOffset);
    }
    ByteBufferMessageSet messageSet = mock(ByteBufferMessageSet.class);
    when(messageSet.iterator()).thenReturn(list.iterator());
    FetchResponse fetchResponse = mock(FetchResponse.class);
    when(fetchResponse.hasError()).thenReturn(false);
    when(fetchResponse.messageSet(topic, partition)).thenReturn(messageSet);
    when(mockConsumer.fetch(argThat(new IsFetchRequest()))).thenReturn(fetchResponse);
}
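The IsFetchRequest matcher passed to argThat above is not shown on this page. Under Mockito 1.x, a minimal hypothetical version simply accepts any FetchRequest:

import kafka.api.FetchRequest;
import org.mockito.ArgumentMatcher;

// Hypothetical matcher: accepts any FetchRequest passed to the mocked fetch().
class IsFetchRequest extends ArgumentMatcher<FetchRequest> {
    @Override
    public boolean matches(Object argument) {
        return argument instanceof FetchRequest;
    }
}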
Example 11: readMessages

import kafka.message.MessageAndOffset; // import the required package/class
public List<byte[]> readMessages(String topic) {
    SimpleConsumer consumer = new SimpleConsumer("localhost", 6667, 100000, 64 * 1024, "consumer");
    FetchRequest req = new FetchRequestBuilder()
            .clientId("consumer")
            .addFetch(topic, 0, 0, 100000)
            .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    Iterator<MessageAndOffset> results = fetchResponse.messageSet(topic, 0).iterator();
    List<byte[]> messages = new ArrayList<>();
    while (results.hasNext()) {
        ByteBuffer payload = results.next().message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        messages.add(bytes);
    }
    consumer.close();
    return messages;
}
Example 12: getIteratorFromFetchResponse

import kafka.message.MessageAndOffset; // import the required package/class
private Iterator<KafkaConsumerRecord> getIteratorFromFetchResponse(FetchResponse fetchResponse, KafkaPartition partition) {
    try {
        ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
        return Iterators.transform(messageBuffer.iterator(),
                new Function<kafka.message.MessageAndOffset, KafkaConsumerRecord>() {
                    @Override
                    public KafkaConsumerRecord apply(kafka.message.MessageAndOffset input) {
                        return new Kafka08ConsumerRecord(input);
                    }
                });
    } catch (Exception e) {
        log.warn(String.format("Failed to retrieve next message buffer for partition %s: %s. "
                + "The remainder of this partition will be skipped.", partition, e));
        return null;
    }
}
Example 13: reEmitPartitionBatch

import kafka.message.MessageAndOffset; // import the required package/class
/**
 * Re-emit the batch described by the provided metadata.
 *
 * @param attempt the transaction attempt being replayed
 * @param collector the collector to emit messages to
 * @param partition the Kafka partition to re-read
 * @param meta metadata describing the batch (instanceId, offset, nextOffset)
 */
private void reEmitPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map meta) {
    LOG.info("re-emitting batch, attempt " + attempt);
    String instanceId = (String) meta.get("instanceId");
    if (!_config.forceFromStart || instanceId.equals(_topologyInstanceId)) {
        SimpleConsumer consumer = _connections.register(partition);
        long offset = (Long) meta.get("offset");
        long nextOffset = (Long) meta.get("nextOffset");
        ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
        for (MessageAndOffset msg : msgs) {
            if (offset == nextOffset) {
                break; // the whole batch has been replayed
            }
            if (offset > nextOffset) {
                throw new RuntimeException("Error when re-emitting batch: overshot the end offset");
            }
            emit(collector, msg.message());
            offset = msg.nextOffset();
        }
    }
}
Example 14: fill

import kafka.message.MessageAndOffset; // import the required package/class
private void fill() {
    long start = System.nanoTime();
    ByteBufferMessageSet msgs = KafkaUtils.fetchMessages(_spoutConfig, _consumer, _partition, _emittedToOffset);
    long end = System.nanoTime();
    long millis = (end - start) / 1000000;
    _fetchAPILatencyMax.update(millis);
    _fetchAPILatencyMean.update(millis);
    _fetchAPICallCount.incr();
    int numMessages = countMessages(msgs);
    _fetchAPIMessageCount.incrBy(numMessages);
    if (numMessages > 0) {
        LOG.info("Fetched " + numMessages + " messages from: " + _partition);
    }
    for (MessageAndOffset msg : msgs) {
        _pending.add(_emittedToOffset);
        _waitingToEmit.add(new MessageAndRealOffset(msg.message(), _emittedToOffset));
        _emittedToOffset = msg.nextOffset();
    }
    if (numMessages > 0) {
        LOG.info("Added " + numMessages + " messages from: " + _partition + " to internal buffers");
    }
}
Example 15: verifyMessage

import kafka.message.MessageAndOffset; // import the required package/class
private boolean verifyMessage(String key, String message) {
    long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), lastMessageOffset);
    MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
    Message kafkaMessage = messageAndOffset.message();
    ByteBuffer messageKeyBuffer = kafkaMessage.key();
    String keyString = null;
    String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
    if (messageKeyBuffer != null) {
        keyString = new String(Utils.toByteArray(messageKeyBuffer));
    }
    assertEquals(key, keyString);
    assertEquals(message, messageString);
    return true;
}