This article collects typical usage examples of the Java class kafka.javaapi.message.ByteBufferMessageSet. If you are wondering what ByteBufferMessageSet is for or how to use it, the curated examples below should help.
The ByteBufferMessageSet class belongs to the kafka.javaapi.message package. 15 code examples are shown below, sorted by popularity by default.
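Before the fetch-based examples, here is a minimal, self-contained sketch of what a ByteBufferMessageSet is: an iterable collection of MessageAndOffset entries. It assumes the Kafka 0.8-era javaapi, where ByteBufferMessageSet exposes a constructor taking a List<Message> (compression defaults to none), so the set can be built in memory without a broker:

import java.nio.ByteBuffer;
import java.util.Arrays;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.Message;
import kafka.message.MessageAndOffset;

public class ByteBufferMessageSetDemo {
    public static void main(String[] args) throws Exception {
        // Build a message set in memory (no broker involved) and iterate it the
        // same way the fetch-based examples below iterate a FetchResponse's set.
        ByteBufferMessageSet messageSet = new ByteBufferMessageSet(
                Arrays.asList(new Message("hello".getBytes("UTF-8")),
                              new Message("world".getBytes("UTF-8"))));
        for (MessageAndOffset messageAndOffset : messageSet) {
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            System.out.println("Offset: " + messageAndOffset.offset()
                    + ", Payload: " + new String(bytes, "UTF-8"));
        }
    }
}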
Example 1: main
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
public static void main(String[] args) throws Exception {
    final String topic = "test2";
    String clientId = "LowLevelConsumerClient1";
    SimpleConsumer simpleConsumer = new SimpleConsumer(
            "192.168.1.186", 9092, 6000000, 64 * 1000000, clientId);
    FetchRequest req = new FetchRequestBuilder().clientId(clientId)
            .addFetch(topic, 0, 0L, 1000000)
            .addFetch(topic, 1, 0L, 1000000)
            .addFetch(topic, 2, 0L, 1000000)
            .build();
    FetchResponse rep = simpleConsumer.fetch(req);
    ByteBufferMessageSet messageSet = rep.messageSet(topic, 0);
    for (MessageAndOffset messageAndOffset : messageSet) {
        ByteBuffer payload = messageAndOffset.message().payload();
        long offset = messageAndOffset.offset();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println("Offset : " + offset + ", Payload : " + new String(bytes, "UTF-8"));
    }
}
Example 2: main
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
public static void main(String[] args) throws Exception {
    final String topic = "topic1";
    String clientID = "DemoLowLevelConsumer1";
    SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
    FetchRequest req = new FetchRequestBuilder().clientId(clientID)
            .addFetch(topic, 0, 0L, 50)
            .addFetch(topic, 1, 0L, 5000)
            .addFetch(topic, 2, 0L, 1000000)
            .build();
    FetchResponse fetchResponse = simpleConsumer.fetch(req);
    // the javaapi FetchResponse already returns a ByteBufferMessageSet, so no cast is needed
    ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, 0);
    for (MessageAndOffset messageAndOffset : messageSet) {
        ByteBuffer payload = messageAndOffset.message().payload();
        long offset = messageAndOffset.offset();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
    }
}
Example 3: createFetchedMessages
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
/**
 * Creates an Iterator of FetchedMessage based on the given message set. The iterator also updates
 * the offset while iterating.
 */
private Iterator<FetchedMessage> createFetchedMessages(ByteBufferMessageSet messageSet, final AtomicLong offset) {
    final Iterator<MessageAndOffset> messages = messageSet.iterator();
    return new AbstractIterator<FetchedMessage>() {
        @Override
        protected FetchedMessage computeNext() {
            while (messages.hasNext()) {
                MessageAndOffset message = messages.next();
                long msgOffset = message.offset();
                if (msgOffset < offset.get()) {
                    LOG.trace("Received old offset {}, expecting {} on {}. Message ignored.",
                              msgOffset, offset.get(), topicPart);
                    continue;
                }
                fetchedMessage.setPayload(message.message().payload());
                fetchedMessage.setOffset(message.offset());
                fetchedMessage.setNextOffset(message.nextOffset());
                return fetchedMessage;
            }
            return endOfData();
        }
    };
}
Example 4: getMessageSetSince
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
private ByteBufferMessageSet getMessageSetSince(long offset, int timeoutInMs) {
    if (timeoutInMs < 0) {
        throw new IllegalArgumentException(String.format("Timeout must not be lower than 0, timeout is: %d", timeoutInMs));
    }
    FetchRequest request = new FetchRequestBuilder()
            .clientId(generateClientId())
            .addFetch(assignedTopicPartition.topic(), assignedTopicPartition.partition(), offset, consumerConfig.bufferSize())
            .maxWait(timeoutInMs)
            .minBytes(consumerConfig.bufferSize())
            .build();
    FetchResponse response = partitionConsumer.fetch(request);
    if (response.hasError()) {
        short errorCode = response.errorCode(assignedTopicPartition.topic(), assignedTopicPartition.partition());
        // @todo retry during broker failover
        throw new PartitionConsumerException(ErrorMapping.exceptionFor(errorCode));
    }
    return response.messageSet(assignedTopicPartition.topic(), assignedTopicPartition.partition());
}
Example 5: run
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
@Override
public void run()
{
    long offset = 0;
    while (isAlive) {
        // create a fetch request for topic "topic1", partition 1, the current offset, and a fetch size of 1MB
        FetchRequest fetchRequest = new FetchRequestBuilder().clientId("default_client").addFetch("topic1", 1, offset, 1000000).build();
        // FetchRequest fetchRequest = new FetchRequest("topic1", 0, offset, 1000000);
        // get the message set from the consumer and print the messages out
        ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
        Iterator<MessageAndOffset> itr = messages.iterator();
        while (itr.hasNext() && isAlive) {
            MessageAndOffset msg = itr.next();
            // advance the offset after consuming each message
            offset = msg.offset();
            logger.debug("consumed: {} offset: {}", byteBufferToString(msg.message().payload()), offset);
            receiveCount++;
        }
    }
}
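Example 5 calls a byteBufferToString helper that the snippet does not show. A minimal sketch, assuming the payload holds UTF-8 text (the method name and behavior are inferred, not taken from the original project):

// Hypothetical helper for Example 5: decode a ByteBuffer payload as UTF-8 text.
// Note that reading via get() advances the buffer's position.
private static String byteBufferToString(ByteBuffer buffer) {
    byte[] bytes = new byte[buffer.remaining()];
    buffer.get(bytes);
    return new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
}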
Example 6: fetch
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
/**
 * Returns a map of the consumed messages, fetched from the given offsets.
 * The key is the topic name; the value is the list of messages returned for that topic.
 *
 * @param topics The topic names
 * @param partitions The partition to read for each topic
 * @param offsets The starting offset for each fetch
 * @return A map from topic name to the messages fetched for that topic
 * @throws Exception
 */
static Map<String, List<String>> fetch(SimpleConsumer simpleConsumer, String[] topics, int[] partitions, long[] offsets) throws Exception {
    FetchRequest fetchRequest = getFetchRequest(simpleConsumer, topics, partitions, offsets);
    FetchResponse fetchResponse = simpleConsumer.fetch(fetchRequest);
    Map<String, List<String>> retMap = new HashMap<String, List<String>>();
    for (int i = 0; i < topics.length; i++) {
        String topic = topics[i];
        List<String> list = new ArrayList<String>();
        retMap.put(topic, list);
        ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitions[i]);
        for (MessageAndOffset messageAndOffset : messageSet) {
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            String msg = new String(bytes, "UTF-8");
            list.add(msg);
        }
    }
    return retMap;
}
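The getFetchRequest helper used above is not part of the snippet. A plausible sketch, assuming it simply folds every (topic, partition, offset) triple into one request; the per-partition fetch size of 100000 bytes is an assumption:

// Hypothetical helper for Example 6: build a single FetchRequest covering all
// (topic, partition, offset) triples. The fetch size per partition is assumed.
static FetchRequest getFetchRequest(SimpleConsumer simpleConsumer, String[] topics, int[] partitions, long[] offsets) {
    FetchRequestBuilder builder = new FetchRequestBuilder().clientId(simpleConsumer.clientId());
    for (int i = 0; i < topics.length; i++) {
        builder.addFetch(topics[i], partitions[i], offsets[i], 100000);
    }
    return builder.build();
}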
Example 7: mockSimpleConsumerForRead
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
private void mockSimpleConsumerForRead(SimpleConsumer mockConsumer,
        String topic, int partition, long readOffset, long readSizeInBytes) {
    List<MessageAndOffset> list = new ArrayList<MessageAndOffset>();
    for (int i = 0; i < readSizeInBytes / eachEventInBytes; i++) {
        JetstreamEvent event = new JetstreamEvent();
        byte[] key = serializer.encodeMessage(event);
        byte[] payload = serializer.encodeMessage(event);
        Message msg = mock(Message.class);
        when(msg.key()).thenReturn(ByteBuffer.wrap(key));
        when(msg.payload()).thenReturn(ByteBuffer.wrap(payload));
        MessageAndOffset msgOffset = new MessageAndOffset(msg, readOffset + i);
        list.add(msgOffset);
    }
    ByteBufferMessageSet messageSet = mock(ByteBufferMessageSet.class);
    when(messageSet.iterator()).thenReturn(list.iterator());
    FetchResponse fetchResponse = mock(FetchResponse.class);
    when(fetchResponse.hasError()).thenReturn(false);
    when(fetchResponse.messageSet(topic, partition)).thenReturn(messageSet);
    when(mockConsumer.fetch(argThat(new IsFetchRequest()))).thenReturn(fetchResponse);
}
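The IsFetchRequest matcher referenced above is not shown. With the Mockito 1.x API that this code appears to use, a minimal sketch could be:

// Hypothetical matcher for Example 7: accept any FetchRequest argument.
private static class IsFetchRequest extends ArgumentMatcher<FetchRequest> {
    @Override
    public boolean matches(Object argument) {
        return argument instanceof FetchRequest;
    }
}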
Example 8: getIteratorFromFetchResponse
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
private Iterator<KafkaConsumerRecord> getIteratorFromFetchResponse(FetchResponse fetchResponse, KafkaPartition partition) {
    try {
        ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
        return Iterators.transform(messageBuffer.iterator(),
                new Function<kafka.message.MessageAndOffset, KafkaConsumerRecord>() {
                    @Override
                    public KafkaConsumerRecord apply(kafka.message.MessageAndOffset input) {
                        return new Kafka08ConsumerRecord(input);
                    }
                });
    } catch (Exception e) {
        log.warn(String.format("Failed to retrieve next message buffer for partition %s: %s. "
                + "The remainder of this partition will be skipped.", partition, e));
        return null;
    }
}
Example 9: reEmitPartitionBatch
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
/**
 * Re-emits the batch described by the provided metadata.
 *
 * @param attempt
 * @param collector
 * @param partition
 * @param meta
 */
private void reEmitPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map meta) {
    LOG.info("re-emitting batch, attempt " + attempt);
    String instanceId = (String) meta.get("instanceId");
    if (!_config.forceFromStart || instanceId.equals(_topologyInstanceId)) {
        SimpleConsumer consumer = _connections.register(partition);
        long offset = (Long) meta.get("offset");
        long nextOffset = (Long) meta.get("nextOffset");
        ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
        for (MessageAndOffset msg : msgs) {
            if (offset == nextOffset) {
                break;
            }
            if (offset > nextOffset) {
                throw new RuntimeException("Error when re-emitting batch: overshot the end offset");
            }
            emit(collector, msg.message());
            offset = msg.nextOffset();
        }
    }
}
Example 10: fill
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
private void fill() {
    long start = System.nanoTime();
    ByteBufferMessageSet msgs = KafkaUtils.fetchMessages(_spoutConfig, _consumer, _partition, _emittedToOffset);
    long end = System.nanoTime();
    long millis = (end - start) / 1000000;
    _fetchAPILatencyMax.update(millis);
    _fetchAPILatencyMean.update(millis);
    _fetchAPICallCount.incr();
    int numMessages = countMessages(msgs);
    _fetchAPIMessageCount.incrBy(numMessages);
    if (numMessages > 0) {
        LOG.info("Fetched " + numMessages + " messages from: " + _partition);
    }
    for (MessageAndOffset msg : msgs) {
        _pending.add(_emittedToOffset);
        _waitingToEmit.add(new MessageAndRealOffset(msg.message(), _emittedToOffset));
        _emittedToOffset = msg.nextOffset();
    }
    if (numMessages > 0) {
        LOG.info("Added " + numMessages + " messages from: " + _partition + " to internal buffers");
    }
}
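The countMessages helper is not included in the snippet; since ByteBufferMessageSet only exposes iteration, a straightforward sketch would simply count while iterating:

// Hypothetical helper for Example 10: count the messages in a set by iterating it.
private int countMessages(ByteBufferMessageSet messageSet) {
    int counter = 0;
    for (MessageAndOffset messageAndOffset : messageSet) {
        counter++;
    }
    return counter;
}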
Example 11: verifyMessage
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
private boolean verifyMessage(String key, String message) {
    long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), lastMessageOffset);
    MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
    Message kafkaMessage = messageAndOffset.message();
    ByteBuffer messageKeyBuffer = kafkaMessage.key();
    String keyString = null;
    String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
    if (messageKeyBuffer != null) {
        keyString = new String(Utils.toByteArray(messageKeyBuffer));
    }
    assertEquals(key, keyString);
    assertEquals(message, messageString);
    return true;
}
Example 12: createFetchedMessages
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
/**
 * Creates an Iterator of FetchedMessage based on the given message set. The iterator also updates
 * the offset while iterating.
 */
private Iterator<FetchedMessage> createFetchedMessages(ByteBufferMessageSet messageSet, final AtomicLong offset) {
    final Iterator<MessageAndOffset> messages = messageSet.iterator();
    return new AbstractIterator<FetchedMessage>() {
        @Override
        protected FetchedMessage computeNext() {
            while (messages.hasNext()) {
                MessageAndOffset message = messages.next();
                long msgOffset = message.offset();
                if (msgOffset < offset.get()) {
                    LOG.trace("Received old offset {}, expecting {} on {}. Message ignored.",
                              msgOffset, offset.get(), topicPart);
                    continue;
                }
                offset.set(message.nextOffset());
                fetchedMessage.setPayload(message.message().payload());
                fetchedMessage.setNextOffset(offset.get());
                return fetchedMessage;
            }
            return endOfData();
        }
    };
}
Example 13: buildOffsetFilteringIterable
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
private Iterable<MessageAndOffset> buildOffsetFilteringIterable(final ByteBufferMessageSet messageAndOffsets, final long startOffset, final long endOffset) {
    return Iterables.filter(messageAndOffsets, new Predicate<MessageAndOffset>() {
        @Override
        public boolean apply(@Nullable MessageAndOffset input) {
            // Filter out messages that are null or whose offset falls outside the half-open interval [startOffset, endOffset)
            if (input == null || input.offset() < startOffset || (endOffset <= input.offset() && endOffset != -1)) {
                return false;
            }
            // Check the message's checksum
            // TODO We might want to have better handling of this situation, maybe try to fetch the message again?
            if (!input.message().isValid()) {
                LOGGER.warn("Discarded message with invalid checksum in partition {} of topic {}", _partition, _topic);
                return false;
            }
            return true;
        }
    });
}
Example 14: getNext
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
public boolean getNext(KafkaETLKey key, BytesWritable value) throws IOException {
    if (!hasMore()) {
        return false;
    }
    boolean gotNext = get(key, value);
    if (_response != null) {
        while (!gotNext && _respIterator.hasNext()) {
            ByteBufferMessageSet msgSet = _respIterator.next();
            if (hasError(msgSet)) {
                return false;
            }
            _messageIt = msgSet.iterator();
            gotNext = get(key, value);
        }
    }
    return gotNext;
}
Example 15: hasError
import kafka.javaapi.message.ByteBufferMessageSet; // import the required package/class
/**
 * Called by the default implementation of {@link #map} to check the error code
 * and determine whether to continue.
 */
protected boolean hasError(ByteBufferMessageSet messages) throws IOException {
    int errorCode = messages.getErrorCode();
    if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
        /* The offset cannot exceed the maximum offset (guaranteed by the Kafka protocol),
           but the Kafka server may delete old log files from time to time. */
        System.err.println("WARNING: current offset=" + _offset + ". It is out of range.");
        if (_retry >= MAX_RETRY_TIME) {
            return true;
        }
        _retry++;
        // get the current offset range
        _offsetRange = getOffsetRange();
        _offset = _offsetRange[0];
        return false;
    } else if (errorCode == ErrorMapping.InvalidMessageCode()) {
        throw new IOException(_input + " current offset=" + _offset + " : invalid offset.");
    } else if (errorCode == ErrorMapping.WrongPartitionCode()) {
        throw new IOException(_input + " : wrong partition");
    } else if (errorCode != ErrorMapping.NoError()) {
        throw new IOException(_input + " current offset=" + _offset + " error:" + errorCode);
    } else {
        return false;
    }
}