This article collects typical usage examples of the Java class kafka.message.ByteBufferMessageSet. If you have been wondering what ByteBufferMessageSet is for, how to use it, or where to find real-world examples, the curated class code examples below may help.
The ByteBufferMessageSet class belongs to the kafka.message package. Fifteen code examples of the class follow, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
Example 1: sizeInBytes
import kafka.message.ByteBufferMessageSet; // import the required package/class
public int sizeInBytes() {
return 2 + /* versionId */
4 + /* correlationId */
shortStringLength(clientId) + /* client id */
2 + /* requiredAcks */
4 + /* ackTimeoutMs */
4 + /* number of topics */
Utils.foldLeft(dataGroupedByTopic, 0,
new Function3<Integer, String, Map<TopicAndPartition, ByteBufferMessageSet>, Integer>() {
@Override
public Integer apply(Integer folded, String topic, Map<TopicAndPartition, ByteBufferMessageSet> currTopic) {
return folded + shortStringLength(topic)
+ 4 /* the number of partitions */
+ Utils.foldLeft(currTopic, 0, new Function3<Integer, TopicAndPartition, ByteBufferMessageSet, Integer>() {
@Override
public Integer apply(Integer arg1, TopicAndPartition arg2, ByteBufferMessageSet arg3) {
return arg1 +
4 + /* partition id */
4 + /* byte-length of serialized messages */
arg3.sizeInBytes();
}
});
}
});
}
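The sum above mirrors, field by field, the wire layout produced by writeTo (Example 11). As a worked example, and assuming shortStringLength(s) returns 2 (the length prefix) plus the UTF-8 byte length of s, an empty request serializes to an 18-byte header:
int headerSize = 2   // versionId
               + 4   // correlationId
               + 2   // shortStringLength("") for an empty clientId
               + 2   // requiredAcks
               + 4   // ackTimeoutMs
               + 4;  // topic count (no topics, so the fold contributes nothing)
// headerSize == 18, which sizeInBytes() would return for an empty request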
Example 2: handleError
import kafka.message.ByteBufferMessageSet; // import the required package/class
@Override
public void handleError(final Throwable e, RequestChannel requestChannel, Request request) {
if (((ProducerRequest) request.requestObj).requiredAcks == 0) {
requestChannel.closeConnection(request.processor, request);
} else {
Map<TopicAndPartition, ProducerResponseStatus> producerResponseStatus = Utils.map(data, new Function2<TopicAndPartition, ByteBufferMessageSet, Tuple2<TopicAndPartition, ProducerResponseStatus>>() {
@Override
public Tuple2<TopicAndPartition, ProducerResponseStatus> apply(TopicAndPartition arg1, ByteBufferMessageSet arg2) {
return Tuple2.make(arg1, new ProducerResponseStatus(ErrorMapping.codeFor(e.getClass()), -1L));
}
});
ProducerResponse errorResponse = new ProducerResponse(correlationId, producerResponseStatus);
requestChannel.sendResponse(new Response(request, new BoundedByteBufferSend(errorResponse)));
}
}
Example 3: simpleRequest
import kafka.message.ByteBufferMessageSet; // import the required package/class
@Test
public void simpleRequest() throws IOException {
Socket socket = connect();
int correlationId = -1;
String clientId = SyncProducerConfigs.DefaultClientId;
int ackTimeoutMs = SyncProducerConfigs.DefaultAckTimeoutMs;
short ack = SyncProducerConfigs.DefaultRequiredAcks;
ProducerRequest emptyRequest =
new ProducerRequest(correlationId, clientId, ack, ackTimeoutMs, Maps.<TopicAndPartition, ByteBufferMessageSet>newHashMap());
ByteBuffer byteBuffer = ByteBuffer.allocate(emptyRequest.sizeInBytes());
emptyRequest.writeTo(byteBuffer);
byteBuffer.rewind();
byte[] serializedBytes = new byte[byteBuffer.remaining()];
byteBuffer.get(serializedBytes);
sendRequest(socket, (short) 0, serializedBytes);
processRequest(server.requestChannel);
assertArrayEquals(serializedBytes, receiveResponse(socket));
}
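The allocate/writeTo/rewind/get sequence in this test is the standard way to turn a request object into raw bytes. A minimal helper sketch that captures the pattern (serialize is a hypothetical name, not part of the library):
static byte[] serialize(ProducerRequest request) {
    ByteBuffer buffer = ByteBuffer.allocate(request.sizeInBytes());
    request.writeTo(buffer);   // fills the buffer, leaving position at the end
    buffer.rewind();           // reset position so the bytes can be read back
    byte[] bytes = new byte[buffer.remaining()];
    buffer.get(bytes);
    return bytes;
}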
Example 4: testTimeBasedFlush
import kafka.message.ByteBufferMessageSet; // import the required package/class
/**
* Test that flush is invoked by the background scheduler thread.
*/
@Test
public void testTimeBasedFlush() {
logManager.shutdown();
LogConfig config = logConfig.clone();
config.flushMs = 1000;
logManager = new LogManager(Lists.newArrayList(logDir), Maps.<String, LogConfig>newHashMap(),
config, cleanerConfig, 1000L, 10000L, 1000L, time.scheduler, time);
logManager.startup();
Log log = logManager.createLog(new TopicAndPartition(name, 0), config);
long lastFlush = log.lastFlushTime();
for (int i = 0; i < 200; ++i) {
ByteBufferMessageSet set = TestUtils.singleMessageSet("test".getBytes());
log.append(set);
}
time.sleep(logManager.InitialTaskDelayMs);
assertTrue("Time based flush should have been triggered triggered", lastFlush != log.lastFlushTime());
}
Example 5: getShutdownReceive
import kafka.message.ByteBufferMessageSet; // import the required package/class
public static ByteBuffer getShutdownReceive() {
ProducerRequest emptyProducerRequest = new ProducerRequest((short) 0, 0, "", (short) 0, 0,
Maps.<TopicAndPartition, ByteBufferMessageSet>newHashMap());
ByteBuffer byteBuffer = ByteBuffer.allocate(emptyProducerRequest.sizeInBytes() + 2);
byteBuffer.putShort(RequestKeys.ProduceKey);
emptyProducerRequest.writeTo(byteBuffer);
byteBuffer.rewind();
return byteBuffer;
}
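The extra 2 bytes in the allocation hold the request key written by putShort(RequestKeys.ProduceKey); the body is an otherwise empty ProducerRequest. A sketch of how a receiver might consume the buffer (the dispatch step is assumed, not shown in these examples):
ByteBuffer receive = getShutdownReceive();
short key = receive.getShort(); // RequestKeys.ProduceKey, used to pick a decoder
// the remaining bytes form an empty ProducerRequest and can be decoded
// with readFrom (Example 9)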
Example 6: enqueue
import kafka.message.ByteBufferMessageSet; // import the required package/class
/**
* Enqueue a message set for processing.
*/
public void enqueue(ByteBufferMessageSet messages) {
int size = messages.validBytes();
if (size > 0) {
long next = Utils.last(Lists.newArrayList(messages.shallowIterator())).nextOffset();
logger.trace("Updating fetch offset = {} to {}", fetchedOffset.get(), next);
Utils.put(chunkQueue, new FetchedDataChunk(messages, this, fetchedOffset.get()));
fetchedOffset.set(next);
logger.debug("updated fetch offset of ({}) to {}", this, next);
consumerTopicStats.getConsumerTopicStats(topic).byteRate.mark(size);
consumerTopicStats.getConsumerAllTopicStats().byteRate.mark(size);
} else if (messages.sizeInBytes() > 0) {
Utils.put(chunkQueue, new FetchedDataChunk(messages, this, fetchedOffset.get()));
}
}
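The two branches distinguish validBytes() (bytes belonging to complete messages) from sizeInBytes() (all bytes fetched): a fetch that returned only a truncated trailing message is still queued, but the fetch offset is not advanced. A minimal usage sketch (pti stands in for a fully wired PartitionTopicInfo, which is hypothetical here):
ByteBufferMessageSet fetched = TestUtils.singleMessageSet("payload".getBytes());
pti.enqueue(fetched); // validBytes() > 0: queues a chunk and advances fetchedOffset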
Example 7: processPartitionData
import kafka.message.ByteBufferMessageSet; // import the required package/class
@Override
public void processPartitionData(TopicAndPartition topicAndPartition, long fetchOffset, FetchResponsePartitionData partitionData) {
PartitionTopicInfo pti = partitionMap.get(topicAndPartition);
if (pti.getFetchOffset() != fetchOffset)
throw new KafkaException("Offset doesn't match for partition [%s,%d] pti offset: %d fetch offset: %d",
topicAndPartition.topic, topicAndPartition.partition, pti.getFetchOffset(), fetchOffset);
pti.enqueue((ByteBufferMessageSet) partitionData.messages);
}
Example 8: readFrom
import kafka.message.ByteBufferMessageSet; // import the required package/class
public static FetchResponsePartitionData readFrom(ByteBuffer buffer) {
short error = buffer.getShort();
long hw = buffer.getLong();
int messageSetSize = buffer.getInt();
ByteBuffer messageSetBuffer = buffer.slice();
messageSetBuffer.limit(messageSetSize);
buffer.position(buffer.position() + messageSetSize);
return new FetchResponsePartitionData(error, hw, new ByteBufferMessageSet(messageSetBuffer));
}
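The slice()/limit() idiom avoids copying the message bytes: the returned ByteBufferMessageSet shares storage with the original buffer. The same pattern, extracted into a generic helper sketch (readSizedSlice is a hypothetical name):
static ByteBuffer readSizedSlice(ByteBuffer buffer) {
    int size = buffer.getInt();                 // length prefix
    ByteBuffer slice = buffer.slice();          // zero-copy view from the current position
    slice.limit(size);                          // bound the view to the payload
    buffer.position(buffer.position() + size);  // skip past the payload in the parent
    return slice;
}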
Example 9: readFrom
import kafka.message.ByteBufferMessageSet; // import the required package/class
@Override
public RequestOrResponse readFrom(final ByteBuffer buffer) {
final short versionId = buffer.getShort();
final int correlationId = buffer.getInt();
final String clientId = readShortString(buffer);
final short requiredAcks = buffer.getShort();
final int ackTimeoutMs = buffer.getInt();
//build the topic structure
int topicCount = buffer.getInt();
final Map<TopicAndPartition, ByteBufferMessageSet> partitionData =
Utils.flatMaps(1, topicCount, new Function0<Map<TopicAndPartition, ByteBufferMessageSet>>() {
@Override
public Map<TopicAndPartition, ByteBufferMessageSet> apply() {
// process topic
final String topic = readShortString(buffer);
int partitionCount = buffer.getInt();
return Utils.map(1, partitionCount, new Function0<Tuple2<TopicAndPartition, ByteBufferMessageSet>>() {
@Override
public Tuple2<TopicAndPartition, ByteBufferMessageSet> apply() {
int partition = buffer.getInt();
int messageSetSize = buffer.getInt();
byte[] messageSetBuffer = new byte[messageSetSize];
buffer.get(messageSetBuffer, 0, messageSetSize);
return Tuple2.make(new TopicAndPartition(topic, partition),
new ByteBufferMessageSet(ByteBuffer.wrap(messageSetBuffer)));
}
});
}
});
return new ProducerRequest(versionId, correlationId, clientId, requiredAcks, ackTimeoutMs, partitionData);
}
Example 10: ProducerRequest
import kafka.message.ByteBufferMessageSet; // import the required package/class
public ProducerRequest(short versionId,
int correlationId,
String clientId,
short requiredAcks,
int ackTimeoutMs,
Map<TopicAndPartition, ByteBufferMessageSet> data) {
super(RequestKeys.ProduceKey, correlationId);
this.versionId = versionId;
this.clientId = clientId;
this.requiredAcks = requiredAcks;
this.ackTimeoutMs = ackTimeoutMs;
this.data = data;
dataGroupedByTopic = Utils.groupBy(data, new Function2<TopicAndPartition, ByteBufferMessageSet, String>() {
@Override
public String apply(TopicAndPartition topicAndPartition, ByteBufferMessageSet messageSet) {
return topicAndPartition.topic;
}
});
topicPartitionMessageSizeMap = Utils.map(data, new Function2<TopicAndPartition, ByteBufferMessageSet, Tuple2<TopicAndPartition, Integer>>() {
@Override
public Tuple2<TopicAndPartition, Integer> apply(TopicAndPartition arg1, ByteBufferMessageSet arg2) {
return Tuple2.make(arg1, arg2.sizeInBytes());
}
});
}
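The constructor eagerly builds two derived views of data: dataGroupedByTopic, which sizeInBytes() (Example 1) and writeTo() (Example 11) iterate over, and topicPartitionMessageSizeMap with the per-partition serialized sizes. A sketch of building a one-partition request (values are illustrative):
Map<TopicAndPartition, ByteBufferMessageSet> data = Maps.newHashMap();
data.put(new TopicAndPartition("test", 0), TestUtils.singleMessageSet("hello".getBytes()));
ProducerRequest request = new ProducerRequest((short) 0, 1, "clientId", (short) 1, 3000, data);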
Example 11: writeTo
import kafka.message.ByteBufferMessageSet; // import the required package/class
public void writeTo(final ByteBuffer buffer) {
buffer.putShort(versionId);
buffer.putInt(correlationId);
writeShortString(buffer, clientId);
buffer.putShort(requiredAcks);
buffer.putInt(ackTimeoutMs);
//save the topic structure
buffer.putInt(dataGroupedByTopic.size()); //the number of topics
Utils.foreach(dataGroupedByTopic, new Callable2<String, Map<TopicAndPartition,ByteBufferMessageSet>>() {
@Override
public void apply(String topic, Map<TopicAndPartition, ByteBufferMessageSet> topicAndPartitionData) {
writeShortString(buffer, topic); //write the topic
buffer.putInt(topicAndPartitionData.size()); //the number of partitions
Utils.foreach(topicAndPartitionData, new Callable2<TopicAndPartition, ByteBufferMessageSet>() {
@Override
public void apply(TopicAndPartition arg1, ByteBufferMessageSet partitionMessageData) {
int partition = arg1.partition;
ByteBuffer bytes = partitionMessageData.buffer;
buffer.putInt(partition);
buffer.putInt(bytes.limit());
buffer.put(bytes);
bytes.rewind();
}
});
}
});
}
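Read together with readFrom (Example 9), this method defines the request's wire format; note the final bytes.rewind(), which restores the source buffer so the message set can be written again. Reconstructed from the code above:
// versionId            : int16
// correlationId        : int32
// clientId             : short string (int16 length + UTF-8 bytes)
// requiredAcks         : int16
// ackTimeoutMs         : int32
// topicCount           : int32
// per topic:
//   topic              : short string
//   partitionCount     : int32
//   per partition:
//     partition        : int32
//     messageSetSize   : int32
//     messageSet       : messageSetSize raw bytes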
Example 12: testSocketSelectionKeyState
import kafka.message.ByteBufferMessageSet; // import the required package/class
@Test
public void testSocketSelectionKeyState() throws IOException {
Socket socket = connect();
int correlationId = -1;
String clientId = SyncProducerConfigs.DefaultClientId;
int ackTimeoutMs = SyncProducerConfigs.DefaultAckTimeoutMs;
short ack = 0;
ProducerRequest emptyRequest =
new ProducerRequest(correlationId, clientId, ack, ackTimeoutMs, Maps.<TopicAndPartition, ByteBufferMessageSet>newHashMap());
ByteBuffer byteBuffer = ByteBuffer.allocate(emptyRequest.sizeInBytes());
emptyRequest.writeTo(byteBuffer);
byteBuffer.rewind();
byte[] serializedBytes = new byte[byteBuffer.remaining()];
byteBuffer.get(serializedBytes);
sendRequest(socket, (short) 0, serializedBytes);
final Request request = server.requestChannel.receiveRequest();
// Since the response is not sent yet, the selection key should not be readable.
assertFalse((((SelectionKey) request.requestKey).interestOps() & SelectionKey.OP_READ) == SelectionKey.OP_READ);
server.requestChannel.sendResponse(new Response(0, request, null));
// After the response is sent to the client (which is async and may take a bit of time), the socket key should be available for reads.
assertTrue(
TestUtils.waitUntilTrue(new Function0<Boolean>() {
@Override
public Boolean apply() {
return (((SelectionKey) request.requestKey).interestOps() & SelectionKey.OP_READ) == SelectionKey.OP_READ;
}
}, 5000));
}
Example 13: testCleanupExpiredSegments
import kafka.message.ByteBufferMessageSet; // import the required package/class
/**
* Test time-based log cleanup. First append messages, then set the time into the future and run cleanup.
*/
@Test
public void testCleanupExpiredSegments() {
Log log = logManager.createLog(new TopicAndPartition(name, 0), logConfig);
long offset = 0L;
for (int i = 0; i < 200; ++i) {
ByteBufferMessageSet set = TestUtils.singleMessageSet("test".getBytes());
LogAppendInfo info = log.append(set);
offset = info.lastOffset;
}
assertTrue("There should be more than one segment now.", log.numberOfSegments() > 1);
Utils.foreach(log.logSegments(), new Callable1<LogSegment>() {
@Override
public void apply(LogSegment segment) { // '_' is not a legal parameter name on modern Java
segment.log.file.setLastModified(time.milliseconds());
}
});
time.sleep(maxLogAgeMs + 1);
assertEquals("Now there should only be only one segment in the index.", 1, log.numberOfSegments());
time.sleep(log.config.fileDeleteDelayMs + 1);
assertEquals("Files should have been deleted", log.numberOfSegments() * 2, log.dir.list().length);
assertEquals("Should get empty fetch off new log.", 0, log.read(offset + 1, 1024).sizeInBytes());
try {
log.read(0, 1024);
fail("Should get exception from fetching earlier.");
} catch (OffsetOutOfRangeException e) {
// "This is good."
}
// log should still be appendable
log.append(TestUtils.singleMessageSet("test".getBytes()));
}
Example 14: pushToStream
import kafka.message.ByteBufferMessageSet; // import the required package/class
public void pushToStream(String message) {
int streamNo = (int) this.nextStream.incrementAndGet() % this.queues.size();
AtomicLong offset = this.offsets.get(streamNo);
BlockingQueue<FetchedDataChunk> queue = this.queues.get(streamNo);
AtomicLong thisOffset = new AtomicLong(offset.incrementAndGet());
List<Message> seq = Lists.newArrayList();
seq.add(new Message(message.getBytes(Charsets.UTF_8)));
ByteBufferMessageSet messageSet = new ByteBufferMessageSet(NoCompressionCodec$.MODULE$, offset, JavaConversions.asScalaBuffer(seq));
FetchedDataChunk chunk = new FetchedDataChunk(messageSet,
new PartitionTopicInfo("topic", streamNo, queue, thisOffset, thisOffset, new AtomicInteger(1), "clientId"),
thisOffset.get());
queue.add(chunk);
}
Example 15: FetchedDataChunk
import kafka.message.ByteBufferMessageSet; // import the required package/class
public FetchedDataChunk(ByteBufferMessageSet messages, PartitionTopicInfo topicInfo, long fetchOffset) {
this.messages = messages;
this.topicInfo = topicInfo;
this.fetchOffset = fetchOffset;
}