This article collects typical usage examples of the Java class kafka.api.FetchRequest. If you are wondering what the Java FetchRequest class does, how to use FetchRequest, or what FetchRequest usage looks like in practice, the curated class code examples below may help.
The FetchRequest class belongs to the kafka.api package. A total of 15 code examples of the FetchRequest class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
Example 1: main
import kafka.api.FetchRequest; // import the required package/class
public static void main(String[] args) throws Exception {
    final String topic = "test2";
    String clientId = "LowLevelConsumerClient1";
    // host, port, socket timeout (ms), receive buffer size, client id
    SimpleConsumer simpleConsumer = new SimpleConsumer(
            "192.168.1.186", 9092, 6000000, 64 * 1000000, clientId);
    // fetch up to 1,000,000 bytes from offset 0 of partitions 0, 1 and 2
    FetchRequest req = new FetchRequestBuilder().clientId(clientId)
            .addFetch(topic, 0, 0L, 1000000)
            .addFetch(topic, 1, 0L, 1000000)
            .addFetch(topic, 2, 0L, 1000000)
            .build();
    FetchResponse rep = simpleConsumer.fetch(req);
    // read back only partition 0's message set
    ByteBufferMessageSet messageSet = rep.messageSet(topic, 0);
    for (MessageAndOffset messageAndOffset : messageSet) {
        ByteBuffer payload = messageAndOffset.message().payload();
        long offset = messageAndOffset.offset();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println("Offset : " + offset + ", Payload : " + new String(bytes, "UTF-8"));
    }
}
Example 2: main
import kafka.api.FetchRequest; // import the required package/class
public static void main(String[] args) throws Exception {
    final String topic = "topic1";
    String clientID = "DemoLowLevelConsumer1";
    SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
    // fetch three partitions of the topic, each with a different max fetch size
    FetchRequest req = new FetchRequestBuilder().clientId(clientID)
            .addFetch(topic, 0, 0L, 50)
            .addFetch(topic, 1, 0L, 5000)
            .addFetch(topic, 2, 0L, 1000000)
            .build();
    FetchResponse fetchResponse = simpleConsumer.fetch(req);
    ByteBufferMessageSet messageSet = (ByteBufferMessageSet) fetchResponse.messageSet(topic, 0);
    for (MessageAndOffset messageAndOffset : messageSet) {
        ByteBuffer payload = messageAndOffset.message().payload();
        long offset = messageAndOffset.offset();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
    }
}
Example 3: fetchLatestRecordPayloadBytes
import kafka.api.FetchRequest; // import the required package/class
private byte[] fetchLatestRecordPayloadBytes(SimpleConsumer kafkaConsumer) {
    FetchRequest fetchRequest = new FetchRequestBuilder().addFetch(destinationTopic, 0, 0, 1000000).build();
    FetchResponse response = kafkaConsumer.fetch(fetchRequest);
    Iterator<MessageAndOffset> messageSetItr = response.messageSet(destinationTopic, 0).iterator();
    // Fast forward to the message at the latest offset in the topic
    MessageAndOffset latestMessage = new MessageAndOffset(new Message(new byte[] { }), 0L);
    while (messageSetItr.hasNext()) {
        latestMessage = messageSetItr.next();
    }
    ByteBuffer payload = latestMessage.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    return bytes;
}
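Iterating from offset 0 just to reach the newest record, as above, reads the whole partition. If only the latest offset is needed, the same SimpleConsumer can ask the broker for it directly with an OffsetRequest. The following is a minimal sketch, assuming the Kafka 0.8 javaapi classes used elsewhere on this page; the helper name getLastOffset is our own.

import java.util.Collections;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;

// Hypothetical helper: returns the offset one past the newest record
// in a single topic partition.
private static long getLastOffset(SimpleConsumer consumer, String topic, int partition, String clientName) {
    TopicAndPartition tp = new TopicAndPartition(topic, partition);
    // LatestTime() requests the newest offset; EarliestTime() would return the oldest.
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            Collections.singletonMap(tp, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1)),
            kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    return response.offsets(topic, partition)[0];
}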
Example 4: buildFetchRequest
import kafka.api.FetchRequest; // import the required package/class
private FetchRequest buildFetchRequest(long offset) {
    //1. maxWaitTime is the maximum amount of time in milliseconds to block waiting if insufficient data is
    //   available at the time the request is issued.
    //2. minFetchSize is the minimum number of bytes of messages that must be available to give a response. If the
    //   client sets this to 0, the server will always respond immediately; however, if there is no new data since
    //   the last request, it will just get back empty message sets. If this is set to 1, the server will respond as
    //   soon as at least one partition has at least 1 byte of data or the specified timeout occurs. By setting
    //   higher values in combination with the timeout, the consumer can tune for throughput and trade a little
    //   additional latency for reading only large chunks of data (e.g. setting maxWaitTime to 100 ms and setting
    //   minFetchSize to 64k would allow the server to wait up to 100 ms to try to accumulate 64k of data before
    //   responding).
    //3. maxFetchSize is the maximum number of bytes to include in the message set for this partition.
    //   This helps bound the size of the response.
    LOG.info("Building fetch request with clientId {}, minBytes {}, maxWait {}, topic {}, partition {}, offset {}, " +
            "max fetch size {}.", clientName, minFetchSize, maxWaitTime, topic, partition, offset, maxFetchSize);
    return new FetchRequestBuilder()
            .clientId(clientName)
            .minBytes(minFetchSize)
            .maxWait(maxWaitTime)
            .addFetch(topic, partition, offset, maxFetchSize)
            .build();
}
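The throughput/latency trade-off described in the numbered comments is easiest to see with concrete values. A minimal sketch, reusing the 100 ms / 64k figures from the comment above; the method name, client id, and the 1 MB cap are illustrative, not from the original project:

// Let the broker wait up to 100 ms to accumulate at least 64 KB before
// responding, and cap this partition's slice of the response at 1 MB.
private FetchRequest buildThroughputTunedRequest(String topic, int partition, long offset) {
    return new FetchRequestBuilder()
            .clientId("tuning-demo")          // hypothetical client id
            .maxWait(100)                     // maxWaitTime: up to 100 ms
            .minBytes(64 * 1024)              // minFetchSize: at least 64 KB
            .addFetch(topic, partition, offset, 1024 * 1024) // maxFetchSize: 1 MB
            .build();
}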
Example 5: getMessageSetSince
import kafka.api.FetchRequest; // import the required package/class
private ByteBufferMessageSet getMessageSetSince(long offset, int timeoutInMs) {
    if (timeoutInMs < 0) {
        throw new IllegalArgumentException(String.format("Timeout must not be lower than 0, timeout is: %d", timeoutInMs));
    }
    FetchRequest request = new FetchRequestBuilder()
            .clientId(generateClientId())
            .addFetch(assignedTopicPartition.topic(), assignedTopicPartition.partition(), offset, consumerConfig.bufferSize())
            .maxWait(timeoutInMs)
            .minBytes(consumerConfig.bufferSize())
            .build();
    FetchResponse response = partitionConsumer.fetch(request);
    if (response.hasError()) {
        short errorCode = response.errorCode(assignedTopicPartition.topic(), assignedTopicPartition.partition());
        // @todo retry during broker failover
        throw new PartitionConsumerException(ErrorMapping.exceptionFor(errorCode));
    }
    return response.messageSet(assignedTopicPartition.topic(), assignedTopicPartition.partition());
}
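Until the @todo above is addressed, broker failover has to be handled by the caller. One possible shape for that retry, as a sketch only (getMessageSetSince and PartitionConsumerException come from the example; the attempt count and backoff are assumptions):

// Hypothetical caller: retry a failed fetch a few times with a fixed backoff,
// since a failover typically resolves once a new partition leader is elected.
private ByteBufferMessageSet fetchWithRetry(long offset, int timeoutInMs) throws InterruptedException {
    PartitionConsumerException lastError = null;
    for (int attempt = 0; attempt < 3; attempt++) {
        try {
            return getMessageSetSince(offset, timeoutInMs);
        } catch (PartitionConsumerException e) {
            lastError = e;
            Thread.sleep(1000L); // crude backoff; real code would also re-resolve the leader
        }
    }
    throw lastError;
}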
Example 6: run
import kafka.api.FetchRequest; // import the required package/class
@Override
public void run()
{
    long offset = 0;
    while (isAlive) {
        // create a fetch request for topic "topic1", partition 1, the current offset, and a fetch size of 1 MB
        FetchRequest fetchRequest = new FetchRequestBuilder().clientId("default_client").addFetch("topic1", 1, offset, 1000000).build();
        // FetchRequest fetchRequest = new FetchRequest("topic1", 0, offset, 1000000);
        // get the message set from the consumer and print the messages out
        ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
        Iterator<MessageAndOffset> itr = messages.iterator();
        while (itr.hasNext() && isAlive) {
            MessageAndOffset msg = itr.next();
            // advance past the consumed message, so the next fetch does not re-read it
            offset = msg.nextOffset();
            logger.debug("consumed: {} offset: {}", byteBufferToString(msg.message().payload()), msg.offset());
            receiveCount++;
        }
    }
}
Example 7: fetch
import kafka.api.FetchRequest; // import the required package/class
/**
 * Returns a map of the consumed messages, starting from the given offsets.
 * <li>Key is the topic name, value is the list of messages returned for that topic.
 *
 * @param topics The topic names
 * @param partitions The partition of each topic
 * @param offsets The starting offset for each fetch
 * @return map from topic name to the messages consumed from it
 * @throws Exception
 */
static Map<String, List<String>> fetch(SimpleConsumer simpleConsumer, String[] topics, int[] partitions, long[] offsets) throws Exception {
    FetchRequest fetchRequest = getFetchRequest(simpleConsumer, topics, partitions, offsets);
    FetchResponse fetchResponse = simpleConsumer.fetch(fetchRequest);
    Map<String, List<String>> retMap = new HashMap<String, List<String>>();
    for (int i = 0; i < topics.length; i++) {
        String topic = topics[i];
        List<String> list = new ArrayList<String>();
        retMap.put(topic, list);
        ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitions[i]);
        for (MessageAndOffset messageAndOffset : messageSet) {
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            String msg = new String(bytes, "UTF-8");
            list.add(msg);
        }
    }
    return retMap;
}
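The helper getFetchRequest is not shown in this excerpt. Judging from the call site, it presumably folds the parallel arrays into one multi-partition request; a hypothetical reconstruction (the client id and the 100000-byte fetch size are assumptions):

// Hypothetical reconstruction of the missing helper: one FetchRequest covering
// every (topic, partition, offset) triple from the parallel arrays.
static FetchRequest getFetchRequest(SimpleConsumer simpleConsumer, String[] topics, int[] partitions, long[] offsets) {
    FetchRequestBuilder builder = new FetchRequestBuilder().clientId("fetch-demo"); // assumed client id
    for (int i = 0; i < topics.length; i++) {
        builder.addFetch(topics[i], partitions[i], offsets[i], 100000); // assumed max fetch size
    }
    return builder.build();
}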
Example 8: openFetchRequest
import kafka.api.FetchRequest; // import the required package/class
private void openFetchRequest()
{
    if (messageAndOffsetIterator == null) {
        log.debug("Fetching %d bytes from offset %d (%d - %d). %d messages read so far", KAFKA_READ_BUFFER_SIZE, cursorOffset, split.getStart(), split.getEnd(), totalMessages);
        FetchRequest req = new FetchRequestBuilder()
                .clientId("presto-worker-" + Thread.currentThread().getName())
                .addFetch(split.getTopicName(), split.getPartitionId(), cursorOffset, KAFKA_READ_BUFFER_SIZE)
                .build();
        // TODO - this should look at the actual node this is running on and prefer
        // that copy if running locally. - look into NodeInfo
        SimpleConsumer consumer = consumerManager.getConsumer(split.getNodes().get(0));
        FetchResponse fetchResponse = consumer.fetch(req);
        if (fetchResponse.hasError()) {
            short errorCode = fetchResponse.errorCode(split.getTopicName(), split.getPartitionId());
            log.warn("Fetch response has error: %d", errorCode);
            throw new PrestoException(KAFKA_SPLIT_ERROR, "could not fetch data from Kafka, error code is '" + errorCode + "'");
        }
        messageAndOffsetIterator = fetchResponse.messageSet(split.getTopicName(), split.getPartitionId()).iterator();
    }
}
Example 9: readMessages
import kafka.api.FetchRequest; // import the required package/class
public List<byte[]> readMessages(String topic) {
    SimpleConsumer consumer = new SimpleConsumer("localhost", 6667, 100000, 64 * 1024, "consumer");
    FetchRequest req = new FetchRequestBuilder()
            .clientId("consumer")
            .addFetch(topic, 0, 0, 100000)
            .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    Iterator<MessageAndOffset> results = fetchResponse.messageSet(topic, 0).iterator();
    List<byte[]> messages = new ArrayList<>();
    while (results.hasNext()) {
        ByteBuffer payload = results.next().message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        messages.add(bytes);
    }
    consumer.close();
    return messages;
}
Example 10: fetchMore
import kafka.api.FetchRequest; // import the required package/class
private void fetchMore()
{
    FetchRequest fetchRequest = this.builder
            .clientId(split.getClientId())
            .addFetch(split.getTopicName(), split.getPartitionId(),
                    nextFetchOffset, DEFAULT_BUFFER_SIZE).build();
    response = consumer.fetch(fetchRequest);
    this.currentResponseIter = null;
    if (response != null)
    {
        List<MessageAndOffset> currentResponseList = new ArrayList<MessageAndOffset>();
        for (MessageAndOffset messageAndOffset : response.messageSet(
                split.getTopicName(), split.getPartitionId()))
        {
            currentResponseList.add(messageAndOffset);
        }
        this.currentResponseIter = currentResponseList.size() > 0 ? currentResponseList.iterator() : null;
    }
}
Example 11: continueItr
import kafka.api.FetchRequest; // import the required package/class
/**
 * THIS METHOD HAS SIDE EFFECTS - it will update {@code currentMessageItr} (if necessary) and then return true iff
 * the iterator still has elements to be read. If you call {@link scala.collection.Iterator#next()} when this method
 * returns false, you risk a {@link NullPointerException} OR a no-more-elements exception.
 *
 * @return true if you can call {@link scala.collection.Iterator#next()} on {@code currentMessageItr}.
 */
@VisibleForTesting
boolean continueItr() {
    final long remaining = end - currentOffset;
    if (!canCallNext() && remaining > 0) {
        final int theFetchSize = (fetchSize > remaining) ? (int) remaining : fetchSize;
        LOG.debug(String.format("%s fetching %d bytes starting at offset %d", split.toString(), theFetchSize,
                currentOffset));
        final FetchRequest request = new FetchRequest(split.getPartition().getTopic(), split.getPartition()
                .getPartId(), currentOffset, theFetchSize);
        final ByteBufferMessageSet msg = consumer.fetch(request);
        final int errorCode = msg.getErrorCode();
        if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
            return false;
        }
        if (errorCode != ErrorMapping.NoError()) {
            ErrorMapping.maybeThrowException(errorCode);
        } // --> else we try to grab the next iterator
        currentMessageItr = msg.iterator();
        currentOffset += msg.validBytes();
    }
    return canCallNext();
}
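Given that contract, the intended calling pattern is a guard-then-next loop. A sketch only; the process(...) callback is a placeholder, not part of the original class:

// Hypothetical caller honoring the contract above: next() is only
// called when continueItr() has just returned true.
while (continueItr()) {
    MessageAndOffset messageAndOffset = currentMessageItr.next();
    process(messageAndOffset.message()); // placeholder for real message handling
}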
Example 12: doWork
import kafka.api.FetchRequest; // import the required package/class
@Override
public void doWork() {
    Utils.inLock(partitionMapLock, new Function0<Object>() {
        @Override
        public Object apply() {
            if (partitionMap.isEmpty())
                Utils.await(partitionMapCond, 200L, TimeUnit.MILLISECONDS);
            Utils.foreach(partitionMap, new Callable2<TopicAndPartition, Long>() {
                @Override
                public void apply(TopicAndPartition topicAndPartition, Long offset) {
                    fetchRequestBuilder.addFetch(topicAndPartition.topic, topicAndPartition.partition,
                            offset, fetchSize);
                }
            });
            return null;
        }
    });
    FetchRequest fetchRequest = fetchRequestBuilder.build();
    if (!fetchRequest.requestInfo.isEmpty())
        processFetchRequest(fetchRequest);
}
Example 13: fetchMore
import kafka.api.FetchRequest; // import the required package/class
public boolean fetchMore() throws IOException {
    if (!hasMore())
        return false;
    // older (pre-0.8) low-level API: build a single request and issue it via multifetch
    FetchRequest fetchRequest =
            new FetchRequest(_request.getTopic(), _request.getPartition(), _offset, _bufferSize);
    List<FetchRequest> array = new ArrayList<FetchRequest>();
    array.add(fetchRequest);
    long tempTime = System.currentTimeMillis();
    _response = _consumer.multifetch(array);
    if (_response != null)
        _respIterator = _response.iterator();
    _requestTime += (System.currentTimeMillis() - tempTime);
    return true;
}
Example 14: emitPartitionBatchNew
import kafka.api.FetchRequest; // import the required package/class
public static BatchMeta emitPartitionBatchNew(KafkaConfig config, int partition, SimpleConsumer consumer, TransactionAttempt attempt, BatchOutputCollector collector, BatchMeta lastMeta) {
    long offset = 0;
    if (lastMeta != null) {
        offset = lastMeta.nextOffset;
    }
    ByteBufferMessageSet msgs;
    try {
        msgs = consumer.fetch(new FetchRequest(config.topic, partition % config.partitionsPerHost, offset, config.fetchSizeBytes));
    } catch (Exception e) {
        if (e instanceof ConnectException) {
            throw new FailedFetchException(e);
        } else {
            throw new RuntimeException(e);
        }
    }
    long endoffset = offset;
    for (MessageAndOffset msg : msgs) {
        emit(config, attempt, collector, msg.message());
        endoffset = msg.offset();
    }
    BatchMeta newMeta = new BatchMeta();
    newMeta.offset = offset;
    newMeta.nextOffset = endoffset;
    return newMeta;
}
Example 15: main
import kafka.api.FetchRequest; // import the required package/class
public static void main(String[] args) {
    Properties props = new Properties();
    // props.put("zk.connect","10.15.62.76:2181");
    // props.put("groupid","testgroup");
    // older (pre-0.8) SimpleConsumer: host, port, socket timeout (ms), buffer size
    SimpleConsumer consumer = new SimpleConsumer("10.15.62.70", 9092, 10000, 1024000);
    long offset = 0;
    int count = 0;
    // while(true){
    FetchRequest fetchRequest = new FetchRequest("topic1114", 3, offset, 10000000); // the last argument is the maximum number of bytes fetched per request
    ByteBufferMessageSet messages = consumer.fetch(fetchRequest);
    for (MessageAndOffset msg : messages) {
        count++;
        ByteBuffer buffer = msg.message().payload();
        byte[] bytes = new byte[buffer.remaining()];
        buffer.get(bytes);
        String str = new String(bytes);
        System.out.println(str);
        offset = msg.offset();
        System.out.println("offset: " + offset);
    }
    System.out.println("------------ count= " + count);
    // }
}