本文整理匯總了Java中kafka.api.FetchRequestBuilder類的典型用法代碼示例。如果您正苦於以下問題:Java FetchRequestBuilder類的具體用法?Java FetchRequestBuilder怎麽用?Java FetchRequestBuilder使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
FetchRequestBuilder類屬於kafka.api包,在下文中一共展示了FetchRequestBuilder類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: main
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Low-level consumer example: fetches from three partitions of a topic and
 * prints the offset and UTF-8 payload of every message in partition 0's set.
 *
 * @throws Exception on fetch or decoding failure
 */
public static void main(String[] args) throws Exception {
    final String topic = "test2";
    String clientId = "LowLevelConsumerClient1";
    // host, port, socket timeout (ms), receive buffer size (bytes), client id
    SimpleConsumer simpleConsumer = new SimpleConsumer(
            "192.168.1.186", 9092, 6000000, 64 * 1000000, clientId);
    try {
        FetchRequest req = new FetchRequestBuilder().clientId(clientId)
                .addFetch(topic, 0, 0L, 1000000)
                .addFetch(topic, 1, 0L, 1000000)
                .addFetch(topic, 2, 0L, 1000000)
                .build();
        FetchResponse rep = simpleConsumer.fetch(req);
        // Three partitions were requested but only partition 0 is consumed here.
        ByteBufferMessageSet messageSet = rep.messageSet(topic, 0);
        for (MessageAndOffset messageAndOffset : messageSet) {
            ByteBuffer payload = messageAndOffset.message().payload();
            long offset = messageAndOffset.offset();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            System.out.println("Offset : " + offset + ", Payload : " + new String(bytes, "UTF-8"));
        }
    } finally {
        // The original leaked the network connection; always close the consumer.
        simpleConsumer.close();
    }
}
示例2: main
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Low-level consumer demo: issues one fetch covering three partitions of
 * {@code topic1} and prints partition 0's messages as UTF-8 strings.
 *
 * @throws Exception on fetch or decoding failure
 */
public static void main(String[] args) throws Exception {
    final String topic = "topic1";
    String clientID = "DemoLowLevelConsumer1";
    // host, port, socket timeout (ms), receive buffer size (bytes), client id
    SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
    try {
        FetchRequest req = new FetchRequestBuilder().clientId(clientID)
                .addFetch(topic, 0, 0L, 50).addFetch(topic, 1, 0L, 5000).addFetch(topic, 2, 0L, 1000000).build();
        FetchResponse fetchResponse = simpleConsumer.fetch(req);
        // messageSet already returns ByteBufferMessageSet; the cast is kept harmless but is not required.
        ByteBufferMessageSet messageSet = (ByteBufferMessageSet) fetchResponse.messageSet(topic, 0);
        for (MessageAndOffset messageAndOffset : messageSet) {
            ByteBuffer payload = messageAndOffset.message().payload();
            long offset = messageAndOffset.offset();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
        }
    } finally {
        // The original leaked the network connection; always close the consumer.
        simpleConsumer.close();
    }
}
示例3: fetchLatestRecordPayloadBytes
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Reads {@code destinationTopic} partition 0 from offset 0 and returns the
 * payload bytes of the record at the highest offset in the fetched set.
 * Returns an empty array when the fetched message set is empty.
 */
private byte[] fetchLatestRecordPayloadBytes(SimpleConsumer kafkaConsumer) {
    FetchRequest request = new FetchRequestBuilder().addFetch(destinationTopic, 0, 0, 1000000).build();
    FetchResponse response = kafkaConsumer.fetch(request);
    // Start from a sentinel with an empty payload, then walk the whole set;
    // the final element is the record at the latest offset.
    MessageAndOffset newest = new MessageAndOffset(new Message(new byte[] { }), 0L);
    for (MessageAndOffset candidate : response.messageSet(destinationTopic, 0)) {
        newest = candidate;
    }
    ByteBuffer body = newest.message().payload();
    byte[] result = new byte[body.limit()];
    body.get(result);
    return result;
}
示例4: buildFetchRequest
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Builds a FetchRequest for the configured topic/partition starting at the
 * given offset.
 *
 * <p>Tuning knobs (all taken from instance fields):
 * <ul>
 *   <li>{@code maxWaitTime} — maximum milliseconds the broker blocks when
 *       insufficient data is available at request time.</li>
 *   <li>{@code minFetchSize} — minimum bytes that must be available before the
 *       broker responds; 0 means respond immediately (possibly empty), 1 means
 *       respond as soon as any partition has a byte or the timeout fires.
 *       Larger values combined with the timeout trade a little latency for
 *       bigger, more efficient chunks (e.g. maxWait 100 ms + minBytes 64k lets
 *       the broker wait up to 100 ms trying to accumulate 64k).</li>
 *   <li>{@code maxFetchSize} — cap on bytes returned for this partition,
 *       bounding the response size.</li>
 * </ul>
 *
 * @param offset offset to start fetching from
 * @return the assembled fetch request
 */
private FetchRequest buildFetchRequest(long offset) {
    LOG.info("Building fetch request with clientId {}, minBytes {}, maxWait {}, topic {}, partition {}, offset {}, " +
        "max fetch size {}.", clientName, minFetchSize, maxWaitTime, topic, partition, offset, maxFetchSize);
    FetchRequestBuilder requestBuilder = new FetchRequestBuilder();
    requestBuilder.clientId(clientName);
    requestBuilder.minBytes(minFetchSize);
    requestBuilder.maxWait(maxWaitTime);
    requestBuilder.addFetch(topic, partition, offset, maxFetchSize);
    return requestBuilder.build();
}
示例5: getMessageSetSince
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Fetches a message set from the assigned topic partition starting at
 * {@code offset}, waiting at most {@code timeoutInMs} for data.
 *
 * @param offset      offset to start fetching from
 * @param timeoutInMs maximum milliseconds the broker may block waiting for
 *                    data; must not be negative
 * @return the fetched message set
 * @throws IllegalArgumentException   if {@code timeoutInMs} is negative
 * @throws PartitionConsumerException if the fetch response carries an error
 *                                    code (wraps the broker-side exception)
 */
private ByteBufferMessageSet getMessageSetSince(long offset, int timeoutInMs) {
    if (timeoutInMs < 0) {
        // Message grammar fixed ("must not lower than" -> "must not be lower than").
        throw new IllegalArgumentException(String.format("Timeout must not be lower than 0, timeout is: %d", timeoutInMs));
    }
    FetchRequest request = new FetchRequestBuilder()
        .clientId(generateClientId())
        .addFetch(assignedTopicPartition.topic(), assignedTopicPartition.partition(), offset, consumerConfig.bufferSize())
        .maxWait(timeoutInMs)
        .minBytes(consumerConfig.bufferSize())
        .build();
    FetchResponse response = partitionConsumer.fetch(request);
    if (response.hasError()) {
        short errorCode = response.errorCode(assignedTopicPartition.topic(), assignedTopicPartition.partition());
        // @todo retry during broker failover
        throw new PartitionConsumerException(ErrorMapping.exceptionFor(errorCode));
    }
    return response.messageSet(assignedTopicPartition.topic(), assignedTopicPartition.partition());
}
示例6: run
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Poll loop: repeatedly fetches from topic1, partition 1 at the current
 * offset and logs each consumed message until {@code isAlive} goes false.
 */
@Override
public void run()
{
    long offset = 0;
    while (isAlive) {
        // create a fetch request for topic "topic1", partition 1, current offset, and fetch size of 1MB
        FetchRequest fetchRequest = new FetchRequestBuilder().clientId("default_client").addFetch("topic1", 1, offset, 1000000).build();
        // get the message set from the consumer and print them out
        ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
        Iterator<MessageAndOffset> itr = messages.iterator();
        while (itr.hasNext() && isAlive) {
            MessageAndOffset msg = itr.next();
            // Advance PAST the consumed message. The original used msg.offset(),
            // which re-fetches the last message on every subsequent request once
            // the partition is drained.
            offset = msg.nextOffset();
            logger.debug("consumed: {} offset: {}", byteBufferToString(msg.message().payload()).toString(), offset);
            receiveCount++;
        }
    }
}
示例7: openFetchRequest
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Lazily issues a fetch for the current cursor position and caches the
 * resulting message iterator. A no-op when an iterator is already open.
 * Throws PrestoException when the fetch response reports a broker error.
 */
private void openFetchRequest()
{
    if (messageAndOffsetIterator != null) {
        return; // an iterator is already open for this cursor position
    }
    log.debug("Fetching %d bytes from offset %d (%d - %d). %d messages read so far", KAFKA_READ_BUFFER_SIZE, cursorOffset, split.getStart(), split.getEnd(), totalMessages);
    FetchRequest fetchRequest = new FetchRequestBuilder()
            .clientId("presto-worker-" + Thread.currentThread().getName())
            .addFetch(split.getTopicName(), split.getPartitionId(), cursorOffset, KAFKA_READ_BUFFER_SIZE)
            .build();
    // TODO - this should look at the actual node this is running on and prefer
    // that copy if running locally. - look into NodeInfo
    SimpleConsumer leaderConsumer = consumerManager.getConsumer(split.getNodes().get(0));
    FetchResponse fetchResponse = leaderConsumer.fetch(fetchRequest);
    if (fetchResponse.hasError()) {
        short errorCode = fetchResponse.errorCode(split.getTopicName(), split.getPartitionId());
        log.warn("Fetch response has error: %d", errorCode);
        throw new PrestoException(KAFKA_SPLIT_ERROR, "could not fetch data from Kafka, error code is '" + errorCode + "'");
    }
    messageAndOffsetIterator = fetchResponse.messageSet(split.getTopicName(), split.getPartitionId()).iterator();
}
示例8: readMessages
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Reads all messages currently in partition 0 of the given topic on the local
 * broker (from offset 0) and returns their payloads.
 *
 * @param topic topic to read from
 * @return payload bytes of each message, in offset order
 */
public List<byte[]> readMessages(String topic) {
    // host, port, socket timeout (ms), receive buffer size (bytes), client id
    SimpleConsumer consumer = new SimpleConsumer("localhost", 6667, 100000, 64 * 1024, "consumer");
    try {
        FetchRequest req = new FetchRequestBuilder()
            .clientId("consumer")
            .addFetch(topic, 0, 0, 100000)
            .build();
        FetchResponse fetchResponse = consumer.fetch(req);
        Iterator<MessageAndOffset> results = fetchResponse.messageSet(topic, 0).iterator();
        List<byte[]> messages = new ArrayList<>();
        while (results.hasNext()) {
            ByteBuffer payload = results.next().message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            messages.add(bytes);
        }
        return messages;
    } finally {
        // The original leaked the connection when fetch() threw; close unconditionally.
        consumer.close();
    }
}
示例9: nextMessageSet
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Fetches the next message set for this partition starting at
 * {@code currentOffset} and resets the cached set and its iterator.
 *
 * @throws Exception when the fetch response reports a broker error
 */
void nextMessageSet() throws Exception {
    FetchRequest fetchRequest = new FetchRequestBuilder()
        .clientId(name)
        .addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize)
        .minBytes(1)
        .maxWait(1000)
        .build();
    FetchResponse fetchResponse = consumer.fetch(fetchRequest);
    if (fetchResponse.hasError()) {
        throw new Exception("TODO: handle the error, reset the consumer....");
    }
    currentMessageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
    currentMessageSetIterator = currentMessageSet.iterator();
}
示例10: receive
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Continuous receive loop: initializes the starting offset on first use, then
 * repeatedly fetches from the assigned partition and hands each response to
 * {@code dealFetchRecord}, which returns the next offset to fetch from.
 * Fetch errors are handled by {@code dealFetchError}; the loop exits when the
 * offset drops to -1 or below.
 */
public void receive() {
SimpleConsumer consumer = createConsumer();
kafka.api.FetchRequest fetchRequest = null;
FetchResponse fetchResponse = null;
int partition = part.partitionId();
int errorNum = 0;
if (offsetInit) {
offsetInit = false;
// Resolve the starting offset once, based on the configured offsetTime.
this.offset = getLastOffset(consumer, topic, partition, offsetTime, consumer.clientId());
}
while (offset > -1) {
if (consumer == null) {
// dealFetchError may have dropped the consumer; reconnect.
consumer = createConsumer();
}
// Build the fetch request: topic, partition, current offset, and the
// maximum amount of data to fetch per request (FETCH_SIZE).
fetchRequest = new FetchRequestBuilder().clientId(consumer.clientId()).addFetch(topic, partition, offset,
FETCH_SIZE).build();
// Send the request to Kafka and collect the response.
fetchResponse = consumer.fetch(fetchRequest);
// If the response reports an error, run the error handler (which may
// reconnect the consumer); the handler throws once errors repeat too
// many times in a row (NOTE(review): threshold lives in dealFetchError).
if (fetchResponse.hasError()) {
errorNum++;
short errorCode = fetchResponse.errorCode(topic, partition);
offset = dealFetchError(consumer, partition, offset, errorCode, errorNum);
continue;
}
errorNum = 0;
// Successful fetch: process the records and advance the offset.
offset = dealFetchRecord(fetchResponse, partition, offset);
}
}
示例11: main
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Demonstrates a single-topic fetch followed by a multi-topic fetch against
 * TOPIC2/TOPIC3 and prints the resulting message sets.
 *
 * @throws Exception on produce or fetch failure
 */
public static void main(String[] args) throws Exception {
    generateData();
    SimpleConsumer simpleConsumer = new SimpleConsumer(KafkaProperties.KAFKA_SERVER_URL,
        KafkaProperties.KAFKA_SERVER_PORT,
        KafkaProperties.CONNECTION_TIMEOUT,
        KafkaProperties.KAFKA_PRODUCER_BUFFER_SIZE,
        KafkaProperties.CLIENT_ID);
    try {
        System.out.println("Testing single fetch");
        FetchRequest req = new FetchRequestBuilder()
            .clientId(KafkaProperties.CLIENT_ID)
            .addFetch(KafkaProperties.TOPIC2, 0, 0L, 100)
            .build();
        FetchResponse fetchResponse = simpleConsumer.fetch(req);
        printMessages(fetchResponse.messageSet(KafkaProperties.TOPIC2, 0));
        System.out.println("Testing single multi-fetch");
        // topic -> partitions to print from the combined response
        Map<String, List<Integer>> topicMap = new HashMap<>();
        topicMap.put(KafkaProperties.TOPIC2, Collections.singletonList(0));
        topicMap.put(KafkaProperties.TOPIC3, Collections.singletonList(0));
        req = new FetchRequestBuilder()
            .clientId(KafkaProperties.CLIENT_ID)
            .addFetch(KafkaProperties.TOPIC2, 0, 0L, 100)
            .addFetch(KafkaProperties.TOPIC3, 0, 0L, 100)
            .build();
        fetchResponse = simpleConsumer.fetch(req);
        int fetchReq = 0;
        for (Map.Entry<String, List<Integer>> entry : topicMap.entrySet()) {
            String topic = entry.getKey();
            // NOTE(review): the list values are partition ids, despite the loop
            // variable being named "offset" in the original.
            for (Integer partition : entry.getValue()) {
                System.out.println("Response from fetch request no: " + ++fetchReq);
                printMessages(fetchResponse.messageSet(topic, partition));
            }
        }
    } finally {
        // The original leaked the network connection; always close the consumer.
        simpleConsumer.close();
    }
}
示例12: fetchMessages
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Fetches a message set for the given partition starting at {@code offset}.
 *
 * <p>Network failures are wrapped in {@link FailedFetchException}; an
 * offset-out-of-range error is surfaced as
 * {@link TopicOffsetOutOfRangeException} when the config allows falling back
 * to the start-offset time; all other broker errors also become
 * {@link FailedFetchException}.
 *
 * @param config    kafka spout configuration (topic, fetch sizes, client id)
 * @param consumer  connected consumer for the partition leader
 * @param partition partition to fetch from
 * @param offset    offset to start fetching from
 * @return the fetched message set
 */
public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer, Partition partition, long offset) throws TopicOffsetOutOfRangeException, RuntimeException {
    String topic = config.topic;
    int partitionId = partition.partition;
    FetchRequestBuilder builder = new FetchRequestBuilder();
    FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes).
        clientId(config.clientId).maxWait(config.fetchMaxWait).build();
    FetchResponse fetchResponse;
    try {
        fetchResponse = consumer.fetch(fetchRequest);
    } catch (Exception e) {
        // ConnectException and SocketTimeoutException are IOExceptions, so the
        // original's four-way check reduces to these two classes.
        if (e instanceof IOException || e instanceof UnresolvedAddressException) {
            LOG.warn("Network error when fetching messages:", e);
            throw new FailedFetchException(e);
        } else {
            throw new RuntimeException(e);
        }
    }
    if (fetchResponse.hasError()) {
        KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
        if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange) {
            String msg = "Got fetch request with offset out of range: [" + offset + "]";
            LOG.warn(msg);
            throw new TopicOffsetOutOfRangeException(msg);
        } else {
            String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
            LOG.error(message);
            throw new FailedFetchException(message);
        }
    }
    return fetchResponse.messageSet(topic, partitionId);
}
示例13: main
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Demonstrates a single-topic fetch followed by a multi-topic fetch against
 * TOPIC2/TOPIC3 and prints the resulting message sets.
 *
 * @throws Exception on produce or fetch failure
 */
public static void main(String[] args) throws Exception {
    generateData();
    SimpleConsumer simpleConsumer = new SimpleConsumer(KafkaProperties.KAFKA_SERVER_URL,
        KafkaProperties.KAFKA_SERVER_PORT,
        KafkaProperties.CONNECTION_TIMEOUT,
        KafkaProperties.KAFKA_PRODUCER_BUFFER_SIZE,
        KafkaProperties.CLIENT_ID);
    try {
        System.out.println("Testing single fetch");
        FetchRequest req = new FetchRequestBuilder()
            .clientId(KafkaProperties.CLIENT_ID)
            .addFetch(KafkaProperties.TOPIC2, 0, 0L, 100)
            .build();
        FetchResponse fetchResponse = simpleConsumer.fetch(req);
        printMessages(fetchResponse.messageSet(KafkaProperties.TOPIC2, 0));
        System.out.println("Testing single multi-fetch");
        // Diamond operator: the explicit type arguments were redundant.
        Map<String, List<Integer>> topicMap = new HashMap<>();
        topicMap.put(KafkaProperties.TOPIC2, Collections.singletonList(0));
        topicMap.put(KafkaProperties.TOPIC3, Collections.singletonList(0));
        req = new FetchRequestBuilder()
            .clientId(KafkaProperties.CLIENT_ID)
            .addFetch(KafkaProperties.TOPIC2, 0, 0L, 100)
            .addFetch(KafkaProperties.TOPIC3, 0, 0L, 100)
            .build();
        fetchResponse = simpleConsumer.fetch(req);
        int fetchReq = 0;
        for (Map.Entry<String, List<Integer>> entry : topicMap.entrySet()) {
            String topic = entry.getKey();
            // NOTE(review): the list values are partition ids, despite the loop
            // variable being named "offset" in the original.
            for (Integer partition : entry.getValue()) {
                System.out.println("Response from fetch request no: " + ++fetchReq);
                printMessages(fetchResponse.messageSet(topic, partition));
            }
        }
    } finally {
        // The original leaked the network connection; always close the consumer.
        simpleConsumer.close();
    }
}
示例14: fetchMessages
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
* Makes a call to kafka to fetch messages.
*/
/**
 * Issues a fetch to Kafka for this topic partition starting at the given
 * offset, waiting at most MAX_WAIT and reading at most FETCH_SIZE bytes.
 *
 * @param consumer consumer connected to the partition leader
 * @param offset   offset to start fetching from
 * @return the raw fetch response
 */
private FetchResponse fetchMessages(SimpleConsumer consumer, long offset) {
    FetchRequestBuilder requestBuilder = new FetchRequestBuilder();
    requestBuilder.clientId(consumer.clientId());
    requestBuilder.addFetch(topicPart.getTopic(), topicPart.getPartition(), offset, FETCH_SIZE);
    requestBuilder.maxWait(MAX_WAIT);
    return consumer.fetch(requestBuilder.build());
}
示例15: initializeLastProcessingOffset
import kafka.api.FetchRequestBuilder; //導入依賴的package包/類
/**
 * Reads the last message of every partition of the topic and caches its
 * (key, value) byte pair in {@code lastMsgs}, keyed by partition id.
 * Also records the partition count in {@code partitionNum}.
 *
 * @throws RuntimeException if topic metadata cannot be retrieved
 */
private void initializeLastProcessingOffset()
{
    // read last received kafka message
    TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());
    if (tm == null) {
        throw new RuntimeException("Failed to retrieve topic metadata");
    }
    partitionNum = tm.partitionsMetadata().size();
    lastMsgs = new HashMap<Integer, Pair<byte[],byte[]>>(partitionNum);
    for (PartitionMetadata pm : tm.partitionsMetadata()) {
        String leadBroker = pm.leader().host();
        int port = pm.leader().port();
        String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
        SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
        try {
            long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);
            // Fetch starting one before the latest offset to pick up the last message.
            // NOTE(review): for an empty partition readOffset is 0 and this requests
            // offset -1; the loop body then simply never runs.
            FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();
            FetchResponse fetchResponse = consumer.fetch(req);
            for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
                Message m = messageAndOffset.message();
                ByteBuffer payload = m.payload();
                ByteBuffer key = m.key();
                byte[] valueBytes = new byte[payload.limit()];
                payload.get(valueBytes);
                // Messages produced without a key have a null key buffer; the
                // original NPE'd here. Store a null key instead.
                byte[] keyBytes = null;
                if (key != null) {
                    keyBytes = new byte[key.limit()];
                    key.get(keyBytes);
                }
                lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
            }
        } finally {
            // The original leaked one consumer per partition; close each one.
            consumer.close();
        }
    }
}