This article collects typical usage examples of the Java method kafka.javaapi.consumer.SimpleConsumer.fetchOffsets. If you are unsure what SimpleConsumer.fetchOffsets does, how to call it, or what it looks like in practice, the curated method examples below should help. You can also explore further usage examples of its enclosing class, kafka.javaapi.consumer.SimpleConsumer.
Four code examples of the SimpleConsumer.fetchOffsets method are shown below, sorted by popularity by default.
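Before the examples, here is a minimal, self-contained sketch of the call pattern they all share: open a SimpleConsumer against a broker, wrap the target topic/partition in an OffsetFetchRequest, call fetchOffsets, and read the committed offset out of the OffsetFetchResponse. It assumes the Kafka 0.8.x javaapi classes used throughout this article; the broker address, group, topic, and client id are placeholders rather than values taken from the examples.

import java.util.Collections;
import kafka.common.OffsetMetadataAndError;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetFetchRequest;
import kafka.javaapi.OffsetFetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class FetchOffsetsSketch {
    public static void main(String[] args) {
        // Placeholder broker address and identifiers.
        SimpleConsumer consumer = new SimpleConsumer("broker1", 9092, 100000, 64 * 1024, "offset-client");
        try {
            TopicAndPartition tp = new TopicAndPartition("my-topic", 0);
            // versionId 1 asks the broker for offsets stored in Kafka rather than ZooKeeper (see example 4).
            OffsetFetchRequest request = new OffsetFetchRequest(
                    "my-group", Collections.singletonList(tp), (short) 1, 0, "offset-client");
            OffsetFetchResponse response = consumer.fetchOffsets(request);
            OffsetMetadataAndError result = response.offsets().get(tp);
            // An offset of -1 with no error means this group has not committed anything yet (see example 4).
            System.out.println("Committed offset: " + result.offset() + ", metadata: " + result.metadata());
        } finally {
            consumer.close();
        }
    }
}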
Example 1: getOffsetOfTopicAndPartition
import kafka.javaapi.consumer.SimpleConsumer; // import the package/class the method depends on
/**
 * Fetches the current consumer's committed offset from the store that holds consumer offsets.
 *
 * @param consumer    the consumer
 * @param groupId     the consumer group id
 * @param clientName  the client name
 * @param topic       the topic name
 * @param partitionID the partition id
 *
 * @return the committed offset, or 0 if it cannot be fetched
 */
public long getOffsetOfTopicAndPartition(SimpleConsumer consumer, String groupId, String clientName,
                                         String topic, int partitionID) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionID);
    List<TopicAndPartition> requestInfo = new ArrayList<TopicAndPartition>();
    requestInfo.add(topicAndPartition);
    OffsetFetchRequest request = new OffsetFetchRequest(groupId, requestInfo, 0, clientName);
    OffsetFetchResponse response = consumer.fetchOffsets(request);
    // Read the returned offsets
    Map<TopicAndPartition, OffsetMetadataAndError> returnOffsetMetadata = response.offsets();
    // Process the result
    if (returnOffsetMetadata != null && !returnOffsetMetadata.isEmpty()) {
        // Get the offset information for this partition
        OffsetMetadataAndError offset = returnOffsetMetadata.get(topicAndPartition);
        if (offset.error().code() == ErrorMapping.NoError()) {
            // No error: return the committed offset
            return offset.offset();
        } else {
            // When a consumer connects for the first time (ZooKeeper has no data for this topic yet),
            // the broker returns UnknownTopicOrPartitionCode
            System.out.println("Error fetching data Offset Data the Topic and Partition. Reason: "
                    + offset.error());
        }
    }
    // On any error, fall back to offset 0
    return 0;
}
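As the comment in example 1 notes, the very first fetch for a consumer group typically comes back with UnknownTopicOrPartitionCode rather than a committed offset, and the method above folds that case and every other error into a return value of 0. A caller that needs to tell "no commit yet" apart from genuine broker errors can interpret the result explicitly. The helper below is an illustrative sketch along those lines, not part of the original example; it mirrors example 1's offset.error().code() accessor, which in other Kafka client versions is a plain short (compare examples 2 and 4).

import kafka.common.ErrorMapping;
import kafka.common.OffsetMetadataAndError;

/** Illustrative sketch: map an offset-fetch result to a starting offset, surfacing unexpected errors. */
private static long interpretFetchedOffset(OffsetMetadataAndError offset) {
    short code = offset.error().code();
    if (code == ErrorMapping.NoError()) {
        return offset.offset();
    } else if (code == ErrorMapping.UnknownTopicOrPartitionCode()) {
        // Nothing committed yet for this group/topic/partition: start from the beginning.
        return 0L;
    } else {
        // Any other broker-side error is surfaced instead of being silently mapped to 0.
        throw new IllegalStateException("Offset fetch failed with error code " + code);
    }
}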
Example 2: fetchNextOffset
import kafka.javaapi.consumer.SimpleConsumer; // import the package/class the method depends on
private long fetchNextOffset(SimpleConsumer consumer, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    List<TopicAndPartition> requestInfo = new ArrayList<>();
    requestInfo.add(topicAndPartition);
    OffsetFetchRequest fetchRequest = new OffsetFetchRequest(groupid, requestInfo,
            kafka.api.OffsetRequest.CurrentVersion(), correlationId, clientName);
    OffsetFetchResponse response = null;
    // Retry until the broker returns a response, sleeping between failed attempts
    while (true) {
        try {
            logger.debug("partition {} fetch offset request", partition);
            response = consumer.fetchOffsets(fetchRequest);
            if (response != null) {
                break;
            }
        } catch (Exception e) {
            logger.error("some error occurred when fetching offsets", e);
            try {
                Thread.sleep(EXCEPTION_SLEEP_TIME);
            } catch (InterruptedException e1) {
                e1.printStackTrace();
            }
        }
    }
    OffsetMetadataAndError offset = response.offsets().get(topicAndPartition);
    if (offset.error() == 0) {
        return offset.offset();
    } else {
        return 0;
    }
}
Example 3: getOffsetResponse
import kafka.javaapi.consumer.SimpleConsumer; // import the package/class the method depends on
public static OffsetFetchResponse getOffsetResponse(String groupId, List<TopicAndPartition> topicAndPartitions,
                                                    short versionId, int correlationId, String clientId) {
    SimpleConsumer simpleConsumer = SimpleKafkaHelper.getDefaultSimpleConsumer();
    OffsetFetchRequest offsetRequest = new OffsetFetchRequest(groupId, topicAndPartitions, versionId, correlationId, clientId);
    OffsetFetchResponse offsetResponse = simpleConsumer.fetchOffsets(offsetRequest);
    return offsetResponse;
}
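Because example 3 hands back the raw OffsetFetchResponse, it is up to the caller to pull the offsets out per partition. The method below is a hypothetical caller, not part of the original example: the group, topic, partition count, versionId, correlationId, and client id are placeholder values, getOffsetResponse is the helper above, and the usual java.util and kafka.javaapi/kafka.common imports are assumed.

private static void printCommittedOffsets() {
    // Build the request list for several partitions of one topic (three partitions assumed here).
    List<TopicAndPartition> partitions = new ArrayList<>();
    for (int p = 0; p < 3; p++) {
        partitions.add(new TopicAndPartition("my-topic", p));
    }
    OffsetFetchResponse response = getOffsetResponse("my-group", partitions, (short) 1, 0, "offset-client");
    // Walk the returned map and print the committed offset for each partition.
    for (Map.Entry<TopicAndPartition, OffsetMetadataAndError> entry : response.offsets().entrySet()) {
        TopicAndPartition tp = entry.getKey();
        System.out.println(tp.topic() + "-" + tp.partition() + " committed offset: " + entry.getValue().offset());
    }
}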
Example 4: getLastOffset
import kafka.javaapi.consumer.SimpleConsumer; // import the package/class the method depends on
/**
 * Retrieves the latest committed offset for a given topic and partition. Uses the
 * new Kafka offset storage API introduced in 0.8.1.
 *
 * @param consumer   consumer client to use for the request
 * @param topic      topic for which to look up the offset
 * @param partition  partition id for which to look up the offset
 * @param clientName client id to include in the request
 * @return the offset returned from the lead broker
 */
private static long getLastOffset(final SimpleConsumer consumer, final String topic,
                                  final int partition, final String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    List<TopicAndPartition> partitions = new ArrayList<>();
    partitions.add(topicAndPartition);
    OffsetFetchRequest fetchRequest = new OffsetFetchRequest(
            GROUP_ID,
            partitions,
            (short) 1,
            CORRELATION_ID,
            clientName);
    OffsetFetchResponse response = consumer.fetchOffsets(fetchRequest);
    if (response == null) {
        log.error("Error fetching offset data from the Broker.");
        return -1;
    }
    OffsetMetadataAndError result = response.offsets().get(topicAndPartition);
    short offsetFetchErrorCode = result.error();
    if (offsetFetchErrorCode == ErrorMapping.NotCoordinatorForConsumerCode()) {
        log.error("Error encountered whilst fetching Kafka offset: NotCoordinatorForConsumerCode");
        return -1;
    } else if (offsetFetchErrorCode == ErrorMapping.OffsetsLoadInProgressCode()) {
        log.error("Error encountered whilst fetching Kafka offset: OffsetsLoadInProgressCode");
        return -1;
    } else {
        long retrievedOffset = result.offset();
        String retrievedMetadata = result.metadata();
        log.debug("Received offsets for topic " + topic + " & partition " + partition);
        log.debug("Offset: " + String.valueOf(retrievedOffset) + " Metadata: " + retrievedMetadata);
        // if broker has returned -1 without error, we've yet to commit.
        // start to read from 0
        if (retrievedOffset == -1) {
            log.info("No commits found against Kafka queue for topic "
                    + topic + " & partition " + partition + ". Setting read offset to 0");
            return 0;
        } else {
            return retrievedOffset;
        }
    }
}
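A note on example 4's parameters: the hard-coded versionId of (short) 1 is what asks the broker to answer from the Kafka-based offset storage mentioned in its Javadoc (introduced in 0.8.1), whereas a versionId of 0 would return offsets committed to ZooKeeper. It is also the only example here that separates coordinator-related error codes, and the "offset -1 with no error" case meaning the group has never committed for that partition, from genuine failures before deciding where to start reading.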