This article collects typical usage examples of the OffsetResponse.errorCode method from the Java class kafka.javaapi.OffsetResponse. If you are wondering what OffsetResponse.errorCode does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, kafka.javaapi.OffsetResponse.
The sections below present 11 code examples of OffsetResponse.errorCode, sorted by popularity by default.
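Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: build an OffsetRequest, send it with SimpleConsumer.getOffsetsBefore, and consult OffsetResponse.errorCode(topic, partition) whenever hasError() reports a problem. The broker address, topic, partition and client id below are placeholder values rather than anything taken from the examples, and the error handling is deliberately simplistic.
import java.util.Collections;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class OffsetErrorCodeDemo {
    public static void main(String[] args) {
        // Placeholder connection settings; adjust to your environment.
        SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "offset-demo");
        TopicAndPartition tp = new TopicAndPartition("my-topic", 0);
        try {
            // Ask for the single latest offset of the partition.
            OffsetRequest request = new OffsetRequest(
                    Collections.singletonMap(tp, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1)),
                    kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
            OffsetResponse response = consumer.getOffsetsBefore(request);
            if (response.hasError()) {
                // errorCode is reported per topic/partition; ErrorMapping turns it into a readable exception.
                short errorCode = response.errorCode(tp.topic(), tp.partition());
                throw ErrorMapping.exceptionFor(errorCode);
            }
            System.out.println("Latest offset: " + response.offsets(tp.topic(), tp.partition())[0]);
        } catch (Throwable t) {
            t.printStackTrace();
        } finally {
            consumer.close();
        }
    }
}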
Example 1: getOffset
import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private static Long getOffset(OffsetResponse response, TopicAndPartition topicPartition) {
String topic = topicPartition.topic();
int partition = topicPartition.partition();
long[] offsets = response.offsets(topic, partition);
if (offsets.length > 0) {
return offsets[0];
}
short errorCode = response.errorCode(topic, partition);
if (errorCode == ErrorMapping.UnknownTopicOrPartitionCode()) {
log.info("Unknown topic or partition {} {}", topic, partition);
return null;
}
throw new IllegalStateException(
"Error reading offset for " + topic + " / " + partition + ": " +
ErrorMapping.exceptionNameFor(errorCode));
}
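A hypothetical call site for the helper above (consumer, topicPartition and clientId are placeholders, not part of the original example): a null result means the topic or partition does not exist yet, while any other broker error surfaces as an IllegalStateException.
// Hypothetical usage of getOffset; the request construction mirrors the later examples.
OffsetRequest request = new OffsetRequest(
        Collections.singletonMap(topicPartition,
                new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.EarliestTime(), 1)),
        kafka.api.OffsetRequest.CurrentVersion(), clientId);
Long earliest = getOffset(consumer.getOffsetsBefore(request), topicPartition);
if (earliest == null) {
    // Topic or partition not created yet; treat it as "start from offset 0".
    earliest = 0L;
}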
Example 2: fetchResetOffset
import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
public long fetchResetOffset(String reset){
long time = LatestTime();
if (reset != null && reset.equals(SmallestTimeString()))
time = EarliestTime();
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
TopicAndPartition tp = new TopicAndPartition(topic, partition);
PartitionOffsetRequestInfo info = new PartitionOffsetRequestInfo(time,1);
requestInfo.put(tp, info);
OffsetRequest request = new OffsetRequest(requestInfo,CurrentVersion(), clientId);
OffsetResponse response = consumer.getOffsetsBefore(request);
if (response.hasError()) {
//ErrorMapping.exceptionFor(response.errorCode(topic, partition)).printStackTrace();
throw new KafkaPartitionReaderException(response.errorCode(topic, partition));
}
long[] offsets = response.offsets(topic, partition);
//TODO: confirm with xiaoju why this check is needed
// if (offsets.length <= 0)
// continue;
return offsets[0];
}
Example 3: findAllOffsets
import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId)
{
TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);
// The API implies that this will always return all of the offsets, so it seems a partition cannot have
// more than Integer.MAX_VALUE - 1 segments.
//
// This also assumes that the lowest value returned will be the first segment available, so if segments have been dropped off, this value
// should not be 0.
PartitionOffsetRequestInfo partitionOffsetRequestInfo = new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
OffsetRequest offsetRequest = new OffsetRequest(ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
if (offsetResponse.hasError()) {
short errorCode = offsetResponse.errorCode(topicName, partitionId);
log.warn("Offset response has error: %d", errorCode);
throw new PrestoException(KAFKA_SPLIT_ERROR, "could not fetch data from Kafka, error code is '" + errorCode + "'");
}
return offsetResponse.offsets(topicName, partitionId);
}
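One detail the example relies on but does not state: with maxNumOffsets set to Integer.MAX_VALUE the broker answers with one boundary offset per log segment plus the log-end offset, ordered from newest to oldest. Under that assumption the two ends of the returned array bracket the readable range, as in this small sketch:
// Assumes the descending (newest-first) ordering described above.
static long logEndOffset(long[] allOffsets) {
    return allOffsets[0];
}

static long earliestAvailableOffset(long[] allOffsets) {
    return allOffsets[allOffsets.length - 1];
}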
Example 4: findLastOffset
import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private long findLastOffset(TopicPartition topicPartition, SimpleConsumer consumer) {
TopicAndPartition topicAndPartition = new TopicAndPartition(topicPartition.getTopic(),
topicPartition.getPartition());
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
kafka.api.OffsetRequest.LatestTime(), 1));
final String clientName = getClientName(topicPartition);
OffsetRequest request = new OffsetRequest(requestInfo,
kafka.api.OffsetRequest.CurrentVersion(),
clientName);
OffsetResponse response = consumer.getOffsetsBefore(request);
if (response.hasError()) {
throw new RuntimeException("Error fetching offset data. Reason: " +
response.errorCode(topicPartition.getTopic(), topicPartition.getPartition()));
}
long[] offsets = response.offsets(topicPartition.getTopic(),
topicPartition.getPartition());
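// LatestTime() returns the log-end offset, i.e. the offset the next message will get,
// so the last message already written sits one below it (and -1 if the partition is empty).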
return offsets[0] - 1;
}
Example 5: getLastOffset
import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
/**
* Retrieves the last offset before the given timestamp for a given topic partition.
*
* @return The last offset before the given timestamp, or {@code 0} if it could not be determined.
*/
private long getLastOffset(TopicPartition topicPart, long timestamp) {
BrokerInfo brokerInfo = brokerService.getLeader(topicPart.getTopic(), topicPart.getPartition());
SimpleConsumer consumer = brokerInfo == null ? null : consumers.getUnchecked(brokerInfo);
// If there is no broker, treat it as a failed attempt.
if (consumer == null) {
LOG.warn("Failed to talk to any broker. Default offset to 0 for {}", topicPart);
return 0L;
}
// Fire offset request
OffsetRequest request = new OffsetRequest(ImmutableMap.of(
new TopicAndPartition(topicPart.getTopic(), topicPart.getPartition()),
new PartitionOffsetRequestInfo(timestamp, 1)
), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
OffsetResponse response = consumer.getOffsetsBefore(request);
// Retrieve offsets from response
long[] offsets = response.hasError() ? null : response.offsets(topicPart.getTopic(), topicPart.getPartition());
if (offsets == null || offsets.length <= 0) {
short errorCode = response.errorCode(topicPart.getTopic(), topicPart.getPartition());
// If the topic partition doesn't exist, use offset 0 without logging an error.
if (errorCode != ErrorMapping.UnknownTopicOrPartitionCode()) {
consumers.refresh(brokerInfo);
LOG.warn("Failed to fetch offset for {} with timestamp {}. Error: {}. Default offset to 0.",
topicPart, timestamp, errorCode);
}
return 0L;
}
LOG.debug("Offset {} fetched for {} with timestamp {}.", offsets[0], topicPart, timestamp);
return offsets[0];
}
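Example 5 is the only one that passes an arbitrary timestamp instead of LatestTime() or EarliestTime(). With this legacy API the lookup is coarse: the broker answers with log-segment boundary offsets whose segments were last modified before the timestamp, not an exact per-message position. The sketch below shows such a request in isolation; consumer, topic and partition are placeholders, and the fall-back-to-0 behaviour simply mirrors the example.
import java.util.Collections;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

/** Returns an offset no later than {@code timestamp} (segment granularity), or 0 on any error. */
static long offsetBefore(SimpleConsumer consumer, String topic, int partition, long timestamp) {
    OffsetRequest request = new OffsetRequest(
            Collections.singletonMap(new TopicAndPartition(topic, partition),
                    new PartitionOffsetRequestInfo(timestamp, 1)),
            kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse response = consumer.getOffsetsBefore(request);
    long[] offsets = response.hasError() ? null : response.offsets(topic, partition);
    if (offsets == null || offsets.length == 0) {
        // Same defensive stance as example 5: report the code and fall back to 0.
        System.err.println("Offset lookup failed, error code " + response.errorCode(topic, partition));
        return 0L;
    }
    return offsets[0];
}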
Example 6: getOffset
import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private long getOffset(KafkaPartition partition,
Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo)
throws KafkaOffsetRetrievalFailureException {
SimpleConsumer consumer = this.getSimpleConsumer(partition.getLeader().getHostAndPort());
for (int i = 0; i < NUM_TRIES_FETCH_OFFSET; i++) {
try {
OffsetResponse offsetResponse = consumer.getOffsetsBefore(new OffsetRequest(offsetRequestInfo,
kafka.api.OffsetRequest.CurrentVersion(), DEFAULT_KAFKA_CLIENT_NAME));
if (offsetResponse.hasError()) {
throw new RuntimeException(
"offsetReponse has error: " + offsetResponse.errorCode(partition.getTopicName(), partition.getId()));
}
return offsetResponse.offsets(partition.getTopicName(), partition.getId())[0];
} catch (Exception e) {
LOG.warn(
String.format("Fetching offset for partition %s has failed %d time(s). Reason: %s", partition, i + 1, e));
if (i < NUM_TRIES_FETCH_OFFSET - 1) {
try {
Thread.sleep((long) ((i + Math.random()) * 1000));
} catch (InterruptedException e2) {
LOG.error("Caught interrupted exception between retries of getting latest offsets. " + e2);
}
}
}
}
throw new KafkaOffsetRetrievalFailureException(
String.format("Fetching offset for partition %s has failed.", partition));
}
Example 7: fetchResetOffset
import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private long fetchResetOffset(String reset) {
long time = LatestTime();
if (reset != null && reset.equals(SmallestTimeString()))
time = EarliestTime();
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
TopicAndPartition tp = new TopicAndPartition(m_topic, m_partition);
PartitionOffsetRequestInfo info = new PartitionOffsetRequestInfo(time,
1);
requestInfo.put(tp, info);
OffsetRequest request = new OffsetRequest(requestInfo,
CurrentVersion(), m_clientId);
for (int i = 0; i < 2; i++) {
for (int j = 0; j < m_retries; j++) {
OffsetResponse response = m_consumer.getOffsetsBefore(request);
if (response.hasError()) {
short errorCode = response.errorCode(m_topic, m_partition);
LOGGER.warn("Error when fetching offset from Kafka, errorCode=" + errorCode);
continue;
}
long[] offsets = response.offsets(m_topic, m_partition);
if (offsets.length <= 0)
continue;
return offsets[0];
}
// cannot get offset after retries, reinit and try again
reinit();
}
throw new RuntimeException("Fail to get resetOffset " + reset
+ " after retries for " + m_clientId);
}
Example 8: getLastOffset
import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
public long getLastOffset() throws InterruptedException {
OffsetResponse response = null;
Broker previousLeader = leaderBroker;
while (true) {
TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);
ensureConsumer(previousLeader);
try {
response = consumer.getOffsetsBefore(request);
} catch (Exception e) {
// e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
if (Thread.interrupted()) {
logger.info("catch exception of {} with interrupted in getLastOffset for {} - {}",
e.getClass().getName(), topic, partitionId);
throw new InterruptedException();
}
logger.warn("caughte exception in getLastOffset {} - {}", topic, partitionId, e);
response = null;
}
if (response == null || response.hasError()) {
short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();
logger.warn("Error fetching data Offset for {} - {}, the Broker. Reason: {}",
topic, partitionId, errorCode);
stopConsumer();
previousLeader = leaderBroker;
leaderBroker = null;
continue;
}
break;
}
long[] offsets = response.offsets(topic, partitionId);
return offsets[offsets.length - 1];
}
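The helpers ensureConsumer, stopConsumer and the leaderBroker bookkeeping are not shown in example 8. With the same legacy client, leader discovery normally goes through TopicMetadataRequest; the sketch below illustrates that step only and is not the example's actual implementation (host list, port, timeouts and client id are placeholders; in the 0.8.x javaapi the leader is a kafka.cluster.Broker, in later versions a BrokerEndPoint).
import java.util.Collections;
import java.util.List;
import kafka.cluster.Broker;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;

/** Asks each broker for topic metadata and returns the current partition leader, or null if none is reported. */
static Broker findLeader(List<String> brokerHosts, int port, String topic, int partition) {
    for (String host : brokerHosts) {
        SimpleConsumer metadataConsumer = new SimpleConsumer(host, port, 10000, 64 * 1024, "leader-lookup");
        try {
            TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList(topic));
            TopicMetadataResponse response = metadataConsumer.send(request);
            for (TopicMetadata topicMetadata : response.topicsMetadata()) {
                for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                    if (partitionMetadata.partitionId() == partition && partitionMetadata.leader() != null) {
                        return partitionMetadata.leader();
                    }
                }
            }
        } finally {
            metadataConsumer.close();
        }
    }
    return null;
}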
Example 9: getOffset
import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private long getOffset(KafkaPartition partition,
Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo)
throws KafkaOffsetRetrievalFailureException {
SimpleConsumer consumer = this.getSimpleConsumer(partition.getLeader().getHostAndPort());
for (int i = 0; i < this.fetchOffsetRetries; i++) {
try {
OffsetResponse offsetResponse = consumer.getOffsetsBefore(new OffsetRequest(offsetRequestInfo,
kafka.api.OffsetRequest.CurrentVersion(), this.clientName));
if (offsetResponse.hasError()) {
throw new RuntimeException(
"offsetReponse has error: " + offsetResponse.errorCode(partition.getTopicName(), partition.getId()));
}
return offsetResponse.offsets(partition.getTopicName(), partition.getId())[0];
} catch (Exception e) {
LOG.warn(
String.format("Fetching offset for partition %s has failed %d time(s). Reason: %s", partition, i + 1, e));
if (i < this.fetchOffsetRetries - 1) {
try {
Thread.sleep((long) ((i + Math.random()) * 1000));
} catch (InterruptedException e2) {
LOG.error("Caught interrupted exception between retries of getting latest offsets. " + e2);
}
}
}
}
throw new KafkaOffsetRetrievalFailureException(
String.format("Fetching offset for partition %s has failed.", partition));
}
Example 10: getOffset
import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private long getOffset(KafkaPartition partition, Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo)
throws KafkaOffsetRetrievalFailureException {
SimpleConsumer consumer = this.getSimpleConsumer(partition.getLeader().getHostAndPort());
for (int i = 0; i < this.fetchOffsetRetries; i++) {
try {
OffsetResponse offsetResponse =
consumer.getOffsetsBefore(new OffsetRequest(offsetRequestInfo, kafka.api.OffsetRequest.CurrentVersion(),
this.clientName));
if (offsetResponse.hasError()) {
throw new RuntimeException("offsetReponse has error: "
+ offsetResponse.errorCode(partition.getTopicName(), partition.getId()));
}
return offsetResponse.offsets(partition.getTopicName(), partition.getId())[0];
} catch (Exception e) {
log.warn(String.format("Fetching offset for partition %s has failed %d time(s). Reason: %s", partition, i + 1,
e));
if (i < this.fetchOffsetRetries - 1) {
try {
Thread.sleep((long) ((i + Math.random()) * 1000));
} catch (InterruptedException e2) {
log.error("Caught interrupted exception between retries of getting latest offsets. " + e2);
}
}
}
}
throw new KafkaOffsetRetrievalFailureException(String.format("Fetching offset for partition %s has failed.",
partition));
}
Example 11: fetchPartitionOffset
import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
/**
* Fetches the numeric Kafka offset for this partition for a symbolic name ("largest" or "smallest").
*
* @param requestedOffset Either "largest" or "smallest"
* @param timeoutMillis Timeout in milliseconds
* @throws java.util.concurrent.TimeoutException If the operation could not be completed within {@code timeoutMillis}
* milliseconds
* @return An offset
*/
public synchronized long fetchPartitionOffset(String requestedOffset, int timeoutMillis)
throws java.util.concurrent.TimeoutException {
Preconditions.checkNotNull(requestedOffset);
final long offsetRequestTime;
if (requestedOffset.equalsIgnoreCase("largest")) {
offsetRequestTime = kafka.api.OffsetRequest.LatestTime();
} else if (requestedOffset.equalsIgnoreCase("smallest")) {
offsetRequestTime = kafka.api.OffsetRequest.EarliestTime();
} else if (requestedOffset.equalsIgnoreCase("testDummy")) {
return -1L;
} else {
throw new IllegalArgumentException("Unknown initial offset value " + requestedOffset);
}
int kafkaErrorCount = 0;
final int MAX_KAFKA_ERROR_COUNT = 10;
final long endTime = System.currentTimeMillis() + timeoutMillis;
while(System.currentTimeMillis() < endTime) {
// Try to get into a state where we're connected to Kafka
while (_currentState.getStateValue() != ConsumerState.CONNECTED_TO_PARTITION_LEADER &&
System.currentTimeMillis() < endTime) {
_currentState.process();
}
if (_currentState.getStateValue() != ConsumerState.CONNECTED_TO_PARTITION_LEADER &&
endTime <= System.currentTimeMillis()) {
throw new TimeoutException();
}
// Send the offset request to Kafka
OffsetRequest request = new OffsetRequest(Collections.singletonMap(new TopicAndPartition(_topic, _partition),
new PartitionOffsetRequestInfo(offsetRequestTime, 1)), kafka.api.OffsetRequest.CurrentVersion(), _clientId);
OffsetResponse offsetResponse;
try {
offsetResponse = _simpleConsumer.getOffsetsBefore(request);
} catch (Exception e) {
_currentState.handleConsumerException(e);
continue;
}
final short errorCode = offsetResponse.errorCode(_topic, _partition);
if (errorCode == Errors.NONE.code()) {
long offset = offsetResponse.offsets(_topic, _partition)[0];
if (offset == 0L) {
LOGGER.warn("Fetched offset of 0 for topic {} and partition {}, is this a newly created topic?", _topic,
_partition);
}
return offset;
} else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
// If there is no leader, it'll take some time for a new leader to be elected, wait 100 ms before retrying
Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
} else {
// Retry after a short delay
kafkaErrorCount++;
if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
throw exceptionForKafkaErrorCode(errorCode);
}
Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
}
}
throw new TimeoutException();
}
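exceptionForKafkaErrorCode is not shown in example 11. Since the method already compares error codes against org.apache.kafka.common.protocol.Errors, one plausible implementation (hypothetical, not the project's actual code) simply delegates to Errors.forCode:
import org.apache.kafka.common.protocol.Errors;

// Hypothetical helper matching the call in example 11; not taken from the original source.
private static RuntimeException exceptionForKafkaErrorCode(short errorCode) {
    // Errors.forCode maps unknown codes to Errors.UNKNOWN; exception() returns the matching
    // ApiException (a RuntimeException). It would return null for Errors.NONE, a code the
    // caller above never passes in.
    return Errors.forCode(errorCode).exception();
}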