This article collects typical usage examples of the Java method kafka.common.ErrorMapping.OffsetOutOfRangeCode. If you are unsure what ErrorMapping.OffsetOutOfRangeCode does or how to call it, the curated examples below may help; for more context, see the enclosing class, kafka.common.ErrorMapping.
The following 10 code examples of the ErrorMapping.OffsetOutOfRangeCode method are shown, ordered roughly by popularity.
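Before the examples, here is a minimal sketch of the pattern they all share: fetch from a partition with the legacy Kafka 0.8 SimpleConsumer API and compare the response's error code against ErrorMapping.OffsetOutOfRangeCode(). The broker address, topic, partition, and offset below are placeholders, not values taken from any example.

import kafka.api.FetchRequestBuilder;
import kafka.common.ErrorMapping;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class OffsetOutOfRangeDemo {
    public static void main(String[] args) {
        // Placeholders: broker host/port, topic, partition, starting offset, fetch size.
        SimpleConsumer consumer = new SimpleConsumer("broker-host", 9092, 100000, 64 * 1024, "demo-client");
        FetchResponse response = consumer.fetch(new FetchRequestBuilder()
                .clientId("demo-client")
                .addFetch("demo-topic", 0, 0L, 100000)
                .build());
        if (response.hasError()
                && response.errorCode("demo-topic", 0) == ErrorMapping.OffsetOutOfRangeCode()) {
            // The requested offset is no longer (or not yet) available on the broker;
            // typical recovery is to reset to the earliest or latest valid offset.
        }
        consumer.close();
    }
}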
Example 1: continueItr
import kafka.common.ErrorMapping; // import the package/class the method depends on

/**
 * THIS METHOD HAS SIDE EFFECTS - it will update {@code currentMessageItr} (if necessary) and then return true iff
 * the iterator still has elements to be read. If you call {@link scala.collection.Iterator#next()} when this method
 * returns false, you risk a {@link NullPointerException} or a {@link java.util.NoSuchElementException}.
 *
 * @return true if you can call {@link scala.collection.Iterator#next()} on {@code currentMessageItr}.
 */
@VisibleForTesting
boolean continueItr() {
    final long remaining = end - currentOffset;
    if (!canCallNext() && remaining > 0) {
        final int theFetchSize = (fetchSize > remaining) ? (int) remaining : fetchSize;
        LOG.debug(String.format("%s fetching %d bytes starting at offset %d", split.toString(), theFetchSize,
                currentOffset));
        final FetchRequest request = new FetchRequest(split.getPartition().getTopic(), split.getPartition()
                .getPartId(), currentOffset, theFetchSize);
        final ByteBufferMessageSet msg = consumer.fetch(request);
        final int errorCode = msg.getErrorCode();
        if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
            return false;
        }
        if (errorCode != ErrorMapping.NoError()) {
            ErrorMapping.maybeThrowException(errorCode);
        } // --> else we try to grab the next iterator
        currentMessageItr = msg.iterator();
        currentOffset += msg.validBytes();
    }
    return canCallNext();
}
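The javadoc's contract implies a simple caller loop. The snippet does not show the caller, so the following is a hypothetical sketch, assuming currentMessageItr yields MessageAndOffset elements:

// Hypothetical caller: continueItr() refills the iterator as needed, so this
// loop ends only when the split is exhausted or the offset goes out of range.
while (continueItr()) {
    MessageAndOffset messageAndOffset = currentMessageItr.next();
    // decode messageAndOffset.message() and hand it to the record reader
}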
Example 2: hasError
import kafka.common.ErrorMapping; // import the package/class the method depends on

/**
 * Called by the default implementation of {@link #map} to check the error code
 * and determine whether to continue.
 */
protected boolean hasError(ByteBufferMessageSet messages)
        throws IOException {
    int errorCode = messages.getErrorCode();
    if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
        /* The offset cannot cross the maximum offset (guaranteed by the Kafka protocol),
           but the Kafka server may delete old log files from time to time. */
        System.err.println("WARNING: current offset=" + _offset + ". It is out of range.");
        if (_retry >= MAX_RETRY_TIME)
            return true;
        _retry++;
        // refresh the current offset range and restart from its beginning
        _offsetRange = getOffsetRange();
        _offset = _offsetRange[0];
        return false;
    } else if (errorCode == ErrorMapping.InvalidMessageCode()) {
        throw new IOException(_input + " current offset=" + _offset
                + " : invalid offset.");
    } else if (errorCode == ErrorMapping.WrongPartitionCode()) {
        throw new IOException(_input + " : wrong partition");
    } else if (errorCode != ErrorMapping.NoError()) {
        throw new IOException(_input + " current offset=" + _offset
                + " error:" + errorCode);
    } else {
        return false;
    }
}
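The getOffsetRange() helper is not part of this snippet. A plausible implementation (an assumption; the _consumer, _topic, _partition, and _clientName fields are hypothetical) queries the earliest and latest offsets with the same OffsetRequest pattern as the getLastOffset sketch shown after Example 6:

// Hypothetical helper: returns {earliestOffset, latestOffset} for the partition
// being consumed, reusing a getLastOffset helper like the one sketched after Example 6.
private long[] getOffsetRange() {
    long earliest = getLastOffset(_consumer, _topic, _partition,
            kafka.api.OffsetRequest.EarliestTime(), _clientName);
    long latest = getLastOffset(_consumer, _topic, _partition,
            kafka.api.OffsetRequest.LatestTime(), _clientName);
    return new long[] { earliest, latest };
}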
Example 3: checkNeedNewLeader
import kafka.common.ErrorMapping; // import the package/class the method depends on

@SuppressWarnings("squid:MethodCyclomaticComplexity")
private void checkNeedNewLeader(final short errorCode) {
    LOG.warn("Error fetching data from the Broker: [{}:{}] Topic: {}-[{}:{}]@{} Error: {}",
            consumer.host(), consumer.port(), consumerGroup, topic, partitionId, currentOffset, errorCode);
    boolean needNewLeader = false;
    if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
        PartitionMetadata partitionMetadata =
                KafkaMetaData.getPartitionMetadata(consumer, Collections.singletonList(topic), partitionId);
        if (partitionMetadata == null || !LeaderBrokerChecker.isSameBroker(
                KafkaMetaData.findNewLeader(brokersList, partitionMetadata.leader(), topic, partitionId).leader(),
                partitionMetadata.leader())) {
            needNewLeader = true;
        } else {
            long earliestOffset = getEarliestOffset();
            long latestOffset = getLatestOffset();
            if (latestOffset < 0 || earliestOffset < 0) {
                needNewLeader = true;
            } else if (currentOffset > latestOffset) {
                throw new KafkaException("Offset Out of Higher Bound for [" + topic + ":" + partitionId
                        + "@" + currentOffset + "] latest:" + latestOffset);
            } else if (currentOffset < earliestOffset) {
                throw new KafkaException("Offset Out of Lower Bound for [" + topic + ":" + partitionId
                        + "@" + currentOffset + "] earliest:" + earliestOffset);
            }
        }
    } else {
        needNewLeader = true;
    }
    if (needNewLeader) {
        connect();
    }
}
Example 4: checkLeader
import kafka.common.ErrorMapping; // import the package/class the method depends on

/**
 * Check the leader.
 *
 * @param a_topic       topic name
 * @param a_partition   partition number
 * @param a_beginOffset begin offset
 * @return boolean
 */
private boolean checkLeader(String a_topic, int a_partition,
                            long a_beginOffset) {
    if (checkConsumer(a_topic, a_partition)) {
        FetchRequest req = new FetchRequestBuilder()
                .clientId(pool.getClientId())
                .addFetch(a_topic, a_partition, a_beginOffset,
                        KafkaConstants.FETCH_SIZE).build();
        fetchResponse = consumer.get().fetch(req);
        String leadHost = metadata.leader().host();
        if (fetchResponse.hasError()) {
            // Something went wrong!
            short code = fetchResponse.errorCode(a_topic, a_partition);
            logger.error("Error fetching data from the Broker:" + leadHost
                    + " Reason: " + code);
            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // We asked for an invalid offset. For the simple case, ask for
                // the last element to reset.
                a_beginOffset = getLatestOffset(a_topic, a_partition);
            }
            consumer.get().close();
            consumer.set(null);
            try {
                metadata = findNewLeader(leadHost, a_topic, a_partition);
            } catch (MQException e) {
                logger.error("Find new leader failed.", e);
            }
            return false;
        }
        return true;
    }
    return false;
}
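Neither findNewLeader (used here and in Example 9) nor findLeader (used in Examples 6 and 7) is included in the snippets. The core of such helpers is a topic-metadata query against a list of seed brokers; the following is a sketch under that assumption (the seedBrokers and port parameters are illustrative, not taken from the examples):

import java.util.Collections;
import java.util.List;

import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;

// Hypothetical sketch of a leader lookup: ask each seed broker for topic metadata
// and return the PartitionMetadata whose partition id matches.
private PartitionMetadata findLeader(List<String> seedBrokers, int port,
                                     String topic, int partition) {
    for (String seed : seedBrokers) {
        SimpleConsumer metadataConsumer = null;
        try {
            metadataConsumer = new SimpleConsumer(seed, port, 100000, 64 * 1024, "leaderLookup");
            TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList(topic));
            TopicMetadataResponse response = metadataConsumer.send(request);
            for (TopicMetadata topicMetadata : response.topicsMetadata()) {
                for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                    if (partitionMetadata.partitionId() == partition) {
                        return partitionMetadata;
                    }
                }
            }
        } catch (Exception e) {
            // This broker is unreachable; try the next seed.
        } finally {
            if (metadataConsumer != null) {
                metadataConsumer.close();
            }
        }
    }
    return null; // no leader found
}

A findNewLeader variant typically wraps this lookup in a retry loop and rejects the result if it still points at the broker that just failed.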
Example 5: handlePartitionReaderException
import kafka.common.ErrorMapping; // import the package/class the method depends on

public void handlePartitionReaderException(PartitionReader reader, KafkaPartitionReaderException e) {
    if (e instanceof KafkaPartitionReaderException) {
        LOG.log("Error exception: " + e.getCode());
        LOG.log(ErrorMapping.exceptionFor(e.getCode()).getClass().getName());
        try {
            if (e.getCode() == ErrorMapping.OffsetOutOfRangeCode()) {
                reader.resetOffset();
            } else if (e.getCode() == ErrorMapping.NotLeaderForPartitionCode()) {
                reader.reinit();
            } else if (e.getCode() == ErrorMapping.LeaderNotAvailableCode()) {
                try {
                    Thread.sleep(config.refreshLeaderBackoffMs());
                } catch (InterruptedException e1) {
                    // ignore and fall through to reinit
                }
                reader.reinit();
            } else {
                LOG.log("Unexpected KafkaException: " + e.getCode());
                reader.reinit();
            }
        } catch (Exception t) {
            LOG.log("Handle Kafka PartitionReader exception.", e);
        }
    }
}
Example 6: run
import kafka.common.ErrorMapping; // import the package/class the method depends on

public void run(long maxReads, String topic, int partition, List<KafkaBrokerInfo> brokerInfoList) throws Exception {
    // Fetch the metadata for the given topic partition
    PartitionMetadata metadata = findLeader(brokerInfoList, topic, partition);
    if (metadata == null) {
        System.out.println("Can't find metadata for Topic and Partition. Exiting");
        return;
    }
    String leadBrokerHost = metadata.leader().host();
    int leadBrokerPort = metadata.leader().port();
    String clientName = "Client_" + topic + "_" + partition;
    SimpleConsumer consumer = new SimpleConsumer(leadBrokerHost, leadBrokerPort, 100000, 64 * 1024, clientName);
    long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientName);
    int numErrors = 0;
    while (maxReads > 0) {
        int fetchSize = 100000;
        // Build the fetch request: topic, partition, starting offset, and the
        // maximum number of bytes to fetch per request
        kafka.api.FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(topic, partition,
                readOffset, fetchSize).build();
        // Send the request to Kafka and collect the response
        FetchResponse fetchResponse = consumer.fetch(req);
        // If the response indicates an error, handle it and re-fetch the offset;
        // after more than 5 consecutive errors, give up and exit the loop
        if (fetchResponse.hasError()) {
            numErrors++;
            if (numErrors > 5)
                break;
            short code = fetchResponse.errorCode(topic, partition);
            System.out.println("Error fetching data from the Broker:" + leadBrokerHost + " Reason: " + code);
            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // The offset is out of range; simply fetch a valid offset again
                readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(),
                        clientName);
            }
            continue;
        }
        numErrors = 0;
        long numRead = 0;
        System.out.println("readOffset=" + readOffset);
        // The response carries no error, so process the returned messages
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
            long currentOffset = messageAndOffset.offset();
            if (currentOffset < readOffset) {
                continue;
            }
            readOffset = messageAndOffset.nextOffset();
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            // Process the message payload here
            // System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
            numRead++;
        }
        maxReads--;
        if (numRead == 0) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ie) {
                // ignore and retry the fetch
            }
        }
        System.out.println(numRead);
    }
    System.out.println(maxReads);
    if (consumer != null)
        consumer.close();
}
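The getLastOffset helper used above (and in Examples 7 and 9) is not defined in the snippets. The canonical version from the standard SimpleConsumer example, reproduced here as a sketch, asks the partition leader for the last offset before a given time:

import java.util.HashMap;
import java.util.Map;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

// Sketch of the canonical getLastOffset helper: whichTime is either
// kafka.api.OffsetRequest.EarliestTime() or kafka.api.OffsetRequest.LatestTime().
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
                                 long whichTime, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
            new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        System.out.println("Error fetching offset data. Reason: " + response.errorCode(topic, partition));
        return 0;
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
}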
Example 7: run
import kafka.common.ErrorMapping; // import the package/class the method depends on

public void run(long maxReads, String topic, int partition, List<KafkaBrokerInfo> brokerInfoList) throws Exception {
    // Fetch the metadata for the given topic partition
    PartitionMetadata metadata = findLeader(brokerInfoList, topic, partition);
    if (metadata == null) {
        System.out.println("Can't find metadata for Topic and Partition. Exiting");
        return;
    }
    String leadBrokerHost = metadata.leader().host();
    int leadBrokerPort = metadata.leader().port();
    String clientName = "Client_" + topic + "_" + partition;
    SimpleConsumer consumer = new SimpleConsumer(leadBrokerHost, leadBrokerPort, 100000, 64 * 1024, clientName);
    long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientName);
    int numErrors = 0;
    while (maxReads > 0) {
        int fetchSize = 100000;
        // Build the fetch request: topic, partition, starting offset, and the
        // maximum number of bytes to fetch per request
        kafka.api.FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(topic, partition,
                readOffset, fetchSize).build();
        // Send the request to Kafka and collect the response
        FetchResponse fetchResponse = consumer.fetch(req);
        // If the response indicates an error, handle it and re-fetch the offset;
        // after more than 5 consecutive errors, give up and exit the loop
        if (fetchResponse.hasError()) {
            numErrors++;
            if (numErrors > 5)
                break;
            short code = fetchResponse.errorCode(topic, partition);
            System.out.println("Error fetching data from the Broker:" + leadBrokerHost + " Reason: " + code);
            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // The offset is out of range; simply fetch a valid offset again
                readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(),
                        clientName);
            }
            continue;
        }
        numErrors = 0;
        long numRead = 0;
        // The response carries no error, so process the returned messages
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
            long currentOffset = messageAndOffset.offset();
            if (currentOffset < readOffset) {
                continue;
            }
            readOffset = messageAndOffset.nextOffset();
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            // Process the message payload here
            System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
            numRead++;
            maxReads--;
        }
        if (numRead == 0) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ie) {
                // ignore and retry the fetch
            }
        }
    }
    if (consumer != null)
        consumer.close();
}
Example 8: fetchMessages
import kafka.common.ErrorMapping; // import the package/class the method depends on

public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {
    String topic = config.topic;
    FetchRequest req = new FetchRequestBuilder().clientId(config.clientId)
            .addFetch(topic, partition, offset, config.fetchMaxBytes)
            .maxWait(config.fetchWaitMaxMs).build();
    FetchResponse fetchResponse = null;
    SimpleConsumer simpleConsumer = null;
    try {
        simpleConsumer = findLeaderConsumer(partition);
        if (simpleConsumer == null) {
            // LOG.error(message);
            return null;
        }
        fetchResponse = simpleConsumer.fetch(req);
    } catch (Exception e) {
        if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException
                || e instanceof UnresolvedAddressException) {
            LOG.warn("Network error when fetching messages:", e);
            if (simpleConsumer != null) {
                String host = simpleConsumer.host();
                int port = simpleConsumer.port();
                simpleConsumer = null;
                throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , "
                        + e.getMessage(), e);
            }
        } else {
            throw new RuntimeException(e);
        }
    }
    if (fetchResponse.hasError()) {
        short code = fetchResponse.errorCode(topic, partition);
        if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
            long startOffset = getOffset(topic, partition, config.startOffsetTime);
            offset = startOffset;
        }
        if (leaderBroker != null) {
            LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":"
                    + leaderBroker.port() + "] partition[" + partition + "] error:" + code);
        }
        return null;
    } else {
        return fetchResponse.messageSet(topic, partition);
    }
}
Example 9: read
import kafka.common.ErrorMapping; // import the package/class the method depends on

@Override
public List<MessageAndOffset> read(long offset) throws StageException {
    FetchRequest req = buildFetchRequest(offset);
    FetchResponse fetchResponse;
    try {
        fetchResponse = consumer.fetch(req);
    } catch (Exception e) {
        if (e instanceof SocketTimeoutException) {
            // If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is
            // thrown to the consumer if no message is available for consumption after the specified
            // timeout value. If this happens, exit gracefully.
            LOG.warn(KafkaErrors.KAFKA_28.getMessage());
            return Collections.emptyList();
        } else {
            throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
        }
    }
    if (fetchResponse.hasError()) {
        short code = fetchResponse.errorCode(topic, partition);
        if (code == ErrorMapping.OffsetOutOfRangeCode()) {
            // invalid offset: reset to the latest available offset
            offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
        } else {
            // try re-initializing the connection with Kafka
            consumer.close();
            consumer = null;
            leader = findNewLeader(leader, topic, partition);
        }
        // re-fetch
        req = buildFetchRequest(offset);
        fetchResponse = consumer.fetch(req);
        if (fetchResponse.hasError()) {
            // could not fetch the second time either; give Kafka some time
            LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
        }
    }
    List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
    for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
        long currentOffset = messageAndOffset.offset();
        if (currentOffset < offset) {
            LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
            continue;
        }
        ByteBuffer payload = messageAndOffset.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        MessageAndOffset partitionToPayloadMap = new MessageAndOffset(bytes, messageAndOffset.nextOffset(), partition);
        partitionToPayloadMapArrayList.add(partitionToPayloadMap);
    }
    return partitionToPayloadMapArrayList;
}
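buildFetchRequest is not shown in this example. Given the fields the method already uses (consumer, topic, partition, clientName), a plausible shape is the following sketch, where maxFetchSize and maxWaitTime are assumed configuration fields:

// Hypothetical helper: build a fetch request for this reader's topic/partition.
private FetchRequest buildFetchRequest(long offset) {
    return new FetchRequestBuilder()
            .clientId(clientName)
            .addFetch(topic, partition, offset, maxFetchSize)
            .maxWait(maxWaitTime)
            .build();
}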
Example 10: fetch
import kafka.common.ErrorMapping; // import the package/class the method depends on

public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException {
    List<BytesMessageWithOffset> newOffsetMsg = new ArrayList<BytesMessageWithOffset>();
    FetchResponse response = null;
    Broker previousLeader = leaderBroker;
    while (true) {
        ensureConsumer(previousLeader);
        if (offset == Long.MAX_VALUE) {
            offset = getOffset(false);
            logger.info("offset is Long.MAX_VALUE, fetching from the latest offset in kafka: {}", offset);
        }
        FetchRequest request = new FetchRequestBuilder()
                .clientId(clientId)
                .addFetch(topic, partitionId, offset, 100000000)
                .maxWait(timeoutMs)
                .minBytes(1)
                .build();
        //logger.debug("fetch offset {}", offset);
        try {
            response = consumer.fetch(request);
        } catch (Exception e) {
            // e could be an instance of ClosedByInterruptException, as SimpleConsumer.fetch uses nio
            if (Thread.interrupted()) {
                logger.info("caught exception of {} with interrupted in fetch for {} - {} with offset {}",
                        e.getClass().getName(), topic, partitionId, offset);
                throw new InterruptedException();
            }
            logger.warn("caught exception in fetch {} - {}", topic, partitionId, e);
            response = null;
        }
        if (response == null || response.hasError()) {
            short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();
            logger.warn("fetch {} - {} with offset {} encounters error: {}", topic, partitionId, offset, errorCode);
            boolean needNewLeader = false;
            if (errorCode == ErrorMapping.RequestTimedOutCode()) {
                // TODO: leave it here
            } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
                // TODO: fetch the earliest offset or the latest offset?
                // There seems to be no obviously correct way to handle it.
                long earliestOffset = getOffset(true);
                logger.debug("got earliest offset {} for {} - {}", earliestOffset, topic, partitionId);
                if (earliestOffset < 0) {
                    needNewLeader = true;
                } else {
                    newOffsetMsg.add(new BytesMessageWithOffset(null, earliestOffset));
                    offset = earliestOffset;
                    continue;
                }
            } else {
                needNewLeader = true;
            }
            if (needNewLeader) {
                stopConsumer();
                previousLeader = leaderBroker;
                leaderBroker = null;
                continue;
            }
        } else {
            break;
        }
    }
    return response != null ? filterAndDecode(response.messageSet(topic, partitionId), offset) :
            (newOffsetMsg.size() > 0 ? newOffsetMsg : EMPTY_MSGS);
}