This article collects typical usage examples of the Java method kafka.common.ErrorMapping.NoError. If you have been wondering what exactly ErrorMapping.NoError does, how to use it, or what real-world calls look like, the curated code samples below may help. You can also explore further usages of the method's containing class, kafka.common.ErrorMapping.
The following presents 15 code examples of the ErrorMapping.NoError method, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Java code samples.
Example 1: continueItr
import kafka.common.ErrorMapping; // import the package/class the method depends on
/**
 * THIS METHOD HAS SIDE EFFECTS - it will update {@code currentMessageItr} (if necessary) and then return true iff
 * the iterator still has elements to be read. If you call {@link scala.collection.Iterator#next()} when this method
 * returns false, you risk a {@link NullPointerException} or a {@link java.util.NoSuchElementException}.
 *
 * @return true if you can call {@link scala.collection.Iterator#next()} on {@code currentMessageItr}.
 */
@VisibleForTesting
boolean continueItr() {
    final long remaining = end - currentOffset;
    if (!canCallNext() && remaining > 0) {
        final int theFetchSize = (fetchSize > remaining) ? (int) remaining : fetchSize;
        LOG.debug(String.format("%s fetching %d bytes starting at offset %d", split.toString(), theFetchSize,
                currentOffset));
        final FetchRequest request = new FetchRequest(split.getPartition().getTopic(),
                split.getPartition().getPartId(), currentOffset, theFetchSize);
        final ByteBufferMessageSet msg = consumer.fetch(request);
        final int errorCode = msg.getErrorCode();
        if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
            return false;
        }
        if (errorCode != ErrorMapping.NoError()) {
            ErrorMapping.maybeThrowException(errorCode);
        } // --> else we try to grab the next iterator
        currentMessageItr = msg.iterator();
        currentOffset += msg.validBytes();
    }
    return canCallNext();
}
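To make the contract concrete, here is a hedged sketch of the intended call pattern (not from the original source): every call to next() on currentMessageItr is gated behind continueItr().

// Hedged usage sketch: gate next() behind continueItr(), since the iterator
// may be null or exhausted otherwise. MessageAndOffset is kafka.message's type;
// the surrounding processing is illustrative.
while (continueItr()) {
    MessageAndOffset messageAndOffset = currentMessageItr.next();
    byte[] payload = new byte[messageAndOffset.message().payload().remaining()];
    messageAndOffset.message().payload().get(payload);
    // hand the payload to the record reader / downstream processing here
}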
Example 2: hasError
import kafka.common.ErrorMapping; // import the package/class the method depends on
/**
 * Called by the default implementation of {@link #map} to check the error code
 * and determine whether to continue.
 */
protected boolean hasError(ByteBufferMessageSet messages) throws IOException {
    int errorCode = messages.getErrorCode();
    if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
        /* The offset cannot exceed the maximum offset (guaranteed by the Kafka protocol),
           but the Kafka server may delete old log files from time to time. */
        System.err.println("WARNING: current offset=" + _offset + ". It is out of range.");
        if (_retry >= MAX_RETRY_TIME) {
            return true;
        }
        _retry++;
        // refresh the current offset range and restart from its beginning
        _offsetRange = getOffsetRange();
        _offset = _offsetRange[0];
        return false;
    } else if (errorCode == ErrorMapping.InvalidMessageCode()) {
        throw new IOException(_input + " current offset=" + _offset + " : invalid offset.");
    } else if (errorCode == ErrorMapping.WrongPartitionCode()) {
        throw new IOException(_input + " : wrong partition");
    } else if (errorCode != ErrorMapping.NoError()) {
        throw new IOException(_input + " current offset=" + _offset + " error:" + errorCode);
    } else {
        return false;
    }
}
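A hedged sketch of the kind of fetch loop this check supports; the fields _consumer, _topic, _partition, _bufferSize and the loop bound are illustrative assumptions, and the FetchRequest construction follows the Kafka 0.7-style API seen in Example 1.

// Hypothetical surrounding fetch loop (field names are assumptions):
while (_offset < _offsetRange[1]) {
    ByteBufferMessageSet messages =
            _consumer.fetch(new FetchRequest(_topic, _partition, _offset, _bufferSize));
    if (hasError(messages)) {
        break; // out-of-range retries exhausted; other errors already threw IOException
    }
    for (MessageAndOffset messageAndOffset : messages) {
        // emit messageAndOffset.message(), then advance the offset
        _offset = messageAndOffset.offset();
    }
}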
Example 3: getOffsetOfTopicAndPartition
import kafka.common.ErrorMapping; // import the package/class the method depends on
/**
 * Fetch the current consumer's offset from wherever consumer offsets are stored.
 *
 * @param consumer    the consumer
 * @param groupId     the consumer group id
 * @param clientName  the client name
 * @param topic       the topic name
 * @param partitionID the partition id
 *
 * @return the committed offset, or 0 if it cannot be fetched
 */
public long getOffsetOfTopicAndPartition(SimpleConsumer consumer, String groupId, String clientName,
                                         String topic, int partitionID) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionID);
    List<TopicAndPartition> requestInfo = new ArrayList<TopicAndPartition>();
    requestInfo.add(topicAndPartition);
    OffsetFetchRequest request = new OffsetFetchRequest(groupId, requestInfo, 0, clientName);
    OffsetFetchResponse response = consumer.fetchOffsets(request);
    // read the response
    Map<TopicAndPartition, OffsetMetadataAndError> returnOffsetMetadata = response.offsets();
    // handle the response
    if (returnOffsetMetadata != null && !returnOffsetMetadata.isEmpty()) {
        // get the offset metadata for the current partition
        OffsetMetadataAndError offset = returnOffsetMetadata.get(topicAndPartition);
        if (offset.error().code() == ErrorMapping.NoError()) {
            // no error: read the offset normally
            return offset.offset();
        } else {
            // when a consumer connects for the first time (ZooKeeper has no data for this
            // topic yet), an UnknownTopicOrPartitionCode error is produced
            System.out.println("Error fetching data Offset Data the Topic and Partition. Reason: "
                    + offset.error());
        }
    }
    // in all error cases, simply return 0
    return 0;
}
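A minimal usage sketch (not part of the original): the host, port, timeout, buffer size, and group/topic/client names are illustrative placeholders; the five-argument constructor is the Kafka 0.8 SimpleConsumer Java API.

// SimpleConsumer(host, port, soTimeout, bufferSize, clientId) - values are placeholders
SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "offset-client");
try {
    long offset = getOffsetOfTopicAndPartition(consumer, "my-group", "offset-client", "my-topic", 0);
    System.out.println("committed offset for my-topic/0 = " + offset);
} finally {
    consumer.close();
}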
Example 4: offsetManagerBroker
import kafka.common.ErrorMapping; // import the package/class the method depends on
private Integer offsetManagerBroker(BlockingChannel channel, String groupId) {
    final ConsumerMetadataRequest request =
            new ConsumerMetadataRequest(groupId, (short) 0, 0, clientId());
    LOG.debug("Sending consumer metadata request: {}", request);
    channel.send(request);
    ConsumerMetadataResponse response =
            ConsumerMetadataResponse.readFrom(channel.receive().buffer());
    LOG.debug("Received consumer metadata response: {}", response);
    return (response.errorCode() == ErrorMapping.NoError()) ? response.coordinator().id() : null;
}
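For context, a hedged sketch of how such a lookup is usually driven over kafka.network.BlockingChannel; the host, port, read timeout, and group name are placeholder assumptions.

BlockingChannel channel = new BlockingChannel("localhost", 9092,
        BlockingChannel.UseDefaultBufferSize(),
        BlockingChannel.UseDefaultBufferSize(),
        10000 /* read timeout ms */);
channel.connect();
try {
    Integer coordinatorId = offsetManagerBroker(channel, "my-group");
    if (coordinatorId == null) {
        // coordinator not available yet; back off and retry, possibly against another broker
    }
} finally {
    channel.disconnect();
}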
Example 5: earliestOrLatestOffset
import kafka.common.ErrorMapping; // import the package/class the method depends on
/**
 * Get the earliest or latest offset of a given topic and partition.
 *
 * @param topicAndPartition topic and partition for which the offset is needed.
 * @param earliestOrLatest  a value indicating the earliest or latest offset.
 * @param consumerId        id of the consumer, which could be a consumer client, SimpleConsumerShell, or a follower broker.
 * @return the requested offset.
 */
public long earliestOrLatestOffset(final TopicAndPartition topicAndPartition, final long earliestOrLatest, int consumerId) {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = Maps.newHashMap();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(earliestOrLatest, 1));
    OffsetRequest request = new OffsetRequest(requestInfo,
            /* clientId = */ clientId,
            /* replicaId = */ consumerId);
    PartitionOffsetsResponse partitionErrorAndOffset =
            getOffsetsBefore(request).partitionErrorAndOffsets.get(topicAndPartition);
    if (partitionErrorAndOffset.error == ErrorMapping.NoError) {
        return Utils.head(partitionErrorAndOffset.offsets);
    }
    throw ErrorMapping.exceptionFor(partitionErrorAndOffset.error);
}
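A hedged usage sketch. The sentinel timestamps -2 (earliest) and -1 (latest), and the ordinary-consumer replica id -1, are the standard Kafka protocol values; literals are used here because it is not shown whether this Java port exposes named constants for them, and the topic name is a placeholder.

TopicAndPartition topicAndPartition = new TopicAndPartition("my-topic", 0);
long earliest = earliestOrLatestOffset(topicAndPartition, -2L /* earliest */, -1 /* ordinary consumer */);
long latest = earliestOrLatestOffset(topicAndPartition, -1L /* latest */, -1 /* ordinary consumer */);
// messages currently available to consumers lie in [earliest, latest)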
Example 6: toString
import kafka.common.ErrorMapping; // import the package/class the method depends on
@Override
public String toString() {
    final StringBuilder topicMetadataInfo = new StringBuilder();
    topicMetadataInfo.append(String.format("{TopicMetadata for topic %s -> ", topic));
    if (errorCode == ErrorMapping.NoError) {
        Utils.foreach(partitionsMetadata, new Callable1<PartitionMetadata>() {
            @Override
            public void apply(PartitionMetadata partitionMetadata) {
                switch (partitionMetadata.errorCode) {
                    case ErrorMapping.NoError:
                        topicMetadataInfo.append(String.format("\nMetadata for partition [%s,%d] is %s", topic,
                                partitionMetadata.partitionId, partitionMetadata.toString()));
                        break;
                    case ErrorMapping.ReplicaNotAvailableCode:
                        // this error means some replica other than the leader is not available; the consumer
                        // doesn't care about non-leader replicas, so ignore it
                        topicMetadataInfo.append(String.format("\nMetadata for partition [%s,%d] is %s", topic,
                                partitionMetadata.partitionId, partitionMetadata.toString()));
                        break;
                    default:
                        topicMetadataInfo.append(String.format("\nMetadata for partition [%s,%d] is not available due to %s", topic,
                                partitionMetadata.partitionId, ErrorMapping.exceptionFor(partitionMetadata.errorCode).getClass().getName()));
                }
            }
        });
    } else {
        topicMetadataInfo.append(String.format("\nNo partition metadata for topic %s due to %s", topic,
                ErrorMapping.exceptionFor(errorCode).getClass().getName()));
    }
    topicMetadataInfo.append("}");
    return topicMetadataInfo.toString();
}
Example 7: getAllTopics
import kafka.common.ErrorMapping; // import the package/class the method depends on
@Override
protected List<String> getAllTopics() {
    List<String> topics = new LinkedList<>();
    retryLoop: for (int retry = 0; retry < numRetries; retry++) {
        brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
            LOG.info("Trying to get topic metadata from broker {} in try {}/{}",
                    seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);
            try {
                // clear in case we have an incomplete list from previous tries
                topics.clear();
                for (TopicMetadata item : consumer.send(new TopicMetadataRequest(Collections.<String>emptyList())).topicsMetadata()) {
                    if (item.errorCode() != ErrorMapping.NoError()) {
                        // warn and try more brokers
                        LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
                                seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(),
                                ErrorMapping.exceptionFor(item.errorCode()).getMessage());
                        useNextAddressAsNewContactSeedBroker();
                        continue brokersLoop;
                    }
                    topics.add(item.topic());
                }
                break retryLoop; // leave the loop through the brokers
            }
            catch (Exception e) {
                // validate seed brokers in case of a ClosedChannelException
                validateSeedBrokers(seedBrokerAddresses, e);
                LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
                        seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
                LOG.debug("Detailed trace", e);
                // sleep a bit; retrying immediately makes no sense while Kafka is reorganizing leader metadata
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e1) {
                    // interrupted: shorten the sleep and continue
                }
                useNextAddressAsNewContactSeedBroker();
            }
        } // brokers loop
    } // retries loop
    return topics;
}
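For reference, a standalone sketch of the underlying call: in the Kafka 0.8 Java API, a TopicMetadataRequest built from an empty topic list asks the broker for metadata on every topic. The host, port, and client settings below are placeholder assumptions.

SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 30000, 64 * 1024, "topic-discovery");
try {
    kafka.javaapi.TopicMetadataResponse response =
            consumer.send(new TopicMetadataRequest(Collections.<String>emptyList()));
    for (TopicMetadata metadata : response.topicsMetadata()) {
        if (metadata.errorCode() == ErrorMapping.NoError()) {
            System.out.println(metadata.topic());
        }
    }
} finally {
    consumer.close();
}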
Example 8: getPartitionLeadersForTopics
import kafka.common.ErrorMapping; // import the package/class the method depends on
/**
 * Send a request to Kafka to get the partitions for the given topics.
 *
 * @param topics The names of the topics.
 */
public List<KafkaTopicPartitionLeader> getPartitionLeadersForTopics(List<String> topics) {
    List<KafkaTopicPartitionLeader> partitions = new LinkedList<>();
    retryLoop: for (int retry = 0; retry < numRetries; retry++) {
        brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
            LOG.info("Trying to get topic metadata from broker {} in try {}/{}",
                    seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);
            try {
                // clear in case we have an incomplete list from previous tries
                partitions.clear();
                for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) {
                    if (item.errorCode() != ErrorMapping.NoError()) {
                        // warn and try more brokers
                        LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
                                seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(),
                                ErrorMapping.exceptionFor(item.errorCode()).getMessage());
                        useNextAddressAsNewContactSeedBroker();
                        continue brokersLoop;
                    }
                    if (!topics.contains(item.topic())) {
                        LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");
                        useNextAddressAsNewContactSeedBroker();
                        continue brokersLoop;
                    }
                    for (PartitionMetadata part : item.partitionsMetadata()) {
                        Node leader = brokerToNode(part.leader());
                        KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
                        KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
                        partitions.add(pInfo);
                    }
                }
                break retryLoop; // leave the loop through the brokers
            }
            catch (Exception e) {
                // validate seed brokers in case of a ClosedChannelException
                validateSeedBrokers(seedBrokerAddresses, e);
                LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
                        seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
                LOG.debug("Detailed trace", e);
                // sleep a bit; retrying immediately makes no sense while Kafka is reorganizing leader metadata
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e1) {
                    // interrupted: shorten the sleep and continue
                }
                useNextAddressAsNewContactSeedBroker();
            }
        } // brokers loop
    } // retries loop
    return partitions;
}
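A hedged usage sketch; the topic name is a placeholder, and what callers do with each KafkaTopicPartitionLeader is described in comments rather than through assumed accessors.

List<KafkaTopicPartitionLeader> partitionLeaders =
        getPartitionLeadersForTopics(Collections.singletonList("my-topic"));
for (KafkaTopicPartitionLeader partitionLeader : partitionLeaders) {
    // each entry pairs a (topic, partition) with the broker currently leading it;
    // fetch requests for that partition should be directed at this leader
}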
Example 9: ControlledShutdownResponse
import kafka.common.ErrorMapping; // import the package/class the method depends on
public ControlledShutdownResponse(int correlationId, Set<TopicAndPartition> partitionsRemaining) {
    this(correlationId, ErrorMapping.NoError, partitionsRemaining);
}
Example 10: FetchResponsePartitionData
import kafka.common.ErrorMapping; // import the package/class the method depends on
public FetchResponsePartitionData(MessageSet messages) {
    this(ErrorMapping.NoError, -1, messages);
}
Example 11: TopicMetadata
import kafka.common.ErrorMapping; // import the package/class the method depends on
public TopicMetadata(String topic, List<PartitionMetadata> partitionsMetadata) {
    this(topic, partitionsMetadata, ErrorMapping.NoError);
}
Example 12: PartitionMetadata
import kafka.common.ErrorMapping; // import the package/class the method depends on
public PartitionMetadata(int partitionId, Broker leader, List<Broker> replicas) {
    this(partitionId, leader, replicas, Lists.<Broker>newArrayList(), ErrorMapping.NoError);
}
Example 13: LeaderAndIsrResponse
import kafka.common.ErrorMapping; // import the package/class the method depends on
public LeaderAndIsrResponse(int correlationId, Map<Tuple2<String, Integer>, Short> responseMap) {
    this(correlationId, responseMap, ErrorMapping.NoError);
}
Example 14: StopReplicaResponse
import kafka.common.ErrorMapping; // import the package/class the method depends on
public StopReplicaResponse(int correlationId, Map<Tuple2<String, Integer>, Short> responseMap) {
    this(correlationId, responseMap, ErrorMapping.NoError);
}
Example 15: UpdateMetadataResponse
import kafka.common.ErrorMapping; // import the package/class the method depends on
public UpdateMetadataResponse(int correlationId) {
    this(correlationId, ErrorMapping.NoError);
}