

Java FetchResponse.errorCode Method Code Examples

This article collects typical usage examples of the kafka.javaapi.FetchResponse.errorCode method in Java. If you are wondering what FetchResponse.errorCode does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of kafka.javaapi.FetchResponse, the class this method belongs to.


The sections below present 10 code examples of the FetchResponse.errorCode method, sorted by popularity.
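Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: issue a fetch through the old Kafka 0.8 SimpleConsumer API, check hasError() on the response, and only then read the per-partition errorCode(). The broker address, topic name, and partition number below are placeholder values, not taken from any of the projects.

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.common.ErrorMapping;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class FetchErrorCodeSketch {
    public static void main(String[] args) {
        // Placeholder broker/topic/partition; adjust for your cluster.
        SimpleConsumer consumer =
                new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "demo-client");
        try {
            FetchRequest request = new FetchRequestBuilder()
                    .clientId("demo-client")
                    .addFetch("demo-topic", 0, 0L, 100000) // topic, partition, offset, fetchSize
                    .build();
            FetchResponse response = consumer.fetch(request);
            if (response.hasError()) {
                // hasError() only reports that some partition failed;
                // errorCode(topic, partition) tells you which error, per partition.
                short code = response.errorCode("demo-topic", 0);
                if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                    // Typical recovery seen in the examples below:
                    // re-resolve a valid offset and retry the fetch.
                }
                // ErrorMapping maps the numeric code back to the matching exception type.
                throw new RuntimeException("Fetch failed", ErrorMapping.exceptionFor(code));
            }
            System.out.println(response.messageSet("demo-topic", 0).sizeInBytes() + " bytes fetched");
        } finally {
            consumer.close();
        }
    }
}

Note that errorCode() takes a topic and a partition because a single FetchResponse can cover several partitions, each with its own error code; hasError() alone does not say which fetch failed.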

Example 1: openFetchRequest

import kafka.javaapi.FetchResponse; // import the class the demonstrated method depends on
private void openFetchRequest()
{
    if (messageAndOffsetIterator == null) {
        log.debug("Fetching %d bytes from offset %d (%d - %d). %d messages read so far", KAFKA_READ_BUFFER_SIZE, cursorOffset, split.getStart(), split.getEnd(), totalMessages);
        FetchRequest req = new FetchRequestBuilder()
                .clientId("presto-worker-" + Thread.currentThread().getName())
                .addFetch(split.getTopicName(), split.getPartitionId(), cursorOffset, KAFKA_READ_BUFFER_SIZE)
                .build();

        // TODO - this should look at the actual node this is running on and prefer
        // that copy if running locally. - look into NodeInfo
        SimpleConsumer consumer = consumerManager.getConsumer(split.getNodes().get(0));

        FetchResponse fetchResponse = consumer.fetch(req);
        if (fetchResponse.hasError()) {
            short errorCode = fetchResponse.errorCode(split.getTopicName(), split.getPartitionId());
            log.warn("Fetch response has error: %d", errorCode);
            throw new PrestoException(KAFKA_SPLIT_ERROR, "could not fetch data from Kafka, error code is '" + errorCode + "'");
        }

        messageAndOffsetIterator = fetchResponse.messageSet(split.getTopicName(), split.getPartitionId()).iterator();
    }
}
 
Developer: y-lan, Project: presto, Lines: 24, Source: KafkaRecordSet.java

Example 2: receive

import kafka.javaapi.FetchResponse; // import the class the demonstrated method depends on
public void receive() {
    SimpleConsumer consumer = createConsumer();
    kafka.api.FetchRequest fetchRequest = null;
    FetchResponse fetchResponse = null;
    int partition = part.partitionId();
    int errorNum = 0;
    if (offsetInit) {
        offsetInit = false;
        this.offset = getLastOffset(consumer, topic, partition, offsetTime, consumer.clientId());
    }
    while (offset > -1) {
        if (consumer == null) {
            consumer = createConsumer();
        }
        // Build the fetch request, specifying the topic, partition, starting offset, and maximum fetch size in bytes
        fetchRequest = new FetchRequestBuilder().clientId(consumer.clientId()).addFetch(topic, partition, offset,
                FETCH_SIZE).build();
        // Send the request to Kafka and get the response
        fetchResponse = consumer.fetch(fetchRequest);
        // If the response reports an error, handle it and reconnect the consumer;
        // dealFetchError() throws once the error occurs more than 5 times in a row
        if (fetchResponse.hasError()) {
            errorNum++;
            short errorCode = fetchResponse.errorCode(topic, partition);
            offset = dealFetchError(consumer, partition, offset, errorCode, errorNum);
            continue;
        }
        errorNum = 0;
        // The fetch succeeded, so process the returned records
        offset = dealFetchRecord(fetchResponse, partition, offset);
    }
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 33, Source: KafkaSimpleConsumer.java

Example 3: execute

import kafka.javaapi.FetchResponse; // import the class the demonstrated method depends on
public List<byte[]> execute() throws Exception {
  FetchRequest req = 
      new FetchRequestBuilder().
      clientId(name).
      addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize).
      minBytes(1).
      maxWait(maxWait).
      build();
  
  FetchResponse fetchResponse = consumer.fetch(req);
  if(fetchResponse.hasError()) {
    short errorCode = fetchResponse.errorCode(topic, partitionMetadata.partitionId());
    String msg = "Kafka error code = " + errorCode + ", partition " + partitionMetadata.partitionId();
    throw new Exception(msg);
  }
  List<byte[]> holder = new ArrayList<byte[]>();
  ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  int count = 0;
  for(MessageAndOffset messageAndOffset : messageSet) {
    if (messageAndOffset.offset() < currentOffset) continue; //old offset, ignore
    ByteBuffer payload = messageAndOffset.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    holder.add(bytes);
    currentOffset = messageAndOffset.nextOffset();
    count++;
    if(count == maxRead) break;
  }
  return holder;
}
 
Developer: DemandCube, Project: Scribengin, Lines: 31, Source: KafkaPartitionReader.java

Example 4: run

import kafka.javaapi.FetchResponse; // import the class the demonstrated method depends on
public void run(long maxReads, String topic, int partition, List<KafkaBrokerInfo> brokerInfoList) throws Exception {
    // Look up metadata for the given topic partition
    PartitionMetadata metadata = findLeader(brokerInfoList, topic, partition);
    if (metadata == null) {
        System.out.println("Can't find metadata for Topic and Partition. Exiting");
        return;
    }
    String leadBrokerHost = metadata.leader().host();
    int leadBrokerPort = metadata.leader().port();
    String clientName = "Client_" + topic + "_" + partition;
    SimpleConsumer consumer = new SimpleConsumer(leadBrokerHost, leadBrokerPort, 100000, 64 * 1024, clientName);

    long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

    int numErrors = 0;
    while (maxReads > 0) {
        int fetchSize = 100000;
        // Build the fetch request, specifying the topic, partition, starting offset, and maximum fetch size in bytes
        kafka.api.FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(topic, partition,
                readOffset, fetchSize).build();
        // Send the request to Kafka and get the response
        FetchResponse fetchResponse = consumer.fetch(req);
        // If the response reports an error, handle it and retry;
        // give up after more than 5 consecutive errors
        if (fetchResponse.hasError()) {
            numErrors++;
            if (numErrors > 5)
                break;
            short code = fetchResponse.errorCode(topic, partition);
            System.out.println("Error fetching data from the Broker:" + leadBrokerHost + " Reason: " + code);
            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // The offset was out of range; fetch a fresh offset and retry
                readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(),
                        clientName);
            }
            continue;
        }
        numErrors = 0;
        long numRead = 0;
        System.out.println("readOffset=" + readOffset);
        // The fetch succeeded, so process the returned messages
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
            long currentOffset = messageAndOffset.offset();
            if (currentOffset < readOffset) {
                continue;
            }
            readOffset = messageAndOffset.nextOffset();
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            // process the message here
            // System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
            numRead++;
        }
        maxReads--;
        if (numRead == 0) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt(); // restore the interrupt flag instead of swallowing it
            }
        }
        System.out.println(numRead);
    }
    System.out.println(maxReads);
    if (consumer != null)
        consumer.close();
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 68, Source: SimpleConsumerExample.java

Example 5: run

import kafka.javaapi.FetchResponse; // import the class the demonstrated method depends on
public void run(long maxReads, String topic, int partition, List<KafkaBrokerInfo> brokerInfoList) throws Exception {
    // Look up metadata for the given topic partition
    PartitionMetadata metadata = findLeader(brokerInfoList, topic, partition);
    if (metadata == null) {
        System.out.println("Can't find metadata for Topic and Partition. Exiting");
        return;
    }
    String leadBrokerHost = metadata.leader().host();
    int leadBrokerPort = metadata.leader().port();
    String clientName = "Client_" + topic + "_" + partition;
    SimpleConsumer consumer = new SimpleConsumer(leadBrokerHost, leadBrokerPort, 100000, 64 * 1024, clientName);

    long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

    int numErrors = 0;
    while (maxReads > 0) {
        int fetchSize = 100000;
        // Build the fetch request, specifying the topic, partition, starting offset, and maximum fetch size in bytes
        kafka.api.FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(topic, partition,
                readOffset, fetchSize).build();
        // Send the request to Kafka and get the response
        FetchResponse fetchResponse = consumer.fetch(req);
        // If the response reports an error, handle it and retry;
        // give up after more than 5 consecutive errors
        if (fetchResponse.hasError()) {
            numErrors++;
            if (numErrors > 5)
                break;
            short code = fetchResponse.errorCode(topic, partition);
            System.out.println("Error fetching data from the Broker:" + leadBrokerHost + " Reason: " + code);
            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // The offset was out of range; fetch a fresh offset and retry
                readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(),
                        clientName);
            }
            continue;
        }
        numErrors = 0;
        long numRead = 0;
        // The fetch succeeded, so process the returned messages
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
            long currentOffset = messageAndOffset.offset();
            if (currentOffset < readOffset) {
                continue;
            }
            readOffset = messageAndOffset.nextOffset();
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            // process the message: print its offset and payload
            System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
            numRead++;
            maxReads--;
        }
        if (numRead == 0) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt(); // restore the interrupt flag instead of swallowing it
            }
        }
    }
    if (consumer != null)
        consumer.close();
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 65, Source: SimpleConsumerTest.java

Example 6: fetchMessages

import kafka.javaapi.FetchResponse; // import the class the demonstrated method depends on
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {

        String topic = config.topic;
        FetchRequest req = new FetchRequestBuilder().clientId(config.clientId).addFetch(topic, partition, offset, config.fetchMaxBytes)
                .maxWait(config.fetchWaitMaxMs).build();
        FetchResponse fetchResponse = null;
        SimpleConsumer simpleConsumer = null;
        try {
            simpleConsumer = findLeaderConsumer(partition);
            if (simpleConsumer == null) {
                // no leader consumer available for this partition
                return null;
            }
            fetchResponse = simpleConsumer.fetch(req);
        } catch (Exception e) {
            if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException
                    || e instanceof UnresolvedAddressException) {
                LOG.warn("Network error when fetching messages:", e);
                if (simpleConsumer != null) {
                    String host = simpleConsumer.host();
                    int port = simpleConsumer.port();
                    simpleConsumer = null;
                    throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e);
                }

            } else {
                throw new RuntimeException(e);
            }
        }
        if (fetchResponse == null) {
            // the consumer lookup failed with a network error before any fetch was issued
            return null;
        }
        if (fetchResponse.hasError()) {
            short code = fetchResponse.errorCode(topic, partition);
            if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
                long startOffset = getOffset(topic, partition, config.startOffsetTime);
                offset = startOffset;
            }
            if (leaderBroker != null) {
                LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":" + leaderBroker.port() + "] partition["
                    + partition + "] error:" + code);
            }
            return null;
        } else {
            ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition);
            return msgs;
        }
    }
 
Developer: zhangjunfang, Project: jstorm-0.9.6.3-, Lines: 48, Source: KafkaConsumer.java

Example 7: readEvents

import kafka.javaapi.FetchResponse; // import the class the demonstrated method depends on
/**
 * Read events.
 *
 * Any errors that occur during the read are wrapped in a KafkaPartitionReaderException,
 * which carries the Kafka error code; the caller is expected to handle it.
 *
 * @return the list of messages read, possibly empty
 * @throws KafkaPartitionReaderException
 */
public List<MessageAndMetadata<byte[],byte[]>> readEvents() throws KafkaPartitionReaderException {
	List<MessageAndMetadata<byte[],byte[]> > events = new ArrayList<MessageAndMetadata<byte[],byte[]>>();
	if(isClosed()){
		return events;
	}
	//LOG.log("Start Reading PartitionReader from ["+readOffset+"] once, Topic["+topic+"] partition["+partition+"]");
	if (nextBatchSizeBytes < 0)
		nextBatchSizeBytes = config.fetchMinBytes();//config.getBatchSizeBytes();

	if (nextBatchSizeBytes == 0) {
		// nextBatchSize only affects one fetch
		nextBatchSizeBytes = config.fetchMinBytes();//config.getBatchSizeBytes();
		return events;
	}

	boolean  hasMessage=false;
	ByteBufferMessageSet messageSet=null;
	do{
		FetchRequest req = new FetchRequestBuilder()
		.clientId(clientId)
		.addFetch(topic, partition, readOffset,
				nextBatchSizeBytes).build();

		FetchResponse fetchResponse = null;
		fetchResponse = consumer.fetch(req);
		if (fetchResponse.hasError()) {
			short code = fetchResponse.errorCode(topic, partition);
			throw new KafkaPartitionReaderException(code);
		} else {
			messageSet = fetchResponse.messageSet(topic, partition);
			hasMessage = messageSet.iterator().hasNext();
			if (!hasMessage) {
				// no message yet: double the batch size, capped at the configured max
				nextBatchSizeBytes = Math.min(
						nextBatchSizeBytes * 2, config.fetchMessageMaxBytes());
			}
		}
	} while (!hasMessage && !readToTheEnd()); // TODO: test readToTheEnd(); consider config.getMaxBatchSizeBytes()
	if(!hasMessage){
		//set this reader on idle.
		onIdle();
		nextBatchSizeBytes =config.fetchMinBytes();// config.getBatchSizeBytes();
		return events;//return empty events.
	}
	for (MessageAndOffset messageAndOffset : messageSet) {
		long currentOffset = messageAndOffset.offset();
		if (currentOffset < readOffset) {
			continue;
		}
		readOffset = messageAndOffset.nextOffset();
		Message message = messageAndOffset.message();
		MessageAndMetadata<byte[],byte[]> mam=new MessageAndMetadata<byte[],byte[]>(topic, partition, message, readOffset, decoder, decoder);
		events.add(mam);
	
	}
	// nextBatchSize only affects one fetch
	nextBatchSizeBytes = config.fetchMinBytes();//config.getBatchSizeBytes();
	return events;
}
 
Developer: pulsarIO, Project: druid-kafka-ext, Lines: 68, Source: ConsumerPartitionReader.java

Example 8: read

import kafka.javaapi.FetchResponse; // import the class the demonstrated method depends on
@Override
public List<MessageAndOffset> read(long offset) throws StageException {

  FetchRequest req = buildFetchRequest(offset);
  FetchResponse fetchResponse;
  try {
    fetchResponse = consumer.fetch(req);
  } catch (Exception e) {
    if(e instanceof SocketTimeoutException) {
      //If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
      //consumer if no message is available for consumption after the specified timeout value.
      //If this happens exit gracefully
      LOG.warn(KafkaErrors.KAFKA_28.getMessage());
      return Collections.emptyList();
    } else {
      throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
    }
  }

  if(fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if(code == ErrorMapping.OffsetOutOfRangeCode()) {
      //invalid offset
      offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
    } else {
      //try re-initializing connection with kafka
      consumer.close();
      consumer = null;
      leader = findNewLeader(leader, topic, partition);
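      // NOTE: this assumes findNewLeader(...) also re-creates 'consumer' against the
      // new leader; otherwise the re-fetch below would run against a null consumer.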
    }

    //re-fetch
    req = buildFetchRequest(offset);
    fetchResponse = consumer.fetch(req);

    if(fetchResponse.hasError()) {
      //could not fetch the second time, give kafka some time
      LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
    }
  }

  List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
  for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    long currentOffset = messageAndOffset.offset();
    if (currentOffset < offset) {
      LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
      continue;
    }
    ByteBuffer payload = messageAndOffset.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    MessageAndOffset partitionToPayloadMap = new MessageAndOffset(bytes, messageAndOffset.nextOffset(), partition);
    partitionToPayloadMapArrayList.add(partitionToPayloadMap);
  }
  return partitionToPayloadMapArrayList;
}
 
Developer: streamsets, Project: datacollector, Lines: 57, Source: KafkaLowLevelConsumer08.java

Example 9: fetch

import kafka.javaapi.FetchResponse; // import the class the demonstrated method depends on
public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException {
    List<BytesMessageWithOffset> newOffsetMsg = new ArrayList<BytesMessageWithOffset>();
    FetchResponse response = null;
    Broker previousLeader = leaderBroker;
    while (true) {
        ensureConsumer(previousLeader);

        if (offset == Long.MAX_VALUE) {
            offset = getOffset(false);
            logger.info("offset max long, fetch from latest in kafka {}", offset);
        }

        FetchRequest request = new FetchRequestBuilder()
                .clientId(clientId)
                .addFetch(topic, partitionId, offset, 100000000)
                .maxWait(timeoutMs)
                .minBytes(1)
                .build();

        //logger.debug("fetch offset {}", offset);

        try {
            response = consumer.fetch(request);
        } catch (Exception e) {
            // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
            if (Thread.interrupted()) {
                logger.info("catch exception of {} with interrupted in fetch for {} - {} with offset {}",
                        e.getClass().getName(), topic, partitionId, offset);

                throw new InterruptedException();
            }
            logger.warn("caughte exception in fetch {} - {}", topic, partitionId, e);
            response = null;
        }

        if (response == null || response.hasError()) {
            short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();
            logger.warn("fetch {} - {} with offset {} encounters error: {}", topic, partitionId, offset, errorCode);

            boolean needNewLeader = false;
            if (errorCode == ErrorMapping.RequestTimedOutCode()) {
                //TODO: leave it here
            } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
                //TODO: fetch the earliest offset or latest offset ?
                // seems no obvious correct way to handle it
                long earliestOffset = getOffset(true);
                logger.debug("get earilset offset {} for {} - {}", earliestOffset, topic, partitionId);
                if (earliestOffset < 0) {
                    needNewLeader = true;
                } else {
                    newOffsetMsg.add(new BytesMessageWithOffset(null, earliestOffset));
                    offset = earliestOffset;
                    continue;
                }
            } else {
                needNewLeader = true;
            }

            if (needNewLeader) {
                stopConsumer();
                previousLeader = leaderBroker;
                leaderBroker = null;
                continue;
            }
        } else {
            break;
        }
    }

    return response != null ? filterAndDecode(response.messageSet(topic, partitionId), offset) :
        (newOffsetMsg.size() > 0 ? newOffsetMsg : EMPTY_MSGS);
}
 
Developer: lyogavin, Project: Pistachio, Lines: 73, Source: KafkaSimpleConsumer.java

Example 10: getMessage

import kafka.javaapi.FetchResponse; // import the class the demonstrated method depends on
private Message getMessage(TopicPartition topicPartition, long offset,
                           SimpleConsumer consumer) {
    LOG.debug("fetching message topic {} partition {} offset {}",
            topicPartition.getTopic(), topicPartition.getPartition(), offset);
    final int MAX_MESSAGE_SIZE_BYTES = mConfig.getMaxMessageSizeBytes();
    final String clientName = getClientName(topicPartition);
    kafka.api.FetchRequest request = new FetchRequestBuilder().clientId(clientName)
            .addFetch(topicPartition.getTopic(), topicPartition.getPartition(), offset,
                      MAX_MESSAGE_SIZE_BYTES)
            .build();
    FetchResponse response = consumer.fetch(request);
    if (response.hasError()) {
        consumer.close();
        int errorCode = response.errorCode(topicPartition.getTopic(), topicPartition.getPartition());

        if (errorCode == Errors.OFFSET_OUT_OF_RANGE.code()) {
          throw new MessageDoesNotExistException();
        } else {
          throw new RuntimeException("Error fetching offset data. Reason: " + errorCode);
        }
    }
    MessageAndOffset messageAndOffset = response.messageSet(
            topicPartition.getTopic(), topicPartition.getPartition()).iterator().next();
    byte[] keyBytes = null;
    if (messageAndOffset.message().hasKey()) {
        ByteBuffer key = messageAndOffset.message().key();
        keyBytes = new byte[key.limit()];
        key.get(keyBytes);
    }
    byte[] payloadBytes = null;
    if (!messageAndOffset.message().isNull()) {
        ByteBuffer payload = messageAndOffset.message().payload();
        payloadBytes = new byte[payload.limit()];
        payload.get(payloadBytes);
    }
    long timestamp = (mConfig.useKafkaTimestamp())
            ? mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(messageAndOffset)
            : 0L;

    return new Message(topicPartition.getTopic(), topicPartition.getPartition(),
            messageAndOffset.offset(), keyBytes, payloadBytes, timestamp);
}
 
Developer: pinterest, Project: secor, Lines: 43, Source: KafkaClient.java


Note: The kafka.javaapi.FetchResponse.errorCode examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, so consult each project's license before using or redistributing the code. Do not reproduce this article without permission.