

Java FetchResponse.hasError Method Code Examples

This article collects typical usage examples of the Java method kafka.javaapi.FetchResponse.hasError. If you are wondering what FetchResponse.hasError does, how to use it, or where to find usage examples, the hand-picked method examples below may help. You can also explore further usage examples of the enclosing class, kafka.javaapi.FetchResponse.


The following presents 15 code examples of the FetchResponse.hasError method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
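Before diving into the project-specific examples, here is a minimal, self-contained sketch of the canonical pattern they all share: issue a fetch through SimpleConsumer, call hasError() on the FetchResponse, and inspect the per-partition error code before touching the message set. The broker address, topic name, partition, offset, and class name below are placeholder values for illustration only, not taken from any of the projects.

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.common.ErrorMapping;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class FetchResponseHasErrorSketch {
    public static void main(String[] args) {
        // Placeholder connection settings: host, port, soTimeout, bufferSize, clientId
        SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "hasError-sketch");
        try {
            FetchRequest request = new FetchRequestBuilder()
                    .clientId("hasError-sketch")
                    .addFetch("my-topic", 0, 0L, 100000) // topic, partition, offset, fetchSize
                    .build();
            FetchResponse response = consumer.fetch(request);
            // hasError() reports whether ANY requested partition failed;
            // errorCode(topic, partition) then yields the per-partition code.
            if (response.hasError()) {
                short code = response.errorCode("my-topic", 0);
                if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                    // Typical recovery: re-resolve a valid offset and retry the fetch.
                }
            } else {
                // Only read the message set once the response is known to be error-free.
                response.messageSet("my-topic", 0);
            }
        } finally {
            consumer.close();
        }
    }
}

Note that hasError() is only an aggregate flag; every example below still calls errorCode(topic, partition) to distinguish recoverable errors (such as OffsetOutOfRange) from fatal ones.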

Example 1: openFetchRequest

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
private void openFetchRequest()
{
    if (messageAndOffsetIterator == null) {
        log.debug("Fetching %d bytes from offset %d (%d - %d). %d messages read so far", KAFKA_READ_BUFFER_SIZE, cursorOffset, split.getStart(), split.getEnd(), totalMessages);
        FetchRequest req = new FetchRequestBuilder()
                .clientId("presto-worker-" + Thread.currentThread().getName())
                .addFetch(split.getTopicName(), split.getPartitionId(), cursorOffset, KAFKA_READ_BUFFER_SIZE)
                .build();

        // TODO - this should look at the actual node this is running on and prefer
        // that copy if running locally. - look into NodeInfo
        SimpleConsumer consumer = consumerManager.getConsumer(split.getNodes().get(0));

        FetchResponse fetchResponse = consumer.fetch(req);
        if (fetchResponse.hasError()) {
            short errorCode = fetchResponse.errorCode(split.getTopicName(), split.getPartitionId());
            log.warn("Fetch response has error: %d", errorCode);
            throw new PrestoException(KAFKA_SPLIT_ERROR, "could not fetch data from Kafka, error code is '" + errorCode + "'");
        }

        messageAndOffsetIterator = fetchResponse.messageSet(split.getTopicName(), split.getPartitionId()).iterator();
    }
}
 
Developer: y-lan, Project: presto, Lines: 24, Source: KafkaRecordSet.java

Example 2: nextMessageSet

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
void nextMessageSet() throws Exception {
  FetchRequest req = 
      new FetchRequestBuilder().
      clientId(name).
      addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize).
      minBytes(1).
      maxWait(1000).
      build();
  
  FetchResponse fetchResponse = consumer.fetch(req);
  if(fetchResponse.hasError()) {
    throw new Exception("TODO: handle the error, reset the consumer....");
  }
  
  currentMessageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  currentMessageSetIterator = currentMessageSet.iterator();
}
 
Developer: DemandCube, Project: Scribengin, Lines: 18, Source: KafkaPartitionReader.java

Example 3: receive

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
public void receive() {
    SimpleConsumer consumer = createConsumer();
    kafka.api.FetchRequest fetchRequest = null;
    FetchResponse fetchResponse = null;
    int partition = part.partitionId();
    int errorNum = 0;
    if (offsetInit) {
        offsetInit = false;
        this.offset = getLastOffset(consumer, topic, partition, offsetTime, consumer.clientId());
    }
    while (offset > -1) {
        if (consumer == null) {
            consumer = createConsumer();
        }
        // Build the fetch request: specify the topic, partition, and starting offset,
        // plus the maximum amount of data to fetch per request
        fetchRequest = new FetchRequestBuilder().clientId(consumer.clientId()).addFetch(topic, partition, offset,
                FETCH_SIZE).build();
        // Send the request to Kafka and obtain the response
        fetchResponse = consumer.fetch(fetchRequest);
        // If the response indicates an error, handle it and reconnect the consumer;
        // once errors occur more than 5 consecutive times, an exception is thrown
        if (fetchResponse.hasError()) {
            errorNum++;
            short errorCode = fetchResponse.errorCode(topic, partition);
            offset = dealFetchError(consumer, partition, offset, errorCode, errorNum);
            continue;
        }
        errorNum = 0;
        // No error in the response, so process the fetched records
        offset = dealFetchRecord(fetchResponse, partition, offset);
    }
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 33, Source: KafkaSimpleConsumer.java

Example 4: fetchMessages

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer, Partition partition, long offset) throws TopicOffsetOutOfRangeException, RuntimeException {
    ByteBufferMessageSet msgs = null;
    String topic = config.topic;
    int partitionId = partition.partition;
    FetchRequestBuilder builder = new FetchRequestBuilder();
    FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes).
            clientId(config.clientId).maxWait(config.fetchMaxWait).build();
    FetchResponse fetchResponse;
    try {
        fetchResponse = consumer.fetch(fetchRequest);
    } catch (Exception e) {
        if (e instanceof ConnectException ||
                e instanceof SocketTimeoutException ||
                e instanceof IOException ||
                e instanceof UnresolvedAddressException
                ) {
            LOG.warn("Network error when fetching messages:", e);
            throw new FailedFetchException(e);
        } else {
            throw new RuntimeException(e);
        }
    }
    if (fetchResponse.hasError()) {
        KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
        if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange) {
            String msg = "Got fetch request with offset out of range: [" + offset + "]";
            LOG.warn(msg);
            throw new TopicOffsetOutOfRangeException(msg);
        } else {
            String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
            LOG.error(message);
            throw new FailedFetchException(message);
        }
    } else {
        msgs = fetchResponse.messageSet(topic, partitionId);
    }
    return msgs;
}
 
Developer: redBorder, Project: rb-bi, Lines: 39, Source: KafkaUtils.java

Example 5: getFetchResponseForFetchRequest

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
private synchronized FetchResponse getFetchResponseForFetchRequest(FetchRequest fetchRequest,
    KafkaPartition partition) {
  SimpleConsumer consumer = getSimpleConsumer(partition.getLeader().getHostAndPort());

  FetchResponse fetchResponse = consumer.fetch(fetchRequest);
  if (fetchResponse.hasError()) {
    throw new RuntimeException(
        String.format("error code %d", fetchResponse.errorCode(partition.getTopicName(), partition.getId())));
  }
  return fetchResponse;
}
 
Developer: Hanmourang, Project: Gobblin, Lines: 12, Source: KafkaWrapper.java

Example 6: fetch

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
/** Fetch the consumed messages as a List, starting from the given offset.
 * 
 * @param topic The topic name
 * @param partition The partition id
 * @param offset	Starting message offset
 * @return
 * @throws Exception
 */
static List<String> fetch(SimpleConsumer simpleConsumer, String topic, int partition, long offset) throws Exception{
	List<String> retList = new ArrayList<String>();
	FetchRequest fetchRequest = getFetchRequest(simpleConsumer,topic, partition, offset);
	
	FetchResponse fetchResponse = null;
	try {
		fetchResponse = simpleConsumer.fetch(fetchRequest);
	} catch (Exception e) {
		 if (e instanceof ConnectException) {
                throw new FailedFetchException(e);
            } else {
                throw new RuntimeException(e);
            }
	}
	
	if (fetchResponse.hasError()) {
		KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partition));
		String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
		LOG.error(message);
		throw new FailedFetchException(message);
	}
	
	ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partition);
	
	for (MessageAndOffset messageAndOffset : messageSet) {
		ByteBuffer payload = messageAndOffset.message().payload();
		byte[] bytes = new byte[payload.limit()];
		payload.get(bytes);
		String msg = new String(bytes, "UTF-8");
		retList.add(msg);
	}
	
	return retList;
}
 
Developer: linzhaoming, Project: easyframe-msg, Lines: 43, Source: SimpleKafkaHelper.java

Example 7: getFetchResponseForFetchRequest

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
private synchronized FetchResponse getFetchResponseForFetchRequest(FetchRequest fetchRequest, KafkaPartition partition) {
  SimpleConsumer consumer = getSimpleConsumer(partition.getLeader().getHostAndPort());

  FetchResponse fetchResponse = consumer.fetch(fetchRequest);
  if (fetchResponse.hasError()) {
    throw new RuntimeException(String.format("error code %d",
        fetchResponse.errorCode(partition.getTopicName(), partition.getId())));
  }
  return fetchResponse;
}
 
Developer: apache, Project: incubator-gobblin, Lines: 11, Source: Kafka08ConsumerClient.java

Example 8: fetchMessages

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer, Partition partition, long offset) {
    ByteBufferMessageSet msgs = null;
    String topic = config.topic;
    int partitionId = partition.partition;
    for (int errors = 0; errors < 2 && msgs == null; errors++) {
        FetchRequestBuilder builder = new FetchRequestBuilder();
        FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes).
                clientId(config.clientId).build();
        FetchResponse fetchResponse;
        try {
            fetchResponse = consumer.fetch(fetchRequest);
        } catch (Exception e) {
            if (e instanceof ConnectException) {
                throw new FailedFetchException(e);
            } else {
                throw new RuntimeException(e);
            }
        }
        if (fetchResponse.hasError()) {
            KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
            if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange && errors == 0) {
                long startOffset = getOffset(consumer, topic, partitionId, config.startOffsetTime);
                LOG.warn("Got fetch request with offset out of range: [" + offset + "]; " +
                        "retrying with default start offset time from configuration. " +
                        "configured start offset time: [" + config.startOffsetTime + "] offset: [" + startOffset + "]");
                offset = startOffset;
            } else {
                String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
                LOG.error(message);
                throw new FailedFetchException(message);
            }
        } else {
            msgs = fetchResponse.messageSet(topic, partitionId);
        }
    }
    return msgs;
}
 
Developer: metamx, Project: incubator-storm, Lines: 38, Source: KafkaUtils.java

Example 9: fetchMessages

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
/**
 * Fetch messages and the per-partition high watermark from Kafka between the specified offsets.
 *
 * @param startOffset The offset of the first message desired, inclusive
 * @param endOffset The offset of the last message desired, exclusive, or {@link Long#MAX_VALUE} for no end offset.
 * @param timeoutMillis Timeout in milliseconds
 * @throws java.util.concurrent.TimeoutException If the operation could not be completed within {@code timeoutMillis}
 * milliseconds
 * @return An iterable containing messages fetched from Kafka and their offsets, as well as the high watermark for
 * this partition.
 */
public synchronized MessageBatch fetchMessages(long startOffset, long endOffset, int timeoutMillis) throws java.util.concurrent.TimeoutException {
  Preconditions.checkState(!_metadataOnlyConsumer, "Cannot fetch messages from a metadata-only SimpleConsumerWrapper");
  // Ensure that we're connected to the leader
  // TODO Improve error handling

  final long connectEndTime = System.currentTimeMillis() + _connectTimeoutMillis;
  while(_currentState.getStateValue() != ConsumerState.CONNECTED_TO_PARTITION_LEADER &&
      System.currentTimeMillis() < connectEndTime) {
    _currentState.process();
  }
  if (_currentState.getStateValue() != ConsumerState.CONNECTED_TO_PARTITION_LEADER &&
      connectEndTime <= System.currentTimeMillis()) {
    throw new java.util.concurrent.TimeoutException();
  }

  FetchResponse fetchResponse = _simpleConsumer.fetch(new FetchRequestBuilder()
      .minBytes(100000)
      .maxWait(timeoutMillis)
      .addFetch(_topic, _partition, startOffset, 500000)
      .build());

  if (!fetchResponse.hasError()) {
    final Iterable<MessageAndOffset> messageAndOffsetIterable =
        buildOffsetFilteringIterable(fetchResponse.messageSet(_topic, _partition), startOffset, endOffset);

    // TODO: Instantiate with factory
    return new SimpleConsumerMessageBatch(messageAndOffsetIterable);
  } else {
    throw exceptionForKafkaErrorCode(fetchResponse.errorCode(_topic, _partition));
  }
}
 
Developer: linkedin, Project: pinot, Lines: 43, Source: SimpleConsumerWrapper.java

Example 10: execute

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
public List<byte[]> execute() throws Exception {
  FetchRequest req = 
      new FetchRequestBuilder().
      clientId(name).
      addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize).
      minBytes(1).
      maxWait(maxWait).
      build();
  
  FetchResponse fetchResponse = consumer.fetch(req);
  if(fetchResponse.hasError()) {
    short errorCode = fetchResponse.errorCode(topic, partitionMetadata.partitionId());
    String msg = "Kafka error code = " + errorCode + ", Partition  " + partitionMetadata.partitionId() ;
    throw new Exception(msg);
  }
  List<byte[]> holder = new ArrayList<byte[]>();
  ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  int count = 0;
  for(MessageAndOffset messageAndOffset : messageSet) {
    if (messageAndOffset.offset() < currentOffset) continue; //old offset, ignore
    ByteBuffer payload = messageAndOffset.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    holder.add(bytes);
    currentOffset = messageAndOffset.nextOffset();
    count++;
    if(count == maxRead) break;
  }
  return holder ;
}
 
Developer: DemandCube, Project: Scribengin, Lines: 31, Source: KafkaPartitionReader.java

Example 11: run

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
public void run(long maxReads, String topic, int partition, List<KafkaBrokerInfo> brokerInfoList) throws Exception {
    // Fetch the metadata for the given topic partition
    PartitionMetadata metadata = findLeader(brokerInfoList, topic, partition);
    if (metadata == null) {
        System.out.println("Can't find metadata for Topic and Partition. Exiting");
        return;
    }
    String leadBrokerHost = metadata.leader().host();
    int leadBrokerPort = metadata.leader().port();
    String clientName = "Client_" + topic + "_" + partition;
    SimpleConsumer consumer = new SimpleConsumer(leadBrokerHost, leadBrokerPort, 100000, 64 * 1024, clientName);

    long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

    int numErrors = 0;
    while (maxReads > 0) {
        int fetchSize = 100000;
        // Build the fetch request: specify the topic, partition, and starting offset,
        // plus the maximum amount of data to fetch per request
        kafka.api.FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(topic, partition,
                readOffset, fetchSize).build();
        // Send the request to Kafka and obtain the response
        FetchResponse fetchResponse = consumer.fetch(req);
        // If the response indicates an error, handle it and reconnect the consumer;
        // the loop aborts after more than 5 consecutive errors
        if (fetchResponse.hasError()) {
            numErrors++;
            if (numErrors > 5)
                break;
            short code = fetchResponse.errorCode(topic, partition);
            System.out.println("Error fetching data from the Broker:" + leadBrokerHost + " Reason: " + code);
            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // The error indicates a bad offset; simply fetch a valid offset again
                readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(),
                        clientName);
            }
            continue;
        }
        numErrors = 0;
        long numRead = 0;
        System.out.println("readOffset=" + readOffset);
        // No error in the response, so process the fetched data
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
            long currentOffset = messageAndOffset.offset();
            if (currentOffset < readOffset) {
                continue;
            }
            readOffset = messageAndOffset.nextOffset();
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            // Process the message payload
            // System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
            numRead++;
        }
        maxReads--;
        if (numRead == 0) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ie) {
            }
        }
        System.out.println(numRead);
    }
    System.out.println(maxReads);
    if (consumer != null)
        consumer.close();
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 68, Source: SimpleConsumerExample.java

Example 12: run

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
public void run(long maxReads, String topic, int partition, List<KafkaBrokerInfo> brokerInfoList) throws Exception {
    // Fetch the metadata for the given topic partition
    PartitionMetadata metadata = findLeader(brokerInfoList, topic, partition);
    if (metadata == null) {
        System.out.println("Can't find metadata for Topic and Partition. Exiting");
        return;
    }
    String leadBrokerHost = metadata.leader().host();
    int leadBrokerPort = metadata.leader().port();
    String clientName = "Client_" + topic + "_" + partition;
    SimpleConsumer consumer = new SimpleConsumer(leadBrokerHost, leadBrokerPort, 100000, 64 * 1024, clientName);

    long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

    int numErrors = 0;
    while (maxReads > 0) {
        int fetchSize = 100000;
        // Build the fetch request: specify the topic, partition, and starting offset,
        // plus the maximum amount of data to fetch per request
        kafka.api.FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(topic, partition,
                readOffset, fetchSize).build();
        // Send the request to Kafka and obtain the response
        FetchResponse fetchResponse = consumer.fetch(req);
        // If the response indicates an error, handle it and reconnect the consumer;
        // the loop aborts after more than 5 consecutive errors
        if (fetchResponse.hasError()) {
            numErrors++;
            if (numErrors > 5)
                break;
            short code = fetchResponse.errorCode(topic, partition);
            System.out.println("Error fetching data from the Broker:" + leadBrokerHost + " Reason: " + code);
            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // The error indicates a bad offset; simply fetch a valid offset again
                readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(),
                        clientName);
            }
            continue;
        }
        numErrors = 0;
        long numRead = 0;
        // No error in the response, so process the fetched data
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
            long currentOffset = messageAndOffset.offset();
            if (currentOffset < readOffset) {
                continue;
            }
            readOffset = messageAndOffset.nextOffset();
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            // Process the message payload
            System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
            numRead++;
            maxReads--;
        }
        if (numRead == 0) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ie) {
            }
        }
    }
    if (consumer != null)
        consumer.close();
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 65, Source: SimpleConsumerTest.java

Example 13: fetchMessages

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {

        String topic = config.topic;
        FetchRequest req = new FetchRequestBuilder().clientId(config.clientId).addFetch(topic, partition, offset, config.fetchMaxBytes)
                .maxWait(config.fetchWaitMaxMs).build();
        FetchResponse fetchResponse = null;
        SimpleConsumer simpleConsumer = null;
        try {
            simpleConsumer = findLeaderConsumer(partition);
            if (simpleConsumer == null) {
                // LOG.error(message);
                return null;
            }
            fetchResponse = simpleConsumer.fetch(req);
        } catch (Exception e) {
            if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException
                    || e instanceof UnresolvedAddressException) {
                LOG.warn("Network error when fetching messages:", e);
                if (simpleConsumer != null) {
                    String host = simpleConsumer.host();
                    int port = simpleConsumer.port();
                    simpleConsumer = null;
                    throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e);
                }

            } else {
                throw new RuntimeException(e);
            }
        }
        if (fetchResponse.hasError()) {
            short code = fetchResponse.errorCode(topic, partition);
            if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
                long startOffset = getOffset(topic, partition, config.startOffsetTime);
                offset = startOffset;
            }
            if (leaderBroker != null) {
                LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":" + leaderBroker.port() + "] partition["
                    + partition + "] error:" + code);
            }
            return null;
        } else {
            ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition);
            return msgs;
        }
    }
 
Developer: zhangjunfang, Project: jstorm-0.9.6.3-, Lines: 48, Source: KafkaConsumer.java

Example 14: readEvents

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
/**
 * Read events.
 * 
 * Any errors that occur during the read process are wrapped as a KafkaPartitionReaderException,
 * which carries the error code; the exception should be handled by the consumer.
 * 
 * @return
 * @throws KafkaPartitionReaderException
 */
public List<MessageAndMetadata<byte[],byte[]>> readEvents() throws KafkaPartitionReaderException {
	List<MessageAndMetadata<byte[],byte[]> > events = new ArrayList<MessageAndMetadata<byte[],byte[]>>();
	if(isClosed()){
		return events;
	}
	//LOG.log("Start Reading PartitionReader from ["+readOffset+"] once, Topic["+topic+"] partition["+partition+"]");
	if (nextBatchSizeBytes < 0)
		nextBatchSizeBytes = config.fetchMinBytes();//config.getBatchSizeBytes();

	if (nextBatchSizeBytes == 0) {
		// nextBatchSize only affects one fetch
		nextBatchSizeBytes = config.fetchMinBytes();//config.getBatchSizeBytes();
		return events;
	}

	boolean  hasMessage=false;
	ByteBufferMessageSet messageSet=null;
	do{
		FetchRequest req = new FetchRequestBuilder()
		.clientId(clientId)
		.addFetch(topic, partition, readOffset,
				nextBatchSizeBytes).build();

		FetchResponse fetchResponse = null;
		fetchResponse = consumer.fetch(req);
		if (fetchResponse.hasError()) {
			short code = fetchResponse.errorCode(topic, partition);
			throw new KafkaPartitionReaderException(code);
		} else {
			messageSet = fetchResponse.messageSet(topic, partition);
			hasMessage = messageSet.iterator().hasNext();
			if(!hasMessage)
			nextBatchSizeBytes = Math.min(
					nextBatchSizeBytes * 2,config.fetchMessageMaxBytes()
					/*config.getMaxBatchSizeBytes()*/);
		}
	}while(!hasMessage && !readToTheEnd());//TODO: test readToTheEnd() , consider the config.getMaxBatchSizeBytes().
	if(!hasMessage){
		//set this reader on idle.
		onIdle();
		nextBatchSizeBytes =config.fetchMinBytes();// config.getBatchSizeBytes();
		return events;//return empty events.
	}
	for (MessageAndOffset messageAndOffset : messageSet) {
		long currentOffset = messageAndOffset.offset();
		if (currentOffset < readOffset) {
			continue;
		}
		readOffset = messageAndOffset.nextOffset();
		Message message = messageAndOffset.message();
		MessageAndMetadata<byte[],byte[]> mam=new MessageAndMetadata<byte[],byte[]>(topic, partition, message, readOffset, decoder, decoder);
		events.add(mam);
	
	}
	// nextBatchSize only affects one fetch
	nextBatchSizeBytes = config.fetchMinBytes();//config.getBatchSizeBytes();
	return events;
}
 
Developer: pulsarIO, Project: druid-kafka-ext, Lines: 68, Source: ConsumerPartitionReader.java

Example 15: read

import kafka.javaapi.FetchResponse; // import the package/class the method depends on
@Override
public List<MessageAndOffset> read(long offset) throws StageException {

  FetchRequest req = buildFetchRequest(offset);
  FetchResponse fetchResponse;
  try {
    fetchResponse = consumer.fetch(req);
  } catch (Exception e) {
    if(e instanceof SocketTimeoutException) {
      //If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
      //consumer if no message is available for consumption after the specified timeout value.
      //If this happens exit gracefully
      LOG.warn(KafkaErrors.KAFKA_28.getMessage());
      return Collections.emptyList();
    } else {
      throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
    }
  }

  if(fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if(code == ErrorMapping.OffsetOutOfRangeCode()) {
      //invalid offset
      offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
    } else {
      //try re-initializing connection with kafka (note: the consumer is expected to be
      //re-created against the new leader before the re-fetch below)
      consumer.close();
      consumer = null;
      leader = findNewLeader(leader, topic, partition);
    }

    //re-fetch
    req = buildFetchRequest(offset);
    fetchResponse = consumer.fetch(req);

    if(fetchResponse.hasError()) {
      //could not fetch the second time, give kafka some time
      LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
    }
  }

  List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
  for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    long currentOffset = messageAndOffset.offset();
    if (currentOffset < offset) {
      LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
      continue;
    }
    ByteBuffer payload = messageAndOffset.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    MessageAndOffset partitionToPayloadMap = new MessageAndOffset(bytes, messageAndOffset.nextOffset(), partition);
    partitionToPayloadMapArrayList.add(partitionToPayloadMap);
  }
  return partitionToPayloadMapArrayList;
}
 
Developer: streamsets, Project: datacollector, Lines: 57, Source: KafkaLowLevelConsumer08.java


Note: The kafka.javaapi.FetchResponse.hasError method examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.