

Java OffsetResponse.offsets Method Code Examples

This article collects typical usage examples of the kafka.javaapi.OffsetResponse.offsets method in Java. If you have been wondering what OffsetResponse.offsets does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, kafka.javaapi.OffsetResponse.


The following presents 15 code examples of the OffsetResponse.offsets method, sorted by popularity by default.
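
All of the examples share the same basic pattern: build a kafka.javaapi.OffsetRequest for a TopicAndPartition, send it with SimpleConsumer.getOffsetsBefore, then read the returned long[] from OffsetResponse.offsets. The following is a minimal, self-contained sketch of that pattern; the broker address, topic name, partition id and client id are placeholder values, not taken from any of the projects below.

import java.util.HashMap;
import java.util.Map;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class OffsetsSketch {
  public static void main(String[] args) {
    // Placeholder broker, topic, partition and client id -- adjust for your cluster.
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "offsets-sketch");
    try {
      TopicAndPartition tp = new TopicAndPartition("my-topic", 0);
      Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
          new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
      // LatestTime() asks for the log-end offset; EarliestTime() would ask for the oldest available one.
      requestInfo.put(tp, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
      OffsetRequest request =
          new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), "offsets-sketch");
      OffsetResponse response = consumer.getOffsetsBefore(request);

      if (response.hasError()) {
        System.err.println("Offset request failed, error code: " + response.errorCode("my-topic", 0));
      } else {
        // offsets() returns the offsets for the requested topic/partition, newest first.
        long[] offsets = response.offsets("my-topic", 0);
        System.out.println("Latest offset: " + offsets[0]);
      }
    } finally {
      consumer.close();
    }
  }
}

Because the offsets are returned newest first, requesting maxNumOffsets = 1 makes offsets[0] the single latest (or, with EarliestTime(), earliest) available offset.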

Example 1: getOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private static OffsetInfo getOffset(String topic, PartitionMetadata partition) {
  Broker broker = partition.leader();

  SimpleConsumer consumer = new SimpleConsumer(broker.host(), broker.port(), 10000, 1000000,
                                               "com.rekko.newrelic.storm.kafka");
  try {
    TopicAndPartition topicAndPartition =
        new TopicAndPartition(topic, partition.partitionId());
    // -1 corresponds to kafka.api.OffsetRequest.LatestTime(), i.e. request only the latest offset
    PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(-1, 1);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> map =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    map.put(topicAndPartition, requestInfo);
    OffsetRequest req = new OffsetRequest(map, (short) 0, "com.rekko.newrelic.storm.kafka");
    OffsetResponse resp = consumer.getOffsetsBefore(req);
    OffsetInfo offset = new OffsetInfo();
    offset.offset = resp.offsets(topic, partition.partitionId())[0];
    return offset;
  } finally {
    consumer.close();
  }
}
 
Author: ghais, Project: newrelic_storm_kafka, Lines: 24, Source: Kafka.java

Example 2: getLastOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String
        clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition,
            PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        System.out.println("Error fetching data Offset Data the Broker. Reason: " + response.errorCode(topic,
                partition));
        return 0;
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
}
 
Author: wngn123, Project: wngn-jms-kafka, Lines: 17, Source: SimpleConsumerExample.java
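
For reference, a possible call site for the getLastOffset helper in example 2 might look like the sketch below (assuming it runs in the same class). The broker address, topic and partition are placeholders, and the whichTime argument is normally kafka.api.OffsetRequest.LatestTime() or EarliestTime().

// Hypothetical call site for getLastOffset above; host, port, topic and partition are placeholders.
SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "example-client");
try {
    long latest = getLastOffset(consumer, "my-topic", 0, kafka.api.OffsetRequest.LatestTime(), "example-client");
    long earliest = getLastOffset(consumer, "my-topic", 0, kafka.api.OffsetRequest.EarliestTime(), "example-client");
    System.out.println("earliest=" + earliest + ", latest=" + latest);
} finally {
    consumer.close();
}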

Example 3: getLastOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
                                  long whichTime) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo,
        kafka.api.OffsetRequest.CurrentVersion(), CLIENT_ID);
    OffsetResponse response = consumer.getOffsetsBefore(request);

    if (response.hasError()) {
        System.out.println("Error fetching data Offset Data the Broker. Reason: "
                           + response.errorCode(topic, partition));
        return 0;
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
}
 
Author: warlock-china, Project: azeroth, Lines: 18, Source: ZkConsumerCommand.java

Example 4: getOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private static Long getOffset(OffsetResponse response, TopicAndPartition topicPartition) {
  String topic = topicPartition.topic();
  int partition = topicPartition.partition();
  long[] offsets = response.offsets(topic, partition);
  if (offsets.length > 0) {
    return offsets[0];
  }
  short errorCode = response.errorCode(topic, partition);
  if (errorCode == ErrorMapping.UnknownTopicOrPartitionCode()) {
    log.info("Unknown topic or partition {} {}", topic, partition);
    return null;
  }
  throw new IllegalStateException(
      "Error reading offset for " + topic + " / " + partition + ": " +
      ErrorMapping.exceptionNameFor(errorCode));
}
 
Author: oncewang, Project: oryx2, Lines: 17, Source: KafkaUtils.java
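
Unlike the other examples, getOffset in example 4 takes an OffsetResponse that has already been fetched, which lets one request cover several partitions. A hypothetical sketch of how such a batched request could be built and consumed is shown below; the topic name and partition ids are illustrative, and consumer is assumed to be a connected SimpleConsumer in the same class as getOffset.

// Ask for the latest offset of several partitions in one request, then read each
// partition's result with the getOffset helper above. Topic and partitions are placeholders.
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
    new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
for (int partition : new int[] {0, 1, 2}) {
  requestInfo.put(new TopicAndPartition("my-topic", partition),
      new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
}
kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
    requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
OffsetResponse response = consumer.getOffsetsBefore(request);
for (TopicAndPartition tp : requestInfo.keySet()) {
  Long offset = getOffset(response, tp); // null when the topic or partition is unknown
  System.out.println(tp + " -> " + offset);
}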

Example 5: getLastOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime) {
	TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
	Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
	requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
	kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo,
			kafka.api.OffsetRequest.CurrentVersion(), CLIENT_ID);
	OffsetResponse response = consumer.getOffsetsBefore(request);

	if (response.hasError()) {
		System.out.println(
				"Error fetching offset data from the broker. Reason: " + response.errorCode(topic, partition));
		return 0;
	}
	long[] offsets = response.offsets(topic, partition);
	return offsets[0];
}
 
Author: vakinge, Project: jeesuite-libs, Lines: 17, Source: ZkConsumerCommand.java

Example 6: getLatestOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private static long getLatestOffset(SimpleConsumer consumer, TopicAndPartition topicAndPartition) {
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
  kafka.javaapi.OffsetRequest request =
      new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
  OffsetResponse response = consumer.getOffsetsBefore(request);

  if (response.hasError()) {
    logger.warn("Failed to fetch offset for {} due to {}", topicAndPartition,
        response.errorCode(topicAndPartition.topic(), topicAndPartition.partition()));
    return -1;
  }

  long[] offsets = response.offsets(topicAndPartition.topic(), topicAndPartition.partition());
  return offsets[0];
}
 
Author: uber, Project: chaperone, Lines: 17, Source: KafkaMonitor.java

Example 7: getLastOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
/**
 * @param consumer
 * @param topic
 * @param partition
 * @param whichTime
 * @param clientName
 * @return 0 if consumer is null at this time
 */
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName)
{
  if (consumer == null) {
    return 0;
  }
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
  OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
  OffsetResponse response = consumer.getOffsetsBefore(request);

  if (response.hasError()) {
    logger.error("Error fetching data Offset Data the Broker. Reason: " + response.errorCode(topic, partition));
    return 0;
  }
  long[] offsets = response.offsets(topic, partition);
  return offsets[0];
}
 
Author: apache, Project: apex-malhar, Lines: 27, Source: KafkaMetadataUtil.java

Example 8: fetchResetOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
public long fetchResetOffset(String reset) {
	long time = LatestTime();
	if (reset != null && reset.equals(SmallestTimeString()))
		time = EarliestTime();
	Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
	TopicAndPartition tp = new TopicAndPartition(topic, partition);
	PartitionOffsetRequestInfo info = new PartitionOffsetRequestInfo(time, 1);
	requestInfo.put(tp, info);
	OffsetRequest request = new OffsetRequest(requestInfo, CurrentVersion(), clientId);
	OffsetResponse response = consumer.getOffsetsBefore(request);
	if (response.hasError()) {
		//ErrorMapping.exceptionFor(response.errorCode(topic, partition)).printStackTrace();
		throw new KafkaPartitionReaderException(response.errorCode(topic, partition));
	}
	long[] offsets = response.offsets(topic, partition);
	//TODO: confirm with xiaoju why we need this check?
//	if (offsets.length <= 0)
//		continue;
	return offsets[0];
}
 
Author: pulsarIO, Project: druid-kafka-ext, Lines: 21, Source: ConsumerPartitionReader.java

Example 9: findAllOffsets

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId)
{
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);

    // The API implies that this will always return all of the offsets. So it seems a partition can not have
    // more than Integer.MAX_VALUE-1 segments.
    //
    // This also assumes that the lowest value returned will be the first segment available. So if segments have been dropped off, this value
    // should not be 0.
    PartitionOffsetRequestInfo partitionOffsetRequestInfo = new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
    OffsetRequest offsetRequest = new OffsetRequest(ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);

    if (offsetResponse.hasError()) {
        short errorCode = offsetResponse.errorCode(topicName, partitionId);
        log.warn("Offset response has error: %d", errorCode);
        throw new PrestoException(KAFKA_SPLIT_ERROR, "could not fetch data from Kafka, error code is '" + errorCode + "'");
    }

    return offsetResponse.offsets(topicName, partitionId);
}
 
Author: y-lan, Project: presto, Lines: 22, Source: KafkaSplitManager.java

Example 10: getLastOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private long getLastOffset(SimpleConsumer consumer, String topic, int partition,
                                 long whichTime, String clientName) throws StageException {
  try {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
      requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);

    if (response.hasError()) {
      LOG.error(KafkaErrors.KAFKA_22.getMessage(), consumer.host() + ":" + consumer.port(),
        response.errorCode(topic, partition));
      return 0;
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
  } catch (Exception e) {
    LOG.error(KafkaErrors.KAFKA_30.getMessage(), e.toString(), e);
    throw new StageException(KafkaErrors.KAFKA_30, e.toString(), e);
  }
}
 
Author: streamsets, Project: datacollector, Lines: 23, Source: KafkaLowLevelConsumer08.java

Example 11: getLastOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
public static long getLastOffset( SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName ) {
    TopicAndPartition topicAndPartition = new TopicAndPartition( topic, partition );
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put( topicAndPartition, new PartitionOffsetRequestInfo( whichTime, 1 ) );
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest( requestInfo,
            kafka.api.OffsetRequest.CurrentVersion(), clientName );
    OffsetResponse response = consumer.getOffsetsBefore( request );

    if ( response.hasError() ) {
        System.out.println( "Error fetching data Offset Data the Broker. Reason: "
                + response.errorCode( topic, partition ) );
        return 0;
    }
    long[] offsets = response.offsets( topic, partition );
    return offsets[0];
}
 
Author: krux, Project: java-kafka-client-libs, Lines: 17, Source: KafkaLowLevelConsumer.java

Example 12: getEarliestOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
@Override
public long getEarliestOffset() {
  if (this.earliestOffset == -2 && uri != null) {
    // TODO: Make the hardcoded parameters configurable
    SimpleConsumer consumer = new SimpleConsumer(uri.getHost(), uri.getPort(), 60000,
        1024 * 1024, "hadoop-etl");
    Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    offsetInfo.put(new TopicAndPartition(topic, partition), new PartitionOffsetRequestInfo(
        kafka.api.OffsetRequest.EarliestTime(), 1));
    OffsetResponse response = consumer
        .getOffsetsBefore(new OffsetRequest(offsetInfo, kafka.api.OffsetRequest
            .CurrentVersion(), "hadoop-etl"));
    long[] endOffset = response.offsets(topic, partition);
    consumer.close();
    this.earliestOffset = endOffset[0];
    return endOffset[0];
  } else {
    return this.earliestOffset;
  }
}
 
Author: HiveKa, Project: HiveKa, Lines: 21, Source: KafkaRequest.java

Example 13: getLastOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
@Override
public long getLastOffset(long time) {
  SimpleConsumer consumer = new SimpleConsumer(uri.getHost(), uri.getPort(), 60000,
      1024 * 1024, "hadoop-etl");
  Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  offsetInfo.put(new TopicAndPartition(topic, partition), new PartitionOffsetRequestInfo(
      time, 1));
  OffsetResponse response = consumer.getOffsetsBefore(new OffsetRequest(offsetInfo,
      kafka.api.OffsetRequest.CurrentVersion(),"hadoop-etl"));
  long[] endOffset = response.offsets(topic, partition);
  consumer.close();
  if(endOffset.length == 0)
  {
    log.info("The exception is thrown because the latest offset retunred zero for topic : " + topic + " and partition " + partition);
  }
  this.latestOffset = endOffset[0];
  return endOffset[0];
}
 
Author: HiveKa, Project: HiveKa, Lines: 19, Source: KafkaRequest.java

Example 14: getLastOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
                                 long whichTime, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
 
    if (response.hasError()) {
        System.out.println("Error fetching data Offset Data the Broker. Reason: " + response.errorCode(topic, partition) );
        return 0;
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
}
 
Author: smallnest, Project: spring-kafka-demo, Lines: 17, Source: NativeSimpleConsumer.java

Example 15: findLastOffset

import kafka.javaapi.OffsetResponse; // import the package/class this method depends on
private long findLastOffset(TopicPartition topicPartition, SimpleConsumer consumer) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicPartition.getTopic(),
            topicPartition.getPartition());
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
            new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
            kafka.api.OffsetRequest.LatestTime(), 1));
    final String clientName = getClientName(topicPartition);
    OffsetRequest request = new OffsetRequest(requestInfo,
                                              kafka.api.OffsetRequest.CurrentVersion(),
                                              clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);

    if (response.hasError()) {
        throw new RuntimeException("Error fetching offset data. Reason: " +
                response.errorCode(topicPartition.getTopic(), topicPartition.getPartition()));
    }
    long[] offsets = response.offsets(topicPartition.getTopic(),
            topicPartition.getPartition());
    return offsets[0] - 1;
}
 
Author: pinterest, Project: secor, Lines: 22, Source: KafkaClient.java


Note: The kafka.javaapi.OffsetResponse.offsets examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and distribution or reuse should follow each project's license. Do not republish without permission.