

Java MessageAndMetadata.offset Method Code Examples

This article collects and summarizes typical usage examples of the kafka.message.MessageAndMetadata.offset method in Java. If you are wondering what MessageAndMetadata.offset does, how to call it, or what real-world usage looks like, the curated method examples below may help. You can also explore further usage examples of the enclosing class, kafka.message.MessageAndMetadata.


The following presents 15 code examples of the MessageAndMetadata.offset method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
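
Before turning to the collected examples, here is a minimal, self-contained sketch of where MessageAndMetadata.offset() typically appears: inside the consume loop of the old (pre-0.9, Scala-based) high-level consumer API. The topic name, group id, and ZooKeeper address below are placeholder assumptions for illustration, not values taken from any example that follows.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class OffsetDemo {
    public static void main(String[] args) {
        // Placeholder connection settings -- adjust to your environment.
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181");
        props.put("group.id", "offset-demo-group");
        props.put("auto.offset.reset", "smallest");

        ConsumerConnector consumer =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // One consumer thread for a single, assumed topic named "demo-topic".
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                consumer.createMessageStreams(Collections.singletonMap("demo-topic", 1));

        ConsumerIterator<byte[], byte[]> it = streams.get("demo-topic").get(0).iterator();
        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> mam = it.next();
            // offset() reports the message's position within its partition,
            // which is what every example below ultimately logs or stores.
            System.out.printf("partition=%d offset=%d payload=%s%n",
                    mam.partition(), mam.offset(), new String(mam.message()));
        }
    }
}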

Example 1: KafkaIndexingManager

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public KafkaIndexingManager(final LindenConfig lindenConfig, ShardingStrategy shardingStrategy,
                            LindenCore lindenCore, DataProvider<MessageAndMetadata<byte[], byte[]>> provider) {
  super(provider, lindenConfig, lindenCore, new Function<MessageAndMetadata<byte[], byte[]>, LindenIndexRequest>() {
    @Override
    public LindenIndexRequest apply(MessageAndMetadata<byte[], byte[]> messageAndMetadata) {
      LindenIndexRequest indexRequest = null;
      long offset = messageAndMetadata.offset();
      long partition = messageAndMetadata.partition();
      String message = new String(messageAndMetadata.message());
      try {
        indexRequest = LindenIndexRequestParser.parse(lindenConfig.getSchema(), message);
        LOGGER.info("Parse index request : id={}, route={}, type={}, content({}/{})={}", indexRequest.getId(),
                    indexRequest.getRouteParam(), indexRequest.getType(), partition, offset, message);
      } catch (IOException e) {
        LOGGER.error("Parse index request failed : {} - {}", message, Throwables.getStackTraceAsString(e));
      }
      return indexRequest;
    }
  }, shardingStrategy);
}
 
Developer: XiaoMi, Project: linden, Lines: 21, Source: KafkaIndexingManager.java

Example 2: run

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
    Iote2eRequestReuseItem iote2eRequestReuseItem = new Iote2eRequestReuseItem();
    ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
        String key = new String(messageAndMetadata.key());
        try {
            String summary =
                    "Thread " + threadNumber +
                    ", topic=" + messageAndMetadata.topic() +
                    ", partition=" + messageAndMetadata.partition() +
                    ", key=" + key +
                    ", offset=" + messageAndMetadata.offset() +
                    ", timestamp=" + messageAndMetadata.timestamp() +
                    ", timestampType=" + messageAndMetadata.timestampType() +
                    ", iote2eRequest=" + iote2eRequestReuseItem.fromByteArray(messageAndMetadata.message()).toString();
            logger.info(">>> Consumed: " + summary);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }
    logger.info(">>> Shutting down Thread: " + threadNumber);
}
 
Developer: petezybrick, Project: iote2e, Lines: 24, Source: KafkaAvroDemo.java

Example 3: run

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
    ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
    while (it.hasNext()) {
    	MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
    	String key = new String(messageAndMetadata.key());
    	String message = new String(messageAndMetadata.message());
    	String summary = 
    			"Thread " + threadNumber + 
    			", topic=" + messageAndMetadata.topic() + 
    			", partition=" + messageAndMetadata.partition() + 
    			", key=" + key + 
    			", message=" + message + 
    			", offset=" + messageAndMetadata.offset() + 
    			", timestamp=" + messageAndMetadata.timestamp() + 
    			", timestampType=" + messageAndMetadata.timestampType();
    	logger.info(">>> Consumed: " + summary);
    }
    logger.info(">>> Shutting down Thread: " + threadNumber);
}
 
Developer: petezybrick, Project: iote2e, Lines: 20, Source: KafkaStringDemo.java

Example 4: run

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
	try {
		ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
		Injection<GenericRecord, byte[]> recordInjection = GenericAvroCodecs.toBinary(User.getClassSchema());

		while (it.hasNext()) {
			MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
			String key = new String(messageAndMetadata.key());
			User user = genericRecordToUser(recordInjection.invert(messageAndMetadata.message()).get());
			// User user = (User)
			// recordInjection.invert(messageAndMetadata.message()).get();
			String summary = "Thread " + m_threadNumber + ", topic=" + messageAndMetadata.topic() + ", partition="
					+ messageAndMetadata.partition() + ", key=" + key + ", user=" + user.toString() + ", offset="
					+ messageAndMetadata.offset() + ", timestamp=" + messageAndMetadata.timestamp()
					+ ", timestampType=" + messageAndMetadata.timestampType();
			System.out.println(summary);
		}
		System.out.println("Shutting down Thread: " + m_threadNumber);
	} catch (Exception e) {
		System.out.println("Exception in thread "+m_threadNumber);
		System.out.println(e);
		e.printStackTrace();
	}
}
 
Developer: petezybrick, Project: iote2e, Lines: 25, Source: AvroConsumerThread.java

Example 5: run

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
    ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
    while (it.hasNext()) {
    	MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
    	String key = new String(messageAndMetadata.key());
    	String message = new String(messageAndMetadata.message());
    	String summary = 
    			"Thread " + threadNumber + 
    			", topic=" + messageAndMetadata.topic() + 
    			", partition=" + messageAndMetadata.partition() + 
    			", key=" + key + 
    			", message=" + message + 
    			", offset=" + messageAndMetadata.offset() + 
    			", timestamp=" + messageAndMetadata.timestamp() + 
    			", timestampType=" + messageAndMetadata.timestampType();
    	System.out.println(summary);
    }
    System.out.println("Shutting down Thread: " + threadNumber);
}
 
Developer: petezybrick, Project: iote2e, Lines: 20, Source: ConsumerDemoThread.java

Example 6: run

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Override
public void run() {
    ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> md = it.next();
        byte[] msg = md.message();
        long offset = md.offset();
        String smsg = new String(msg);
        try {
            m_loader.insertRow(new RowWithMetaData(smsg, offset), m_csvParser.parseLine(smsg));
        } catch (Exception ex) {
            m_log.error("Consumer stopped", ex);
            System.exit(1);
        }
    }
}
 
Developer: anhnv-3991, Project: VoltDB, Lines: 17, Source: KafkaLoader.java

Example 7: read

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Override
public MessageAndOffset read() throws StageException {
  try {
    // hasNext() blocks indefinitely if consumer.timeout.ms is set to -1.
    // But if consumer.timeout.ms is set to a value such as 6000, a ConsumerTimeoutException
    // is thrown if no message is written to the Kafka topic within that time.
    if(consumerIterator.hasNext()) {
      MessageAndMetadata<byte[], byte[]> messageAndMetadata = consumerIterator.next();
      byte[] message = messageAndMetadata.message();
      long offset = messageAndMetadata.offset();
      int partition = messageAndMetadata.partition();
      return new MessageAndOffset(message, offset, partition);
    }
    return null;
  } catch (ConsumerTimeoutException e) {
    /* For the high-level consumer, the fetching logic is handled by a background
       fetcher thread and hidden from the user. In either case of
       1) the broker being down, or
       2) no message being available,
       the fetcher thread keeps retrying while the user thread waits for it to put
       data into the buffer, until the timeout. In short, the high-level consumer is
       designed so that users need not worry about connect/reconnect issues. */
    return null;
  }
}
 
Developer: streamsets, Project: datacollector, Lines: 26, Source: KafkaConsumer08.java
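
The ConsumerTimeoutException branch above only ever fires when consumer.timeout.ms is configured. As a hedged sketch (the ZooKeeper address and group id are assumptions, not values from the project above), the relevant consumer properties would look like this:

import java.util.Properties;
import kafka.consumer.ConsumerConfig;

// Minimal property sketch for the timeout behavior described above.
// With consumer.timeout.ms = -1 (the default), hasNext() blocks forever;
// with a positive value such as 6000, hasNext() throws
// ConsumerTimeoutException when no message arrives within that window.
Properties props = new Properties();
props.put("zookeeper.connect", "localhost:2181"); // assumed address
props.put("group.id", "sdc-kafka-consumer");      // hypothetical group id
props.put("consumer.timeout.ms", "6000");
ConsumerConfig config = new ConsumerConfig(props);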

Example 8: receive

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Override
public BaseConsumerRecord receive() {
  if (!_iter.hasNext())
    return null;
  MessageAndMetadata<String, String> record = _iter.next();
  return new BaseConsumerRecord(record.topic(), record.partition(), record.offset(), record.key(), record.message());
}
 
Developer: linkedin, Project: kafka-monitor, Lines: 8, Source: OldConsumer.java

Example 9: run

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
	try {
		ConsumerIterator<byte[], byte[]> it = stream.iterator();
		BinaryDecoder binaryDecoder = null;
		Weather weatherRead = null;
		DatumReader<Weather> datumReaderWeather = new SpecificDatumReader<Weather>(Weather.getClassSchema());

		while (it.hasNext()) {
			MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
			String key = new String(messageAndMetadata.key());
			binaryDecoder = DecoderFactory.get().binaryDecoder(messageAndMetadata.message(), binaryDecoder);
			weatherRead = datumReaderWeather.read(weatherRead, binaryDecoder);
			String summary = "Thread " + threadNumber + ", topic=" + messageAndMetadata.topic() + ", partition="
					+ messageAndMetadata.partition() + ", key=" + key + ", offset="
					+ messageAndMetadata.offset() + ", timestamp=" + messageAndMetadata.timestamp()
					+ ", timestampType=" + messageAndMetadata.timestampType()
					+ ", weatherRead=" + weatherRead.toString();
			System.out.println(summary);
		}
		System.out.println("Shutting down Thread: " + threadNumber);
	} catch (Exception e) {
		System.out.println("Exception in thread "+threadNumber);
		System.out.println(e);
		e.printStackTrace();
	}
}
 
Developer: petezybrick, Project: iote2e, Lines: 29, Source: AvroConsumerWeatherThread.java

Example 10: run

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
	try {
		ConsumerIterator<byte[], byte[]> it = stream.iterator();
		BinaryDecoder binaryDecoder = null;
		Wave waveRead = null;
		DatumReader<Wave> datumReaderWave = new SpecificDatumReader<Wave>(Wave.getClassSchema());

		while (it.hasNext()) {
			MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
			String key = new String(messageAndMetadata.key());
			binaryDecoder = DecoderFactory.get().binaryDecoder(messageAndMetadata.message(), binaryDecoder);
			waveRead = datumReaderWave.read(waveRead, binaryDecoder);
			String summary = ">>> CONSUMER: Thread " + threadNumber + ", topic=" + messageAndMetadata.topic() + ", partition="
					+ messageAndMetadata.partition() + ", key=" + key + ", offset="
					+ messageAndMetadata.offset() + ", timestamp=" + messageAndMetadata.timestamp()
					+ ", timestampType=" + messageAndMetadata.timestampType()
					+ ", waveRead=" + waveRead.toString();
			System.out.println(summary);
		}
		System.out.println("Shutting down Thread: " + threadNumber);
	} catch (Exception e) {
		System.out.println("Exception in thread "+threadNumber);
		System.out.println(e);
		e.printStackTrace();
	}
}
 
Developer: petezybrick, Project: iote2e, Lines: 29, Source: AvroConsumerWaveThread.java

Example 11: read

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
private void read(final KafkaStream<String, String> stream) {
    while (stream.iterator().hasNext()) {
        final int phase = phaser.register();

        final MessageAndMetadata<String, String> msg = stream.iterator().next();
        final long offset = msg.offset();
        final long partition = msg.partition();
        unacknowledgedOffsets.add(offset);
        lastCommittedOffset.compareAndSet(0, offset);
        currentPartition.compareAndSet(-1, partition);

        final String jsonString = msg.message();

        handler.handle(configuration.getVertxAddress(), jsonString, () -> {
            unacknowledgedOffsets.remove(offset);
            phaser.arriveAndDeregister();
        });

        if (unacknowledgedOffsets.size() >= configuration.getMaxUnacknowledged()
                || partititionChanged(partition)
                || tooManyUncommittedOffsets(offset)
                || commitTimeoutReached()) {
            LOG.info("Got {} unacknowledged messages, waiting for ACKs in order to commit", unacknowledgedOffsets.size());
            if (!waitForAcks(phase)) {
                return;
            }
            commitOffsetsIfAllAcknowledged(offset);
            LOG.info("Continuing message processing");
        }
    }
}
 
Developer: hubrick, Project: vertx-kafka-consumer, Lines: 32, Source: VertxKafkaConsumer.java
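
Example 11 tracks unacknowledged offsets in application code, which only makes sense when the consumer is not auto-committing. A minimal sketch of the matching setup with the old high-level consumer, assuming auto-commit is disabled (the address and group id are placeholders, not taken from the project above):

import java.util.Properties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.javaapi.consumer.ConsumerConnector;

Properties props = new Properties();
props.put("zookeeper.connect", "localhost:2181"); // assumed address
props.put("group.id", "vertx-kafka-demo");        // hypothetical group id
props.put("auto.commit.enable", "false");         // the application commits instead

ConsumerConnector connector =
        Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
// ... consume and track acknowledgements as in the example above ...
connector.commitOffsets(); // flushes the connector's current offsets to ZooKeeper

Note that commitOffsets() on the old API commits the latest consumed offsets for all partitions the connector owns; per-message bookkeeping like the unacknowledgedOffsets set above must be done in application code, exactly as Example 11 does.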

Example 12: Message

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public Message(MessageAndMetadata<byte[], byte[]> message) {
    this.topic = message.topic();

    this.key = message.key() != null ? new String(message.key(), Charset.forName("utf-8")) : null;
    this.message = new String(message.message(), Charset.forName("utf-8"));

    this.partition = message.partition();
    this.offset = message.offset();
}
 
Developer: elodina, Project: dropwizard-kafka-http, Lines: 10, Source: MessageResource.java

Example 13: run

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Override
public void run() {

    LOGGER.info("start to run Injector{} for Topic{}", WispKafkaInjector.class.toString(), topic);

    while (it.hasNext()) {

        try {

            MessageAndMetadata<byte[], byte[]> mm = it.next();
            String message = new String(mm.message());

            // partition && offset
            long partition = mm.partition();
            long offset = mm.offset();

            MysqlEntry entry = gson.fromJson(message, MysqlEntry.class);

            // wrap
            MysqlEntryWrap mysqlEntryWrap = new MysqlEntryWrap(topic, entry);

            LOGGER.debug(message);

            // compute the delay times
            long now = System.currentTimeMillis();
            long elapsedSinceMysql = (now - entry.getTime()) / 1000;
            long elapsedSinceCanal = (now - entry.getCanalTime()) / 1000;

            String originTableName = entry.getTable();

            if (injectorEventProcessTemplate != null) {
                injectorEventProcessTemplate.processEntry(mysqlEntryWrap);
            }

            LOGGER.info(
                    "Topic({}) Succeed to do Event{} inject from Table{}, mysql_delay={}, "
                            + "canal_delay={}, partition={}, offset={}",
                    topic,
                    entry.getEvent(),
                    originTableName, elapsedSinceMysql, elapsedSinceCanal, partition, offset);

        } catch (Throwable e) {

            LOGGER.error(e.toString());
        }
    }

}
 
Developer: warlock-china, Project: wisp, Lines: 49, Source: InjectorSupport.java

Example 14: nextMetaMessage

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public IngestionMetaMessage nextMetaMessage() {
    MessageAndMetadata<Long, String> meta = it.next();
    return new IngestionMetaMessage(meta.key(), meta.message(), meta.topic(), meta.partition(), meta.offset());
}
 
Developer: IntersysConsulting, Project: ingestive, Lines: 5, Source: IngestionHighLevelConsumer.java

Example 15: consumeMessages

import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Override
public void consumeMessages() {
    //dispatcherThr.scheduleAtFixedRate(new DispatchMonitor(), 1l,1l, TimeUnit.SECONDS);

    Map<String, Integer> topicCount = new HashMap<>();
    // Use a single consumer thread for the topic
    topicCount.put(topic, 1);

    Map<String, List<KafkaStream<byte[], byte[]>>> consumerStreams =
            consumer.createMessageStreams(topicCount);

    List<KafkaStream<byte[], byte[]>> streams = consumerStreams.get(topic);

    MessageBatch dataBatch = new MessageBatch();

    for (final KafkaStream stream : streams) {

        ConsumerIterator<byte[], byte[]> consumerIte = stream.iterator();

        streamHandle = consumerIte;

        while (consumerIte.hasNext()) {
            lastTimeUpdated.set(System.currentTimeMillis());

            MessageAndMetadata<byte[], byte[]> payload = consumerIte.next();

            int partitionKey = payload.partition();
            long offset = payload.offset();

            dataBatch.getDataBatch().add(payload.message());
            //TODO: work on timed sending of messages when rcvd message is smaller
            if (dataBatch.getDataBatch().size() >= maxBatchSize) {
                OffsetInfo offsetInfo = new OffsetInfo(topic, partitionKey, offset);
                dataBatch.setOffsetInfo(offsetInfo);
                //send it across
                BatchPersistManager.getInstance().submitBatch(dataBatch);

                dataBatch = new MessageBatch();
            }
        }//while

        System.out.println("Ended the while stream...");
    }//for streams

    // when the loop breaks, send the last partial batch

}
 
Developer: sumanthn, Project: dataflux, Lines: 50, Source: WebTxnMsgConsumer.java


Note: The kafka.message.MessageAndMetadata.offset method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to each project's License. Do not reproduce without permission.