

Java MessageAndOffset.message Method Code Examples

This article collects typical usage examples of the Java method kafka.message.MessageAndOffset.message. If you are wondering what MessageAndOffset.message does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore other usage examples of the enclosing class, kafka.message.MessageAndOffset.


The following shows 7 code examples of MessageAndOffset.message, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
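Before the numbered examples, here is a minimal, self-contained sketch of the pattern all seven share: fetch a message set with the legacy (pre-0.9) SimpleConsumer API, iterate its MessageAndOffset entries, and call message() to reach the key and payload bytes. The broker address, topic name, partition, offset, and class name below are placeholder assumptions for illustration, not values taken from any of the projects cited here.

import java.nio.ByteBuffer;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.Message;
import kafka.message.MessageAndOffset;

public class MessageAndOffsetSketch {
    public static void main(String[] args) {
        // Placeholder connection details; adjust for your cluster.
        SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "example-client");
        try {
            FetchRequest req = new FetchRequestBuilder()
                    .clientId("example-client")
                    .addFetch("example-topic", 0, 0L, 100000)
                    .build();
            FetchResponse response = consumer.fetch(req);

            for (MessageAndOffset messageAndOffset : response.messageSet("example-topic", 0)) {
                Message message = messageAndOffset.message(); // the method this article covers

                ByteBuffer payloadBuffer = message.payload();
                byte[] payload = new byte[payloadBuffer.remaining()];
                payloadBuffer.get(payload);

                ByteBuffer keyBuffer = message.key(); // null for unkeyed messages
                byte[] key = null;
                if (keyBuffer != null) {
                    key = new byte[keyBuffer.remaining()];
                    keyBuffer.get(key);
                }
                System.out.println(messageAndOffset.offset() + ": " + new String(payload));
            }
        } finally {
            consumer.close();
        }
    }
}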

Example 1: verifyMessage

import kafka.message.MessageAndOffset; // import the package/class the method depends on
private boolean verifyMessage(String key, String message) {
    long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), lastMessageOffset);
    MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
    Message kafkaMessage = messageAndOffset.message();
    ByteBuffer messageKeyBuffer = kafkaMessage.key();
    String keyString = null;
    String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
    if (messageKeyBuffer != null) {
        keyString = new String(Utils.toByteArray(messageKeyBuffer));
    }
    assertEquals(key, keyString);
    assertEquals(message, messageString);
    return true;
}
 
Developer: metamx, Project: incubator-storm, Lines: 17, Source: KafkaBoltTest.java

Example 2: transform

import kafka.message.MessageAndOffset; // import the package/class the method depends on
public PartitionConsumerRecord<K, V> transform(TopicPartition topicPartition, MessageAndOffset messageAndOffset) {
    Message message = messageAndOffset.message();
    byte[] keyBytes = toBytes(message.key());
    byte[] valueBytes = toBytes(message.payload());

    return new PartitionConsumerRecord<>(
            topicPartition.topic(),
            topicPartition.partition(),
            decodeNullable(keyDecoder, keyBytes),
            decodeNullable(valueDecoder, valueBytes),
            messageAndOffset.offset()
    );
}
 
Developer: researchgate, Project: kafka-metamorph, Lines: 14, Source: PartitionConsumerRecordTransformer.java

Example 3: initializeLastProcessingOffset

import kafka.message.MessageAndOffset; // import the package/class the method depends on
private void initializeLastProcessingOffset()
{
  // read last received kafka message
  TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());

  if (tm == null) {
    throw new RuntimeException("Failed to retrieve topic metadata");
  }

  partitionNum = tm.partitionsMetadata().size();

  lastMsgs = new HashMap<Integer, Pair<byte[],byte[]>>(partitionNum);

  for (PartitionMetadata pm : tm.partitionsMetadata()) {

    String leadBroker = pm.leader().host();
    int port = pm.leader().port();
    String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
    SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);

    long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);

    FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();

    FetchResponse fetchResponse = consumer.fetch(req);
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {

      Message m = messageAndOffset.message();

      ByteBuffer payload = m.payload();
      ByteBuffer key = m.key();
      byte[] valueBytes = new byte[payload.limit()];
      byte[] keyBytes = new byte[key.limit()];
      payload.get(valueBytes);
      key.get(keyBytes);
      lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
    }
  }
}
 
Developer: apache, Project: apex-malhar, Lines: 40, Source: AbstractExactlyOnceKafkaOutputOperator.java

Example 4: getNext

import kafka.message.MessageAndOffset; // import the package/class the method depends on
/**
 * Fetches the next Kafka message and stuffs the results into the key and
 * value
 *
 * @param key
 * @param payload
 * @param pKey
 * @return true if there exists more events
 * @throws IOException
 */
public boolean getNext(KafkaKey key, BytesWritable payload, BytesWritable pKey) throws IOException {
  if (hasNext()) {

    MessageAndOffset msgAndOffset = messageIter.next();
    Message message = msgAndOffset.message();

    ByteBuffer buf = message.payload();
    int origSize = buf.remaining();
    byte[] bytes = new byte[origSize];
    // Note: ByteBuffer.get(byte[], int, int) takes an offset into the
    // destination array, not the buffer, so the offset here must be 0.
    buf.get(bytes, 0, origSize);
    payload.set(bytes, 0, origSize);

    buf = message.key();
    if (buf != null) {
      origSize = buf.remaining();
      bytes = new byte[origSize];
      buf.get(bytes, 0, origSize);
      pKey.set(bytes, 0, origSize);
    }

    key.clear();
    key.set(kafkaRequest.getTopic(), kafkaRequest.getLeaderId(),
        kafkaRequest.getPartition(), currentOffset,
        msgAndOffset.offset() + 1, message.checksum());

    key.setMessageSize(msgAndOffset.message().size());

    currentOffset = msgAndOffset.offset() + 1; // increase offset
    currentCount++; // increase count

    return true;
  } else {
    return false;
  }
}
 
Developer: HiveKa, Project: HiveKa, Lines: 46, Source: KafkaReader.java

Example 5: getCurrentMessagePayload

import kafka.message.MessageAndOffset; // import the package/class the method depends on
byte[] getCurrentMessagePayload() {
  while(currentMessageSetIterator.hasNext()) {
    MessageAndOffset messageAndOffset = currentMessageSetIterator.next();
    if (messageAndOffset.offset() < currentOffset) continue; //old offset, ignore
    Message message = messageAndOffset.message();
    ByteBuffer payload = message.payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    currentOffset = messageAndOffset.nextOffset();
    return bytes;
  }
  return null;
}
 
Developer: DemandCube, Project: Scribengin, Lines: 14, Source: KafkaPartitionReader.java

Example 6: send_should_send_messsage_to_kafka

import kafka.message.MessageAndOffset; // import the package/class the method depends on
@Ignore("Integration test, requires a zookeeper and Kafka. Un-Ignore to test manually")
@Test
public void send_should_send_messsage_to_kafka() {

    int port = 9092;

    // Create the buffer, put the message in there and then read
    when(client.isDone()).thenReturn(false, true);
    int numberOfMessages = 1000;
    LinkedBlockingQueue<String> buffer = new LinkedBlockingQueue<>(numberOfMessages);

    for(int i = 0; i < numberOfMessages; ++i) {
        buffer.add("MESSAGE" + i);
    }

    HosebirdReader tr = new ConcreteHosebirdReader();
    Producer<String, String> producer = tr.getKafkaProducer(new KafkaConfig());
    tr.readAndSend(buffer, "TOPIC", producer, this.clientReadAndSendPredicate);
    producer.close();

    System.out.println("Messages sent");

    // Consume one message from Kafka:
    SimpleConsumer consumer = new SimpleConsumer("localhost", port, 10000, 1024000, "CLIENT");

    FetchRequest req = new FetchRequestBuilder()
            .clientId("CLIENT")
            .addFetch("TOPIC", 0, 0, 100000)
            .build();

    FetchResponse fetchResponse = consumer.fetch(req);
    int count = 0;

    for(MessageAndOffset msg : fetchResponse.messageSet("TOPIC", 0)) {
        Message m = msg.message();
        ByteBuffer bb = m.payload();
        CharBuffer cb = StandardCharsets.UTF_8.decode(bb);
        assertTrue(cb.toString().startsWith("MESSAGE"));
        count++;
    }

    // TODO: either clear the topic at the start of the test or
    //       check how many messages are in the topic.
    assertTrue(count > 1);
    consumer.close();
}
 
Developer: datasift, Project: datasift-connector, Lines: 47, Source: TestHosebirdReader.java

Example 7: readEvents

import kafka.message.MessageAndOffset; // import the package/class the method depends on
/**
 * Reads events.
 *
 * Any error that occurs during the read is wrapped in a
 * KafkaPartitionReaderException carrying the Kafka error code;
 * the exception should be handled by the consumer.
 *
 * @return the list of messages read, possibly empty
 * @throws KafkaPartitionReaderException
 */
public List<MessageAndMetadata<byte[], byte[]>> readEvents() throws KafkaPartitionReaderException {
    List<MessageAndMetadata<byte[], byte[]>> events = new ArrayList<MessageAndMetadata<byte[], byte[]>>();
    if (isClosed()) {
        return events;
    }
    if (nextBatchSizeBytes < 0) {
        nextBatchSizeBytes = config.fetchMinBytes();
    }
    if (nextBatchSizeBytes == 0) {
        // nextBatchSize only affects one fetch
        nextBatchSizeBytes = config.fetchMinBytes();
        return events;
    }

    boolean hasMessage = false;
    ByteBufferMessageSet messageSet = null;
    do {
        FetchRequest req = new FetchRequestBuilder()
                .clientId(clientId)
                .addFetch(topic, partition, readOffset, nextBatchSizeBytes)
                .build();

        FetchResponse fetchResponse = consumer.fetch(req);
        if (fetchResponse.hasError()) {
            short code = fetchResponse.errorCode(topic, partition);
            throw new KafkaPartitionReaderException(code);
        } else {
            messageSet = fetchResponse.messageSet(topic, partition);
            hasMessage = messageSet.iterator().hasNext();
            if (!hasMessage) {
                // Empty fetch: double the batch size (capped at the max) and retry.
                nextBatchSizeBytes = Math.min(nextBatchSizeBytes * 2, config.fetchMessageMaxBytes());
            }
        }
    } while (!hasMessage && !readToTheEnd()); // TODO: test readToTheEnd(); consider config.getMaxBatchSizeBytes().

    if (!hasMessage) {
        // Nothing to read: set this reader idle.
        onIdle();
        nextBatchSizeBytes = config.fetchMinBytes();
        return events; // return empty events
    }
    for (MessageAndOffset messageAndOffset : messageSet) {
        long currentOffset = messageAndOffset.offset();
        if (currentOffset < readOffset) {
            continue; // old offset, skip
        }
        readOffset = messageAndOffset.nextOffset();
        Message message = messageAndOffset.message();
        events.add(new MessageAndMetadata<byte[], byte[]>(topic, partition, message, readOffset, decoder, decoder));
    }
    // nextBatchSize only affects one fetch
    nextBatchSizeBytes = config.fetchMinBytes();
    return events;
}
 
Developer: pulsarIO, Project: druid-kafka-ext, Lines: 68, Source: ConsumerPartitionReader.java


Note: The kafka.message.MessageAndOffset.message method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, who retain copyright to the source code; before distributing or using the code, consult the corresponding project's license. Do not reproduce without permission.