Java MessageAndOffset.offset Method Code Examples

This article collects typical usage examples of the kafka.message.MessageAndOffset.offset method in Java. If you are wondering what MessageAndOffset.offset does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also browse further usage examples of the enclosing class, kafka.message.MessageAndOffset.


The following shows 15 code examples of the MessageAndOffset.offset method, sorted by popularity by default.
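All of the examples below share the same basic pattern: issue a FetchRequest through a SimpleConsumer, iterate the returned ByteBufferMessageSet, and read each message's position with MessageAndOffset.offset(). As a point of reference before the individual examples, here is a minimal, self-contained sketch of that pattern against the old Scala client (kafka.javaapi.consumer.SimpleConsumer) that these examples target; the broker address, topic name, partition number, and client id are placeholder values, not taken from any of the projects below.

import java.nio.ByteBuffer;
import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.MessageAndOffset;

public class MessageAndOffsetSketch {
	public static void main(String[] args) throws Exception {
		// Placeholder connection settings: host, port, socket timeout, buffer size, client id.
		SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "offset-sketch");
		FetchRequest request = new FetchRequestBuilder()
				.clientId("offset-sketch")
				.addFetch("test-topic", 0, 0L, 100000) // topic, partition, start offset, max bytes
				.build();
		FetchResponse response = consumer.fetch(request);
		ByteBufferMessageSet messageSet = response.messageSet("test-topic", 0);
		for (MessageAndOffset messageAndOffset : messageSet) {
			long offset = messageAndOffset.offset(); // position of this message in the partition
			ByteBuffer payload = messageAndOffset.message().payload();
			byte[] bytes = new byte[payload.remaining()];
			payload.get(bytes);
			System.out.println("Offset: " + offset + ", Payload: " + new String(bytes, "UTF-8"));
		}
		consumer.close();
	}
}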

Example 1: main

import kafka.message.MessageAndOffset; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
	final String topic = "test2";
	String clientId = "LowLevelConsumerClient1";
	SimpleConsumer simpleConsumer = new SimpleConsumer(
			"192.168.1.186", 9092, 6000000, 64 * 1000000, clientId);
	FetchRequest req = new FetchRequestBuilder().clientId(clientId)
							.addFetch(topic, 0, 0L, 1000000)
							.addFetch(topic, 1, 0L, 1000000)
							.addFetch(topic, 2, 0L, 1000000)
							.build();
	FetchResponse rep = simpleConsumer.fetch(req);						
	ByteBufferMessageSet messageSet = rep.messageSet(topic, 0);
	for(MessageAndOffset messageAndOffset : messageSet) {
		ByteBuffer payload = messageAndOffset.message().payload();
		long offset = messageAndOffset.offset();
		byte[] bytes = new byte[payload.limit()];
		payload.get(bytes);
		System.out.println("Offset : " + offset + ", Payload : " + new String(bytes, "UTF-8"));
	}
}
 
Developer: walle-liao, Project: jaf-examples, Lines: 21, Source: LowLevelConsumerDemo.java

Example 2: main

import kafka.message.MessageAndOffset; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
	final String topic = "topic1";
	String clientID = "DemoLowLevelConsumer1";
	SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
	FetchRequest req = new FetchRequestBuilder().clientId(clientID)
			.addFetch(topic, 0, 0L, 50).addFetch(topic, 1, 0L, 5000).addFetch(topic, 2, 0L, 1000000).build();
	FetchResponse fetchResponse = simpleConsumer.fetch(req);
	ByteBufferMessageSet messageSet = (ByteBufferMessageSet) fetchResponse.messageSet(topic, 0);
	for (MessageAndOffset messageAndOffset : messageSet) {
		ByteBuffer payload = messageAndOffset.message().payload();
		long offset = messageAndOffset.offset();
		byte[] bytes = new byte[payload.limit()];
		payload.get(bytes);
		System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
	}
}
 
Developer: habren, Project: KafkaExample, Lines: 17, Source: DemoLowLevelConsumer.java

Example 3: createFetchedMessages

import kafka.message.MessageAndOffset; // import the package/class the method depends on
/**
 * Creates an Iterator of FetchedMessage based on the given message set. The iterator also
 * updates the offset while iterating.
 */
private Iterator<FetchedMessage> createFetchedMessages(ByteBufferMessageSet messageSet, final AtomicLong offset) {
  final Iterator<MessageAndOffset> messages = messageSet.iterator();
  return new AbstractIterator<FetchedMessage>() {
    @Override
    protected FetchedMessage computeNext() {
      while (messages.hasNext()) {
        MessageAndOffset message = messages.next();
        long msgOffset = message.offset();
        if (msgOffset < offset.get()) {
          LOG.trace("Received old offset {}, expecting {} on {}. Message Ignored.",
                    msgOffset, offset.get(), topicPart);
          continue;
        }

        fetchedMessage.setPayload(message.message().payload());
        fetchedMessage.setOffset(message.offset());
        fetchedMessage.setNextOffset(message.nextOffset());

        return fetchedMessage;
      }
      return endOfData();
    }
  };
}
 
Developer: apache, Project: twill, Lines: 29, Source: SimpleKafkaConsumer.java

Example 4: run

import kafka.message.MessageAndOffset; // import the package/class the method depends on
@Override
  public void run()
  {
    long offset = 0;
    while (isAlive) {
      // create a fetch request for topic "topic1", partition 1, the current offset, and a fetch size of 1MB
      FetchRequest fetchRequest = new FetchRequestBuilder().clientId("default_client").addFetch("topic1", 1, offset, 1000000).build();

//      FetchRequest fetchRequest = new FetchRequest("topic1", 0, offset, 1000000);

      // get the message set from the consumer and print them out
      ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
      Iterator<MessageAndOffset> itr = messages.iterator();

      while (itr.hasNext() && isAlive) {
        MessageAndOffset msg = itr.next();
        // advance the offset after consuming each message
        offset = msg.offset();
        logger.debug("consumed: {} offset: {}", byteBufferToString(msg.message().payload()).toString(), offset);
        receiveCount++;
      }
    }
  }
 
Developer: apache, Project: apex-malhar, Lines: 24, Source: KafkaSimpleConsumer.java

Example 5: advanceNextPosition

import kafka.message.MessageAndOffset; // import the package/class the method depends on
@Override
public boolean advanceNextPosition()
{
    while (true) {
        if (cursorOffset >= split.getEnd()) {
            return endOfData(); // Split end is exclusive.
        }
        // Create a fetch request
        openFetchRequest();

        while (messageAndOffsetIterator.hasNext()) {
            MessageAndOffset currentMessageAndOffset = messageAndOffsetIterator.next();
            long messageOffset = currentMessageAndOffset.offset();

            if (messageOffset >= split.getEnd()) {
                return endOfData(); // Past our split end. Bail.
            }

            if (messageOffset >= cursorOffset) {
                return nextRow(currentMessageAndOffset);
            }
        }
        messageAndOffsetIterator = null;
    }
}
 
Developer: y-lan, Project: presto, Lines: 26, Source: KafkaRecordSet.java

Example 6: createFetchedMessages

import kafka.message.MessageAndOffset; // import the package/class the method depends on
/**
 * Creates an Iterator of FetchedMessage based on the given message set. The iterator also
 * updates the offset while iterating.
 */
private Iterator<FetchedMessage> createFetchedMessages(ByteBufferMessageSet messageSet, final AtomicLong offset) {
  final Iterator<MessageAndOffset> messages = messageSet.iterator();
  return new AbstractIterator<FetchedMessage>() {
    @Override
    protected FetchedMessage computeNext() {
      while (messages.hasNext()) {
        MessageAndOffset message = messages.next();
        long msgOffset = message.offset();
        if (msgOffset < offset.get()) {
          LOG.trace("Received old offset {}, expecting {} on {}. Message Ignored.",
                    msgOffset, offset.get(), topicPart);
          continue;
        }

        offset.set(message.nextOffset());
        fetchedMessage.setPayload(message.message().payload());
        fetchedMessage.setNextOffset(offset.get());

        return fetchedMessage;
      }
      return endOfData();
    }
  };
}
 
Developer: chtyim, Project: incubator-twill, Lines: 29, Source: SimpleKafkaConsumer.java

Example 7: get

import kafka.message.MessageAndOffset; // import the package/class the method depends on
protected boolean get(KafkaETLKey key, BytesWritable value) throws IOException {
    if (_messageIt != null && _messageIt.hasNext()) {
        MessageAndOffset messageAndOffset = _messageIt.next();
        
        ByteBuffer buf = messageAndOffset.message().payload();
        int origSize = buf.remaining();
        byte[] bytes = new byte[origSize];
        buf.get(bytes, buf.position(), origSize);
        value.set(bytes, 0, origSize);
        
        key.set(_index, _offset, messageAndOffset.message().checksum());
        
        _offset = messageAndOffset.offset();  //increase offset
        _count ++;  //increase count
        
        return true;
    }
    else return false;
}
 
Developer: yanfang724, Project: hadoop-consumer, Lines: 20, Source: KafkaETLContext.java

Example 8: emitPartitionBatchNew

import kafka.message.MessageAndOffset; // import the package/class the method depends on
public static BatchMeta emitPartitionBatchNew(KafkaConfig config, int partition, SimpleConsumer consumer, TransactionAttempt attempt, BatchOutputCollector collector, BatchMeta lastMeta) {
    long offset = 0;
    if(lastMeta!=null) {
        offset = lastMeta.nextOffset;
    }
    ByteBufferMessageSet msgs;
    try {
       msgs = consumer.fetch(new FetchRequest(config.topic, partition % config.partitionsPerHost, offset, config.fetchSizeBytes));
    } catch(Exception e) {
        if(e instanceof ConnectException) {
            throw new FailedFetchException(e);
        } else {
            throw new RuntimeException(e);
        }
    }
    long endoffset = offset;
    for(MessageAndOffset msg: msgs) {
        emit(config, attempt, collector, msg.message());
        endoffset = msg.offset();
    }
    BatchMeta newMeta = new BatchMeta();
    newMeta.offset = offset;
    newMeta.nextOffset = endoffset;
    return newMeta;
}
 
Developer: YinYanfei, Project: CadalWorkspace, Lines: 26, Source: KafkaUtils.java

Example 9: main

import kafka.message.MessageAndOffset; // import the package/class the method depends on
public static void main(String[] args) {
		Properties props = new Properties();
//	props.put("zk.connect","10.15.62.76:2181");
//		props.put("groupid","testgroup");
		
		SimpleConsumer consumer = new SimpleConsumer("10.15.62.70",9092,10000,1024000);
		long offset =  0; 
		int count = 0;
		String str1 = "";
	//	while(true){
			FetchRequest fetchRequest  = new FetchRequest("topic1114",3,offset,10000000);//���һ��������һ���������ݵ������byte
			ByteBufferMessageSet messages = consumer.fetch(fetchRequest);
			for(MessageAndOffset msg  :messages){
				count++;
				ByteBuffer buffer = msg.message().payload();
				byte[] bytes = new byte[buffer.remaining()];
				buffer.get(bytes);
				String str = new String(bytes);
				System.out.println(str);
				offset = msg.offset();
				System.out.println("offset: " + offset);
			}
			System.out.println("------------ count= " + count);
	//	}
	}
 
Developer: YinYanfei, Project: CadalWorkspace, Lines: 26, Source: mySimpleConsumer.java

Example 10: transform

import kafka.message.MessageAndOffset; // import the package/class the method depends on
public PartitionConsumerRecord<K, V> transform(TopicPartition topicPartition, MessageAndOffset messageAndOffset) {
    Message message = messageAndOffset.message();
    byte[] keyBytes = toBytes(message.key());
    byte[] valueBytes = toBytes(message.payload());

    return new PartitionConsumerRecord<>(
            topicPartition.topic(),
            topicPartition.partition(),
            decodeNullable(keyDecoder, keyBytes),
            decodeNullable(valueDecoder, valueBytes),
            messageAndOffset.offset()
    );
}
 
Developer: researchgate, Project: kafka-metamorph, Lines: 14, Source: PartitionConsumerRecordTransformer.java

Example 11: fillMessages

import kafka.message.MessageAndOffset; // import the package/class the method depends on
private void fillMessages() {

        ByteBufferMessageSet msgs;
        try {
            long start = System.currentTimeMillis();
            msgs = consumer.fetchMessages(partition, emittingOffset + 1);
            
            if (msgs == null) {
                LOG.error("fetch null message from offset {}", emittingOffset);
                return;
            }
            
            int count = 0;
            for (MessageAndOffset msg : msgs) {
                count += 1;
                emittingMessages.add(msg);
                emittingOffset = msg.offset();
                pendingOffsets.add(emittingOffset);
                LOG.debug("fillmessage fetched a message:{}, offset:{}", msg.message().toString(), msg.offset());
            }
            long end = System.currentTimeMillis();
            LOG.info("fetch message from partition:"+partition+", offset:" + emittingOffset+", size:"+msgs.sizeInBytes()+", count:"+count +", time:"+(end-start));
        } catch (Exception e) {
            e.printStackTrace();
            LOG.error(e.getMessage(),e);
        }
    }
 
Developer: zhangjunfang, Project: jstorm-0.9.6.3-, Lines: 28, Source: PartitionConsumer.java

Example 12: getNext

import kafka.message.MessageAndOffset; // import the package/class the method depends on
/**
 * Fetches the next Kafka message and stuffs the results into the key and
 * value
 *
 * @param key
 * @param payload
 * @param pKey
 * @return true if there exists more events
 * @throws IOException
 */
public boolean getNext(KafkaKey key, BytesWritable payload ,BytesWritable pKey) throws IOException {
  if (hasNext()) {

    MessageAndOffset msgAndOffset = messageIter.next();
    Message message = msgAndOffset.message();

    ByteBuffer buf = message.payload();
    int origSize = buf.remaining();
    byte[] bytes = new byte[origSize];
    buf.get(bytes, buf.position(), origSize);
    payload.set(bytes, 0, origSize);

    buf = message.key();
    if(buf != null){
      origSize = buf.remaining();
      bytes = new byte[origSize];
      buf.get(bytes, buf.position(), origSize);
      pKey.set(bytes, 0, origSize);
    }

    key.clear();
    key.set(kafkaRequest.getTopic(), kafkaRequest.getLeaderId(),
        kafkaRequest.getPartition(), currentOffset,
        msgAndOffset.offset() + 1, message.checksum());

    key.setMessageSize(msgAndOffset.message().size());

    currentOffset = msgAndOffset.offset() + 1; // increase offset
    currentCount++; // increase count

    return true;
  } else {
    return false;
  }
}
 
Developer: HiveKa, Project: HiveKa, Lines: 46, Source: KafkaReader.java

Example 13: filterAndDecode

import kafka.message.MessageAndOffset; // import the package/class the method depends on
private Iterable<BytesMessageWithOffset> filterAndDecode(Iterable<MessageAndOffset> kafkaMessages, long offset) {
    List<BytesMessageWithOffset> ret = new LinkedList<>();
    for (MessageAndOffset msgAndOffset: kafkaMessages) {
        if (msgAndOffset.offset() >= offset) {
            byte[] payload = decoder.fromMessage(msgAndOffset.message());
            // add nextOffset here, thus next fetch will use nextOffset instead of current offset
            ret.add(new BytesMessageWithOffset(payload, msgAndOffset.nextOffset()));
        }
    }
    return ret;
}
 
Developer: lyogavin, Project: Pistachio, Lines: 12, Source: KafkaSimpleConsumer.java

Example 14: fill

import kafka.message.MessageAndOffset; // import the package/class the method depends on
private void fill() {
	SimpleConsumer consumer = _partitions.getConsumer(_partition);
	
	ByteBufferMessageSet msgs = consumer.fetch(new FetchRequest(_spoutConfig.topic, _partitions.getHostPartition(_partition), _emittedToOffset, _spoutConfig.fetchSizeBytes));
	
	for (MessageAndOffset msg : msgs) {
		_pending.add(actualOffset(msg));
		_waitingToEmit.add(msg);
		_emittedToOffset = msg.offset();
	}
}
 
Developer: YinYanfei, Project: CadalWorkspace, Lines: 12, Source: KafkaSpout.java

Example 15: getCurrentMessagePayload

import kafka.message.MessageAndOffset; // import the package/class the method depends on
byte[] getCurrentMessagePayload() {
  while(currentMessageSetIterator.hasNext()) {
    MessageAndOffset messageAndOffset = currentMessageSetIterator.next();
    if (messageAndOffset.offset() < currentOffset) continue; //old offset, ignore
    Message message = messageAndOffset.message();
    ByteBuffer payload = message.payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    currentOffset = messageAndOffset.nextOffset();
    return bytes;
  }
  return null;
}
 
Developer: DemandCube, Project: Scribengin, Lines: 14, Source: KafkaPartitionReader.java
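
A detail worth noting across the examples: several of them advance their read position with the raw value of offset(), while others (for instance examples 3, 6, 13, and 15) use nextOffset(), and example 12 adds 1 to offset() explicitly. The helper below is a hypothetical sketch, not taken from any of the projects above, showing the pattern the latter group follows: skip messages that fall below the requested offset and return nextOffset() (that is, offset() + 1) as the position for the next fetch, so the last message is not read twice.

import java.nio.ByteBuffer;
import java.util.List;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.MessageAndOffset;

// Hypothetical helper illustrating offset() versus nextOffset(); not part of any project above.
final class MessageSetReader {

	/**
	 * Collects payloads at or after requestedOffset into out and returns the offset
	 * to use for the next fetch. Messages below requestedOffset are skipped, as in
	 * examples 3, 6, and 15, since a fetch may return messages that precede the
	 * requested position (for instance when reading from a compressed message set).
	 */
	static long readFrom(ByteBufferMessageSet messageSet, long requestedOffset, List<byte[]> out) {
		long nextFetchOffset = requestedOffset;
		for (MessageAndOffset messageAndOffset : messageSet) {
			if (messageAndOffset.offset() < requestedOffset) {
				continue; // old offset, ignore
			}
			ByteBuffer payload = messageAndOffset.message().payload();
			byte[] bytes = new byte[payload.remaining()];
			payload.get(bytes);
			out.add(bytes);
			// nextOffset() == offset() + 1: where the next fetch should start.
			nextFetchOffset = messageAndOffset.nextOffset();
		}
		return nextFetchOffset;
	}
}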


Note: The kafka.message.MessageAndOffset.offset method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. For distribution and use, please follow the corresponding project's license. Do not reproduce without permission.