This article collects typical usage examples of the Java method kafka.message.MessageAndMetadata.partition. If you are wondering what MessageAndMetadata.partition does, how to use it, or want to see it in context, the curated code samples below may help. You can also explore the enclosing class kafka.message.MessageAndMetadata
in more depth.
Fifteen code examples of MessageAndMetadata.partition are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
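Before the examples, here is a minimal sketch of where partition() fits in the legacy high-level consumer API. The kafkaStream variable and the printed fields are illustrative assumptions, not taken from any of the projects below:

import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.message.MessageAndMetadata;

// Assume kafkaStream is a KafkaStream<byte[], byte[]> obtained from a ConsumerConnector.
ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
while (it.hasNext()) {
  MessageAndMetadata<byte[], byte[]> mm = it.next();
  int partition = mm.partition(); // the id of the partition this message was read from
  System.out.println("topic=" + mm.topic() + ", partition=" + partition + ", offset=" + mm.offset());
}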
Example 1: KafkaIndexingManager
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public KafkaIndexingManager(final LindenConfig lindenConfig, ShardingStrategy shardingStrategy,
                            LindenCore lindenCore, DataProvider<MessageAndMetadata<byte[], byte[]>> provider) {
  super(provider, lindenConfig, lindenCore, new Function<MessageAndMetadata<byte[], byte[]>, LindenIndexRequest>() {
    @Override
    public LindenIndexRequest apply(MessageAndMetadata<byte[], byte[]> messageAndMetadata) {
      LindenIndexRequest indexRequest = null;
      long offset = messageAndMetadata.offset();
      int partition = messageAndMetadata.partition(); // partition() returns an int partition id
      String message = new String(messageAndMetadata.message());
      try {
        indexRequest = LindenIndexRequestParser.parse(lindenConfig.getSchema(), message);
        LOGGER.info("Parse index request: id={}, route={}, type={}, content({}/{})={}", indexRequest.getId(),
            indexRequest.getRouteParam(), indexRequest.getType(), partition, offset, message);
      } catch (IOException e) {
        LOGGER.error("Parse index request failed: {} - {}", message, Throwables.getStackTraceAsString(e));
      }
      return indexRequest;
    }
  }, shardingStrategy);
}
Example 2: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
  Iote2eRequestReuseItem iote2eRequestReuseItem = new Iote2eRequestReuseItem();
  ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
  while (it.hasNext()) {
    MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
    String key = new String(messageAndMetadata.key());
    try {
      String summary =
          "Thread " + threadNumber +
          ", topic=" + messageAndMetadata.topic() +
          ", partition=" + messageAndMetadata.partition() +
          ", key=" + key +
          ", offset=" + messageAndMetadata.offset() +
          ", timestamp=" + messageAndMetadata.timestamp() +
          ", timestampType=" + messageAndMetadata.timestampType() +
          ", iote2eRequest=" + iote2eRequestReuseItem.fromByteArray(messageAndMetadata.message()).toString();
      logger.info(">>> Consumed: " + summary);
    } catch (Exception e) {
      logger.error(e.getMessage(), e);
    }
  }
  logger.info(">>> Shutting down Thread: " + threadNumber);
}
Example 3: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
  ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
  while (it.hasNext()) {
    MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
    String key = new String(messageAndMetadata.key());
    String message = new String(messageAndMetadata.message());
    String summary =
        "Thread " + threadNumber +
        ", topic=" + messageAndMetadata.topic() +
        ", partition=" + messageAndMetadata.partition() +
        ", key=" + key +
        ", message=" + message +
        ", offset=" + messageAndMetadata.offset() +
        ", timestamp=" + messageAndMetadata.timestamp() +
        ", timestampType=" + messageAndMetadata.timestampType();
    logger.info(">>> Consumed: " + summary);
  }
  logger.info(">>> Shutting down Thread: " + threadNumber);
}
Example 4: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
  try {
    ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
    Injection<GenericRecord, byte[]> recordInjection = GenericAvroCodecs.toBinary(User.getClassSchema());
    while (it.hasNext()) {
      MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
      String key = new String(messageAndMetadata.key());
      // Decode the Avro-encoded payload back into a User record
      User user = genericRecordToUser(recordInjection.invert(messageAndMetadata.message()).get());
      String summary = "Thread " + m_threadNumber + ", topic=" + messageAndMetadata.topic() + ", partition="
          + messageAndMetadata.partition() + ", key=" + key + ", user=" + user.toString() + ", offset="
          + messageAndMetadata.offset() + ", timestamp=" + messageAndMetadata.timestamp()
          + ", timestampType=" + messageAndMetadata.timestampType();
      System.out.println(summary);
    }
    System.out.println("Shutting down Thread: " + m_threadNumber);
  } catch (Exception e) {
    System.out.println("Exception in thread " + m_threadNumber);
    e.printStackTrace();
  }
}
Example 5: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
  ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
  while (it.hasNext()) {
    MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
    String key = new String(messageAndMetadata.key());
    String message = new String(messageAndMetadata.message());
    String summary =
        "Thread " + threadNumber +
        ", topic=" + messageAndMetadata.topic() +
        ", partition=" + messageAndMetadata.partition() +
        ", key=" + key +
        ", message=" + message +
        ", offset=" + messageAndMetadata.offset() +
        ", timestamp=" + messageAndMetadata.timestamp() +
        ", timestampType=" + messageAndMetadata.timestampType();
    System.out.println(summary);
  }
  System.out.println("Shutting down Thread: " + threadNumber);
}
Example 6: next
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Override
public MessageAndMetadata<byte[], byte[]> next() {
  readCount.incrementAndGet();
  lock.lock();
  try {
    // Block until a message is available in the buffer
    while (queue.size() == 0) {
      notEmpty.await();
    }
    MessageAndMetadata<byte[], byte[]> t = queue.poll();
    notFull.signalAll();
    // Record the commit offset for the partition this message came from
    TopicAndPartition key = new TopicAndPartition(t.topic(), t.partition());
    PartitionReader reader = partitionMap.get(key);
    if (reader != null) {
      commitCount.incrementAndGet();
      reader.getAndSetCommitOffset(t.offset());
      return t;
    }
  } catch (Exception e) {
    LOG.log("Read Message next() exception:", e);
  } finally {
    lock.unlock();
  }
  return null;
}
Example 7: read
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Override
public MessageAndOffset read() throws StageException {
  try {
    // hasNext() blocks indefinitely if consumer.timeout.ms is set to -1.
    // If consumer.timeout.ms is set to a value such as 6000, a ConsumerTimeoutException
    // is thrown when no message is written to the Kafka topic within that time.
    if (consumerIterator.hasNext()) {
      MessageAndMetadata<byte[], byte[]> messageAndMetadata = consumerIterator.next();
      byte[] message = messageAndMetadata.message();
      long offset = messageAndMetadata.offset();
      int partition = messageAndMetadata.partition();
      return new MessageAndOffset(message, offset, partition);
    }
    return null;
  } catch (ConsumerTimeoutException e) {
    /* For the high-level consumer, fetching is handled by a background fetcher thread
       and hidden from the user. Whether 1) the broker is down or 2) no message is
       available, the fetcher thread keeps retrying while the user thread waits for it
       to put data into the buffer, until the timeout. In short, the high-level consumer
       is designed so that users need not worry about connect/reconnect issues. */
    return null;
  }
}
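The timeout behavior described in Example 7 is driven by the consumer.timeout.ms property of the legacy high-level consumer. A minimal sketch of setting it follows; the ZooKeeper address, group id, and the 6000 ms value are illustrative assumptions, not taken from the example's project:

import java.util.Properties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.javaapi.consumer.ConsumerConnector;

Properties props = new Properties();
props.put("zookeeper.connect", "localhost:2181"); // illustrative address
props.put("group.id", "example-group");           // illustrative group id
// -1 makes hasNext() block forever; a positive value makes the iterator throw
// ConsumerTimeoutException after that many milliseconds without data.
props.put("consumer.timeout.ms", "6000");
ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));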
Example 8: receive
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Override
public BaseConsumerRecord receive() {
  if (!_iter.hasNext()) {
    return null;
  }
  MessageAndMetadata<String, String> record = _iter.next();
  return new BaseConsumerRecord(record.topic(), record.partition(), record.offset(), record.key(), record.message());
}
Example 9: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
  try {
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    BinaryDecoder binaryDecoder = null;
    Weather weatherRead = null;
    DatumReader<Weather> datumReaderWeather = new SpecificDatumReader<Weather>(Weather.getClassSchema());
    while (it.hasNext()) {
      MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
      String key = new String(messageAndMetadata.key());
      // Reuse the decoder and record instances across messages while decoding Avro
      binaryDecoder = DecoderFactory.get().binaryDecoder(messageAndMetadata.message(), binaryDecoder);
      weatherRead = datumReaderWeather.read(weatherRead, binaryDecoder);
      String summary = "Thread " + threadNumber + ", topic=" + messageAndMetadata.topic() + ", partition="
          + messageAndMetadata.partition() + ", key=" + key + ", offset="
          + messageAndMetadata.offset() + ", timestamp=" + messageAndMetadata.timestamp()
          + ", timestampType=" + messageAndMetadata.timestampType()
          + ", weatherRead=" + weatherRead.toString();
      System.out.println(summary);
    }
    System.out.println("Shutting down Thread: " + threadNumber);
  } catch (Exception e) {
    System.out.println("Exception in thread " + threadNumber);
    e.printStackTrace();
  }
}
Example 10: run
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public void run() {
  try {
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    BinaryDecoder binaryDecoder = null;
    Wave waveRead = null;
    DatumReader<Wave> datumReaderWave = new SpecificDatumReader<Wave>(Wave.getClassSchema());
    while (it.hasNext()) {
      MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
      String key = new String(messageAndMetadata.key());
      // Reuse the decoder and record instances across messages while decoding Avro
      binaryDecoder = DecoderFactory.get().binaryDecoder(messageAndMetadata.message(), binaryDecoder);
      waveRead = datumReaderWave.read(waveRead, binaryDecoder);
      String summary = ">>> CONSUMER: Thread " + threadNumber + ", topic=" + messageAndMetadata.topic() + ", partition="
          + messageAndMetadata.partition() + ", key=" + key + ", offset="
          + messageAndMetadata.offset() + ", timestamp=" + messageAndMetadata.timestamp()
          + ", timestampType=" + messageAndMetadata.timestampType()
          + ", waveRead=" + waveRead.toString();
      System.out.println(summary);
    }
    System.out.println("Shutting down Thread: " + threadNumber);
  } catch (Exception e) {
    System.out.println("Exception in thread " + threadNumber);
    e.printStackTrace();
  }
}
Example 11: testNormalReadWithMultipleConsumers
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
/**
 * Observed message distribution across consumers:
 *
 * --- 5 partitions, 5 consumers:
 * total consumed msg: 20151 partition num: [3]
 * total consumed msg: 19891 partition num: [2]
 * total consumed msg: 20004 partition num: [1]
 * total consumed msg: 20029 partition num: [0]
 * total consumed msg: 19925 partition num: [4]
 * --- 5 partitions, 3 consumers:
 * total consumed msg: 40155 partition num: [1, 3]
 * total consumed msg: 39920 partition num: [0, 2]
 * total consumed msg: 19925 partition num: [4]
 * --- 5 partitions, 7 consumers:
 * total consumed msg: 20151 partition num: [3]
 * total consumed msg: 19891 partition num: [2]
 * total consumed msg: 20004 partition num: [1]
 * total consumed msg: 20029 partition num: [0]
 * total consumed msg: 19925 partition num: [4]
 * total consumed msg: 0 partition num: []
 * total consumed msg: 0 partition num: []
 */
@Test
public void testNormalReadWithMultipleConsumers() {
  long count = 0;
  final SimpleConsumerEx consumer = kafkaController.createConsumer(TOPIC);
  Iterator<MessageAndMetadata<byte[], byte[]>> itr = consumer.iterator();
  long start = System.currentTimeMillis();
  Set<Integer> partSet = new HashSet<Integer>();
  while (true) {
    while (itr.hasNext()) {
      MessageAndMetadata<byte[], byte[]> itm = itr.next();
      int part = itm.partition();
      partSet.add(part);
      if (count == 0) {
        System.out.println("topic=" + itm.topic() + ", partition=" + itm.partition() + ", offset=" + itm.offset());
      }
      count++;
    }
    long end = System.currentTimeMillis();
    // Stop after 100 seconds or once every message has been consumed
    if (end - start > 100 * 1000 || count == TOTAL_MSG_COUNT) {
      break;
    }
  }
  System.out.println("total consumed msg: " + count);
  System.out.println("partition num: " + partSet);
}
Example 12: testSingleConsumerLostZooKeeper
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Test
public void testSingleConsumerLostZooKeeper() throws Exception {
  long count = 0;
  final SimpleConsumerEx consumer = kafkaController.createConsumer(TOPIC);
  Iterator<MessageAndMetadata<byte[], byte[]>> itr = consumer.iterator();
  long start = System.currentTimeMillis();
  Set<Integer> partSet = new HashSet<Integer>();
  while (true) {
    while (itr.hasNext()) {
      MessageAndMetadata<byte[], byte[]> itm = itr.next();
      int part = itm.partition();
      partSet.add(part);
      if (count == 0) {
        System.out.println("topic=" + itm.topic() + ", partition=" + itm.partition() + ", offset=" + itm.offset());
      }
      if (count == TOTAL_MSG_COUNT / 3) {
        System.out.println("about to sleep 10s, kill zk");
        Thread.sleep(10 * 1000);
      }
      count++;
    }
    long end = System.currentTimeMillis();
    if (end - start > 200 * 1000 || count == TOTAL_MSG_COUNT) {
      break;
    }
  }
  System.out.println("total consumed msg: " + count);
  System.out.println("partition num: " + partSet);
}
Example 13: testRebalance_FUllGC
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
@Test
public void testRebalance_FUllGC() throws Exception {
  kafkaController = new SimpleConsumerController(new KafkaConsumerConfig(prop));
  long count = 0;
  final SimpleConsumerEx consumer = kafkaController.createConsumer(TOPIC);
  Iterator<MessageAndMetadata<byte[], byte[]>> itr = consumer.iterator();
  long start = System.currentTimeMillis();
  Set<Integer> partSet = new HashSet<Integer>();
  while (true) {
    while (itr.hasNext()) {
      MessageAndMetadata<byte[], byte[]> itm = itr.next();
      int part = itm.partition();
      partSet.add(part);
      if (count == 0) {
        System.out.println("topic=" + itm.topic() + ", partition=" + itm.partition() + ", offset=" + itm.offset());
      }
      if (count == 5000) {
        System.out.println("this consumer exit, partition: " + part);
        System.out.println("msg count: " + count);
        System.out.println("try to trigger full gc");
        FullGc.main(null);
      }
      count++;
    }
    long end = System.currentTimeMillis();
    if (end - start > 200 * 1000 || count == TOTAL_MSG_COUNT) {
      break;
    }
  }
  consumer.markCommitOffset();
  kafkaController.stop();
  System.out.println("total consumed msg: " + count);
  System.out.println("partition num: " + partSet);
}
Example 14: read
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
private void read(final KafkaStream<String, String> stream) {
  // KafkaStream.iterator() returns the same underlying ConsumerIterator on every call,
  // so hasNext()/next() here operate on a single iterator.
  while (stream.iterator().hasNext()) {
    final int phase = phaser.register();
    final MessageAndMetadata<String, String> msg = stream.iterator().next();
    final long offset = msg.offset();
    final long partition = msg.partition();
    unacknowledgedOffsets.add(offset);
    lastCommittedOffset.compareAndSet(0, offset);
    currentPartition.compareAndSet(-1, partition);
    final String jsonString = msg.message();
    handler.handle(configuration.getVertxAddress(), jsonString, () -> {
      unacknowledgedOffsets.remove(offset);
      phaser.arriveAndDeregister();
    });
    if (unacknowledgedOffsets.size() >= configuration.getMaxUnacknowledged()
        || partititionChanged(partition)
        || tooManyUncommittedOffsets(offset)
        || commitTimeoutReached()) {
      LOG.info("Got {} unacknowledged messages, waiting for ACKs in order to commit", unacknowledgedOffsets.size());
      if (!waitForAcks(phase)) {
        return;
      }
      commitOffsetsIfAllAcknowledged(offset);
      LOG.info("Continuing message processing");
    }
  }
}
Example 15: Message
import kafka.message.MessageAndMetadata; // import the package/class the method depends on
public Message(MessageAndMetadata<byte[], byte[]> message) {
  this.topic = message.topic();
  this.key = message.key() != null ? new String(message.key(), Charset.forName("utf-8")) : null;
  this.message = new String(message.message(), Charset.forName("utf-8"));
  this.partition = message.partition();
  this.offset = message.offset();
}