

Java RecordHeader Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.common.header.internals.RecordHeader. If you are wondering what RecordHeader does, how to use it, or what real code that uses it looks like, the curated examples below should help.


RecordHeader belongs to the org.apache.kafka.common.header.internals package. Nine code examples of the class are shown below, sorted by popularity.
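Before diving into the examples, here is a minimal self-contained sketch (the topic name, key, value, and header content are placeholders, not taken from the examples below) of the most common use of RecordHeader: attaching a custom header to an outgoing ProducerRecord.

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;

import java.nio.charset.StandardCharsets;

public class RecordHeaderDemo {
    public static void main(String[] args) {
        // create a header (a null byte[] value is also legal) and attach it to a record
        Header header = new RecordHeader("trace-id", "abc123".getBytes(StandardCharsets.UTF_8));
        ProducerRecord<String, String> record = new ProducerRecord<>("demo-topic", "key", "value");
        record.headers().add(header);
        System.out.println(record.headers().lastHeader("trace-id").key()); // prints "trace-id"
    }
}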

Example 1: readHeaders

import org.apache.kafka.common.header.internals.RecordHeader; // import the required package/class
private static Header[] readHeaders(ByteBuffer buffer, int numHeaders) {
    Header[] headers = new Header[numHeaders];
    for (int i = 0; i < numHeaders; i++) {
        int headerKeySize = ByteUtils.readVarint(buffer);
        if (headerKeySize < 0)
            throw new InvalidRecordException("Invalid negative header key size " + headerKeySize);

        String headerKey = Utils.utf8(buffer, headerKeySize);
        buffer.position(buffer.position() + headerKeySize);

        ByteBuffer headerValue = null;
        int headerValueSize = ByteUtils.readVarint(buffer);
        if (headerValueSize >= 0) {
            headerValue = buffer.slice();
            headerValue.limit(headerValueSize);
            buffer.position(buffer.position() + headerValueSize);
        }

        headers[i] = new RecordHeader(headerKey, headerValue);
    }

    return headers;
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 24 | Source: DefaultRecord.java
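readHeaders above is the decoding half of the header wire format. Below is a rough sketch of a matching encoder, with its own zigzag varint writer so it stays self-contained; it assumes the record-format-v2 layout that the reader implies (varint key length, UTF-8 key bytes, varint value length with -1 marking a null value, then the value bytes).

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class HeaderWriter {
    // zigzag varint, the encoding used for the varints in the v2 record format
    static void writeVarint(ByteBuffer buffer, int value) {
        int v = (value << 1) ^ (value >> 31); // zigzag encode so small negatives stay short
        while ((v & 0xffffff80) != 0) {
            buffer.put((byte) ((v & 0x7f) | 0x80));
            v >>>= 7;
        }
        buffer.put((byte) v);
    }

    // write one header: varint key length, UTF-8 key, varint value length (-1 = null), value bytes
    static void writeHeader(ByteBuffer buffer, String key, byte[] value) {
        byte[] keyBytes = key.getBytes(StandardCharsets.UTF_8);
        writeVarint(buffer, keyBytes.length);
        buffer.put(keyBytes);
        if (value == null) {
            writeVarint(buffer, -1);
        } else {
            writeVarint(buffer, value.length);
            buffer.put(value);
        }
    }
}

Writing a varint header count followed by one writeHeader call per header should produce input that readHeaders can decode back.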

Example 2: testSizeInBytes

import org.apache.kafka.common.header.internals.RecordHeader; // import the required package/class
@Test
public void testSizeInBytes() {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null)
    };

    long timestamp = System.currentTimeMillis();
    SimpleRecord[] records = new SimpleRecord[] {
        new SimpleRecord(timestamp, "key".getBytes(), "value".getBytes()),
        new SimpleRecord(timestamp + 30000, null, "value".getBytes()),
        new SimpleRecord(timestamp + 60000, "key".getBytes(), null),
        new SimpleRecord(timestamp + 60000, "key".getBytes(), "value".getBytes(), headers)
    };
    int actualSize = MemoryRecords.withRecords(CompressionType.NONE, records).sizeInBytes();
    assertEquals(actualSize, DefaultRecordBatch.sizeInBytes(Arrays.asList(records)));
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 18 | Source: DefaultRecordBatchTest.java

Example 3: reconsumeLater

import org.apache.kafka.common.header.internals.RecordHeader; // import the required package/class
private void reconsumeLater(ConsumerRecord<String, byte[]> consumeRecord) throws InterruptedException, ExecutionException {

    // copy every header except RETRY_COUNT into headerList
    Headers headers = consumeRecord.headers();
    List<Header> headerList = new ArrayList<Header>(8);
    Iterator<Header> iterator = headers.iterator();
    Integer retryCount = -1;
    boolean hasOrignalHeader = false;
    while (iterator.hasNext()) {
        Header next = iterator.next();
        if (next.key().equals(RETRY_COUNT_KEY)) {
            retryCount = serializer.deserialize(next.value());
            continue;
        }

        if (next.key().equals(ORGINAL_TOPIC)) {
            hasOrignalHeader = true;
        }
        headerList.add(next);
    }

    // increment the retry count and add it back as a header
    retryCount++;
    headerList.add(new RecordHeader(RETRY_COUNT_KEY, serializer.serialization(retryCount)));

    // record the original topic once so it survives across retries
    if (!hasOrignalHeader) {
        headerList.add(new RecordHeader(ORGINAL_TOPIC, serializer.serialization(consumeRecord.topic())));
    }

    // send the message to the retry queue that matches its retry count
    String retryTopic = calcRetryTopic(consumeRecord.topic(), retryCount);

    ProducerRecord<String, byte[]> record = new ProducerRecord<>(retryTopic,
            consumeRecord.partition() % retryQueuePartitionCount.get(retryTopic), null, consumeRecord.key(),
            consumeRecord.value(), headerList);
    Future<RecordMetadata> publishKafkaMessage = retryQueueMsgProducer.publishKafkaMessage(record);
    publishKafkaMessage.get();
}
 
Developer: QNJR-GROUP | Project: EasyTransaction | Lines: 39 | Source: KafkaEasyTransMsgConsumerImpl.java
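For completeness, here is a minimal consumer-side sketch of how the retry count written by reconsumeLater can be read back. The helper is hypothetical (it is not part of the original project); RETRY_COUNT_KEY and serializer are assumed to be the same members used in the snippet above.

// hypothetical helper mirroring the header loop above
private int currentRetryCount(ConsumerRecord<String, byte[]> consumeRecord) {
    Header retryHeader = consumeRecord.headers().lastHeader(RETRY_COUNT_KEY);
    if (retryHeader == null) {
        return 0; // a record that has never been retried carries no RETRY_COUNT header
    }
    Integer retryCount = serializer.deserialize(retryHeader.value());
    return retryCount;
}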

Example 4: publish

import org.apache.kafka.common.header.internals.RecordHeader; // import the required package/class
@Override
public EasyTransMsgPublishResult publish(String topic, String tag, String key, Map<String, Object> header, byte[] msgByte) {
    String kafkaTopic = QueueKafkaHelper.getKafkaTopic(topic, tag);

    // calculate the target partition from the parent transaction id
    TransactionId trxId = (TransactionId) header.get(EasytransConstant.CallHeadKeys.PARENT_TRX_ID_KEY);
    int partition = calcMessagePartition(kafkaTopic, trxId);

    // copy every entry of the logical header map into Kafka record headers
    List<Header> kafkaHeaderList = new ArrayList<>(header.size());
    for (Entry<String, Object> entry : header.entrySet()) {
        kafkaHeaderList.add(new RecordHeader(entry.getKey(), serializer.serialization(entry.getValue())));
    }

    ProducerRecord<String, byte[]> record = new ProducerRecord<>(kafkaTopic, partition, null, key, msgByte, kafkaHeaderList);
    Future<RecordMetadata> sendResultFuture = kafkaProducer.send(record);
    try {
        RecordMetadata recordMetadata = sendResultFuture.get();
        log.info("message sent:" + recordMetadata);
    } catch (InterruptedException | ExecutionException e) {
        throw new RuntimeException("message sent error", e);
    }

    EasyTransMsgPublishResult easyTransMsgPublishResult = new EasyTransMsgPublishResult();
    easyTransMsgPublishResult.setTopic(topic);
    easyTransMsgPublishResult.setMessageId(key);
    return easyTransMsgPublishResult;
}
 
Developer: QNJR-GROUP | Project: EasyTransaction | Lines: 28 | Source: KafkaEasyTransMsgPublisherImpl.java
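calcMessagePartition is not shown in the snippet. A hypothetical sketch follows, assuming its goal is to pin every message of one transaction to a stable partition; the real implementation may differ.

// hypothetical implementation of calcMessagePartition (the original is not shown)
private int calcMessagePartition(String kafkaTopic, TransactionId trxId) {
    int partitionCount = kafkaProducer.partitionsFor(kafkaTopic).size();
    // Math.floorMod keeps the result non-negative even for negative hash codes
    return Math.floorMod(trxId.hashCode(), partitionCount);
}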

Example 5: testBasicSerdeInvalidHeaderCountTooHigh

import org.apache.kafka.common.header.internals.RecordHeader; // import the required package/class
@Test(expected = InvalidRecordException.class)
public void testBasicSerdeInvalidHeaderCountTooHigh() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };

    SimpleRecord record = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);

    int baseSequence = 723;
    long baseOffset = 37;
    int offsetDelta = 10;
    long baseTimestamp = System.currentTimeMillis();
    long timestampDelta = 323;

    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(),
            record.headers());
    ByteBuffer buffer = out.buffer();
    buffer.flip();
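    // offset 14 of this particular serialized record holds the zigzag-varint header
    // count; (byte) 8 decodes to 4, one more than the 3 headers actually written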
    buffer.put(14, (byte) 8);

    DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
    // force iteration through the record to validate the number of headers
    assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(),
            record.headers()), logRecord.sizeInBytes());
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 29 | Source: DefaultRecordTest.java

Example 6: testBasicSerdeInvalidHeaderCountTooLow

import org.apache.kafka.common.header.internals.RecordHeader; // import the required package/class
@Test(expected = InvalidRecordException.class)
public void testBasicSerdeInvalidHeaderCountTooLow() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };

    SimpleRecord record = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);

    int baseSequence = 723;
    long baseOffset = 37;
    int offsetDelta = 10;
    long baseTimestamp = System.currentTimeMillis();
    long timestampDelta = 323;

    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(),
            record.headers());
    ByteBuffer buffer = out.buffer();
    buffer.flip();
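    // same corruption as the previous test, but (byte) 4 zigzag-decodes to a header
    // count of 2, one fewer than the 3 headers actually written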
    buffer.put(14, (byte) 4);

    DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
    // force iteration through the record to validate the number of headers
    assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(),
            record.headers()), logRecord.sizeInBytes());
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 29 | Source: DefaultRecordTest.java

Example 7: testHeaders

import org.apache.kafka.common.header.internals.RecordHeader; // import the required package/class
@PrepareOnlyThisForTest(Metadata.class)
@Test
public void testHeaders() throws Exception {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    ExtendedSerializer keySerializer = PowerMock.createNiceMock(ExtendedSerializer.class);
    ExtendedSerializer valueSerializer = PowerMock.createNiceMock(ExtendedSerializer.class);

    KafkaProducer<String, String> producer = new KafkaProducer<>(props, keySerializer, valueSerializer);
    Metadata metadata = PowerMock.createNiceMock(Metadata.class);
    MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata);

    String topic = "topic";
    Collection<Node> nodes = Collections.singletonList(new Node(0, "host1", 1000));

    final Cluster cluster = new Cluster(
            "dummy",
            Collections.singletonList(new Node(0, "host1", 1000)),
            Arrays.asList(new PartitionInfo(topic, 0, null, null, null)),
            Collections.<String>emptySet(),
            Collections.<String>emptySet());


    EasyMock.expect(metadata.fetch()).andReturn(cluster).anyTimes();

    PowerMock.replay(metadata);

    String value = "value";

    ProducerRecord<String, String> record = new ProducerRecord<>(topic, value);
    EasyMock.expect(keySerializer.serialize(topic, record.headers(), null)).andReturn(null).once();
    EasyMock.expect(valueSerializer.serialize(topic, record.headers(), value)).andReturn(value.getBytes()).once();

    PowerMock.replay(keySerializer);
    PowerMock.replay(valueSerializer);


    // ensure headers can still be mutated before send
    record.headers().add(new RecordHeader("test", "header2".getBytes()));
    
    producer.send(record, null);
    
    // ensure headers are closed and cannot be mutated after send
    try {
        record.headers().add(new RecordHeader("test", "test".getBytes()));
        fail("Expected IllegalStateException to be raised");
    } catch (IllegalStateException ise) {
        //expected
    }
    
    // ensure existing headers are unchanged and the last header for the key still has its original value
    assertTrue(Arrays.equals(record.headers().lastHeader("test").value(), "header2".getBytes()));

    PowerMock.verify(valueSerializer);
    PowerMock.verify(keySerializer);

}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 58 | Source: KafkaProducerTest.java

Example 8: testHeaders

import org.apache.kafka.common.header.internals.RecordHeader; // import the required package/class
@Test
public void testHeaders() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(time));

    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
    builder.append(0L, "key".getBytes(), "value-1".getBytes());

    Header[] headersArray = new Header[1];
    headersArray[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-2".getBytes(), headersArray);

    Header[] headersArray2 = new Header[2];
    headersArray2[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    headersArray2[1] = new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-3".getBytes(), headersArray2);

    MemoryRecords memoryRecords = builder.build();

    List<ConsumerRecord<byte[], byte[]>> records;
    subscriptions.assignFromUser(singleton(tp1));
    subscriptions.seek(tp1, 1);

    client.prepareResponse(matchesOffset(tp1, 1), fetchResponse(tp1, memoryRecords, Errors.NONE, 100L, 0));

    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    records = fetcher.fetchedRecords().get(tp1);

    assertEquals(3, records.size());

    Iterator<ConsumerRecord<byte[], byte[]>> recordIterator = records.iterator();

    ConsumerRecord<byte[], byte[]> record = recordIterator.next();
    assertNull(record.headers().lastHeader("headerKey"));

    record = recordIterator.next();
    assertEquals("headerValue", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());

    record = recordIterator.next();
    assertEquals("headerValue2", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 44 | Source: FetcherTest.java

Example 9: testBasicSerde

import org.apache.kafka.common.header.internals.RecordHeader; // import the required package/class
@Test
public void testBasicSerde() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };

    SimpleRecord[] records = new SimpleRecord[] {
        new SimpleRecord("hi".getBytes(), "there".getBytes()),
        new SimpleRecord(null, "there".getBytes()),
        new SimpleRecord("hi".getBytes(), null),
        new SimpleRecord(null, null),
        new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers)
    };

    for (SimpleRecord record : records) {
        int baseSequence = 723;
        long baseOffset = 37;
        int offsetDelta = 10;
        long baseTimestamp = System.currentTimeMillis();
        long timestampDelta = 323;

        ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
        DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(),
                record.headers());
        ByteBuffer buffer = out.buffer();
        buffer.flip();

        DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
        assertNotNull(logRecord);
        assertEquals(baseOffset + offsetDelta, logRecord.offset());
        assertEquals(baseSequence + offsetDelta, logRecord.sequence());
        assertEquals(baseTimestamp + timestampDelta, logRecord.timestamp());
        assertEquals(record.key(), logRecord.key());
        assertEquals(record.value(), logRecord.value());
        assertArrayEquals(record.headers(), logRecord.headers());
        assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(),
                record.headers()), logRecord.sizeInBytes());
    }
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 42 | Source: DefaultRecordTest.java


Note: The org.apache.kafka.common.header.internals.RecordHeader examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community-contributed open-source projects; copyright of the source code belongs to the original authors. Consult each project's License before distributing or using the code, and do not reproduce this article without permission.