This article collects typical usages of the Java method org.springframework.cloud.stream.binder.ExtendedProducerProperties.setHeaderMode. If you are wondering what ExtendedProducerProperties.setHeaderMode does, how to call it, or where to find real examples of it, the curated code samples below should help. You can also read further about the enclosing class, org.springframework.cloud.stream.binder.ExtendedProducerProperties.
Four code examples of ExtendedProducerProperties.setHeaderMode are shown below, sorted by popularity by default.
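Before the examples, here is a minimal standalone sketch of the call itself. It is not taken from any of the projects below; the class name HeaderModeSketch is illustrative, and the Kafka extension import path is assumed. ExtendedProducerProperties wraps a binder-specific extension object, and setHeaderMode controls how the binder treats message headers: with HeaderMode.embeddedHeaders they are serialized into the outbound payload, while HeaderMode.none sends the raw payload untouched, which is what the "raw mode" tests below exercise.

import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;

public class HeaderModeSketch {

    public static void main(String[] args) {
        // Wrap the binder-specific extension (here the Kafka producer extension,
        // assumed to be on the classpath) in the common extended producer properties.
        ExtendedProducerProperties<KafkaProducerProperties> properties =
                new ExtendedProducerProperties<>(new KafkaProducerProperties());

        // "Raw" mode: do not embed Spring headers into the outbound payload.
        properties.setHeaderMode(HeaderMode.none);

        System.out.println(properties.getHeaderMode()); // prints: none
    }
}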
Example 1: provisionProducerDestination
import org.springframework.cloud.stream.binder.ExtendedProducerProperties; // import the package/class this method depends on
@Override
public ProducerDestination provisionProducerDestination(String name,
        ExtendedProducerProperties<KinesisProducerProperties> properties) throws ProvisioningException {
    if (logger.isInfoEnabled()) {
        logger.info("Using Kinesis stream for outbound: " + name);
    }
    if (properties.getHeaderMode() == null) {
        properties.setHeaderMode(HeaderMode.embeddedHeaders);
    }
    return new KinesisProducerDestination(name, createOrUpdate(name, properties.getPartitionCount()));
}
Author: spring-cloud, Project: spring-cloud-stream-binder-aws-kinesis, Lines of code: 15, Source: KinesisStreamProvisioner.java
Example 2: testPartitionedModuleJavaWithRawMode
import org.springframework.cloud.stream.binder.ExtendedProducerProperties; // import the package/class this method depends on
@Test
@SuppressWarnings("unchecked")
public void testPartitionedModuleJavaWithRawMode() throws Exception {
    Binder binder = getBinder();
    ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
    properties.setHeaderMode(HeaderMode.none);
    properties.setPartitionKeyExtractorClass(RawKafkaPartitionTestSupport.class);
    properties.setPartitionSelectorClass(RawKafkaPartitionTestSupport.class);
    properties.setPartitionCount(6);
    DirectChannel output = createBindableChannel("output", createProducerBindingProperties(properties));
    output.setBeanName("test.output");
    Binding<MessageChannel> outputBinding = binder.bindProducer("partJ.raw.0", output, properties);
    ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
    consumerProperties.setConcurrency(2);
    consumerProperties.setInstanceCount(3);
    consumerProperties.setInstanceIndex(0);
    consumerProperties.setPartitioned(true);
    consumerProperties.setHeaderMode(HeaderMode.none);
    consumerProperties.getExtension().setAutoRebalanceEnabled(false);
    QueueChannel input0 = new QueueChannel();
    input0.setBeanName("test.input0J");
    Binding<MessageChannel> input0Binding = binder.bindConsumer("partJ.raw.0", "test", input0, consumerProperties);
    consumerProperties.setInstanceIndex(1);
    QueueChannel input1 = new QueueChannel();
    input1.setBeanName("test.input1J");
    Binding<MessageChannel> input1Binding = binder.bindConsumer("partJ.raw.0", "test", input1, consumerProperties);
    consumerProperties.setInstanceIndex(2);
    QueueChannel input2 = new QueueChannel();
    input2.setBeanName("test.input2J");
    Binding<MessageChannel> input2Binding = binder.bindConsumer("partJ.raw.0", "test", input2, consumerProperties);
    output.send(new GenericMessage<>(new byte[]{(byte) 0}));
    output.send(new GenericMessage<>(new byte[]{(byte) 1}));
    output.send(new GenericMessage<>(new byte[]{(byte) 2}));
    Message<?> receive0 = receive(input0);
    assertThat(receive0).isNotNull();
    Message<?> receive1 = receive(input1);
    assertThat(receive1).isNotNull();
    Message<?> receive2 = receive(input2);
    assertThat(receive2).isNotNull();
    assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
            ((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
    input0Binding.unbind();
    input1Binding.unbind();
    input2Binding.unbind();
    outputBinding.unbind();
}
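The RawKafkaPartitionTestSupport class referenced above is not shown on this page. Below is a minimal sketch of what such a helper could look like, assuming it implements the binder's PartitionKeyExtractorStrategy and PartitionSelectorStrategy interfaces and keys on the first byte of the raw payload; the actual test-support class in the Kafka binder project may differ.

import org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy;
import org.springframework.cloud.stream.binder.PartitionSelectorStrategy;
import org.springframework.messaging.Message;

// Hypothetical reconstruction: extract the first payload byte as the partition
// key and spread keys across the available partitions.
public class RawKafkaPartitionTestSupport implements PartitionKeyExtractorStrategy, PartitionSelectorStrategy {

    @Override
    public Object extractKey(Message<?> message) {
        return ((byte[]) message.getPayload())[0];
    }

    @Override
    public int selectPartition(Object key, int partitionCount) {
        return ((Byte) key) % partitionCount;
    }
}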
Example 3: testPartitionedModuleSpELWithRawMode
import org.springframework.cloud.stream.binder.ExtendedProducerProperties; // import the package/class this method depends on
@Test
@SuppressWarnings("unchecked")
public void testPartitionedModuleSpELWithRawMode() throws Exception {
    Binder binder = getBinder();
    ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
    properties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload[0]"));
    properties.setPartitionSelectorExpression(spelExpressionParser.parseExpression("hashCode()"));
    properties.setPartitionCount(6);
    properties.setHeaderMode(HeaderMode.none);
    DirectChannel output = createBindableChannel("output", createProducerBindingProperties(properties));
    output.setBeanName("test.output");
    Binding<MessageChannel> outputBinding = binder.bindProducer("part.raw.0", output, properties);
    try {
        Object endpoint = extractEndpoint(outputBinding);
        assertThat(getEndpointRouting(endpoint))
                .contains(getExpectedRoutingBaseDestination("part.raw.0", "test") + "-' + headers['partition']");
    }
    catch (UnsupportedOperationException ignored) {
    }
    ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
    consumerProperties.setConcurrency(2);
    consumerProperties.setInstanceIndex(0);
    consumerProperties.setInstanceCount(3);
    consumerProperties.setPartitioned(true);
    consumerProperties.setHeaderMode(HeaderMode.none);
    consumerProperties.getExtension().setAutoRebalanceEnabled(false);
    QueueChannel input0 = new QueueChannel();
    input0.setBeanName("test.input0S");
    Binding<MessageChannel> input0Binding = binder.bindConsumer("part.raw.0", "test", input0, consumerProperties);
    consumerProperties.setInstanceIndex(1);
    QueueChannel input1 = new QueueChannel();
    input1.setBeanName("test.input1S");
    Binding<MessageChannel> input1Binding = binder.bindConsumer("part.raw.0", "test", input1, consumerProperties);
    consumerProperties.setInstanceIndex(2);
    QueueChannel input2 = new QueueChannel();
    input2.setBeanName("test.input2S");
    Binding<MessageChannel> input2Binding = binder.bindConsumer("part.raw.0", "test", input2, consumerProperties);
    Message<byte[]> message2 = org.springframework.integration.support.MessageBuilder.withPayload(new byte[]{2})
            .setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID, "kafkaBinderTestCommonsDelegate")
            .setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 42)
            .setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_SIZE, 43).build();
    output.send(message2);
    output.send(new GenericMessage<>(new byte[]{1}));
    output.send(new GenericMessage<>(new byte[]{0}));
    Message<?> receive0 = receive(input0);
    assertThat(receive0).isNotNull();
    Message<?> receive1 = receive(input1);
    assertThat(receive1).isNotNull();
    Message<?> receive2 = receive(input2);
    assertThat(receive2).isNotNull();
    assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
            ((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
    input0Binding.unbind();
    input1Binding.unbind();
    input2Binding.unbind();
    outputBinding.unbind();
}
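As a side note, the partitionKeyExpression above is evaluated against the outbound message. The standalone sketch below mimics that by passing the message as the SpEL root object; the class name PartitionKeyExpressionSketch is illustrative, while the parser and message types are standard Spring classes.

import org.springframework.expression.spel.standard.SpelExpressionParser;
import org.springframework.messaging.Message;
import org.springframework.messaging.support.GenericMessage;

public class PartitionKeyExpressionSketch {

    public static void main(String[] args) {
        SpelExpressionParser parser = new SpelExpressionParser();
        Message<byte[]> message = new GenericMessage<>(new byte[] { 2 });

        // "payload[0]" reads message.getPayload() and indexes into the byte array.
        Object key = parser.parseExpression("payload[0]").getValue(message);

        System.out.println(key); // prints: 2
    }
}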
Example 4: testSendAndReceiveWithRawMode
import org.springframework.cloud.stream.binder.ExtendedProducerProperties; // import the package/class this method depends on
@Test
@SuppressWarnings({"unchecked", "rawtypes"})
public void testSendAndReceiveWithRawMode() throws Exception {
    Binder binder = getBinder();
    ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
    producerProperties.setHeaderMode(HeaderMode.none);
    DirectChannel moduleOutputChannel = createBindableChannel("output",
            createProducerBindingProperties(producerProperties));
    ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
    consumerProperties.setHeaderMode(HeaderMode.none);
    DirectChannel moduleInputChannel = createBindableChannel("input", createConsumerBindingProperties(consumerProperties));
    Binding<MessageChannel> producerBinding = binder.bindProducer("raw.0", moduleOutputChannel,
            producerProperties);
    Binding<MessageChannel> consumerBinding = binder.bindConsumer("raw.0", "test", moduleInputChannel,
            consumerProperties);
    Message<?> message = org.springframework.integration.support.MessageBuilder
            .withPayload("testSendAndReceiveWithRawMode".getBytes()).build();
    // Let the consumer actually bind to the producer before sending a msg
    binderBindUnbindLatency();
    moduleOutputChannel.send(message);
    CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<Message<String>> inboundMessageRef = new AtomicReference<>();
    moduleInputChannel.subscribe(message1 -> {
        try {
            inboundMessageRef.set((Message<String>) message1);
        }
        finally {
            latch.countDown();
        }
    });
    Assert.isTrue(latch.await(5, TimeUnit.SECONDS), "Failed to receive message");
    assertThat(inboundMessageRef.get()).isNotNull();
    assertThat(inboundMessageRef.get().getPayload()).isEqualTo("testSendAndReceiveWithRawMode");
    producerBinding.unbind();
    consumerBinding.unbind();
}