This article collects typical usage examples of the Java method org.apache.pulsar.client.api.ProducerConfiguration.getSendTimeoutMs. If you are wondering what ProducerConfiguration.getSendTimeoutMs does, how to call it, or what it looks like in real code, the curated samples below may help. You can also look at further usage examples of the enclosing class, org.apache.pulsar.client.api.ProducerConfiguration.
Two code examples of ProducerConfiguration.getSendTimeoutMs are shown below, sorted by popularity by default.
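Before the examples, a minimal sketch of the call itself, using only the deprecated ProducerConfiguration API that both examples rely on (the SendTimeoutExample class and the chosen timeout value are illustrative): setSendTimeout(...) stores the timeout on the configuration object and getSendTimeoutMs() reads it back in milliseconds. Both examples below only check whether the returned value is positive, i.e. whether a send timeout is configured at all.

import java.util.concurrent.TimeUnit;

import org.apache.pulsar.client.api.ProducerConfiguration;

public class SendTimeoutExample {
    public static void main(String[] args) {
        ProducerConfiguration conf = new ProducerConfiguration();

        // Store a 30-second send timeout; getSendTimeoutMs() reports it in milliseconds
        conf.setSendTimeout(30, TimeUnit.SECONDS);
        long timeoutMs = conf.getSendTimeoutMs(); // 30000

        // The examples below guard on a positive value before using the timeout
        boolean timeoutConfigured = timeoutMs > 0;
        System.out.println("send timeout configured: " + timeoutConfigured + " (" + timeoutMs + " ms)");
    }
}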
Example 1: PulsarKafkaProducer
import org.apache.pulsar.client.api.ProducerConfiguration; // import the package/class required by the method

@SuppressWarnings({ "unchecked", "deprecation" })
private PulsarKafkaProducer(Map<String, Object> conf, Properties properties, Serializer<K> keySerializer,
        Serializer<V> valueSerializer) {
    properties.forEach((k, v) -> conf.put((String) k, v));

    ProducerConfig producerConfig = new ProducerConfig(conf);

    if (keySerializer == null) {
        this.keySerializer = producerConfig.getConfiguredInstance(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                Serializer.class);
        this.keySerializer.configure(producerConfig.originals(), true);
    } else {
        this.keySerializer = keySerializer;
        producerConfig.ignore(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
    }

    if (valueSerializer == null) {
        this.valueSerializer = producerConfig.getConfiguredInstance(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                Serializer.class);
        this.valueSerializer.configure(producerConfig.originals(), true);
    } else {
        this.valueSerializer = valueSerializer;
        producerConfig.ignore(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
    }

    String serviceUrl = producerConfig.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG).get(0);
    ClientConfiguration clientConf = PulsarKafkaConfig.getClientConfiguration(properties);
    try {
        client = PulsarClient.create(serviceUrl, clientConf);
    } catch (PulsarClientException e) {
        throw new RuntimeException(e);
    }

    pulsarProducerConf = new ProducerConfiguration();
    pulsarProducerConf.setBatchingEnabled(true);

    // To mimic Kafka's batching behavior, wait a small amount of time to batch
    // if the client is sending messages fast enough
    long lingerMs = Long.parseLong(properties.getProperty(ProducerConfig.LINGER_MS_CONFIG, "1"));
    pulsarProducerConf.setBatchingMaxPublishDelay(lingerMs, TimeUnit.MILLISECONDS);

    String compressionType = properties.getProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG);
    if ("gzip".equals(compressionType)) {
        pulsarProducerConf.setCompressionType(CompressionType.ZLIB);
    } else if ("lz4".equals(compressionType)) {
        pulsarProducerConf.setCompressionType(CompressionType.LZ4);
    }

    pulsarProducerConf.setSendTimeout(
            Integer.parseInt(properties.getProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG, "60000")),
            TimeUnit.MILLISECONDS);

    boolean blockOnBufferFull = Boolean
            .parseBoolean(properties.getProperty(ProducerConfig.BLOCK_ON_BUFFER_FULL_CONFIG, "false"));

    // Kafka's blocking semantics with blockOnBufferFull=false differ from the Pulsar client:
    // Pulsar fails immediately when the queue is full and blockIfQueueFull=false, whereas
    // Kafka still blocks for up to "max.block.ms" before reporting an error.
    boolean shouldBlockPulsarProducer = pulsarProducerConf.getSendTimeoutMs() > 0 || blockOnBufferFull;
    pulsarProducerConf.setBlockIfQueueFull(shouldBlockPulsarProducer);
}
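As a follow-up to Example 1, here is a small, hypothetical sketch of the Kafka-style properties that the constructor above parses; the values, the service URL, and the KafkaStylePropertiesExample class are illustrative assumptions, not part of the example. The comments note which Pulsar producer setting each key is mapped to.

import java.util.Properties;

import org.apache.kafka.clients.producer.ProducerConfig;

public class KafkaStylePropertiesExample {
    public static void main(String[] args) {
        Properties props = new Properties();

        // The first entry of the bootstrap list becomes the Pulsar service URL
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "pulsar://localhost:6650");

        // Mapped to ProducerConfiguration.setBatchingMaxPublishDelay(...)
        props.put(ProducerConfig.LINGER_MS_CONFIG, "5");

        // "gzip" is mapped to CompressionType.ZLIB, "lz4" to CompressionType.LZ4
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "lz4");

        // Mapped to ProducerConfiguration.setSendTimeout(...); because the resulting
        // getSendTimeoutMs() is positive, the wrapper also enables blockIfQueueFull
        props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "60000");

        System.out.println(props);
    }
}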
Example 2: ProducerImpl
import org.apache.pulsar.client.api.ProducerConfiguration; // import the package/class required by the method

public ProducerImpl(PulsarClientImpl client, String topic, ProducerConfiguration conf,
        CompletableFuture<Producer> producerCreatedFuture, int partitionIndex) {
    super(client, topic, conf, producerCreatedFuture);
    this.producerId = client.newProducerId();
    this.producerName = conf.getProducerName();
    this.partitionIndex = partitionIndex;
    this.pendingMessages = Queues.newArrayBlockingQueue(conf.getMaxPendingMessages());
    this.pendingCallbacks = Queues.newArrayBlockingQueue(conf.getMaxPendingMessages());
    this.semaphore = new Semaphore(conf.getMaxPendingMessages(), true);
    this.compressor = CompressionCodecProvider
            .getCompressionCodec(convertCompressionType(conf.getCompressionType()));

    if (conf.getInitialSequenceId().isPresent()) {
        long initialSequenceId = conf.getInitialSequenceId().get();
        this.lastSequenceIdPublished = initialSequenceId;
        this.msgIdGenerator = initialSequenceId + 1;
    } else {
        this.lastSequenceIdPublished = -1;
        this.msgIdGenerator = 0;
    }

    if (conf.isEncryptionEnabled()) {
        String logCtx = "[" + topic + "] [" + producerName + "] [" + producerId + "]";
        this.msgCrypto = new MessageCrypto(logCtx, true);

        // Regenerate the data key cipher at a fixed interval
        keyGeneratorTask = client.eventLoopGroup().scheduleWithFixedDelay(() -> {
            try {
                msgCrypto.addPublicKeyCipher(conf.getEncryptionKeys(), conf.getCryptoKeyReader());
            } catch (CryptoException e) {
                if (!producerCreatedFuture.isDone()) {
                    log.warn("[{}] [{}] [{}] Failed to add public key cipher.", topic, producerName, producerId);
                    producerCreatedFuture.completeExceptionally(e);
                }
            }
        }, 0L, 4L, TimeUnit.HOURS);
    }

    if (conf.getSendTimeoutMs() > 0) {
        sendTimeout = client.timer().newTimeout(this, conf.getSendTimeoutMs(), TimeUnit.MILLISECONDS);
    }

    this.createProducerTimeout = System.currentTimeMillis() + client.getConfiguration().getOperationTimeoutMs();

    if (conf.getBatchingEnabled()) {
        this.maxNumMessagesInBatch = conf.getBatchingMaxMessages();
        this.batchMessageContainer = new BatchMessageContainer(maxNumMessagesInBatch,
                convertCompressionType(conf.getCompressionType()), topic, producerName);
    } else {
        this.maxNumMessagesInBatch = 1;
        this.batchMessageContainer = null;
    }

    if (client.getConfiguration().getStatsIntervalSeconds() > 0) {
        stats = new ProducerStats(client, conf, this);
    } else {
        stats = ProducerStats.PRODUCER_STATS_DISABLED;
    }

    if (conf.getProperties().isEmpty()) {
        metadata = Collections.emptyMap();
    } else {
        metadata = Collections.unmodifiableMap(new HashMap<>(conf.getProperties()));
    }

    grabCnx();
}
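For context on the getSendTimeoutMs() guard in Example 2, here is a minimal sketch of the same schedule-only-if-positive pattern, assuming Netty's HashedWheelTimer as the timer implementation (client.timer() in the Pulsar client returns a Netty Timer). The SendTimeoutTimerSketch class and its re-arming behavior are illustrative, not the real ProducerImpl logic, which fails pending messages that have waited longer than the timeout.

import java.util.concurrent.TimeUnit;

import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import io.netty.util.TimerTask;

public class SendTimeoutTimerSketch implements TimerTask {

    private final HashedWheelTimer timer = new HashedWheelTimer();
    private final long sendTimeoutMs;
    private volatile Timeout sendTimeout;

    public SendTimeoutTimerSketch(long sendTimeoutMs) {
        this.sendTimeoutMs = sendTimeoutMs;
        // Same guard as the constructor above: a non-positive value means "no send timeout"
        if (sendTimeoutMs > 0) {
            sendTimeout = timer.newTimeout(this, sendTimeoutMs, TimeUnit.MILLISECONDS);
        }
    }

    @Override
    public void run(Timeout timeout) {
        // The real producer would fail pending messages older than the timeout here;
        // this sketch only logs and re-arms the timer
        System.out.println("send timeout of " + sendTimeoutMs + " ms fired");
        sendTimeout = timer.newTimeout(this, sendTimeoutMs, TimeUnit.MILLISECONDS);
    }

    public void close() {
        if (sendTimeout != null) {
            sendTimeout.cancel();
        }
        timer.stop();
    }
}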