本文整理匯總了Java中org.apache.kafka.streams.KafkaStreams.setUncaughtExceptionHandler方法的典型用法代碼示例。如果您正苦於以下問題:Java KafkaStreams.setUncaughtExceptionHandler方法的具體用法?Java KafkaStreams.setUncaughtExceptionHandler怎麽用?Java KafkaStreams.setUncaughtExceptionHandler使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.kafka.streams.KafkaStreams
的用法示例。
在下文中一共展示了KafkaStreams.setUncaughtExceptionHandler方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: worker
import org.apache.kafka.streams.KafkaStreams; //導入方法依賴的package包/類
@Override
public ReadOnlyKeyValueStore<Long, byte[]> worker() {
    // Assemble the Streams configuration: app id doubles as the consumer group,
    // and Long/byte[] serdes match the store's key/value types below.
    Properties config = super.configBuilder()//
            .put(StreamsConfig.APPLICATION_ID_CONFIG, MallConstants.ORDER_COMMITED_TOPIC)//
            .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrap)//
            .put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Long().getClass())//
            .put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass())//
            .build();
    StreamsBuilder builder = new StreamsBuilder();
    KafkaStreams streams = new KafkaStreams(builder.build(), new StreamsConfig(config));
    // Pass the Throwable itself to the logger: logging only e.getMessage()
    // discards the stack trace (and NPE messages are often null).
    streams.setUncaughtExceptionHandler((Thread t, Throwable e) ->
            log.error("Uncaught exception on stream thread " + t.getName(), e));
    streams.start();
    // Expose the materialized store for interactive key-value queries.
    return this.worker = // k-v query
            streams.store(queryableStoreName, QueryableStoreTypes.<Long, byte[]>keyValueStore());
}
示例2: configureStream
import org.apache.kafka.streams.KafkaStreams; //導入方法依賴的package包/類
private KafkaStreams configureStream(final StatisticType statisticType,
                                     final AbstractStatisticFlatMapper mapper) {
    // Base properties for this stream instance; the application id also
    // serves as the Kafka consumer group id.
    final Map<String, Object> overrides = new HashMap<>();
    overrides.put(StreamsConfig.APPLICATION_ID_CONFIG, appId);
    //TODO need to specify number of threads in the yml as it could be box specific
    streamThreads = getStreamThreads();
    overrides.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, streamThreads);

    final StreamsConfig config = buildStreamsConfig(appId, overrides);

    // Delegate topology construction to the factory, then attach the
    // uncaught-exception handler so stream-thread failures are reported.
    final KafkaStreams streams = statisticsFlatMappingStreamFactory.buildStream(
            config,
            inputTopic,
            badEventTopic,
            permsTopicsPrefix,
            mapper);
    streams.setUncaughtExceptionHandler(buildUncaughtExceptionHandler(appId, statisticType, mapper));
    return streams;
}
示例3: shouldThrowStreamsExceptionNoResetSpecified
import org.apache.kafka.streams.KafkaStreams; //導入方法依賴的package包/類
@Test
public void shouldThrowStreamsExceptionNoResetSpecified() throws Exception {
    // auto.offset.reset=none forces NoOffsetForPartitionException when no
    // committed offset exists, which the handler below should observe.
    Properties props = new Properties();
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");

    Properties localConfig = StreamsTestUtils.getStreamsConfig(
            "testAutoOffsetWithNone",
            CLUSTER.bootstrapServers(),
            STRING_SERDE_CLASSNAME,
            STRING_SERDE_CLASSNAME,
            props);

    final KStreamBuilder builder = new KStreamBuilder();
    final KStream<String, String> exceptionStream = builder.stream(NOOP);
    exceptionStream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);

    KafkaStreams streams = new KafkaStreams(builder, localConfig);

    final TestingUncaughtExceptionHandler uncaughtExceptionHandler = new TestingUncaughtExceptionHandler();
    final TestCondition correctExceptionThrownCondition = new TestCondition() {
        @Override
        public boolean conditionMet() {
            return uncaughtExceptionHandler.correctExceptionThrown;
        }
    };

    streams.setUncaughtExceptionHandler(uncaughtExceptionHandler);
    streams.start();
    try {
        TestUtils.waitForCondition(correctExceptionThrownCondition, "The expected NoOffsetForPartitionException was never thrown");
    } finally {
        // Always close, even on wait timeout: otherwise the Streams client
        // leaks threads and keeps the state-directory lock for later tests.
        streams.close();
    }
}
開發者ID:YMCoding,項目名稱:kafka-0.11.0.0-src-with-comment,代碼行數:34,代碼來源:KStreamsFineGrainedAutoResetIntegrationTest.java
示例4: createKafkaStreamsWithExceptionHandler
import org.apache.kafka.streams.KafkaStreams; //導入方法依賴的package包/類
/**
 * Creates a {@link KafkaStreams} client whose uncaught-exception handler
 * reports the failure and shuts the client down with a 30-second timeout.
 */
private KafkaStreams createKafkaStreamsWithExceptionHandler(final KStreamBuilder builder, final Properties props) {
    final KafkaStreams streamsClient = new KafkaStreams(builder, props);
    // Any exception escaping a stream thread is fatal: report it and stop
    // the whole client rather than continuing with a dead thread.
    streamsClient.setUncaughtExceptionHandler((t, e) -> {
        System.out.println("FATAL: An unexpected exception is encountered on thread " + t + ": " + e);
        streamsClient.close(30, TimeUnit.SECONDS);
    });
    return streamsClient;
}
示例5: main
import org.apache.kafka.streams.KafkaStreams; //導入方法依賴的package包/類
/**
 * Broker-compatibility system test: starts a pass-through Streams topology
 * ({@code SOURCE_TOPIC -> SINK_TOPIC}), produces one record, and waits until
 * it is observed on the output topic.
 *
 * @param args [0] bootstrap servers (default localhost:9092),
 *             [1] state directory (default: temp dir),
 *             [2] whether exactly-once processing is enabled (default false)
 */
public static void main(final String[] args) throws Exception {
    System.out.println("StreamsTest instance started");

    final String kafka = args.length > 0 ? args[0] : "localhost:9092";
    final String stateDirStr = args.length > 1 ? args[1] : TestUtils.tempDirectory().getAbsolutePath();
    final boolean eosEnabled = args.length > 2 ? Boolean.parseBoolean(args[2]) : false;

    final File stateDir = new File(stateDirStr);
    stateDir.mkdir();

    final Properties streamsProperties = new Properties();
    streamsProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-streams-system-test-broker-compatibility");
    streamsProperties.put(StreamsConfig.STATE_DIR_CONFIG, stateDir.toString());
    streamsProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    streamsProperties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsProperties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsProperties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100);
    if (eosEnabled) {
        streamsProperties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
    }
    // Short timeouts so the system test fails fast against an incompatible
    // broker; request timeout must exceed the session timeout.
    final int timeout = 6000;
    streamsProperties.put(StreamsConfig.consumerPrefix(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), timeout);
    streamsProperties.put(StreamsConfig.consumerPrefix(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG), timeout);
    streamsProperties.put(StreamsConfig.REQUEST_TIMEOUT_MS_CONFIG, timeout + 1);

    final KStreamBuilder builder = new KStreamBuilder();
    builder.stream(SOURCE_TOPIC).to(SINK_TOPIC);

    final KafkaStreams streams = new KafkaStreams(builder, streamsProperties);
    streams.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(final Thread t, final Throwable e) {
            System.out.println("FATAL: An unexpected exception is encountered on thread " + t + ": " + e);
            streams.close(30, TimeUnit.SECONDS);
        }
    });
    System.out.println("start Kafka Streams");
    streams.start();

    System.out.println("send data");
    final Properties producerProperties = new Properties();
    producerProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    producerProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    producerProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

    // try-with-resources: the original never closed the producer, leaking its
    // network thread and relying on linger.ms=0 for the record to be flushed.
    try (final KafkaProducer<String, String> producer = new KafkaProducer<>(producerProperties)) {
        producer.send(new ProducerRecord<>(SOURCE_TOPIC, "key", "value"));
    }

    System.out.println("wait for result");
    loopUntilRecordReceived(kafka, eosEnabled);
    System.out.println("close Kafka Streams");
    streams.close();
}