This article collects typical usage examples of the Java class org.apache.kafka.connect.errors.RetriableException. If you are unsure what RetriableException is for, or how to use it, the curated examples below should help.
The RetriableException class belongs to the org.apache.kafka.connect.errors package. 15 code examples are shown below, ordered by popularity.
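Before the examples, a quick orientation: throwing RetriableException tells the Connect framework to redeliver the same batch of records to the task, typically after a backoff set via context.timeout(...). The following minimal sketch shows that contract in a sink task; the class name and flushToBackend(...) are illustrative, not taken from any of the projects below.

import java.io.IOException;
import java.util.Collection;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.errors.RetriableException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

// Illustrative sketch only: ExampleSinkTask and flushToBackend are hypothetical.
public abstract class ExampleSinkTask extends SinkTask {
    private int remainingRetries = 3;

    @Override
    public void put(Collection<SinkRecord> records) {
        try {
            flushToBackend(records); // hypothetical write that may fail transiently
        } catch (IOException e) {
            if (remainingRetries-- <= 0) {
                throw new ConnectException(e);   // fatal: the task stops
            }
            context.timeout(5000);               // back off before redelivery
            throw new RetriableException(e);     // framework redelivers this batch
        }
        remainingRetries = 3;
    }

    protected abstract void flushToBackend(Collection<SinkRecord> records) throws IOException;
}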
Example 1: connectInternal
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
/**
* Internal method to connect to MQ.
*
* @throws RetriableException Operation failed, but connector should continue to retry.
* @throws ConnectException Operation failed and connector should stop.
*/
private void connectInternal() throws ConnectException, RetriableException {
    if (connected) {
        return;
    }

    try {
        if (userName != null) {
            jmsCtxt = mqConnFactory.createContext(userName, password, JMSContext.SESSION_TRANSACTED);
        }
        else {
            jmsCtxt = mqConnFactory.createContext(JMSContext.SESSION_TRANSACTED);
        }

        jmsProd = jmsCtxt.createProducer();
        jmsProd.setDeliveryMode(deliveryMode);
        jmsProd.setTimeToLive(timeToLive);
        connected = true;
    }
    catch (JMSRuntimeException jmse) {
        log.debug("JMS exception {}", jmse);
        handleException(jmse);
    }
}
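The handleException(jmse) helper is not part of this snippet. As a rough idea of what it might do, here is a hedged sketch that classifies the failure as retriable or fatal; the reason codes and classification are assumptions for illustration, not the connector's actual logic.

// Illustrative sketch: decide whether a JMS failure is worth retrying.
// The MQ reason codes checked here are examples, not an exhaustive list.
private void handleException(JMSRuntimeException jmse) throws ConnectException {
    connected = false;
    boolean retriable = false;
    Throwable cause = jmse.getCause();
    if (cause instanceof MQException) {
        int reason = ((MQException) cause).getReason();
        // Broken connections and quiescing queue managers are usually transient
        if (reason == MQConstants.MQRC_CONNECTION_BROKEN
                || reason == MQConstants.MQRC_Q_MGR_QUIESCING) {
            retriable = true;
        }
    }
    if (retriable) {
        throw new RetriableException("Transient failure connecting to MQ", jmse);
    }
    throw new ConnectException("Fatal failure connecting to MQ", jmse);
}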
Example 2: commit
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
/**
* Commits the current transaction. If the current transaction contains a message that could not
* be processed, the transaction is "in peril" and is rolled back instead to avoid data loss.
*
* @throws RetriableException Operation failed, but connector should continue to retry.
* @throws ConnectException Operation failed and connector should stop.
*/
public void commit() throws ConnectException, RetriableException {
    connectInternal();

    try {
        if (inflight) {
            inflight = false;

            if (inperil) {
                inperil = false;
                log.trace("Rolling back in-flight transaction");
                jmsCtxt.rollback();
                throw new RetriableException("Transaction rolled back");
            }
            else {
                jmsCtxt.commit();
            }
        }
    }
    catch (JMSRuntimeException jmse) {
        log.debug("JMS exception {}", jmse);
        handleException(jmse);
    }
}
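The inperil flag is set elsewhere in the connector, outside this snippet. A hedged sketch of how such a flag might be raised while receiving, assuming a builder that converts JMS messages into SourceRecords (toSourceRecord and the surrounding fields are illustrative):

// Illustrative sketch: after a destructive read from the queue, a failure
// before the record reaches Kafka would lose the message, so the transaction
// is flagged "in peril"; commit() above then rolls back and retries.
public SourceRecord receive() {
    connectInternal();
    Message m = jmsCons.receiveNoWait();
    if (m == null) {
        return null;
    }
    inflight = true;
    inperil = true;                                         // cleared once delivery is safe
    SourceRecord rec = builder.toSourceRecord(jmsCtxt, m);  // hypothetical converter
    inperil = false;
    return rec;
}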
Example 3: connectInternal
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
/**
* Internal method to connect to MQ.
*
* @throws RetriableException Operation failed, but connector should continue to retry.
* @throws ConnectException Operation failed and connector should stop.
*/
private void connectInternal() throws ConnectException, RetriableException {
    if (connected) {
        return;
    }

    if (closeNow.get()) {
        throw new ConnectException("Connection closing");
    }

    try {
        if (userName != null) {
            jmsCtxt = mqConnFactory.createContext(userName, password, JMSContext.SESSION_TRANSACTED);
        }
        else {
            jmsCtxt = mqConnFactory.createContext(JMSContext.SESSION_TRANSACTED);
        }

        jmsCons = jmsCtxt.createConsumer(queue);
        connected = true;
    }
    catch (JMSRuntimeException jmse) {
        log.debug("JMS exception {}", jmse);
        handleException(jmse);
    }
}
Example 4: put
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
@Override
public void put(final Collection<SinkRecord> records) {
    if (records.isEmpty()) {
        return;
    }
    log.info("===>>>records size:{}", records.size());
    try {
        writer.batchWrite(records);
    } catch (SQLException e) {
        log.error(e.getMessage(), e);
        if (remainRetries <= 0) {
            throw new ConnectException(e);
        } else {
            writer.closeQuietly();
            writer = new MySqlDbWriter(config);
            remainRetries--;
            context.timeout(config.retryBackoffMs);
            throw new RetriableException(e);
        }
    }
    remainRetries = config.maxRetries;
}
Example 5: put
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
@Override
public void put(final Collection<SinkRecord> records) {
    if (records == null || records.isEmpty()) {
        return;
    }
    log.info("===>>>records size:{}", records.size());
    try {
        writer.batchWrite(records);
    } catch (SQLException e) {
        log.error(e.getMessage(), e);
        if (remainRetries <= 0) {
            throw new ConnectException(e);
        } else {
            writer.closeQuietly();
            writer = new MySqlDbWriter(config);
            remainRetries--;
            context.timeout(config.retryBackoffMs);
            throw new RetriableException(e);
        }
    }
    remainRetries = config.maxRetries;
}
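Examples 4 and 5 are nearly identical; both depend on two connector settings, config.maxRetries and config.retryBackoffMs. A hedged sketch of how such settings are commonly declared with Connect's ConfigDef follows; the key names and defaults are illustrative, not this connector's actual configuration.

import org.apache.kafka.common.config.ConfigDef;

// Illustrative ConfigDef sketch; "max.retries" and "retry.backoff.ms" are
// assumed key names backing config.maxRetries / config.retryBackoffMs above.
public static final ConfigDef CONFIG_DEF = new ConfigDef()
        .define("max.retries", ConfigDef.Type.INT, 10, ConfigDef.Importance.MEDIUM,
                "How many times to retry a failed write before failing the task.")
        .define("retry.backoff.ms", ConfigDef.Type.LONG, 3000L, ConfigDef.Importance.MEDIUM,
                "How long to wait before the framework redelivers the batch after a retriable error.");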
Example 6: connectionRefused
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
@Test
public void connectionRefused() throws IOException {
    Collection<SinkRecord> sinkRecords = new ArrayList<>();
    SinkRecordContentTest.addRecord(sinkRecords, ImmutableMap.of("host", "hostname.example.com"));
    SinkRecordContentTest.addRecord(sinkRecords, ImmutableMap.of("host", "hostname.example.com", "time", new Date(1472256858924L), "source", "testapp"));
    SinkRecordContentTest.addRecord(sinkRecords, ImmutableMap.of("host", "hostname.example.com", "time", new Date(1472256858924L), "source", "testapp", "sourcetype", "txt", "index", "main"));

    final LowLevelHttpRequest httpRequest = mock(LowLevelHttpRequest.class, CALLS_REAL_METHODS);
    when(httpRequest.execute()).thenThrow(ConnectException.class);

    this.task.transport = new MockHttpTransport() {
        @Override
        public LowLevelHttpRequest buildRequest(String method, String url) throws IOException {
            return httpRequest;
        }
    };
    this.task.httpRequestFactory = this.task.transport.createRequestFactory(this.task.httpRequestInitializer);

    assertThrows(RetriableException.class, () -> this.task.put(sinkRecords));
}
Example 7: put
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
@Override
public void put(Collection<SinkRecord> records) throws ConnectException {
    for (SinkRecord record : records) {
        try {
            String topic = record.topic();
            int partition = record.kafkaPartition();
            TopicPartition tp = new TopicPartition(topic, partition);
            BlockGZIPFileWriter buffer = tmpFiles.get(tp);
            if (buffer == null) {
                log.error("Trying to put {} records to partition {} which doesn't exist yet", records.size(), tp);
                throw new ConnectException("Trying to put records for a topic partition that has not been assigned");
            }
            buffer.write(record.value().toString());
        } catch (IOException e) {
            throw new RetriableException("Failed to write to buffer", e);
        }
    }
}
Example 8: process
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
protected HttpResponse process(final HttpRequestBase request) {
    try {
        return httpClient.execute(request, localContext);
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
        throw new RetriableException(e);
    }
}
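Wrapping every exception in RetriableException, as example 8 does, makes the framework retry indefinitely even for requests that can never succeed. A more selective variant, sketched here under the assumption that only transport-level I/O failures are transient:

// Illustrative variant: only I/O failures are treated as retriable.
protected HttpResponse process(final HttpRequestBase request) {
    try {
        return httpClient.execute(request, localContext);
    } catch (IOException e) {
        logger.error(e.getMessage(), e);
        throw new RetriableException(e);  // network hiccup: let Connect retry
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
        throw new ConnectException(e);    // anything else: fail the task
    }
}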
Example 9: put
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
@Override
public void put(final Collection<SinkRecord> records) {
    if (records.isEmpty()) {
        logger.debug("Empty record collection to process");
        return;
    }

    final SinkRecord first = records.iterator().next();
    final int recordsCount = records.size();
    logger.debug("Received {} records. Kafka coordinates from record: Topic - {}, Partition - {}, Offset - {}",
            recordsCount, first.topic(), first.kafkaPartition(), first.kafkaOffset());

    try {
        writer.write(records);
    } catch (final RetriableException e) {
        if (maxRetries > 0 && remainingRetries == 0) {
            throw new ConnectException("Retries exhausted, ending the task. Manual restart is required.");
        } else {
            logger.warn("Setting the task timeout to {} ms upon RetriableException", timeout);
            initWriter(config);
            context.timeout(timeout);
            remainingRetries--;
            throw e;
        }
    }
    this.remainingRetries = maxRetries;
}
Example 10: send
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
/**
* Sends a message to MQ. Adds the message to the current transaction. Reconnects to MQ if required.
*
* @param r The message and schema to send
*
* @throws RetriableException Operation failed, but connector should continue to retry.
* @throws ConnectException Operation failed and connector should stop.
*/
public void send(SinkRecord r) throws ConnectException, RetriableException {
    connectInternal();

    try {
        Message m = builder.fromSinkRecord(jmsCtxt, r);
        inflight = true;
        jmsProd.send(queue, m);
    }
    catch (JMSRuntimeException jmse) {
        log.debug("JMS exception {}", jmse);
        handleException(jmse);
    }
}
Example 11: commit
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
/**
* Commits the current transaction.
*
* @throws RetriableException Operation failed, but connector should continue to retry.
* @throws ConnectException Operation failed and connector should stop.
*/
public void commit() throws ConnectException, RetriableException {
    connectInternal();

    try {
        if (inflight) {
            inflight = false;
        }

        jmsCtxt.commit();
    }
    catch (JMSRuntimeException jmse) {
        log.debug("JMS exception {}", jmse);
        handleException(jmse);
    }
}
Example 12: commitRecord
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
@Override
public void commitRecord(SourceRecord record) throws InterruptedException {
    Long deliveryTag = (Long) record.sourceOffset().get("deliveryTag");
    try {
        this.channel.basicAck(deliveryTag, false);
    } catch (IOException e) {
        throw new RetriableException(e);
    }
}
Example 13: put
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
@Override
public void put(Collection<SinkRecord> sinkRecords) {
    for (SinkRecord record : sinkRecords) {
        log.trace("current sinkRecord value: " + record.value());
        if (!(record.value() instanceof byte[])) {
            throw new ConnectException("the value of the record has an invalid type (must be of type byte[])");
        }
        try {
            channel.basicPublish(this.config.exchange, this.config.routingKey, null, (byte[]) record.value());
        } catch (IOException e) {
            log.error("There was an error while publishing the outgoing message to RabbitMQ");
            throw new RetriableException(e);
        }
    }
}
Example 14: deliverMessages
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
private void deliverMessages() {
    // Finally, deliver this batch to the sink
    try {
        // Since we reuse the messageBatch buffer, ensure we give the task its own copy
        task.put(new ArrayList<>(messageBatch));
        for (SinkRecord record : messageBatch)
            currentOffsets.put(new TopicPartition(record.topic(), record.kafkaPartition()),
                    new OffsetAndMetadata(record.kafkaOffset() + 1));
        messageBatch.clear();
        // If we had paused all consumer topic partitions to try to redeliver data, then we should resume any that
        // the task had not explicitly paused
        if (pausedForRedelivery) {
            if (!shouldPause())
                resumeAll();
            pausedForRedelivery = false;
        }
    } catch (RetriableException e) {
        log.error("RetriableException from SinkTask {}:", id, e);
        // If we're retrying a previous batch, make sure we've paused all topic partitions so we don't get new data,
        // but will still be able to poll in order to handle user-requested timeouts, keep group membership, etc.
        pausedForRedelivery = true;
        pauseAll();
        // Let this exit normally, the batch will be reprocessed on the next loop.
    } catch (Throwable t) {
        log.error("Task {} threw an uncaught and unrecoverable exception", id, t);
        log.error("Task is being killed and will not recover until manually restarted");
        throw new ConnectException("Exiting WorkerSinkTask due to unrecoverable exception.");
    }
}
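pauseAll() and resumeAll() are not shown in example 14. In terms of the consumer API they plausibly reduce to something like the following sketch; the pausedPartitions() bookkeeping is an assumption for illustration, not the actual WorkerSinkTask code.

// Illustrative sketch of the pause/resume helpers referenced above.
private void pauseAll() {
    // Stop fetching new records while keeping group membership alive
    consumer.pause(consumer.assignment());
}

private void resumeAll() {
    for (TopicPartition tp : consumer.assignment()) {
        if (!context.pausedPartitions().contains(tp)) { // skip user-paused partitions
            consumer.resume(Collections.singleton(tp));
        }
    }
}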
Example 15: testPollRedelivery
import org.apache.kafka.connect.errors.RetriableException; // import the required package/class
@Test
public void testPollRedelivery() throws Exception {
    expectInitializeTask();
    expectPollInitialAssignment();

    // If a retriable exception is thrown, we should redeliver the same batch, pausing the consumer in the meantime
    expectConsumerPoll(1);
    expectConversionAndTransformation(1);
    Capture<Collection<SinkRecord>> records = EasyMock.newCapture(CaptureType.ALL);
    sinkTask.put(EasyMock.capture(records));
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));

    // Pause
    HashSet<TopicPartition> partitions = new HashSet<>(asList(TOPIC_PARTITION, TOPIC_PARTITION2));
    EasyMock.expect(consumer.assignment()).andReturn(partitions);
    consumer.pause(partitions);
    PowerMock.expectLastCall();

    // Retry delivery should succeed
    expectConsumerPoll(0);
    sinkTask.put(EasyMock.capture(records));
    EasyMock.expectLastCall();

    // And unpause
    EasyMock.expect(consumer.assignment()).andReturn(partitions);
    consumer.resume(singleton(TOPIC_PARTITION));
    PowerMock.expectLastCall();
    consumer.resume(singleton(TOPIC_PARTITION2));
    PowerMock.expectLastCall();

    PowerMock.replayAll();

    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    workerTask.iteration(); // initial assignment
    workerTask.iteration(); // first delivery: put() throws, consumer is paused
    workerTask.iteration(); // redelivery succeeds, consumer is resumed
    PowerMock.verifyAll();
}