

Java ConnectException Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.connect.errors.ConnectException. If you are wondering what exactly the ConnectException class does, how to use it, or want to see it in real code, the curated examples below should help.


The ConnectException class belongs to the org.apache.kafka.connect.errors package. Thirteen code examples of the class are shown below, ordered by popularity.
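
All of the examples below lean on one contract of the Kafka Connect framework: throwing ConnectException from a connector or task marks the failure as unrecoverable and stops the task, while throwing its subclass RetriableException asks the framework to redeliver the same batch after a backoff. The following minimal sketch illustrates that contract; it is not taken from any of the projects below, and SketchSinkTask and its deliver() helper are hypothetical names.

import java.io.IOException;
import java.util.Collection;
import java.util.Map;

import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.errors.RetriableException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public class SketchSinkTask extends SinkTask {
    @Override public String version() { return "0.0.1"; }

    @Override public void start(Map<String, String> props) { }

    @Override public void put(Collection<SinkRecord> records) {
        try {
            deliver(records);
        } catch (IOException e) {
            // Transient failure: the framework redelivers the same records after a backoff.
            throw new RetriableException(e);
        } catch (RuntimeException e) {
            // Unrecoverable failure: the framework fails and stops this task.
            throw new ConnectException("Delivery failed permanently", e);
        }
    }

    @Override public void stop() { }

    // Hypothetical helper standing in for a write to some external system.
    private void deliver(Collection<SinkRecord> records) throws IOException { }
}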

Example 1: toSourceRecord

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
/**
 * Convert a message into a Kafka Connect SourceRecord.
 * 
 * @param context            the JMS context to use for building messages
 * @param topic              the Kafka topic
 * @param messageBodyJms     whether to interpret MQ messages as JMS messages
 * @param message            the message
 * 
 * @return the Kafka Connect SourceRecord
 * 
 * @throws JMSException      Message could not be converted
 */
@Override public SourceRecord toSourceRecord(JMSContext context, String topic, boolean messageBodyJms, Message message) throws JMSException {
    byte[] payload;
    if (message instanceof BytesMessage) {
        payload = message.getBody(byte[].class);
    }
    else if (message instanceof TextMessage) {
        String s = message.getBody(String.class);
        payload = s.getBytes(UTF_8);
    }
    else {
        log.error("Unsupported JMS message type {}", message.getClass());
        throw new ConnectException("Unsupported JMS message type");
    }

    SchemaAndValue sv = converter.toConnectData(topic, payload);
    return new SourceRecord(null, null, topic, sv.schema(), sv.value());
}
 
Developer ID: ibm-messaging, Project: kafka-connect-mq-source, Lines of code: 30, Source: JsonRecordBuilder.java
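
The converter field used in toSourceRecord is configured elsewhere in JsonRecordBuilder. A plausible setup fragment, assuming a schemaless JSON value converter (the "schemas.enable" value here is an assumption, not taken from the project):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.json.JsonConverter;

JsonConverter converter = new JsonConverter();
Map<String, String> config = new HashMap<>();
config.put("schemas.enable", "false"); // assumption: payloads are plain JSON without embedded schemas
converter.configure(config, false);    // false = configure as a value converter, not a key converter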

Example 2: connectInternal

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
/**
 * Internal method to connect to MQ.
 *
 * @throws RetriableException Operation failed, but connector should continue to retry.
 * @throws ConnectException   Operation failed and connector should stop.
 */
private void connectInternal() throws ConnectException, RetriableException {
    if (connected) {
        return;
    }

    try {
        if (userName != null) {
            jmsCtxt = mqConnFactory.createContext(userName, password, JMSContext.SESSION_TRANSACTED);
        }
        else {
            jmsCtxt = mqConnFactory.createContext(JMSContext.SESSION_TRANSACTED);
        }            

        jmsProd = jmsCtxt.createProducer();
        jmsProd.setDeliveryMode(deliveryMode);
        jmsProd.setTimeToLive(timeToLive);
        connected = true;
    }
    catch (JMSRuntimeException jmse) {
        log.debug("JMS exception {}", jmse);
        handleException(jmse);
    }
}
 
Developer ID: ibm-messaging, Project: kafka-connect-mq-sink, Lines of code: 30, Source: JMSWriter.java
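
The handleException method called above is not part of this excerpt. Its job is to mark the connection as broken and classify the JMSRuntimeException as retriable or fatal. A hedged sketch of that classification follows; the cause check is an assumption, and the real JMSWriter may inspect MQ reason codes instead:

private void handleException(JMSRuntimeException jmse) {
    connected = false;
    Throwable cause = jmse.getCause();
    // Assumption: I/O-level causes mean a broken connection, which is worth retrying;
    // anything else (e.g. an authorization failure) is treated as fatal.
    if (cause instanceof java.io.IOException) {
        throw new RetriableException(jmse);
    }
    throw new ConnectException(jmse);
}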

Example 3: commit

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
/**
 * Commits the current transaction. If the current transaction contains a message that could not
 * be processed, the transaction is "in peril" and is rolled back instead to avoid data loss.
 *
 * @throws RetriableException Operation failed, but connector should continue to retry.
 * @throws ConnectException   Operation failed and connector should stop.
 */
public void commit() throws ConnectException, RetriableException {
    connectInternal();
    try {
        if (inflight) {
            inflight = false;

            if (inperil) {
                inperil = false;
                log.trace("Rolling back in-flight transaction");
                jmsCtxt.rollback();
                throw new RetriableException("Transaction rolled back");
            }
            else {
                jmsCtxt.commit();
            }
        }
    }
    catch (JMSRuntimeException jmse) {
        log.debug("JMS exception {}", jmse);
        handleException(jmse);
    }
}
 
Developer ID: ibm-messaging, Project: kafka-connect-mq-source, Lines of code: 30, Source: JMSReader.java

Example 4: flush

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
public void flush(final List<String> sqlBatch) throws SQLException {
    if (sqlBatch == null || sqlBatch.isEmpty()) {
        return;
    }
    Connection connection = connectionHolder.getValidConnection();
    connection.setAutoCommit(false);

    Statement statement = connection.createStatement();
    for (String sql : sqlBatch) {
        log.info("===>>>statement addBatch sql:{}", sql);
        statement.addBatch(sql);
    }
    int[] updateCountArr = statement.executeBatch();
    if (updateCountArr.length != sqlBatch.size()) {
        throw new ConnectException(String.format("updateCountArr size:(%d) not equals to sqlBatch size:(%d)", updateCountArr.length, sqlBatch.size()));
    }
    connection.commit();
    statement.close();
}
 
Developer ID: songxin1990, Project: maxwell-sink, Lines of code: 20, Source: MySqlDbWriter.java
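
One caveat with the method above: if executeBatch or commit throws, the Statement is never closed and the connection is left with autocommit disabled. A defensive variant of the same logic using try-with-resources — a sketch, not the project's code:

public void flush(final List<String> sqlBatch) throws SQLException {
    if (sqlBatch == null || sqlBatch.isEmpty()) {
        return;
    }
    Connection connection = connectionHolder.getValidConnection();
    connection.setAutoCommit(false);
    // try-with-resources guarantees the Statement is closed even when the batch fails
    try (Statement statement = connection.createStatement()) {
        for (String sql : sqlBatch) {
            statement.addBatch(sql);
        }
        int[] updateCountArr = statement.executeBatch();
        if (updateCountArr.length != sqlBatch.size()) {
            connection.rollback(); // assumption: roll back rather than leave the transaction open
            throw new ConnectException(String.format("updateCountArr size:(%d) not equals to sqlBatch size:(%d)", updateCountArr.length, sqlBatch.size()));
        }
        connection.commit();
    }
}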

Example 5: seek

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
@Override
public void seek(Offset offset) {
    if (offset.getRecordOffset() < 0) {
        throw new IllegalArgumentException("Record offset must not be negative");
    }
    try {
        if (offset.getRecordOffset() < reader.getLineNumber()) {
            this.reader = new LineNumberReader(new InputStreamReader(getFs().open(getFilePath())));
            currentLine = null;
        }
        while ((currentLine = reader.readLine()) != null) {
            if (reader.getLineNumber() - 1 == offset.getRecordOffset()) {
                this.offset.setOffset(reader.getLineNumber());
                return;
            }
        }
        this.offset.setOffset(reader.getLineNumber());
    } catch (IOException ioe) {
        throw new ConnectException("Error seeking file " + getFilePath(), ioe);
    }
}
 
Developer ID: mmolimar, Project: kafka-connect-fs, Lines of code: 22, Source: TextFileReader.java

Example 6: poll

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
@Override
public List<SourceRecord> poll() throws InterruptedException {
    while (stop != null && !stop.get() && !policy.hasEnded()) {
        log.trace("Polling for new data");

        final List<SourceRecord> results = new ArrayList<>();
        List<FileMetadata> files = filesToProcess();
        files.forEach(metadata -> {
            try (FileReader reader = policy.offer(metadata, context.offsetStorageReader())) {
                log.info("Processing records for file {}", metadata);
                while (reader.hasNext()) {
                    results.add(convert(metadata, reader.currentOffset(), reader.next()));
                }
            } catch (ConnectException | IOException e) {
            // When an exception occurs while reading a file, the connector logs it and continues with the next file
                log.error("Error reading file from FS: " + metadata.getPath() + ". Keep going...", e);
            }
        });
        return results;
    }

    return null;
}
 
Developer ID: mmolimar, Project: kafka-connect-fs, Lines of code: 24, Source: FsSourceTask.java
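
The stop field polled in the loop above is presumably an AtomicBoolean flipped by the task's stop() callback, which is why poll() checks it on every iteration. A plausible companion method — an assumption, FsSourceTask's actual implementation may differ:

@Override
public void stop() {
    if (stop != null) {
        stop.set(true); // poll() notices the flag on its next loop check and returns
    }
}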

Example 7: close

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
public void close(Collection<TopicPartition> partitions) {
  // Close any writers we have. We may get assigned the same partitions and end up duplicating
  // some effort since we'll have to reprocess those messages. It may be possible to hold on to
  // the TopicPartitionWriter and continue to use the temp file, but this can get significantly
  // more complex due to potential failures and network partitions. For example, we may get
  // this close, then miss a few generations of group membership, during which
  // data may have continued to be processed and we'd have to restart from the recovery stage,
  // make sure we apply the WAL, and only reuse the temp file if the starting offset is still
  // valid. For now, we prefer the simpler solution that may result in a bit of wasted effort.
  for (TopicPartition tp: assignment) {
    try {
      topicPartitionWriters.get(tp).close();
    } catch (ConnectException e) {
      log.error("Error closing writer for {}. Error: {}", tp, e.getMessage());
    } finally {
      topicPartitionWriters.remove(tp);
    }
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 20, Source: DataWriter.java

Example 8: start

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
@Override
public void start(Map<String, String> properties) {
    //TODO: Create resources like database or api connections here.
    FluentdSinkConnectorConfig config = new FluentdSinkConnectorConfig(properties);
    Fluency.Config fluencyConfig = new Fluency.Config()
            .setMaxBufferSize(config.getFluentdClientMaxBufferSize())
            .setBufferChunkInitialSize(config.getFluentdClientBufferChunkInitialSize())
            .setBufferChunkRetentionSize(config.getFluentdClientBufferChunkRetentionSize())
            .setFlushIntervalMillis(config.getFluentdClientFlushInterval())
            .setAckResponseMode(config.getFluentdClientAckResponseMode())
            .setFileBackupDir(config.getFluentdClientFileBackupDir())
            .setWaitUntilBufferFlushed(config.getFluentdClientWaitUntilBufferFlushed())
            .setWaitUntilFlusherTerminated(config.getFluentdClientWaitUntilFlusherTerminated())
            .setJvmHeapBufferMode(config.getFluentdClientJvmHeapBufferMode());
    try {
        fluency = Fluency.defaultFluency(config.getFluentdConnectAddresses(), fluencyConfig);
    } catch (IOException e) {
        throw new ConnectException(e);
    }
    converter = new SinkRecordConverter(config);
}
 
Developer ID: fluent, Project: kafka-connect-fluentd, Lines of code: 22, Source: FluentdSinkTask.java
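
A task that allocates a Fluency client in start() also needs to release it. A hedged sketch of the matching stop() — Fluency implements Closeable, so close() flushes buffered events and releases resources; whether the real FluentdSinkTask does exactly this is an assumption:

@Override
public void stop() {
    try {
        if (fluency != null) {
            fluency.close();
        }
    } catch (IOException e) {
        throw new ConnectException(e);
    }
}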

Example 9: getWriter

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
private RecordWriter<SinkRecord> getWriter(SinkRecord record, String encodedPartition)
    throws ConnectException {
  try {
    if (writers.containsKey(encodedPartition)) {
      return writers.get(encodedPartition);
    }
    String tempFile = getTempFile(encodedPartition);
    RecordWriter<SinkRecord> writer = writerProvider.getRecordWriter(conf, tempFile, record, avroData);
    writers.put(encodedPartition, writer);
    if (hiveIntegration && !hivePartitions.contains(encodedPartition)) {
      addHivePartition(encodedPartition);
      hivePartitions.add(encodedPartition);
    }
    return writer;
  } catch (IOException e) {
    throw new ConnectException(e);
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 19, Source: TopicPartitionWriter.java

Example 10: put

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
@Override
public void put(final Collection<SinkRecord> records) {
	if (records == null || records.isEmpty()) {
		return;
	}
	log.info("===>>>records size:{}", records.size());
	try {
		writer.batchWrite(records);
	} catch (SQLException e) {
		log.error(e.getMessage(), e);
		if (remainRetries <= 0) {
			throw new ConnectException(e);
		} else {
			writer.closeQuietly();
			writer = new MySqlDbWriter(config);
			remainRetries--;
			context.timeout(config.retryBackoffMs);
			throw new RetriableException(e);
		}
	}
	remainRetries = config.maxRetries;
}
 
Developer ID: songxin1990, Project: maxwell-sink, Lines of code: 23, Source: MySqlSinkTask.java

Example 11: timestampTypeFromSchema

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
/**
 * Determine the type/format of the timestamp based on the schema
 */
private String timestampTypeFromSchema(Schema schema) {
    if (Timestamp.LOGICAL_NAME.equals(schema.name())) {
        return TYPE_TIMESTAMP;
    } else if (org.apache.kafka.connect.data.Date.LOGICAL_NAME.equals(schema.name())) {
        return TYPE_DATE;
    } else if (Time.LOGICAL_NAME.equals(schema.name())) {
        return TYPE_TIME;
    } else if (schema.type().equals(Schema.Type.STRING)) {
        // If not otherwise specified, string == user-specified string format for timestamps
        return TYPE_STRING;
    } else if (schema.type().equals(Schema.Type.INT64)) {
        // If not otherwise specified, long == unix time
        return TYPE_UNIX;
    }
    throw new ConnectException("Schema " + schema + " does not correspond to a known timestamp type format");
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 20, Source: TimestampConverter.java
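
To make the branches concrete, here is how the method would classify a few schemas (Timestamp, Time, Date, and Schema as imported in TimestampConverter). The calls are illustrative only, since timestampTypeFromSchema is private:

timestampTypeFromSchema(Timestamp.SCHEMA);                           // TYPE_TIMESTAMP
timestampTypeFromSchema(org.apache.kafka.connect.data.Date.SCHEMA);  // TYPE_DATE
timestampTypeFromSchema(Time.SCHEMA);                                // TYPE_TIME
timestampTypeFromSchema(Schema.STRING_SCHEMA);                       // TYPE_STRING (user-specified format)
timestampTypeFromSchema(Schema.INT64_SCHEMA);                        // TYPE_UNIX (epoch time)
timestampTypeFromSchema(Schema.BOOLEAN_SCHEMA);                      // throws ConnectException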

Example 12: convertTimestamp

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
/**
 * Convert the given timestamp to the target timestamp format.
 * @param timestamp the input timestamp
 * @param timestampFormat the format of the timestamp, or null if the format should be inferred
 * @return the converted timestamp
 */
private Object convertTimestamp(Object timestamp, String timestampFormat) {
    if (timestampFormat == null) {
        timestampFormat = inferTimestampType(timestamp);
    }

    TimestampTranslator sourceTranslator = TRANSLATORS.get(timestampFormat);
    if (sourceTranslator == null) {
        throw new ConnectException("Unsupported timestamp type: " + timestampFormat);
    }
    Date rawTimestamp = sourceTranslator.toRaw(config, timestamp);

    TimestampTranslator targetTranslator = TRANSLATORS.get(config.type);
    if (targetTranslator == null) {
        throw new ConnectException("Unsupported timestamp type: " + config.type);
    }
    return targetTranslator.toType(config, rawTimestamp);
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 24, Source: TimestampConverter.java

Example 13: deleteConnectorConfig

import org.apache.kafka.connect.errors.ConnectException; // import the required package/class
@Override
public synchronized void deleteConnectorConfig(String connName, Callback<Created<ConnectorInfo>> callback) {
    try {
        if (!configState.contains(connName)) {
            // Deletion, must already exist
            callback.onCompletion(new NotFoundException("Connector " + connName + " not found", null), null);
            return;
        }

        removeConnectorTasks(connName);
        worker.stopConnector(connName);
        configBackingStore.removeConnectorConfig(connName);
        onDeletion(connName);
        callback.onCompletion(null, new Created<ConnectorInfo>(false, null));
    } catch (ConnectException e) {
        callback.onCompletion(e, null);
    }

}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 20, Source: StandaloneHerder.java
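
A hypothetical caller, showing the error-first Callback contract the method completes (the connector name and logging are illustrative):

herder.deleteConnectorConfig("my-connector", (error, created) -> {
    if (error != null) {
        log.error("Failed to delete connector", error);
    } else {
        log.info("Connector deleted");
    }
});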


Note: The org.apache.kafka.connect.errors.ConnectException examples in this article were compiled from open-source code hosted on platforms such as GitHub, and the snippets were selected from open-source projects contributed by their respective authors. Copyright of the code remains with the original authors; consult each project's license before redistributing or reusing it.