

Java ConfigException Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.common.config.ConfigException. If you are wondering what ConfigException does, how to use it, or where to find working examples, the curated snippets below should help.


The ConfigException class belongs to the org.apache.kafka.common.config package. Fifteen code examples of the class are shown below, sorted by popularity.
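
Across these examples, ConfigException is thrown through two constructors: ConfigException(String message) for a free-form error, and ConfigException(String name, Object value, String message) for an error tied to a specific config key and its rejected value. A minimal sketch of both forms (the config name "my.example.config" is made up for illustration):

import org.apache.kafka.common.config.ConfigException;

public class ConfigExceptionDemo {
    public static void main(String[] args) {
        try {
            // message-only form, as in Example 1 below
            throw new ConfigException("Missing schema registry url!");
        } catch (ConfigException e) {
            System.out.println(e.getMessage());
        }
        try {
            // (name, value, message) form, as in Examples 2 and 5 below
            throw new ConfigException("my.example.config", "", "Value cannot be empty.");
        } catch (ConfigException e) {
            System.out.println(e.getMessage()); // message includes the offending key and value
        }
    }
}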

Example 1: init

import org.apache.kafka.common.config.ConfigException; // import the required package/class
@Override
public void init(Properties props) {
  if (props == null) {
    throw new ConfigException("Missing schema registry url!");
  }
  String url = props.getProperty(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG);
  if (url == null) {
    throw new ConfigException("Missing schema registry url!");
  }
  schemaRegistry = new CachedSchemaRegistryClient(
      url, AbstractKafkaAvroSerDeConfig.MAX_SCHEMAS_PER_SUBJECT_DEFAULT);

  if (props.containsKey("print.key")) {
    printKey = props.getProperty("print.key").trim().toLowerCase().equals("true");
  }
  if (props.containsKey("key.separator")) {
    keySeparator = props.getProperty("key.separator").getBytes();
  }
  if (props.containsKey("line.separator")) {
    lineSeparator = props.getProperty("line.separator").getBytes();
  }
}
 
Author: thomas-young-2013, Project: wherehowsX, Lines: 23, Source: AvroMessageFormatter.java

Example 2: configure

import org.apache.kafka.common.config.ConfigException; // import the required package/class
@Override
public void configure(Map<String, Object> config) {
  String localeString = (String) config.get(HdfsSinkConnectorConfig.LOCALE_CONFIG);
  if (localeString.equals("")) {
    throw new ConfigException(HdfsSinkConnectorConfig.LOCALE_CONFIG,
                              localeString, "Locale cannot be empty.");
  }
  String timeZoneString = (String) config.get(HdfsSinkConnectorConfig.TIMEZONE_CONFIG);
  if (timeZoneString.equals("")) {
    throw new ConfigException(HdfsSinkConnectorConfig.TIMEZONE_CONFIG,
                              timeZoneString, "Timezone cannot be empty.");
  }
  String hiveIntString = (String) config.get(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
  boolean hiveIntegration = hiveIntString != null && hiveIntString.toLowerCase().equals("true");
  Locale locale = new Locale(localeString);
  DateTimeZone timeZone = DateTimeZone.forID(timeZoneString);
  init(partitionDurationMs, pathFormat, locale, timeZone, hiveIntegration);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 19, Source: HourlyPartitioner.java

Example 3: _testInvalidRowKeyProperty

import org.apache.kafka.common.config.ConfigException; // import the required package/class
private void _testInvalidRowKeyProperty(String rowKey) {
  AmpoolSinkTask taskWithInvalidConfig = new AmpoolSinkTask();
  Exception expectedException = null;
  try {
    taskWithInvalidConfig.start(new HashMap<String, String>() {{
      put("locator.host", "localhost");
      put("locator.port", Integer.toString(12345));
      put("batch.size", Integer.toString(100));
      put("topics", "topic1, topic2, topic3");
      put("ampool.tables", "table1, table2, table3");
      put(AmpoolSinkConnectorConfig.TABLES_ROWKEY_COLUMNS, rowKey);
    }});
  } catch (ConfigException ex) {
    System.out.println("rowKey = [ " + rowKey + " ], Caught expected exception = " + ex.getMessage());
    ex.printStackTrace();

    expectedException = ex;
    taskWithInvalidConfig.stop();
  }
  Assert.assertTrue(expectedException instanceof ConfigException);
}
 
Author: ampool, Project: monarch, Lines: 23, Source: AmpoolSinkConnectorConfigTest.java

Example 4: testInvalidRetryConfig

import org.apache.kafka.common.config.ConfigException; // import the required package/class
@Test
public void testInvalidRetryConfig() {
  AmpoolSinkTask taskWithInvalidConfig = new AmpoolSinkTask();
  Exception expectedException = null;
  try {
    taskWithInvalidConfig.start(new HashMap<String, String>() {{
      put("locator.host", "invalidHost");
      put("locator.port", Integer.toString(12345));
      put("batch.size", Integer.toString(100));
      put("topics", "topic1, topic2, topic3");
      put("ampool.tables", "table1, table2, table3");
      put("max.retries", "invalidParam");
      put("retry.interval.ms", Integer.toString(30000));
    }});
  } catch (ConfigException ex) {
    //ConfigException: Invalid configuration specified for retry-interval or max-retries!
    expectedException = ex;
    taskWithInvalidConfig.stop();
  }
  Assert.assertTrue(expectedException instanceof ConfigException);
}
 
Author: ampool, Project: monarch, Lines: 22, Source: AmpoolSinkConnectorConfigTest.java

Example 5: configure

import org.apache.kafka.common.config.ConfigException; // import the required package/class
@Override
public void configure(Map<String, ?> props) {
    final SimpleConfig config = new SimpleConfig(CONFIG_DEF, props);
    topicField = InsertionSpec.parse(config.getString(ConfigName.TOPIC_FIELD));
    partitionField = InsertionSpec.parse(config.getString(ConfigName.PARTITION_FIELD));
    offsetField = InsertionSpec.parse(config.getString(ConfigName.OFFSET_FIELD));
    timestampField = InsertionSpec.parse(config.getString(ConfigName.TIMESTAMP_FIELD));
    staticField = InsertionSpec.parse(config.getString(ConfigName.STATIC_FIELD));
    staticValue = config.getString(ConfigName.STATIC_VALUE);

    if (topicField == null && partitionField == null && offsetField == null && timestampField == null && staticField == null) {
        throw new ConfigException("No field insertion configured");
    }

    if (staticField != null && staticValue == null) {
        throw new ConfigException(ConfigName.STATIC_VALUE, null, "No value specified for static field: " + staticField);
    }

    schemaUpdateCache = new SynchronizedCache<>(new LRUCache<Schema, Schema>(16));
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 21, Source: InsertField.java

Example 6: configDef

import org.apache.kafka.common.config.ConfigException; // import the required package/class
public static ConfigDef configDef() {
    return new ConfigDef()
            .define(NAME_CONFIG, Type.STRING, Importance.HIGH, NAME_DOC, COMMON_GROUP, 1, Width.MEDIUM, NAME_DISPLAY)
            .define(CONNECTOR_CLASS_CONFIG, Type.STRING, Importance.HIGH, CONNECTOR_CLASS_DOC, COMMON_GROUP, 2, Width.LONG, CONNECTOR_CLASS_DISPLAY)
            .define(TASKS_MAX_CONFIG, Type.INT, TASKS_MAX_DEFAULT, atLeast(TASKS_MIN_CONFIG), Importance.HIGH, TASKS_MAX_DOC, COMMON_GROUP, 3, Width.SHORT, TASK_MAX_DISPLAY)
            .define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, null, Importance.LOW, KEY_CONVERTER_CLASS_DOC, COMMON_GROUP, 4, Width.SHORT, KEY_CONVERTER_CLASS_DISPLAY)
            .define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, null, Importance.LOW, VALUE_CONVERTER_CLASS_DOC, COMMON_GROUP, 5, Width.SHORT, VALUE_CONVERTER_CLASS_DISPLAY)
            .define(TRANSFORMS_CONFIG, Type.LIST, null, new ConfigDef.Validator() {
                @Override
                public void ensureValid(String name, Object value) {
                    if (value == null) return;
                    final List<String> transformAliases = (List<String>) value;
                    if (transformAliases.size() > new HashSet<>(transformAliases).size()) {
                        throw new ConfigException(name, value, "Duplicate alias provided.");
                    }
                }
            }, Importance.LOW, TRANSFORMS_DOC, TRANSFORMS_GROUP, 6, Width.LONG, TRANSFORMS_DISPLAY);
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 19, Source: ConnectorConfig.java
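
Example 6 validates the transforms list with an anonymous ConfigDef.Validator. The same check can be factored into a named, reusable validator class; a sketch under that assumption (the class name NoDuplicateAliases is hypothetical, not part of the example above):

import java.util.HashSet;
import java.util.List;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigException;

public class NoDuplicateAliases implements ConfigDef.Validator {
    @Override
    @SuppressWarnings("unchecked")
    public void ensureValid(String name, Object value) {
        if (value == null) return;
        final List<String> aliases = (List<String>) value;
        // a HashSet drops duplicates, so a size mismatch means some alias repeats
        if (aliases.size() > new HashSet<>(aliases).size()) {
            throw new ConfigException(name, value, "Duplicate alias provided.");
        }
    }
}

An instance of this class could then be passed as the validator argument in the define(...) call instead of the inline anonymous class.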

Example 7: configure

import org.apache.kafka.common.config.ConfigException; // import the required package/class
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
    if (inner == null) {
        String propertyName = isKey ? "key.serializer.inner.class" : "value.serializer.inner.class";
        Object innerSerializerClass = configs.get(propertyName);
        propertyName = (innerSerializerClass == null) ? "serializer.inner.class" : propertyName;
        String value = null;
        try {
            value = (String) configs.get(propertyName);
            inner = Serializer.class.cast(Utils.newInstance(value, Serializer.class));
            inner.configure(configs, isKey);
        } catch (ClassNotFoundException e) {
            throw new ConfigException(propertyName, value, "Class " + value + " could not be found.");
        }
    }
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 17, Source: WindowedSerializer.java

Example 8: configure

import org.apache.kafka.common.config.ConfigException; // import the required package/class
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
    if (inner == null) {
        String propertyName = isKey ? "key.deserializer.inner.class" : "value.deserializer.inner.class";
        Object innerDeserializerClass = configs.get(propertyName);
        propertyName = (innerDeserializerClass == null) ? "deserializer.inner.class" : propertyName;
        String value = null;
        try {
            value = (String) configs.get(propertyName);
            inner = Deserializer.class.cast(Utils.newInstance(value, Deserializer.class));
            inner.configure(configs, isKey);
        } catch (ClassNotFoundException e) {
            throw new ConfigException(propertyName, value, "Class " + value + " could not be found.");
        }
    }
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 17, Source: WindowedDeserializer.java

Example 9: getCommonConsumerConfigs

import org.apache.kafka.common.config.ConfigException; // import the required package/class
private Map<String, Object> getCommonConsumerConfigs() throws ConfigException {
    final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(CONSUMER_PREFIX, ConsumerConfig.configNames());

    // disable auto commit and throw an exception if the user has overridden it;
    // this is necessary for the Streams commit semantics
    if (clientProvidedProps.containsKey(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)) {
        throw new ConfigException("Unexpected user-specified consumer config " + ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG
            + ", as the streams client will always turn off auto committing.");
    }
    if (eosEnabled) {
        if (clientProvidedProps.containsKey(ConsumerConfig.ISOLATION_LEVEL_CONFIG)) {
            throw new ConfigException("Unexpected user-specified consumer config " + ConsumerConfig.ISOLATION_LEVEL_CONFIG
                + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' consumers will always read committed data only.");
        }
    }

    final Map<String, Object> consumerProps = new HashMap<>(eosEnabled ? CONSUMER_EOS_OVERRIDES : CONSUMER_DEFAULT_OVERRIDES);
    consumerProps.putAll(clientProvidedProps);

    // bootstrap.servers should be from StreamsConfig
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG));
    // remove the deprecated ZK config
    consumerProps.remove(ZOOKEEPER_CONNECT_CONFIG);

    return consumerProps;
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 27, Source: StreamsConfig.java

Example 10: getConsumerConfigs

import org.apache.kafka.common.config.ConfigException; // import the required package/class
/**
 * Get the configs to the {@link KafkaConsumer consumer}.
 * Properties using the prefix {@link #CONSUMER_PREFIX} will be used in favor over their non-prefixed versions
 * except in the case of {@link ConsumerConfig#BOOTSTRAP_SERVERS_CONFIG} where we always use the non-prefixed
 * version as we only support reading/writing from/to the same Kafka Cluster.
 *
 * @param streamThread the {@link StreamThread} creating a consumer
 * @param groupId      consumer groupId
 * @param clientId     clientId
 * @return Map of the consumer configuration.
 * @throws ConfigException if {@code "enable.auto.commit"} was set to {@code false} by the user
 */
public Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
                                              final String groupId,
                                              final String clientId) throws ConfigException {
    final Map<String, Object> consumerProps = getCommonConsumerConfigs();

    // add client id with stream client id prefix, and group id
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer");

    // add configs required for stream partition assignor
    consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread);
    consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG));
    consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG));
    consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName());
    consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG));

    consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG));

    return consumerProps;
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 33, Source: StreamsConfig.java
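
The prefix rules documented in Example 10 mean that consumer-specific overrides are supplied with the "consumer." prefix via StreamsConfig.consumerPrefix(...). A hedged usage sketch (application id and bootstrap servers are placeholders):

import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.streams.StreamsConfig;

public class PrefixedConsumerConfigDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app");            // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        // the prefixed form wins over a non-prefixed "max.poll.records" for the consumer
        props.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), 100);
        // setting consumerPrefix(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG) here would make
        // the consumer-config resolution in Example 9 throw a ConfigException
        StreamsConfig config = new StreamsConfig(props);
    }
}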

Example 11: getProducerConfigs

import org.apache.kafka.common.config.ConfigException; // import the required package/class
/**
 * Get the configs for the {@link KafkaProducer producer}.
 * Properties using the prefix {@link #PRODUCER_PREFIX} will be used in favor over their non-prefixed versions
 * except in the case of {@link ProducerConfig#BOOTSTRAP_SERVERS_CONFIG} where we always use the non-prefixed
 * version as we only support reading/writing from/to the same Kafka Cluster.
 *
 * @param clientId clientId
 * @return Map of the producer configuration.
 */
public Map<String, Object> getProducerConfigs(final String clientId) {
    final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames());

    if (eosEnabled) {
        if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) {
            throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG
                + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled.");
        }

        if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) {
            throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION
                + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection.");
        }
    }

    // generate producer configs from original properties and overridden maps
    final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES);
    props.putAll(clientProvidedProps);

    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG));
    // add client id with stream client id prefix
    props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer");

    return props;
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 35, Source: StreamsConfig.java

Example 12: ClientMetadata

import org.apache.kafka.common.config.ConfigException; // import the required package/class
ClientMetadata(final String endPoint) {
    // get the host info if possible
    if (endPoint != null) {
        final String host = getHost(endPoint);
        final Integer port = getPort(endPoint);

        if (host == null || port == null)
            throw new ConfigException(String.format("Error parsing host address %s. Expected format host:port.", endPoint));

        hostInfo = new HostInfo(host, port);
    } else {
        hostInfo = null;
    }

    // initialize the consumer memberIds
    consumers = new HashSet<>();

    // initialize the client state
    state = new ClientState();
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 22, Source: StreamPartitionAssignor.java
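
In Example 12, getHost and getPort appear to be the static helpers from org.apache.kafka.common.utils.Utils, which return null when the endpoint does not parse as host:port; that null is what triggers the ConfigException. The check in isolation, as a small sketch:

import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.utils.Utils;

public class EndpointCheckDemo {
    public static void main(String[] args) {
        final String endPoint = "localhost"; // no port, as in Example 13 below
        final String host = Utils.getHost(endPoint);
        final Integer port = Utils.getPort(endPoint);
        if (host == null || port == null) {
            // same message format as Example 12
            throw new ConfigException(String.format(
                    "Error parsing host address %s. Expected format host:port.", endPoint));
        }
        System.out.println(host + ":" + port);
    }
}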

Example 13: shouldThrowExceptionIfApplicationServerConfigIsNotHostPortPair

import org.apache.kafka.common.config.ConfigException; // import the required package/class
@Test
public void shouldThrowExceptionIfApplicationServerConfigIsNotHostPortPair() throws Exception {
    final Properties properties = configProps();
    final String myEndPoint = "localhost";
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, myEndPoint);
    final StreamsConfig config = new StreamsConfig(properties);
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);

    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1,
                                                       new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
                                                       0);

    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamThread.config, mockClientSupplier.restoreConsumer));

    try {
        partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
        Assert.fail("expected to an exception due to invalid config");
    } catch (ConfigException e) {
        // pass
    }
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 25, Source: StreamPartitionAssignorTest.java

Example 14: shouldThrowExceptionIfApplicationServerConfigPortIsNotAnInteger

import org.apache.kafka.common.config.ConfigException; // import the required package/class
@Test
public void shouldThrowExceptionIfApplicationServerConfigPortIsNotAnInteger() throws Exception {
    final Properties properties = configProps();
    final String myEndPoint = "localhost:j87yhk";
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, myEndPoint);
    final StreamsConfig config = new StreamsConfig(properties);
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);


    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1,
                                                       new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
                                                       0);

    try {
        partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
        Assert.fail("expected to an exception due to invalid config");
    } catch (ConfigException e) {
        // pass
    }
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 24, Source: StreamPartitionAssignorTest.java

Example 15: configureRetries

import org.apache.kafka.common.config.ConfigException; // import the required package/class
private static int configureRetries(ProducerConfig config, boolean idempotenceEnabled) {
    boolean userConfiguredRetries = false;
    if (config.originals().containsKey(ProducerConfig.RETRIES_CONFIG)) {
        userConfiguredRetries = true;
    }
    if (idempotenceEnabled && !userConfiguredRetries) {
        // We recommend setting infinite retries when the idempotent producer is enabled, so it makes sense to make
        // this the default.
        log.info("Overriding the default retries config to the recommended value of {} since the idempotent " +
                "producer is enabled.", Integer.MAX_VALUE);
        return Integer.MAX_VALUE;
    }
    if (idempotenceEnabled && config.getInt(ProducerConfig.RETRIES_CONFIG) == 0) {
        throw new ConfigException("Must set " + ProducerConfig.RETRIES_CONFIG + " to non-zero when using the idempotent producer.");
    }
    return config.getInt(ProducerConfig.RETRIES_CONFIG);
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 18, Source: KafkaProducer.java
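
Example 15's validation can be observed from client code: enabling idempotence while pinning retries to 0 makes producer construction fail. A hedged sketch (localhost:9092 is a placeholder; depending on the client version the ConfigException may surface directly or wrapped as the cause of a KafkaException, and the catch below covers both since ConfigException extends KafkaException):

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.serialization.StringSerializer;

public class IdempotentRetriesDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
        props.put(ProducerConfig.RETRIES_CONFIG, "0"); // conflicts with idempotence
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // not reached: the constructor rejects retries=0 when idempotence is enabled
        } catch (KafkaException e) {
            System.out.println(e.getMessage());
        }
    }
}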


Note: The org.apache.kafka.common.config.ConfigException examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use must follow each project's License. Do not reproduce without permission.