This article collects typical usage examples of the Java class ConfigDef from org.apache.kafka.common.config. If you are wondering what ConfigDef is for, how to use it, or where to find working examples, the curated samples below should help.
The ConfigDef class belongs to the org.apache.kafka.common.config package. A total of 15 code examples of the ConfigDef class are shown below, sorted by popularity by default.
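Before the examples, here is a minimal, self-contained sketch of the usual ConfigDef workflow, written for this article; the key names and doc strings are illustrative and not taken from any example below. You declare the expected keys with define(), then parse() a raw property map into typed values:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;

public class ConfigDefQuickStart {
    public static void main(String[] args) {
        // Declare expected keys with type, default value, importance, and documentation.
        ConfigDef def = new ConfigDef()
            .define("host", Type.STRING, "localhost", Importance.HIGH, "Server host name.")
            .define("port", Type.INT, 8080, Importance.HIGH, "Server port.");

        // Parse raw String-valued properties; missing keys fall back to their defaults,
        // and type or validation failures throw ConfigException.
        Map<String, String> props = new HashMap<>();
        props.put("port", "9090");
        Map<String, Object> parsed = def.parse(props);

        System.out.println(parsed.get("host")); // localhost (default applied)
        System.out.println(parsed.get("port")); // 9090 (converted to Integer)
    }
}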
Example 1: RabbitMQConnectorConfig

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

public RabbitMQConnectorConfig(ConfigDef definition, Map<?, ?> originals) {
    super(definition, originals);
    this.username = this.getString(USERNAME_CONFIG);
    this.password = this.getString(PASSWORD_CONFIG);
    this.virtualHost = this.getString(VIRTUAL_HOST_CONFIG);
    this.requestedChannelMax = this.getInt(REQUESTED_CHANNEL_MAX_CONFIG);
    this.requestedFrameMax = this.getInt(REQUESTED_FRAME_MAX_CONFIG);
    this.connectionTimeout = this.getInt(CONNECTION_TIMEOUT_CONFIG);
    this.handshakeTimeout = this.getInt(HANDSHAKE_TIMEOUT_CONFIG);
    this.shutdownTimeout = this.getInt(SHUTDOWN_TIMEOUT_CONFIG);
    this.requestedHeartbeat = this.getInt(REQUESTED_HEARTBEAT_CONFIG);
    this.automaticRecoveryEnabled = this.getBoolean(AUTOMATIC_RECOVERY_ENABLED_CONFIG);
    this.topologyRecoveryEnabled = this.getBoolean(TOPOLOGY_RECOVERY_ENABLED_CONFIG);
    this.networkRecoveryInterval = this.getInt(NETWORK_RECOVERY_INTERVAL_CONFIG);
    this.host = this.getString(HOST_CONFIG);
    this.port = this.getInt(PORT_CONFIG);
    this.connectionFactory = connectionFactory();
}
Example 2: conf

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

public static ConfigDef conf() {
    return new ConfigDef()
        .define(INFINISPAN_CONNECTION_HOSTS_CONF, Type.STRING, INFINISPAN_CONNECTION_HOSTS_DEFAULT,
                Importance.HIGH, INFINISPAN_CONNECTION_HOSTS_DOC)
        .define(INFINISPAN_CONNECTION_HOTROD_PORT_CONF, Type.INT, INFINISPAN_CONNECTION_HOTROD_PORT_DEFAULT,
                Importance.HIGH, INFINISPAN_CONNECTION_HOTROD_PORT_DOC)
        .define(INFINISPAN_CONNECTION_CACHE_NAME_CONF, Type.STRING, INFINISPAN_CONNECTION_CACHE_NAME_DEFAULT,
                Importance.MEDIUM, INFINISPAN_CONNECTION_CACHE_NAME_DOC)
        .define(INFINISPAN_USE_PROTO_CONF, Type.BOOLEAN, INFINISPAN_USE_PROTO_DEFAULT,
                Importance.MEDIUM, INFINISPAN_USE_PROTO_DOC)
        .define(INFINISPAN_PROTO_MARSHALLER_CLASS_CONF, Type.CLASS, INFINISPAN_PROTO_MARSHALLER_CLASS_DEFAULT,
                Importance.MEDIUM, INFINISPAN_PROTO_MARSHALLER_CLASS_DOC)
        .define(INFINISPAN_CACHE_FORCE_RETURN_VALUES_CONF, Type.BOOLEAN, INFINISPAN_CACHE_FORCE_RETURN_VALUES_DEFAULT,
                Importance.LOW, INFINISPAN_CACHE_FORCE_RETURN_VALUES_DOC)
        .define(INFINISPAN_USE_LIFESPAN_CONF, Type.BOOLEAN, INFINISPAN_USE_LIFESPAN_DEFAULT,
                Importance.LOW, INFINISPAN_USE_LIFESPAN_DOC)
        .define(INFINISPAN_USE_MAX_IDLE_CONF, Type.BOOLEAN, INFINISPAN_USE_MAX_IDLE_DEFAULT,
                Importance.LOW, INFINISPAN_USE_MAX_IDLE_DOC)
        .define(INFINISPAN_LIFESPAN_ENTRY_CONF, Type.LONG, INFINISPAN_LIFESPAN_ENTRY_DEFAULT,
                Importance.LOW, INFINISPAN_LIFESPAN_ENTRY_DOC)
        .define(INFINISPAN_MAX_IDLE_ENTRY_CONF, Type.LONG, INFINISPAN_MAX_IDLE_ENTRY_DEFAULT,
                Importance.LOW, INFINISPAN_MAX_IDLE_ENTRY_DOC);
}
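In practice a ConfigDef like this is handed to an AbstractConfig (directly or through a subclass), which parses the raw properties and exposes typed getters. A hedged usage sketch; the literal key "infinispan.connection.hosts" is only a guess at the value of INFINISPAN_CONNECTION_HOSTS_CONF, so substitute the real constant:

// Hedged usage sketch: the key literal below is hypothetical.
Map<String, String> props = new HashMap<>();
props.put("infinispan.connection.hosts", "127.0.0.1");
AbstractConfig config = new AbstractConfig(conf(), props);
String hosts = config.getString("infinispan.connection.hosts");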
Example 3: convertConfigKey

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

private static ConfigKeyInfo convertConfigKey(ConfigKey configKey) {
    String name = configKey.name;
    Type type = configKey.type;
    String typeName = configKey.type.name();
    boolean required = false;
    String defaultValue;
    if (ConfigDef.NO_DEFAULT_VALUE.equals(configKey.defaultValue)) {
        defaultValue = null;
        required = true;
    } else {
        defaultValue = ConfigDef.convertToString(configKey.defaultValue, type);
    }
    String importance = configKey.importance.name();
    String documentation = configKey.documentation;
    String group = configKey.group;
    int orderInGroup = configKey.orderInGroup;
    String width = configKey.width.name();
    String displayName = configKey.displayName;
    List<String> dependents = configKey.dependents;
    return new ConfigKeyInfo(name, typeName, required, defaultValue, importance,
            documentation, group, orderInGroup, width, displayName, dependents);
}
Example 4: configDef

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

public static ConfigDef configDef() {
    return new ConfigDef()
        .define(NAME_CONFIG, Type.STRING, Importance.HIGH, NAME_DOC,
                COMMON_GROUP, 1, Width.MEDIUM, NAME_DISPLAY)
        .define(CONNECTOR_CLASS_CONFIG, Type.STRING, Importance.HIGH, CONNECTOR_CLASS_DOC,
                COMMON_GROUP, 2, Width.LONG, CONNECTOR_CLASS_DISPLAY)
        .define(TASKS_MAX_CONFIG, Type.INT, TASKS_MAX_DEFAULT, atLeast(TASKS_MIN_CONFIG),
                Importance.HIGH, TASKS_MAX_DOC, COMMON_GROUP, 3, Width.SHORT, TASK_MAX_DISPLAY)
        .define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, null, Importance.LOW,
                KEY_CONVERTER_CLASS_DOC, COMMON_GROUP, 4, Width.SHORT, KEY_CONVERTER_CLASS_DISPLAY)
        .define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, null, Importance.LOW,
                VALUE_CONVERTER_CLASS_DOC, COMMON_GROUP, 5, Width.SHORT, VALUE_CONVERTER_CLASS_DISPLAY)
        .define(TRANSFORMS_CONFIG, Type.LIST, null, new ConfigDef.Validator() {
            @Override
            public void ensureValid(String name, Object value) {
                if (value == null) return;
                @SuppressWarnings("unchecked")
                final List<String> transformAliases = (List<String>) value;
                if (transformAliases.size() > new HashSet<>(transformAliases).size()) {
                    throw new ConfigException(name, value, "Duplicate alias provided.");
                }
            }
        }, Importance.LOW, TRANSFORMS_DOC, TRANSFORMS_GROUP, 6, Width.LONG, TRANSFORMS_DISPLAY);
}
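To see the duplicate-alias Validator fire, parse a property map that repeats a transform alias. A sketch assuming the standard Connect key names ("name", "connector.class", "transforms"); the connector class is hypothetical:

Map<String, String> props = new HashMap<>();
props.put("name", "my-connector");
props.put("connector.class", "com.example.MySourceConnector"); // hypothetical class
props.put("transforms", "mask,mask"); // duplicate alias
try {
    configDef().parse(props);
} catch (ConfigException e) {
    // Message along the lines of:
    // "Invalid value [mask, mask] for configuration transforms: Duplicate alias provided."
    System.out.println(e.getMessage());
}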
Example 5: validateBasicConnectorConfig

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

@Override
protected Map<String, ConfigValue> validateBasicConnectorConfig(Connector connector,
                                                                ConfigDef configDef,
                                                                Map<String, String> config) {
    Map<String, ConfigValue> validatedConfig = super.validateBasicConnectorConfig(connector, configDef, config);
    if (connector instanceof SinkConnector) {
        ConfigValue validatedName = validatedConfig.get(ConnectorConfig.NAME_CONFIG);
        String name = (String) validatedName.value();
        if (workerGroupId.equals(SinkUtils.consumerGroupId(name))) {
            validatedName.addErrorMessage("Consumer group for sink connector named " + name +
                    " conflicts with Connect worker group " + workerGroupId);
        }
    }
    return validatedConfig;
}
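For context: in the Kafka Connect sources, SinkUtils.consumerGroupId(name) derives the consumer group a sink connector's tasks will join by prefixing the connector name ("connect-" + name), so this check rejects sink connector names whose derived group would collide with the worker's own group.id.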
Example 6: conf

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

public static ConfigDef conf() {
    return new ConfigDef()
        .define(FLUENTD_CONNECT, Type.STRING, "localhost:24224", Importance.HIGH,
                "Connection specs for Fluentd")
        .define(FLUENTD_CLIENT_MAX_BUFFER_BYTES, Type.LONG, null, Importance.MEDIUM,
                "Max buffer size.")
        .define(FLUENTD_CLIENT_BUFFER_CHUNK_INITIAL_BYTES, Type.INT, null, Importance.MEDIUM,
                "Initial size of buffer chunk. Default: 1048576 (1MiB)")
        .define(FLUENTD_CLIENT_BUFFER_CHUNK_RETENTION_BYTES, Type.INT, null, Importance.MEDIUM,
                "Retention size of buffer chunk. Default: 4194304 (4MiB)")
        .define(FLUENTD_CLIENT_FLUSH_INTERVAL, Type.INT, null, Importance.MEDIUM,
                "Buffer flush interval in msec. Default: 600(msec)")
        .define(FLUENTD_CLIENT_ACK_RESPONSE_MODE, Type.BOOLEAN, false, Importance.MEDIUM,
                "Enable/Disable ack response mode. Default: false")
        .define(FLUENTD_CLIENT_FILE_BACKUP_DIR, Type.STRING, null, Importance.MEDIUM,
                "Enable file backup mode if specify backup directory path. Default: null")
        .define(FLUENTD_CLIENT_WAIT_UNTIL_BUFFER_FLUSHED, Type.INT, null, Importance.MEDIUM,
                "Max wait until all buffers are flushed in sec. Default: 60(sec)")
        .define(FLUENTD_CLIENT_WAIT_UNTIL_FLUSHER_TERMINATED, Type.INT, null, Importance.MEDIUM,
                "Max wait until the flusher is terminated in sec. Default: 60(sec)")
        .define(FLUENTD_CLIENT_JVM_HEAP_BUFFER_MODE, Type.BOOLEAN, false, Importance.MEDIUM,
                "If true use JVM heap memory for buffer pool. Default: false")
        .define(FLUENTD_CLIENT_TIMESTAMP_INTEGER, Type.BOOLEAN, false, Importance.MEDIUM,
                "If true use integer timestamp. Default: false");
}
Example 7: conf

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

public static ConfigDef conf() {
    return new ConfigDef()
        .define(TOPIC_CONFIG, Type.STRING, Importance.HIGH, TOPIC_DOC)
        .define(OWNER_CONFIG, Type.STRING, Importance.HIGH, OWNER_DOC)
        .define(REPO_CONFIG, Type.STRING, Importance.HIGH, REPO_DOC)
        .define(BATCH_SIZE_CONFIG, Type.INT, 100, new BatchSizeValidator(), Importance.LOW, BATCH_SIZE_DOC)
        .define(SINCE_CONFIG, Type.STRING, ZonedDateTime.now().minusYears(1).toInstant().toString(),
                new TimestampValidator(), Importance.HIGH, SINCE_DOC)
        .define(AUTH_USERNAME_CONFIG, Type.STRING, "", Importance.HIGH, AUTH_USERNAME_DOC)
        .define(AUTH_PASSWORD_CONFIG, Type.PASSWORD, "", Importance.HIGH, AUTH_PASSWORD_DOC);
}
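BatchSizeValidator and TimestampValidator are project-specific classes not shown on this page. Purely as an illustration of the ConfigDef.Validator contract, here is a plausible reconstruction of the former (hypothetical; the real class may differ):

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigException;

// Hypothetical reconstruction, assuming the connector pages through an API
// that caps page sizes at 100 items.
public class BatchSizeValidator implements ConfigDef.Validator {
    @Override
    public void ensureValid(String name, Object value) {
        Integer batchSize = (Integer) value;
        if (batchSize == null || batchSize < 1 || batchSize > 100) {
            throw new ConfigException(name, value, "Batch size must be between 1 and 100.");
        }
    }
}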
Example 8: conf

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

public static ConfigDef conf() {
    return FsSourceConnectorConfig.conf()
        .define(POLICY_CLASS, ConfigDef.Type.CLASS, ConfigDef.Importance.HIGH, POLICY_CLASS_DOC)
        .define(POLICY_RECURSIVE, ConfigDef.Type.BOOLEAN, Boolean.TRUE, ConfigDef.Importance.LOW, POLICY_RECURSIVE_DOC)
        .define(POLICY_REGEXP, ConfigDef.Type.STRING, ".*", ConfigDef.Importance.MEDIUM, POLICY_REGEXP_DOC)
        .define(FILE_READER_CLASS, ConfigDef.Type.CLASS, ConfigDef.Importance.HIGH, FILE_READER_CLASS_DOC);
}
Example 9: checkDocumentation

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

@Test
public void checkDocumentation() {
    ConfigDef config = FsSourceConnectorConfig.conf();
    config.names().forEach(key -> {
        assertFalse("Property " + key + " should be documented",
                config.configKeys().get(key).documentation == null ||
                        "".equals(config.configKeys().get(key).documentation.trim()));
    });
}
Example 10: checkDocumentation

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

@Test
public void checkDocumentation() {
    ConfigDef config = FsSourceTaskConfig.conf();
    config.names().forEach(key -> {
        assertFalse("Property " + key + " should be documented",
                config.configKeys().get(key).documentation == null ||
                        "".equals(config.configKeys().get(key).documentation.trim()));
    });
}
Example 11: config

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

public static ConfigDef config() {
    return new ConfigDef()
        .define(
            ConfigKeyBuilder.of(HEADER_NAME_CONF, ConfigDef.Type.STRING)
                .importance(ConfigDef.Importance.HIGH)
                .documentation(HEADER_NAME_DOC)
                .build()
        );
}
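Note that ConfigKeyBuilder is not part of Kafka itself; it appears to come from a connect-utils style helper library and simply assembles a ConfigDef.ConfigKey fluently. With plain Kafka the same definition would presumably read:

return new ConfigDef()
    .define(HEADER_NAME_CONF, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, HEADER_NAME_DOC);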
Example 12: config

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

public static ConfigDef config() {
    return RabbitMQConnectorConfig.config()
        .define(TOPIC_CONF, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, TOPIC_DOC)
        .define(PREFETCH_COUNT_CONF, ConfigDef.Type.INT, 0, ConfigDef.Importance.MEDIUM, PREFETCH_COUNT_DOC)
        .define(PREFETCH_GLOBAL_CONF, ConfigDef.Type.BOOLEAN, false, ConfigDef.Importance.MEDIUM, PREFETCH_GLOBAL_DOC)
        .define(QUEUE_CONF, ConfigDef.Type.LIST, ConfigDef.Importance.HIGH, QUEUE_DOC);
}
Example 13: ConnectorConfig

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

public ConnectorConfig(Plugins plugins, ConfigDef configDef, Map<String, String> props) {
    super(configDef, props);
    enrichedConfig = new EnrichedConnectorConfig(
            enrich(plugins, configDef, props, true),
            props
    );
}
Example 14: getConfigDefFromTransformation

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

/**
 * Return {@link ConfigDef} from {@code transformationCls}, which is expected to be a non-null {@code Class<Transformation>},
 * by instantiating it and invoking {@link Transformation#config()}.
 */
static ConfigDef getConfigDefFromTransformation(String key, Class<?> transformationCls) {
    if (transformationCls == null || !Transformation.class.isAssignableFrom(transformationCls)) {
        throw new ConfigException(key, String.valueOf(transformationCls), "Not a Transformation");
    }
    try {
        return (transformationCls.asSubclass(Transformation.class).newInstance()).config();
    } catch (Exception e) {
        throw new ConfigException(key, String.valueOf(transformationCls),
                "Error getting config definition from Transformation: " + e.getMessage());
    }
}
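One modernization note: Class.newInstance() has been deprecated since Java 9 because it propagates checked constructor exceptions without compile-time checking. On newer JDKs the same reflection step is usually written with getDeclaredConstructor(), whose checked exceptions the surrounding catch (Exception e) would still cover:

return transformationCls.asSubclass(Transformation.class)
        .getDeclaredConstructor()
        .newInstance()
        .config();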
Example 15: baseConfigDef

import org.apache.kafka.common.config.ConfigDef; // import the dependent package/class

/**
 * Get a basic ConfigDef for a WorkerConfig. This includes all the common settings. Subclasses can use this to
 * bootstrap their own ConfigDef.
 * @return a ConfigDef with all the common options specified
 */
protected static ConfigDef baseConfigDef() {
    return new ConfigDef()
        .define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, BOOTSTRAP_SERVERS_DEFAULT,
                Importance.HIGH, BOOTSTRAP_SERVERS_DOC)
        .define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS,
                Importance.HIGH, KEY_CONVERTER_CLASS_DOC)
        .define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS,
                Importance.HIGH, VALUE_CONVERTER_CLASS_DOC)
        .define(INTERNAL_KEY_CONVERTER_CLASS_CONFIG, Type.CLASS,
                Importance.LOW, INTERNAL_KEY_CONVERTER_CLASS_DOC)
        .define(INTERNAL_VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS,
                Importance.LOW, INTERNAL_VALUE_CONVERTER_CLASS_DOC)
        .define(TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG, Type.LONG,
                TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DEFAULT, Importance.LOW,
                TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DOC)
        .define(OFFSET_COMMIT_INTERVAL_MS_CONFIG, Type.LONG, OFFSET_COMMIT_INTERVAL_MS_DEFAULT,
                Importance.LOW, OFFSET_COMMIT_INTERVAL_MS_DOC)
        .define(OFFSET_COMMIT_TIMEOUT_MS_CONFIG, Type.LONG, OFFSET_COMMIT_TIMEOUT_MS_DEFAULT,
                Importance.LOW, OFFSET_COMMIT_TIMEOUT_MS_DOC)
        .define(REST_HOST_NAME_CONFIG, Type.STRING, null, Importance.LOW, REST_HOST_NAME_DOC)
        .define(REST_PORT_CONFIG, Type.INT, REST_PORT_DEFAULT, Importance.LOW, REST_PORT_DOC)
        .define(REST_ADVERTISED_HOST_NAME_CONFIG, Type.STRING, null, Importance.LOW, REST_ADVERTISED_HOST_NAME_DOC)
        .define(REST_ADVERTISED_PORT_CONFIG, Type.INT, null, Importance.LOW, REST_ADVERTISED_PORT_DOC)
        .define(ACCESS_CONTROL_ALLOW_ORIGIN_CONFIG, Type.STRING,
                ACCESS_CONTROL_ALLOW_ORIGIN_DEFAULT, Importance.LOW,
                ACCESS_CONTROL_ALLOW_ORIGIN_DOC)
        .define(ACCESS_CONTROL_ALLOW_METHODS_CONFIG, Type.STRING,
                ACCESS_CONTROL_ALLOW_METHODS_DEFAULT, Importance.LOW,
                ACCESS_CONTROL_ALLOW_METHODS_DOC)
        .define(PLUGIN_PATH_CONFIG, Type.LIST, null, Importance.LOW, PLUGIN_PATH_DOC);
}
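As the Javadoc says, concrete WorkerConfig subclasses bootstrap from this base definition. A minimal hedged sketch; the subclass name and the "my.extra.setting" key are invented for illustration:

// Hypothetical subclass extending the common worker definition.
public class MyWorkerConfig extends WorkerConfig {
    private static final ConfigDef CONFIG = baseConfigDef()
        .define("my.extra.setting", Type.STRING, "default-value",
                Importance.LOW, "An illustrative subclass-specific option.");

    public MyWorkerConfig(Map<String, String> props) {
        super(CONFIG, props);
    }
}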