This article collects typical usage examples of the Java class org.apache.flink.configuration.IllegalConfigurationException. If you are wondering what the IllegalConfigurationException class is for, how to use it, or what real-world usage looks like, the selected class code examples below may help.
The IllegalConfigurationException class belongs to the org.apache.flink.configuration package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: forSingleJobAppMaster
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
/**
 * Creates the high-availability services for a single-job Flink YARN application, to be
 * used in the Application Master that runs both ResourceManager and JobManager.
 *
 * @param flinkConfig The Flink configuration.
 * @param hadoopConfig The Hadoop configuration for the YARN cluster.
 *
 * @return The created high-availability services.
 *
 * @throws IOException Thrown, if the high-availability services could not be initialized.
 */
public static YarnHighAvailabilityServices forSingleJobAppMaster(
        Configuration flinkConfig,
        org.apache.hadoop.conf.Configuration hadoopConfig) throws IOException {

    checkNotNull(flinkConfig, "flinkConfig");
    checkNotNull(hadoopConfig, "hadoopConfig");

    final HighAvailabilityMode mode = HighAvailabilityMode.fromConfig(flinkConfig);
    switch (mode) {
        case NONE:
            return new YarnIntraNonHaMasterServices(flinkConfig, hadoopConfig);

        case ZOOKEEPER:
            throw new UnsupportedOperationException("to be implemented");

        default:
            throw new IllegalConfigurationException("Unrecognized high availability mode: " + mode);
    }
}
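For context, a minimal caller sketch follows. It is a hypothetical bootstrap snippet (the configuration key "high-availability", the enclosing class name YarnHighAvailabilityServices, and the surrounding setup are assumptions for illustration, not part of the excerpt), showing the NONE branch being taken:

// Hypothetical usage inside a YARN Application Master: with the HA mode left at
// NONE, the factory returns the intra-process, non-HA master services.
Configuration flinkConfig = new Configuration();
flinkConfig.setString("high-availability", "NONE");
org.apache.hadoop.conf.Configuration hadoopConfig = new org.apache.hadoop.conf.Configuration();

YarnHighAvailabilityServices haServices =
        YarnHighAvailabilityServices.forSingleJobAppMaster(flinkConfig, hadoopConfig);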
Example 2: forYarnTaskManager
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
/**
 * Creates the high-availability services for the TaskManagers participating in
 * a Flink YARN application.
 *
 * @param flinkConfig The Flink configuration.
 * @param hadoopConfig The Hadoop configuration for the YARN cluster.
 *
 * @return The created high-availability services.
 *
 * @throws IOException Thrown, if the high-availability services could not be initialized.
 */
public static YarnHighAvailabilityServices forYarnTaskManager(
        Configuration flinkConfig,
        org.apache.hadoop.conf.Configuration hadoopConfig) throws IOException {

    checkNotNull(flinkConfig, "flinkConfig");
    checkNotNull(hadoopConfig, "hadoopConfig");

    final HighAvailabilityMode mode = HighAvailabilityMode.fromConfig(flinkConfig);
    switch (mode) {
        case NONE:
            return new YarnPreConfiguredMasterNonHaServices(
                    flinkConfig,
                    hadoopConfig,
                    HighAvailabilityServicesUtils.AddressResolution.TRY_ADDRESS_RESOLUTION);

        case ZOOKEEPER:
            throw new UnsupportedOperationException("to be implemented");

        default:
            throw new IllegalConfigurationException("Unrecognized high availability mode: " + mode);
    }
}
Example 3: checkNetworkBufferConfig
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
/**
 * Validates the (new) network buffer configuration.
 *
 * @param pageSize size of memory buffers
 * @param networkBufFraction fraction of JVM memory to use for network buffers
 * @param networkBufMin minimum memory size for network buffers (in bytes)
 * @param networkBufMax maximum memory size for network buffers (in bytes)
 *
 * @throws IllegalConfigurationException if the condition does not hold
 */
protected static void checkNetworkBufferConfig(
        final int pageSize, final float networkBufFraction, final long networkBufMin,
        final long networkBufMax) throws IllegalConfigurationException {

    checkConfigParameter(networkBufFraction > 0.0f && networkBufFraction < 1.0f, networkBufFraction,
        TaskManagerOptions.NETWORK_BUFFERS_MEMORY_FRACTION.key(),
        "Network buffer memory fraction of the free memory must be between 0.0 and 1.0");

    checkConfigParameter(networkBufMin >= pageSize, networkBufMin,
        TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MIN.key(),
        "Minimum memory for network buffers must allow at least one network " +
            "buffer with respect to the memory segment size");

    checkConfigParameter(networkBufMax >= pageSize, networkBufMax,
        TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MAX.key(),
        "Maximum memory for network buffers must allow at least one network " +
            "buffer with respect to the memory segment size");

    checkConfigParameter(networkBufMax >= networkBufMin, networkBufMax,
        TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MAX.key(),
        "Maximum memory for network buffers must not be smaller than minimum memory (" +
            TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MAX.key() + ": " + networkBufMin + ")");
}
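The checkConfigParameter helper used above is not part of this excerpt. A minimal sketch of such a guard, with its signature inferred from the call sites (the exact message formatting is an assumption), could look like this:

// Hypothetical helper: throws IllegalConfigurationException when the condition fails,
// reporting the offending value, the configuration key, and an explanation.
private static void checkConfigParameter(
        boolean condition, Object parameter, String name, String errorMessage)
        throws IllegalConfigurationException {
    if (!condition) {
        throw new IllegalConfigurationException(
            "Invalid configuration value for '" + name + "' : " + parameter + " - " + errorMessage);
    }
}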
Example 4: parameterOrConfigured
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
@Nullable
private static Path parameterOrConfigured(@Nullable Path path, Configuration config, ConfigOption<String> option) {
    if (path != null) {
        return path;
    }
    else {
        String configValue = config.getString(option);
        try {
            return configValue == null ? null : new Path(configValue);
        }
        catch (IllegalArgumentException e) {
            throw new IllegalConfigurationException("Cannot parse value for " + option.key() +
                    " : " + configValue + " . Not a valid path.");
        }
    }
}
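A hedged usage sketch: the explicitly passed path wins if present, otherwise the value is read from the given config option. The option used here (CheckpointingOptions.SAVEPOINT_DIRECTORY) is chosen purely for illustration:

// Hypothetical call site: no explicit path is passed, so the savepoint directory
// is resolved from the configuration; an unparsable value would trigger the
// IllegalConfigurationException above.
Configuration config = new Configuration();
config.setString(CheckpointingOptions.SAVEPOINT_DIRECTORY, "hdfs:///flink/savepoints");

Path savepointDir = parameterOrConfigured(null, config, CheckpointingOptions.SAVEPOINT_DIRECTORY);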
Example 5: createFromConfig
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
@Override
public FsStateBackend createFromConfig(Configuration config) throws IllegalConfigurationException {
    // we need to explicitly read the checkpoint directory here, because that
    // is a required constructor parameter
    final String checkpointDir = config.getString(CheckpointingOptions.CHECKPOINTS_DIRECTORY);

    if (checkpointDir == null) {
        throw new IllegalConfigurationException(
                "Cannot create the file system state backend: The configuration does not specify the " +
                "checkpoint directory '" + CheckpointingOptions.CHECKPOINTS_DIRECTORY.key() + '\'');
    }

    try {
        return new FsStateBackend(checkpointDir).configure(config);
    }
    catch (IllegalArgumentException e) {
        throw new IllegalConfigurationException("Invalid configuration for the state backend", e);
    }
}
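Assuming the surrounding class is a state backend factory such as FsStateBackendFactory, a minimal usage sketch might be:

// Hypothetical usage: the checkpoint directory must be present in the configuration,
// otherwise createFromConfig throws the IllegalConfigurationException shown above.
Configuration config = new Configuration();
config.setString(CheckpointingOptions.CHECKPOINTS_DIRECTORY, "file:///tmp/flink-checkpoints");

FsStateBackend backend = new FsStateBackendFactory().createFromConfig(config);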
Example 6: writeMyIdToDataDir
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
/**
 * Write 'myid' file to the 'dataDir' in the given ZooKeeper configuration.
 *
 * <blockquote>
 * Every machine that is part of the ZooKeeper ensemble should know about every other machine in
 * the ensemble. You accomplish this with the series of lines of the form
 * server.id=host:port:port. The parameters host and port are straightforward. You attribute the
 * server id to each machine by creating a file named myid, one for each server, which resides
 * in that server's data directory, as specified by the configuration file parameter dataDir.
 * </blockquote>
 *
 * @param zkProps ZooKeeper configuration.
 * @param id The ID of this {@link QuorumPeer}.
 * @throws IllegalConfigurationException Thrown, if 'dataDir' property not set in given
 *                                       ZooKeeper properties.
 * @throws IOException Thrown, if 'dataDir' does not exist and cannot be created.
 * @see <a href="http://zookeeper.apache.org/doc/r3.4.6/zookeeperAdmin.html">
 *      ZooKeeper Administrator's Guide</a>
 */
private static void writeMyIdToDataDir(Properties zkProps, int id) throws IOException {

    // Check dataDir and create if necessary
    if (zkProps.getProperty("dataDir") == null) {
        throw new IllegalConfigurationException("No dataDir configured.");
    }

    File dataDir = new File(zkProps.getProperty("dataDir"));

    if (!dataDir.isDirectory() && !dataDir.mkdirs()) {
        throw new IOException("Cannot create dataDir '" + dataDir + "'.");
    }

    dataDir.deleteOnExit();

    LOG.info("Writing {} to myid file in 'dataDir'.", id);

    // Write myid to file. We use a File Writer, because that properly propagates errors,
    // while the PrintWriter swallows errors
    try (FileWriter writer = new FileWriter(new File(dataDir, "myid"))) {
        writer.write(String.valueOf(id));
    }
}
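Because the method is private, it can only be invoked from within the same class; as a sketch of the expected input (the property values are illustrative, not from the original source):

// Hypothetical call: a ZooKeeper properties object with a dataDir entry; without
// that entry the method throws IllegalConfigurationException. Here "1" would be
// written into <dataDir>/myid.
Properties zkProps = new Properties();
zkProps.setProperty("dataDir", "/tmp/flink-zookeeper");

writeMyIdToDataDir(zkProps, 1);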
Example 7: testResumeFromYarnPropertiesFileWithFinishedApplication
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
@Test(expected = IllegalConfigurationException.class)
public void testResumeFromYarnPropertiesFileWithFinishedApplication() throws Exception {
    File directoryPath = writeYarnPropertiesFile(validPropertiesFile);

    // start CLI Frontend
    TestCLI frontend = new CustomYarnTestCLI(directoryPath.getAbsolutePath(), FinalApplicationStatus.SUCCEEDED);

    RunOptions options = CliFrontendParser.parseRunCommand(new String[] {});

    frontend.retrieveClient(options);
    checkJobManagerAddress(
        frontend.getConfiguration(),
        TEST_YARN_JOB_MANAGER_ADDRESS,
        TEST_YARN_JOB_MANAGER_PORT);
}
Example 8: testInvalidYarnPropertiesFile
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
@Test(expected = IllegalConfigurationException.class)
public void testInvalidYarnPropertiesFile() throws Exception {
    File directoryPath = writeYarnPropertiesFile(invalidPropertiesFile);

    TestCLI frontend = new CustomYarnTestCLI(directoryPath.getAbsolutePath());

    RunOptions options = CliFrontendParser.parseRunCommand(new String[] {});

    frontend.retrieveClient(options);
    Configuration config = frontend.getConfiguration();
    checkJobManagerAddress(
        config,
        TEST_JOB_MANAGER_ADDRESS,
        TEST_JOB_MANAGER_PORT);
}
Example 9: testResumeFromInvalidYarnID
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
@Test(expected = IllegalConfigurationException.class)
public void testResumeFromInvalidYarnID() throws Exception {
    File directoryPath = writeYarnPropertiesFile(validPropertiesFile);

    // start CLI Frontend
    TestCLI frontend = new CustomYarnTestCLI(directoryPath.getAbsolutePath(), FinalApplicationStatus.SUCCEEDED);

    RunOptions options =
        CliFrontendParser.parseRunCommand(new String[] {"-yid", ApplicationId.newInstance(0, 666).toString()});

    frontend.retrieveClient(options);
    checkJobManagerAddress(
        frontend.getConfiguration(),
        TEST_YARN_JOB_MANAGER_ADDRESS,
        TEST_YARN_JOB_MANAGER_PORT);
}
Example 10: testResumeFromYarnIDWithFinishedApplication
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
@Test(expected = IllegalConfigurationException.class)
public void testResumeFromYarnIDWithFinishedApplication() throws Exception {
    File directoryPath = writeYarnPropertiesFile(validPropertiesFile);

    // start CLI Frontend
    TestCLI frontend = new CustomYarnTestCLI(directoryPath.getAbsolutePath(), FinalApplicationStatus.SUCCEEDED);

    RunOptions options =
        CliFrontendParser.parseRunCommand(new String[] {"-yid", TEST_YARN_APPLICATION_ID.toString()});

    frontend.retrieveClient(options);
    checkJobManagerAddress(
        frontend.getConfiguration(),
        TEST_YARN_JOB_MANAGER_ADDRESS,
        TEST_YARN_JOB_MANAGER_PORT);
}
Example 11: testConfigureEmptyConfig
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
@Test
public void testConfigureEmptyConfig() {
    try {
        Configuration config = new Configuration();

        // empty configuration, plus no fields on the format itself is not valid
        try {
            format.configure(config);
            fail(); // should give an error
        } catch (IllegalConfigurationException e) {
            ; // okay
        }
    }
    catch (Exception ex) {
        Assert.fail("Test failed due to a " + ex.getClass().getSimpleName() + ": " + ex.getMessage());
    }
}
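With JUnit 4.13+ (or JUnit 5), the same expectation can be expressed more compactly. A sketch, assuming `format` is the input format under test:

// Hypothetical equivalent using assertThrows: an empty configuration with no fields
// set on the format itself must be rejected with IllegalConfigurationException.
Configuration config = new Configuration();
Assert.assertThrows(IllegalConfigurationException.class, () -> format.configure(config));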
Example 12: createFromConfig
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
@Override
public RocksDBStateBackend createFromConfig(Configuration config)
        throws IllegalConfigurationException, IOException {

    // we need to explicitly read the checkpoint directory here, because that
    // is a required constructor parameter
    final String checkpointDirURI = config.getString(CheckpointingOptions.CHECKPOINTS_DIRECTORY);

    if (checkpointDirURI == null) {
        throw new IllegalConfigurationException(
                "Cannot create the RocksDB state backend: The configuration does not specify the " +
                "checkpoint directory '" + CheckpointingOptions.CHECKPOINTS_DIRECTORY.key() + '\'');
    }

    return new RocksDBStateBackend(checkpointDirURI).configure(config);
}
Example 13: RocksDBStateBackend
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
/**
 * Private constructor that creates a re-configured copy of the state backend.
 *
 * @param original The state backend to re-configure.
 * @param config The configuration.
 */
private RocksDBStateBackend(RocksDBStateBackend original, Configuration config) {
    // reconfigure the state backend backing the streams
    final StateBackend originalStreamBackend = original.checkpointStreamBackend;
    this.checkpointStreamBackend = originalStreamBackend instanceof ConfigurableStateBackend ?
            ((ConfigurableStateBackend) originalStreamBackend).configure(config) :
            originalStreamBackend;

    // configure incremental checkpoints
    if (original.enableIncrementalCheckpointing != null) {
        this.enableIncrementalCheckpointing = original.enableIncrementalCheckpointing;
    }
    else {
        this.enableIncrementalCheckpointing =
                config.getBoolean(CheckpointingOptions.INCREMENTAL_CHECKPOINTS);
    }

    // configure local directories
    if (original.localRocksDbDirectories != null) {
        this.localRocksDbDirectories = original.localRocksDbDirectories;
    }
    else {
        final String rocksdbLocalPaths = config.getString(CheckpointingOptions.ROCKSDB_LOCAL_DIRECTORIES);
        if (rocksdbLocalPaths != null) {
            String[] directories = rocksdbLocalPaths.split(",|" + File.pathSeparator);

            try {
                setDbStoragePaths(directories);
            }
            catch (IllegalArgumentException e) {
                throw new IllegalConfigurationException("Invalid configuration for RocksDB state " +
                        "backend's local storage directories: " + e.getMessage(), e);
            }
        }
    }

    // copy remaining settings
    this.predefinedOptions = original.predefinedOptions;
    this.optionsFactory = original.optionsFactory;
}
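The private copy constructor above is typically reached through a public configure method; a minimal sketch of that hand-off (not part of the excerpt, shown only as an assumption about the enclosing class) might be:

// Hypothetical public entry point: returns a re-configured copy of this backend,
// built by the private constructor shown above.
@Override
public RocksDBStateBackend configure(Configuration config) {
    return new RocksDBStateBackend(this, config);
}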
Example 14: unresolvedHostToNormalizedString
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
/**
 * Returns an address in a normalized format for Akka.
 * When an IPv6 address is specified, it normalizes the IPv6 address to avoid
 * complications with the exact URL match policy of Akka.
 *
 * @param host The hostname, IPv4 or IPv6 address
 * @return host which will be normalized if it is an IPv6 address
 */
public static String unresolvedHostToNormalizedString(String host) {

    // Return loopback interface address if host is null
    // This represents the behavior of {@code InetAddress.getByName } and RFC 3330
    if (host == null) {
        host = InetAddress.getLoopbackAddress().getHostAddress();
    } else {
        host = host.trim().toLowerCase();
    }

    // normalize and validate the address
    if (IPAddressUtil.isIPv6LiteralAddress(host)) {
        byte[] ipV6Address = IPAddressUtil.textToNumericFormatV6(host);
        host = getIPv6UrlRepresentation(ipV6Address);
    } else if (!IPAddressUtil.isIPv4LiteralAddress(host)) {
        try {
            // We don't allow these in hostnames
            Preconditions.checkArgument(!host.startsWith("."));
            Preconditions.checkArgument(!host.endsWith("."));
            Preconditions.checkArgument(!host.contains(":"));
        } catch (Exception e) {
            throw new IllegalConfigurationException("The configured hostname is not valid", e);
        }
    }

    return host;
}
Example 15: initialize
import org.apache.flink.configuration.IllegalConfigurationException; // import the dependent package/class
/**
 * Initializes the shared file system settings.
 *
 * <p>The given configuration is passed to each file system factory to initialize the respective
 * file systems. Because the configuration of file systems may be different subsequent to the call
 * of this method, this method clears the file system instance cache.
 *
 * <p>This method also reads the default file system URI from the configuration key
 * {@link CoreOptions#DEFAULT_FILESYSTEM_SCHEME}. All calls to {@link FileSystem#get(URI)} where
 * the URI has no scheme will be interpreted as relative to that URI.
 * As an example, assume the default file system URI is set to {@code 'hdfs://localhost:9000/'}.
 * A file path of {@code '/user/USERNAME/in.txt'} is interpreted as
 * {@code 'hdfs://localhost:9000/user/USERNAME/in.txt'}.
 *
 * @param config the configuration from where to fetch the parameter.
 */
public static void initialize(Configuration config) throws IOException, IllegalConfigurationException {
    LOCK.lock();
    try {
        // make sure file systems are re-instantiated after re-configuration
        CACHE.clear();
        FS_FACTORIES.clear();

        // configure all file system factories
        for (FileSystemFactory factory : RAW_FACTORIES) {
            factory.configure(config);
            String scheme = factory.getScheme();

            FileSystemFactory fsf = ConnectionLimitingFactory.decorateIfLimited(factory, scheme, config);
            FS_FACTORIES.put(scheme, fsf);
        }

        // configure the default (fallback) factory
        FALLBACK_FACTORY.configure(config);

        // also read the default file system scheme
        final String stringifiedUri = config.getString(CoreOptions.DEFAULT_FILESYSTEM_SCHEME, null);
        if (stringifiedUri == null) {
            DEFAULT_SCHEME = null;
        }
        else {
            try {
                DEFAULT_SCHEME = new URI(stringifiedUri);
            }
            catch (URISyntaxException e) {
                throw new IllegalConfigurationException("The default file system scheme ('" +
                        CoreOptions.DEFAULT_FILESYSTEM_SCHEME + "') is invalid: " + stringifiedUri, e);
            }
        }
    }
    finally {
        LOCK.unlock();
    }
}
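A minimal usage sketch, assuming the enclosing class is Flink's FileSystem (the scheme and path are illustrative):

// Hypothetical usage: configure a default file system scheme and (re-)initialize the
// shared settings; scheme-less URIs are then resolved against that default.
Configuration config = new Configuration();
config.setString(CoreOptions.DEFAULT_FILESYSTEM_SCHEME, "hdfs://localhost:9000/");

FileSystem.initialize(config);
FileSystem fs = FileSystem.get(URI.create("/user/USERNAME/in.txt")); // resolved against hdfs://localhost:9000/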