This article collects and summarizes typical usage examples of the Java method org.apache.flink.configuration.Configuration.setString. If you are unsure what Configuration.setString does, how to use it, or where to find real-world examples, the curated code samples below should help. You can also explore the enclosing class, org.apache.flink.configuration.Configuration,
for further context.
The following 15 code examples of Configuration.setString are shown, ordered by popularity by default.
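Before the examples, here is a minimal, self-contained sketch of the two common setString overloads (a raw string key versus a typed ConfigOption); the key "mycompany.env.name" and its value are illustrative only:

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.JobManagerOptions;

public class SetStringSketch {
    public static void main(String[] args) {
        Configuration config = new Configuration();
        // Overload 1: raw string key and value (hypothetical key)
        config.setString("mycompany.env.name", "staging");
        // Overload 2: typed ConfigOption<String> key
        config.setString(JobManagerOptions.ADDRESS, "localhost");
        // Values can be read back with a default fallback
        System.out.println(config.getString(JobManagerOptions.ADDRESS, "unknown"));
    }
}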
Example 1: deploy
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
@Override
public YarnClusterClient deploy() {
    ApplicationSubmissionContext context = Records.newRecord(ApplicationSubmissionContext.class);
    context.setApplicationId(job.yarnAppId());
    ApplicationReport report;
    try {
        report = startAppMaster(context);
        Configuration conf = getFlinkConfiguration();
        conf.setString(JobManagerOptions.ADDRESS.key(), report.getHost());
        conf.setInteger(JobManagerOptions.PORT.key(), report.getRpcPort());
        return createYarnClusterClient(this, yarnClient, report, conf, false);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
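As an illustrative aside, not part of the original example, values written through the option keys above can be read back through the same typed options, so downstream code can recover the JobManager endpoint:

// Illustrative round-trip of the values set in deploy(); host and port are placeholders
Configuration conf = new Configuration();
conf.setString(JobManagerOptions.ADDRESS.key(), "jm-host");
conf.setInteger(JobManagerOptions.PORT.key(), 6123);
String address = conf.getString(JobManagerOptions.ADDRESS, null);
int port = conf.getInteger(JobManagerOptions.PORT, -1);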
Example 2: getSink
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * Generic sink for writing data into HDFS (Map variant).
 * @param topic the topic name
 * @return a SaveHdfsSink
 */
public static SinkFunction<String> getSink(String topic) {
    Configuration configuration = new Configuration();
    // TODO: add the Hadoop configuration entries
    configuration.setString("dfs.namenode.name.dir", "file:///home/hadmin/data/hadoop/hdfs/name");
    configuration.setString("dfs.nameservices", "ns");
    configuration.setString("dfs.ha.namenodes.ns", "nn1,nn2");
    configuration.setString("dfs.namenode.rpc-address.ns.nn1", "10.11.0.193:9000");
    configuration.setString("dfs.namenode.rpc-address.ns.nn2", "10.11.0.194:9000");
    configuration.setString("dfs.namenode.shared.edits.dir", "qjournal://10.11.0.193:8485;10.11.0.194:8485;10.11.0.195:8485/ns");
    configuration.setString("hadoop.tmp.dir", "/home/hadmin/data/hadoop/tmp");
    configuration.setString("fs.defaultFS", "hdfs://ns");
    configuration.setString("dfs.journalnode.edits.dir", "/home/hadmin/data/hadoop/journal");
    configuration.setString("ha.zookeeper.quorum", "10.11.0.193:2181,10.11.0.194:2181,10.11.0.195:2181");
    configuration.setString("mapreduce.input.fileinputformat.split.minsize", "10");
    TODBucketingSink<String> sink = new TODBucketingSink<>("/xml/" + topic + "/");
    sink.setBucketer(new DateTimeBucketer<>("yyyy/MM/dd/HH"));
    sink.setWriter(new StringWriter<>());
    sink.setPendingPrefix("source");
    sink.setPendingSuffix(".txt");
    sink.setFSConfig(configuration);
    // set the async flush timeout of the Flink bucketing sink (milliseconds)
    sink.setAsyncTimeout(60000L);
    return sink;
}
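A hypothetical way to wire this factory into a job; the socket source, port, topic name, and job name below are placeholders, not from the original code:

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.socketTextStream("localhost", 9999)   // placeholder source
        .addSink(getSink("my-topic"));        // attach the HDFS sink built above
    env.execute("write-xml-to-hdfs");
}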
Example 3: loadConfiguration
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * Loads the global configuration, adds the given dynamic properties configuration, and sets
 * the temp directory paths.
 *
 * @param dynamicProperties dynamic properties to integrate
 * @param log logger instance
 * @return the loaded and adapted global configuration
 */
public static Configuration loadConfiguration(Configuration dynamicProperties, Logger log) {
    Configuration configuration =
        GlobalConfiguration.loadConfigurationWithDynamicProperties(dynamicProperties);

    // read the environment variables
    final Map<String, String> envs = System.getenv();
    final String tmpDirs = envs.get(MesosConfigKeys.ENV_FLINK_TMP_DIR);

    // configure local directory
    if (configuration.contains(CoreOptions.TMP_DIRS)) {
        log.info("Overriding Mesos' temporary file directories with those " +
            "specified in the Flink config: " + configuration.getValue(CoreOptions.TMP_DIRS));
    }
    else if (tmpDirs != null) {
        log.info("Setting directories for temporary files to: {}", tmpDirs);
        configuration.setString(CoreOptions.TMP_DIRS, tmpDirs);
    }

    return configuration;
}
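A sketch of invoking this helper at startup; the dynamic property value and logger name are assumptions. Note that because TMP_DIRS is pre-set here, the method would take the override branch and ignore the Mesos environment value:

import org.slf4j.LoggerFactory;

Configuration dynamicProperties = new Configuration();
dynamicProperties.setString(CoreOptions.TMP_DIRS, "/mnt/mesos/sandbox/tmp"); // illustrative path
Configuration merged = loadConfiguration(dynamicProperties, LoggerFactory.getLogger("bootstrap"));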
Example 4: testYarnIDOverridesPropertiesFile
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
@Test
public void testYarnIDOverridesPropertiesFile() throws Exception {
    File directoryPath = writeYarnPropertiesFile(validPropertiesFile);

    final Configuration configuration = new Configuration();
    configuration.setString(YarnConfigOptions.PROPERTIES_FILE_LOCATION, directoryPath.getAbsolutePath());

    final FlinkYarnSessionCli flinkYarnSessionCli = new FlinkYarnSessionCli(
        configuration,
        tmp.getRoot().getAbsolutePath(),
        "y",
        "yarn");

    final CommandLine commandLine = flinkYarnSessionCli.parseCommandLineOptions(new String[] {"-yid", TEST_YARN_APPLICATION_ID_2.toString()}, true);
    final ApplicationId clusterId = flinkYarnSessionCli.getClusterId(commandLine);
    assertEquals(TEST_YARN_APPLICATION_ID_2, clusterId);
}
Example 5: testMultipleReporterInstantiation
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * Verifies that multiple reporters are instantiated correctly.
 */
@Test
public void testMultipleReporterInstantiation() {
    Configuration config = new Configuration();
    config.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test1." + ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX, TestReporter11.class.getName());
    config.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test2." + ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX, TestReporter12.class.getName());
    config.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test3." + ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX, TestReporter13.class.getName());

    MetricRegistryImpl metricRegistry = new MetricRegistryImpl(MetricRegistryConfiguration.fromConfiguration(config));

    assertTrue(metricRegistry.getReporters().size() == 3);

    Assert.assertTrue(TestReporter11.wasOpened);
    Assert.assertTrue(TestReporter12.wasOpened);
    Assert.assertTrue(TestReporter13.wasOpened);

    metricRegistry.shutdown();
}
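For orientation, the concatenated constants in each setString call resolve to a plain key of the form metrics.reporter.&lt;name&gt;.class; written out literally (with a hypothetical reporter class name), the first call is equivalent to:

// Assumed literal form of the concatenated reporter key for "test1"
config.setString("metrics.reporter.test1.class", "com.example.MyReporter");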
Example 6: testClusterClientCancelWithSavepoint
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
@Test
public void testClusterClientCancelWithSavepoint() throws Exception {
    Configuration config = new Configuration();
    config.setString(JobManagerOptions.ADDRESS, "localhost");

    JobID jobID = new JobID();
    String savepointDirectory = "/test/directory";
    String savepointPath = "/test/path";

    TestCancelWithSavepointActorGateway gateway = new TestCancelWithSavepointActorGateway(jobID, savepointDirectory, savepointPath);
    TestClusterClient clusterClient = new TestClusterClient(config, gateway);
    try {
        String path = clusterClient.cancelWithSavepoint(jobID, savepointDirectory);
        Assert.assertTrue(gateway.messageArrived);
        Assert.assertEquals(savepointPath, path);
    } finally {
        clusterClient.shutdown();
    }
}
Example 7: substituteDeprecatedConfigPrefix
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * Sets the value of a new config key to the value of a deprecated config key,
 * taking the changed prefix into account.
 *
 * @param config Config to write
 * @param deprecatedPrefix Old prefix of key
 * @param designatedPrefix New prefix of key
 */
public static void substituteDeprecatedConfigPrefix(
        Configuration config,
        String deprecatedPrefix,
        String designatedPrefix) {

    // set the designated key only if it is not set already
    final int prefixLen = deprecatedPrefix.length();

    Configuration replacement = new Configuration();

    for (String key : config.keySet()) {
        if (key.startsWith(deprecatedPrefix)) {
            String newKey = designatedPrefix + key.substring(prefixLen);
            if (!config.containsKey(newKey)) {
                replacement.setString(newKey, config.getString(key, null));
            }
        }
    }

    config.addAll(replacement);
}
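A usage sketch; the prefixes and the key below are hypothetical:

Configuration config = new Configuration();
config.setString("old.prefix.timeout", "30 s");
substituteDeprecatedConfigPrefix(config, "old.prefix.", "new.prefix.");
// config now also contains "new.prefix.timeout" -> "30 s";
// the deprecated key is kept, and pre-existing designated keys are never overwritten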
Example 8: startSSLServer
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * Starts the SSL enabled BLOB server.
 */
@BeforeClass
public static void startSSLServer() throws IOException {
    Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY,
        temporarySslFolder.newFolder().getAbsolutePath());
    config.setBoolean(SecurityOptions.SSL_ENABLED, true);
    config.setString(SecurityOptions.SSL_KEYSTORE, "src/test/resources/local127.keystore");
    config.setString(SecurityOptions.SSL_KEYSTORE_PASSWORD, "password");
    config.setString(SecurityOptions.SSL_KEY_PASSWORD, "password");

    blobSslServer = new BlobServer(config, new VoidBlobStore());
    blobSslServer.start();

    sslClientConfig = new Configuration();
    sslClientConfig.setBoolean(SecurityOptions.SSL_ENABLED, true);
    sslClientConfig.setString(SecurityOptions.SSL_TRUSTSTORE, "src/test/resources/local127.truststore");
    sslClientConfig.setString(SecurityOptions.SSL_TRUSTSTORE_PASSWORD, "password");
}
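A hedged sketch of connecting with the client config built above; the BlobClient usage is an assumption based on standard Flink APIs, not part of the original test:

import java.net.InetSocketAddress;

InetSocketAddress serverAddress = new InetSocketAddress("localhost", blobSslServer.getPort());
try (BlobClient client = new BlobClient(serverAddress, sslClientConfig)) {
    // the connection is negotiated over SSL using the truststore configured above
}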
Example 9: testCorrectnessOfAllGroupReduceForTuplesWithCombine
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
@Test
public void testCorrectnessOfAllGroupReduceForTuplesWithCombine() throws Exception {
    /*
     * check correctness of all-groupreduce for tuples with combine
     */
    org.junit.Assume.assumeTrue(mode != TestExecutionMode.COLLECTION);

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.get3TupleDataSet(env)
        .map(new IdentityMapper<Tuple3<Integer, Long, String>>()).setParallelism(4);

    Configuration cfg = new Configuration();
    cfg.setString(Optimizer.HINT_SHIP_STRATEGY, Optimizer.HINT_SHIP_STRATEGY_REPARTITION);

    DataSet<Tuple2<Integer, String>> reduceDs = ds.reduceGroup(new Tuple3AllGroupReduceWithCombine())
        .withParameters(cfg);

    List<Tuple2<Integer, String>> result = reduceDs.collect();

    String expected = "322," +
        "testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest\n";

    compareResultAsTuples(result, expected);
}
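As background, not shown in the original test, withParameters(cfg) attaches the Configuration to the operator: the optimizer reads the ship-strategy hint, and a rich function can read the same parameters in open(). A sketch, with an illustrative body:

// Sketch: parameters attached via withParameters(cfg) arrive in the rich function's open()
@Override
public void open(Configuration parameters) throws Exception {
    String hint = parameters.getString(Optimizer.HINT_SHIP_STRATEGY, "(none)");
    // hint equals Optimizer.HINT_SHIP_STRATEGY_REPARTITION for the test above
}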
Example 10: before
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
@Before
public void before() throws Exception {
    system = AkkaUtils.createLocalActorSystem(new Configuration());

    Configuration config = new Configuration();
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 1);
    config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 1);
    config.setString(AkkaOptions.ASK_TIMEOUT, TestingUtils.DEFAULT_AKKA_ASK_TIMEOUT());

    TestingCluster testingCluster = new TestingCluster(config, false, true);
    testingCluster.start();

    jobManagerGateway = testingCluster.getLeaderGateway(TestingUtils.TESTING_DURATION());
    taskManager = testingCluster.getTaskManagersAsJava().get(0);

    // generate test data
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        inputData.add(i, String.valueOf(i + 1));
    }

    NotifyingMapper.finished = false;
}
Example 11: testCreateAthenaXCluster
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
@Test
public void testCreateAthenaXCluster() throws Exception {
    ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
    Configuration flinkConf = new Configuration();
    flinkConf.setString(JobManagerOptions.ADDRESS, "localhost");

    try (MiniAthenaXCluster cluster = new MiniAthenaXCluster(JobDeployerITest.class.getSimpleName())) {
        cluster.start();
        YarnConfiguration conf = cluster.getYarnConfiguration();
        YarnClusterConfiguration clusterConf = cluster.getYarnClusterConf();
        final ApplicationId appId;

        try (YarnClient client = YarnClient.createYarnClient()) {
            client.init(conf);
            client.start();
            JobDeployer deployer = new JobDeployer(clusterConf, client, executor, flinkConf);
            appId = deployer.createApplication();
            InstanceMetadata md = new InstanceMetadata(UUID.randomUUID(), UUID.randomUUID());
            JobConf jobConf = new JobConf(appId, "test", Collections.emptyList(), null, 1, 2048, md);
            deployer.start(JobITestUtil.trivialJobGraph(), jobConf);

            YarnApplicationState state = MiniAthenaXCluster.pollFinishedApplicationState(client, appId);
            assertEquals(FINISHED, state);
        }
    }
}
Example 12: testBlobNoJobFetchWithTooManyFailures
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * A test where the connection fails too often and eventually fails the GET request
 * (job-unrelated blob).
 */
@Test
public void testBlobNoJobFetchWithTooManyFailures() throws IOException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY,
        temporaryFolder.newFolder().getAbsolutePath());

    testBlobFetchWithTooManyFailures(config, new VoidBlobStore(), null, TRANSIENT_BLOB);
}
Example 13: before
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
@Before
public void before() throws TimeoutException, InterruptedException {
    jobId = HighAvailabilityServices.DEFAULT_JOB_ID;

    Tasks.BlockingOnceReceiver$.MODULE$.blocking_$eq(true);

    Configuration configuration = new Configuration();
    configuration.setInteger(ConfigConstants.LOCAL_NUMBER_JOB_MANAGER, 1);
    configuration.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, numTMs);
    configuration.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, numSlotsPerTM);
    configuration.setString(ConfigConstants.RESTART_STRATEGY, "fixeddelay");
    configuration.setInteger(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_ATTEMPTS, 9999);
    configuration.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "100 milli");

    highAvailabilityServices = new TestingManualHighAvailabilityServices();

    cluster = new TestingCluster(
        configuration,
        highAvailabilityServices,
        true,
        false);

    cluster.start(false);

    // wait for actors to be alive so that they have started their leader retrieval service
    cluster.waitForActorsToBeAlive();
}
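For reference, and assuming the ConfigConstants above resolve to the same literal keys as in mainline Flink, the restart-strategy settings are equivalent to the raw entries below:

// Assumed literal-key equivalents of the ConfigConstants used above
Configuration c = new Configuration();
c.setString("restart-strategy", "fixeddelay");
c.setInteger("restart-strategy.fixed-delay.attempts", 9999);
c.setString("restart-strategy.fixed-delay.delay", "100 milli");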
Example 14: testApplicationDefinedHasPrecedence
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
@Test
public void testApplicationDefinedHasPrecedence() throws Exception {
    final StateBackend appBackend = Mockito.mock(StateBackend.class);

    final Configuration config = new Configuration();
    config.setString(backendKey, "jobmanager");

    StateBackend backend = StateBackendLoader.fromApplicationOrConfigOrDefault(appBackend, config, cl, null);
    assertEquals(appBackend, backend);
}
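By contrast (a sketch, not part of the original test), passing null as the application-defined backend makes the loader fall back to the configured value, so the "jobmanager" entry above would then decide the backend type:

// Sketch: with no application-defined backend, the configured value decides
StateBackend fromConfig = StateBackendLoader.fromApplicationOrConfigOrDefault(null, config, cl, null);
// "jobmanager" is expected to resolve to a memory-based state backend here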
Example 15: testBlobForJobFetchRetries
import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * A test where the connection fails twice and then the get operation succeeds
 * (job-related blob).
 */
@Test
public void testBlobForJobFetchRetries() throws IOException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY,
        temporaryFolder.newFolder().getAbsolutePath());

    testBlobFetchRetries(config, new VoidBlobStore(), new JobID(), TRANSIENT_BLOB);
}