This article collects typical usage examples of the Java method org.apache.flink.configuration.Configuration.setInteger. If you are wondering how Configuration.setInteger is used in practice, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.flink.configuration.Configuration.
The 15 code examples of Configuration.setInteger shown below are sorted by popularity by default.
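Before the examples, here is a minimal self-contained sketch of the method itself (the class name SetIntegerSketch is made up for illustration). Configuration.setInteger accepts either a plain string key or a typed ConfigOption&lt;Integer&gt;; both forms appear in the examples that follow, e.g. the string key "taskmanager.numberOfTaskSlots" in Example 6 and TaskManagerOptions.NETWORK_NUM_BUFFERS in Example 11.
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.TaskManagerOptions;

// Illustrative sketch only; class and values are not taken from the examples below.
public class SetIntegerSketch {
    public static void main(String[] args) {
        Configuration config = new Configuration();
        // setInteger with a plain string key
        config.setInteger("taskmanager.numberOfTaskSlots", 4);
        // setInteger with a typed ConfigOption
        config.setInteger(TaskManagerOptions.NETWORK_NUM_BUFFERS, 100);
        // getInteger takes a default value that is returned for keys that were never set
        int slots = config.getInteger("taskmanager.numberOfTaskSlots", 1);
        System.out.println("configured slots: " + slots);
    }
}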
Example 1: deploy
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
@Override
public YarnClusterClient deploy() {
    ApplicationSubmissionContext context = Records.newRecord(ApplicationSubmissionContext.class);
    context.setApplicationId(job.yarnAppId());
    ApplicationReport report;
    try {
        report = startAppMaster(context);
        Configuration conf = getFlinkConfiguration();
        conf.setString(JobManagerOptions.ADDRESS.key(), report.getHost());
        conf.setInteger(JobManagerOptions.PORT.key(), report.getRpcPort());
        return createYarnClusterClient(this, yarnClient, report, conf, false);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Example 2: setupBlobServer
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
@Before
public void setupBlobServer() throws IOException {
    Configuration config = new Configuration();
    // always offload the serialized job and task information
    config.setInteger(BlobServerOptions.OFFLOAD_MINSIZE, 0);
    blobServer = Mockito.spy(new BlobServer(config, new VoidBlobStore()));
    blobWriter = blobServer;
    blobCache = blobServer;
    seenHashes.clear();
    // verify that we do not upload the same content more than once
    doAnswer(
        invocation -> {
            PermanentBlobKey key = (PermanentBlobKey) invocation.callRealMethod();
            assertTrue(seenHashes.add(key.getHash()));
            return key;
        }
    ).when(blobServer).putPermanent(any(JobID.class), Matchers.<byte[]>any());
    blobServer.start();
}
Example 3: setup
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
@Before
public void setup() throws Exception {
    // detect parameter change
    if (currentBackend != backend) {
        shutDownExistingCluster();
        currentBackend = backend;
        Configuration config = new Configuration();
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, numTaskManagers);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, slotsPerTaskManager);
        final File checkpointDir = temporaryFolder.newFolder();
        final File savepointDir = temporaryFolder.newFolder();
        config.setString(CheckpointingOptions.STATE_BACKEND, currentBackend);
        config.setString(CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir.toURI().toString());
        config.setString(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir.toURI().toString());
        cluster = new TestingCluster(config);
        cluster.start();
    }
}
Example 4: startCluster
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
@BeforeClass
public static void startCluster() {
    try {
        Configuration config = new Configuration();
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TASK_MANAGERS);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_TASK_SLOTS);
        config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 12L);
        cluster = new LocalFlinkMiniCluster(config, false);
        cluster.start();
    }
    catch (Exception e) {
        e.printStackTrace();
        fail("Failed to start test cluster: " + e.getMessage());
    }
}
Example 5: generateTaskManagerConfiguration
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
/**
 * Generate a task manager configuration.
 *
 * @param baseConfig Config to start from.
 * @param jobManagerHostname Job manager host name.
 * @param jobManagerPort Port of the job manager.
 * @param numSlots Number of slots to configure.
 * @param registrationTimeout Timeout for registration
 * @return TaskManager configuration
 */
public static Configuration generateTaskManagerConfiguration(
        Configuration baseConfig,
        String jobManagerHostname,
        int jobManagerPort,
        int numSlots,
        FiniteDuration registrationTimeout) {
    Configuration cfg = baseConfig.clone();
    if (jobManagerHostname != null && !jobManagerHostname.isEmpty()) {
        cfg.setString(JobManagerOptions.ADDRESS, jobManagerHostname);
    }
    if (jobManagerPort > 0) {
        cfg.setInteger(JobManagerOptions.PORT, jobManagerPort);
    }
    cfg.setString(ConfigConstants.TASK_MANAGER_MAX_REGISTRATION_DURATION, registrationTimeout.toString());
    if (numSlots != -1) {
        cfg.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, numSlots);
    }
    return cfg;
}
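For orientation, a call to the helper above might look like the following sketch. The enclosing class is not shown in Example 5, so the call is written as if made from that same class; the host name, port, slot count, and 30-second registration timeout are placeholder values, and FiniteDuration comes from scala.concurrent.duration with TimeUnit from java.util.concurrent.
// Illustrative usage sketch with assumed placeholder values.
Configuration base = new Configuration();
FiniteDuration registrationTimeout = new FiniteDuration(30, TimeUnit.SECONDS); // assumed timeout
// placeholder job manager address, port, and slot count
Configuration tmConfig = generateTaskManagerConfiguration(base, "localhost", 6123, 4, registrationTimeout);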
Example 6: execute
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
static LocalFlinkMiniCluster execute(LocalStreamEnvironment env,
        Configuration conf, String jobName) throws Exception {
    StreamGraph streamGraph = env.getStreamGraph();
    streamGraph.setJobName(jobName);
    JobGraph jobGraph = streamGraph.getJobGraph();
    Configuration configuration = new Configuration(conf);
    configuration.addAll(jobGraph.getJobConfiguration());
    configuration.setLong("taskmanager.memory.size", -1L);
    configuration.setInteger("taskmanager.numberOfTaskSlots", jobGraph.getMaximumParallelism());
    LocalFlinkMiniCluster cluster = new LocalFlinkMiniCluster(configuration, true);
    cluster.start();
    cluster.submitJobDetached(jobGraph);
    return cluster;
}
Example 7: prepare
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
@BeforeClass
public static void prepare() throws IOException, ClassNotFoundException {
    LOG.info("-------------------------------------------------------------------------");
    LOG.info(" Starting KafkaShortRetentionTestBase ");
    LOG.info("-------------------------------------------------------------------------");
    Configuration flinkConfig = new Configuration();
    // dynamically load the implementation for the test
    Class<?> clazz = Class.forName("org.apache.flink.streaming.connectors.kafka.KafkaTestEnvironmentImpl");
    kafkaServer = (KafkaTestEnvironment) InstantiationUtil.instantiate(clazz);
    LOG.info("Starting KafkaTestBase.prepare() for Kafka " + kafkaServer.getVersion());
    if (kafkaServer.isSecureRunSupported()) {
        secureProps = kafkaServer.getSecureProperties();
    }
    Properties specificProperties = new Properties();
    specificProperties.setProperty("log.retention.hours", "0");
    specificProperties.setProperty("log.retention.minutes", "0");
    specificProperties.setProperty("log.retention.ms", "250");
    specificProperties.setProperty("log.retention.check.interval.ms", "100");
    kafkaServer.prepare(kafkaServer.createConfig().setKafkaServerProperties(specificProperties));
    standardProps = kafkaServer.getStandardProperties();
    // also start a reusable Flink mini cluster
    flinkConfig.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TMS);
    flinkConfig.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, TM_SLOTS);
    flinkConfig.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 16L);
    flinkConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");
    flink = new LocalFlinkMiniCluster(flinkConfig, false);
    flink.start();
    TestStreamEnvironment.setAsContext(flink, PARALLELISM);
}
Example 8: setupCluster
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
@BeforeClass
public static void setupCluster() {
    Configuration config = new Configuration();
    config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TM);
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, SLOTS_PER_TM);
    cluster = new LocalFlinkMiniCluster(config, false);
    cluster.start();
    env = new TestEnvironment(cluster, NUM_TM * SLOTS_PER_TM, false);
}
Example 9: configureZooKeeperHA
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
/**
 * Sets all necessary configuration keys to operate in {@link HighAvailabilityMode#ZOOKEEPER}.
 *
 * @param config Configuration to use
 * @param zooKeeperQuorum ZooKeeper quorum to connect to
 * @param fsStateHandlePath Base path for file system state backend (for checkpoints and
 *                          recovery)
 * @return The modified configuration to operate in {@link HighAvailabilityMode#ZOOKEEPER}.
 */
public static Configuration configureZooKeeperHA(
        Configuration config,
        String zooKeeperQuorum,
        String fsStateHandlePath) {
    checkNotNull(config, "Configuration");
    checkNotNull(zooKeeperQuorum, "ZooKeeper quorum");
    checkNotNull(fsStateHandlePath, "File state handle backend path");
    // Web frontend, you have been dismissed. Sorry.
    config.setInteger(WebOptions.PORT, -1);
    // ZooKeeper recovery mode
    config.setString(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
    config.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, zooKeeperQuorum);
    int connTimeout = 5000;
    if (System.getenv().containsKey("CI")) {
        // The regular timeout is too aggressive for Travis and connections are often lost.
        connTimeout = 30000;
    }
    config.setInteger(HighAvailabilityOptions.ZOOKEEPER_CONNECTION_TIMEOUT, connTimeout);
    config.setInteger(HighAvailabilityOptions.ZOOKEEPER_SESSION_TIMEOUT, connTimeout);
    // File system state backend
    config.setString(CheckpointingOptions.STATE_BACKEND, "FILESYSTEM");
    config.setString(CheckpointingOptions.CHECKPOINTS_DIRECTORY, fsStateHandlePath + "/checkpoints");
    config.setString(HighAvailabilityOptions.HA_STORAGE_PATH, fsStateHandlePath + "/recovery");
    // Akka failure detection and execution retries
    config.setString(AkkaOptions.WATCH_HEARTBEAT_INTERVAL, "1000 ms");
    config.setString(AkkaOptions.WATCH_HEARTBEAT_PAUSE, "6 s");
    config.setInteger(AkkaOptions.WATCH_THRESHOLD, 9);
    config.setString(AkkaOptions.ASK_TIMEOUT, "100 s");
    config.setString(HighAvailabilityOptions.HA_JOB_DELAY, "10 s");
    return config;
}
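A hedged usage sketch for the helper above: the quorum string and the HA storage path are placeholders, and the call is written as if made from the class that declares the method (which Example 9 does not show).
// Illustrative usage sketch with assumed placeholder values.
Configuration config = new Configuration();
// placeholder ZooKeeper quorum and HA storage path
config = configureZooKeeperHA(config, "zk-1:2181,zk-2:2181,zk-3:2181", "hdfs:///flink/ha");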
Example 10: setupBlobServer
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
@Before
@Override
public void setupBlobServer() throws IOException {
    Configuration config = new Configuration();
    // always offload the serialized job and task information
    config.setInteger(BlobServerOptions.OFFLOAD_MINSIZE, 0);
    blobServer = new BlobServer(config, new VoidBlobStore());
    blobServer.start();
    blobWriter = blobServer;
    InetSocketAddress serverAddress = new InetSocketAddress("localhost", blobServer.getPort());
    blobCache = new PermanentBlobCache(serverAddress, config, new VoidBlobStore());
}
Example 11: main
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
/**
 * All arguments are parsed into a {@link Configuration} and passed to the TaskManager,
 * for instance: <code>--high-availability ZOOKEEPER --high-availability.zookeeper.quorum "xyz:123:456"</code>.
 */
public static void main(String[] args) throws Exception {
    try {
        Configuration config = ParameterTool.fromArgs(args).getConfiguration();
        if (!config.contains(TaskManagerOptions.MANAGED_MEMORY_SIZE)) {
            config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 4L);
        }
        if (!config.contains(TaskManagerOptions.NETWORK_NUM_BUFFERS)) {
            config.setInteger(TaskManagerOptions.NETWORK_NUM_BUFFERS, 100);
        }
        LOG.info("Configuration: {}.", config);
        // Run the TaskManager
        TaskManager.selectNetworkInterfaceAndRunTaskManager(
            config,
            ResourceID.generate(),
            TaskManager.class);
        // Run forever
        new CountDownLatch(1).await();
    }
    catch (Throwable t) {
        LOG.error("Failed to start TaskManager process", t);
        System.exit(1);
    }
}
Example 12: hasNewNetworkBufConfOld
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
/**
 * Verifies that {@link TaskManagerServicesConfiguration#hasNewNetworkBufConf(Configuration)}
 * returns the correct result for old configurations via
 * {@link TaskManagerOptions#NETWORK_NUM_BUFFERS}.
 */
@SuppressWarnings("deprecation")
@Test
public void hasNewNetworkBufConfOld() throws Exception {
    Configuration config = new Configuration();
    config.setInteger(TaskManagerOptions.NETWORK_NUM_BUFFERS, 1);
    assertFalse(TaskManagerServicesConfiguration.hasNewNetworkBufConf(config));
}
Example 13: getConfigurations
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
@Parameters
public static Collection<Object[]> getConfigurations() throws FileNotFoundException, IOException {
    LinkedList<Configuration> tConfigs = new LinkedList<Configuration>();
    for (int i = 1; i <= NUM_PROGRAMS; i++) {
        Configuration config = new Configuration();
        config.setInteger("ProgramId", i);
        tConfigs.add(config);
    }
    return TestBaseUtils.toParameterList(tConfigs);
}
Example 14: RemoteExecutor
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
public RemoteExecutor(InetSocketAddress inet, Configuration clientConfiguration,
        List<URL> jarFiles, List<URL> globalClasspaths) {
    this.clientConfiguration = clientConfiguration;
    this.jarFiles = jarFiles;
    this.globalClasspaths = globalClasspaths;
    clientConfiguration.setString(JobManagerOptions.ADDRESS, inet.getHostName());
    clientConfiguration.setInteger(JobManagerOptions.PORT, inet.getPort());
}
Example 15: setupCluster
import org.apache.flink.configuration.Configuration; // import the package/class the method depends on
@BeforeClass
public static void setupCluster() {
    Configuration config = new Configuration();
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, PARALLELISM);
    cluster = new LocalFlinkMiniCluster(config, false);
    cluster.start();
    TestEnvironment.setAsContext(cluster, PARALLELISM);
}