This article collects typical usage examples of the Java method org.apache.flink.configuration.Configuration.setLong. If you are wondering how Configuration.setLong is used in practice, or what concrete examples of Configuration.setLong look like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.flink.configuration.Configuration.
The following shows 15 code examples of the Configuration.setLong method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
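Before diving into the examples, here is a minimal sketch of the two setLong call patterns that appear throughout this article: the typed overload that takes a ConfigOption&lt;Long&gt;, and the overload that takes the raw key string. The class name SetLongSketch and the chosen values are illustrative assumptions; the options themselves are taken from the examples below.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.TaskManagerOptions;

public class SetLongSketch {
    public static void main(String[] args) {
        Configuration config = new Configuration();

        // Typed overload: pass a ConfigOption<Long> together with the value.
        config.setLong(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT, 50L);

        // String-key overload: pass the raw configuration key, as some examples below do.
        config.setLong(TaskManagerOptions.TASK_CANCELLATION_INTERVAL.key(), 5L);

        // Values are read back through the matching getLong accessors.
        long timeout = config.getLong(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT);
        System.out.println("task cancellation timeout = " + timeout);
    }
}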
Example 1: testInterruptableSharedLockInInvokeAndCancel

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * The invoke() method holds a lock (trigger awaitLatch after acquisition)
 * and cancel cannot complete because it also tries to acquire the same lock.
 * This is resolved by the watch dog, no fatal error.
 */
@Test
public void testInterruptableSharedLockInInvokeAndCancel() throws Exception {
    Configuration config = new Configuration();
    config.setLong(TaskManagerOptions.TASK_CANCELLATION_INTERVAL, 5);
    config.setLong(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT, 50);
    Task task = createTask(InvokableInterruptableSharedLockInInvokeAndCancel.class, config);
    task.startTaskThread();
    awaitLatch.await();
    task.cancelExecution();
    task.getExecutingThread().join();
    // No fatal error
    for (Object msg : taskManagerMessages) {
        assertFalse("Unexpected FatalError message", msg instanceof TaskManagerMessages.FatalError);
    }
}
Example 2: testWatchDogInterruptsTask

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * Tests that the interrupt happens via the watch dog if the canceller is stuck in cancel.
 * Task cancellation blocks the task canceller. Interrupt after cancel via
 * cancellation watch dog.
 */
@Test
public void testWatchDogInterruptsTask() throws Exception {
    Configuration config = new Configuration();
    config.setLong(TaskManagerOptions.TASK_CANCELLATION_INTERVAL.key(), 5);
    config.setLong(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT.key(), 60 * 1000);
    Task task = createTask(InvokableBlockingInCancel.class, config);
    task.startTaskThread();
    awaitLatch.await();
    task.cancelExecution();
    task.getExecutingThread().join();
    // No fatal error
    for (Object msg : taskManagerMessages) {
        assertFalse("Unexpected FatalError message", msg instanceof TaskManagerMessages.FatalError);
    }
}
Example 3: initialize

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
@BeforeClass
public static void initialize() throws Exception {
    Configuration config = new Configuration();
    config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TASK_MANAGERS);
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_SLOTS);
    config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 12L);
    config.setBoolean(ConfigConstants.LOCAL_START_WEBSERVER, true);
    File logDir = File.createTempFile("TestBaseUtils-logdir", null);
    assertTrue("Unable to delete temp file", logDir.delete());
    assertTrue("Unable to create temp directory", logDir.mkdir());
    File logFile = new File(logDir, "jobmanager.log");
    File outFile = new File(logDir, "jobmanager.out");
    Files.createFile(logFile.toPath());
    Files.createFile(outFile.toPath());
    config.setString(JobManagerOptions.WEB_LOG_PATH, logFile.getAbsolutePath());
    config.setString(ConfigConstants.TASK_MANAGER_LOG_PATH_KEY, logFile.getAbsolutePath());
    cluster = new LocalFlinkMiniCluster(config, false);
    cluster.start();
    port = cluster.webMonitor().get().getServerPort();
}
Example 4: hasNewNetworkBufConfMixed

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * Verifies that {@link TaskManagerServicesConfiguration#hasNewNetworkBufConf(Configuration)}
 * returns the correct result for mixed old/new configurations.
 */
@SuppressWarnings("deprecation")
@Test
public void hasNewNetworkBufConfMixed() throws Exception {
    Configuration config = new Configuration();
    assertTrue(TaskManagerServicesConfiguration.hasNewNetworkBufConf(config));
    config.setInteger(TaskManagerOptions.NETWORK_NUM_BUFFERS, 1);
    assertFalse(TaskManagerServicesConfiguration.hasNewNetworkBufConf(config));
    // old + 1 new parameter = new:
    Configuration config1 = config.clone();
    config1.setFloat(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_FRACTION, 0.1f);
    assertTrue(TaskManagerServicesConfiguration.hasNewNetworkBufConf(config1));
    config1 = config.clone();
    config1.setLong(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MIN, 1024);
    assertTrue(TaskManagerServicesConfiguration.hasNewNetworkBufConf(config1));
    config1 = config.clone();
    config1.setLong(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MAX, 1024);
    assertTrue(TaskManagerServicesConfiguration.hasNewNetworkBufConf(config1));
}
Example 5: setup

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
@BeforeClass
public static void setup() {
    try {
        Configuration config = new Configuration();
        config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 4L);
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TMS);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_SLOTS_PER_TM);
        config.setInteger(QueryableStateOptions.CLIENT_NETWORK_THREADS, 1);
        config.setBoolean(QueryableStateOptions.SERVER_ENABLE, true);
        config.setInteger(QueryableStateOptions.SERVER_NETWORK_THREADS, 1);
        cluster = new TestingCluster(config, false);
        cluster.start(true);
        testActorSystem = AkkaUtils.createDefaultActorSystem();
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example 6: setup

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
public static void setup(int proxyPortRangeStart, int serverPortRangeStart) {
    try {
        Configuration config = new Configuration();
        config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 4L);
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TMS);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_SLOTS_PER_TM);
        config.setInteger(QueryableStateOptions.CLIENT_NETWORK_THREADS, 1);
        config.setInteger(QueryableStateOptions.PROXY_NETWORK_THREADS, 1);
        config.setInteger(QueryableStateOptions.SERVER_NETWORK_THREADS, 1);
        config.setString(QueryableStateOptions.PROXY_PORT_RANGE, proxyPortRangeStart + "-" + (proxyPortRangeStart + NUM_TMS));
        config.setString(QueryableStateOptions.SERVER_PORT_RANGE, serverPortRangeStart + "-" + (serverPortRangeStart + NUM_TMS));
        cluster = new TestingCluster(config, false);
        cluster.start(true);
        client = new QueryableStateClient("localhost", proxyPortRangeStart);
        // verify that we are not in HA mode
        Assert.assertTrue(cluster.haMode() == HighAvailabilityMode.NONE);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example 7: setup

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
public static void setup(int proxyPortRangeStart, int serverPortRangeStart) {
    try {
        Configuration config = new Configuration();
        config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 4L);
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TMS);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_SLOTS_PER_TM);
        config.setInteger(QueryableStateOptions.CLIENT_NETWORK_THREADS, 1);
        config.setBoolean(QueryableStateOptions.SERVER_ENABLE, true);
        config.setInteger(QueryableStateOptions.SERVER_NETWORK_THREADS, 1);
        config.setString(QueryableStateOptions.PROXY_PORT_RANGE, proxyPortRangeStart + "-" + (proxyPortRangeStart + NUM_TMS));
        config.setString(QueryableStateOptions.SERVER_PORT_RANGE, serverPortRangeStart + "-" + (serverPortRangeStart + NUM_TMS));
        cluster = new TestingCluster(config, false);
        cluster.start(true);
        client = new QueryableStateClient("localhost", proxyPortRangeStart);
        // verify that we are not in HA mode
        Assert.assertTrue(cluster.haMode() == HighAvailabilityMode.NONE);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example 8: startCluster

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
@BeforeClass
public static void startCluster() {
    try {
        Configuration config = new Configuration();
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TASK_MANAGERS);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_TASK_SLOTS);
        config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 12L);
        cluster = new LocalFlinkMiniCluster(config, false);
        cluster.start();
    }
    catch (Exception e) {
        e.printStackTrace();
        fail("Failed to start test cluster: " + e.getMessage());
    }
}
Example 9: calculateHeapSizeMB

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * Test for {@link TaskManagerServices#calculateHeapSizeMB(long, Configuration)} with some
 * manually calculated scenarios.
 */
@Test
public void calculateHeapSizeMB() throws Exception {
    Configuration config = new Configuration();
    config.setFloat(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_FRACTION, 0.1f);
    config.setLong(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MIN, 64L << 20); // 64 MB
    config.setLong(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MAX, 1L << 30); // 1 GB
    config.setBoolean(TaskManagerOptions.MEMORY_OFF_HEAP, false);
    assertEquals(900, TaskManagerServices.calculateHeapSizeMB(1000, config));
    config.setBoolean(TaskManagerOptions.MEMORY_OFF_HEAP, false);
    config.setFloat(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_FRACTION, 0.2f);
    assertEquals(800, TaskManagerServices.calculateHeapSizeMB(1000, config));
    config.setBoolean(TaskManagerOptions.MEMORY_OFF_HEAP, true);
    config.setFloat(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_FRACTION, 0.1f);
    config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 10); // 10 MB
    assertEquals(890, TaskManagerServices.calculateHeapSizeMB(1000, config));
    config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, -1); // use fraction of given memory
    config.setFloat(TaskManagerOptions.MANAGED_MEMORY_FRACTION, 0.1f); // 10%
    assertEquals(810, TaskManagerServices.calculateHeapSizeMB(1000, config));
}
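If you want to check the assertions in Example 9 by hand, the arithmetic implied by the test appears to be: the network buffer memory is the total memory times the network fraction (clamped between the configured min and max), and the managed memory is only subtracted from the heap when off-heap memory is enabled. For the 1000 MB input that gives 1000 - 100 (10% network) = 900; 1000 - 200 (20% network) = 800; 1000 - 100 - 10 (off-heap, fixed 10 MB managed memory) = 890; and 1000 - 100 - 90 (off-heap, 10% of the remaining 900 MB as managed memory) = 810.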
Example 10: execute

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
static LocalFlinkMiniCluster execute(LocalStreamEnvironment env,
        Configuration conf, String jobName) throws Exception {
    StreamGraph streamGraph = env.getStreamGraph();
    streamGraph.setJobName(jobName);
    JobGraph jobGraph = streamGraph.getJobGraph();
    Configuration configuration = new Configuration(conf);
    configuration.addAll(jobGraph.getJobConfiguration());
    configuration.setLong("taskmanager.memory.size", -1L);
    configuration.setInteger("taskmanager.numberOfTaskSlots", jobGraph.getMaximumParallelism());
    LocalFlinkMiniCluster cluster = new LocalFlinkMiniCluster(configuration, true);
    cluster.start();
    cluster.submitJobDetached(jobGraph);
    return cluster;
}
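Note that this example uses the String-key overload of setLong rather than a ConfigOption. Setting "taskmanager.memory.size" to -1L appears to leave the managed memory size unset so that the fraction-based default is used instead, matching the "use fraction of given memory" comment in Example 9.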
Example 11: setAsContext

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
public static void setAsContext() {
    Configuration config = new Configuration();
    // the default network buffer size (10% of heap max =~ 150 MB) seems too much for this test case
    config.setLong(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MAX, 80L << 20); // 80 MB
    final LocalEnvironment le = new LocalEnvironment(config);
    initializeContextEnvironment(new ExecutionEnvironmentFactory() {
        @Override
        public ExecutionEnvironment createExecutionEnvironment() {
            return le;
        }
    });
}
Example 12: prepare

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
@BeforeClass
public static void prepare() throws IOException, ClassNotFoundException {
    LOG.info("-------------------------------------------------------------------------");
    LOG.info(" Starting KafkaShortRetentionTestBase ");
    LOG.info("-------------------------------------------------------------------------");
    Configuration flinkConfig = new Configuration();
    // dynamically load the implementation for the test
    Class<?> clazz = Class.forName("org.apache.flink.streaming.connectors.kafka.KafkaTestEnvironmentImpl");
    kafkaServer = (KafkaTestEnvironment) InstantiationUtil.instantiate(clazz);
    LOG.info("Starting KafkaTestBase.prepare() for Kafka " + kafkaServer.getVersion());
    if (kafkaServer.isSecureRunSupported()) {
        secureProps = kafkaServer.getSecureProperties();
    }
    Properties specificProperties = new Properties();
    specificProperties.setProperty("log.retention.hours", "0");
    specificProperties.setProperty("log.retention.minutes", "0");
    specificProperties.setProperty("log.retention.ms", "250");
    specificProperties.setProperty("log.retention.check.interval.ms", "100");
    kafkaServer.prepare(kafkaServer.createConfig().setKafkaServerProperties(specificProperties));
    standardProps = kafkaServer.getStandardProperties();
    // also start a re-usable Flink mini cluster
    flinkConfig.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TMS);
    flinkConfig.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, TM_SLOTS);
    flinkConfig.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 16L);
    flinkConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");
    flink = new LocalFlinkMiniCluster(flinkConfig, false);
    flink.start();
    TestStreamEnvironment.setAsContext(flink, PARALLELISM);
}
Example 13: startCluster

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
public static LocalFlinkMiniCluster startCluster(
        Configuration config,
        boolean singleActorSystem) throws Exception {
    logDir = File.createTempFile("TestBaseUtils-logdir", null);
    Assert.assertTrue("Unable to delete temp file", logDir.delete());
    Assert.assertTrue("Unable to create temp directory", logDir.mkdir());
    Path logFile = Files.createFile(new File(logDir, "jobmanager.log").toPath());
    Files.createFile(new File(logDir, "jobmanager.out").toPath());
    config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, TASK_MANAGER_MEMORY_SIZE);
    config.setBoolean(ConfigConstants.FILESYSTEM_DEFAULT_OVERWRITE_KEY, true);
    config.setString(AkkaOptions.ASK_TIMEOUT, DEFAULT_AKKA_ASK_TIMEOUT + "s");
    config.setString(AkkaOptions.STARTUP_TIMEOUT, DEFAULT_AKKA_STARTUP_TIMEOUT);
    config.setInteger(WebOptions.PORT, 8081);
    config.setString(WebOptions.LOG_PATH, logFile.toString());
    config.setString(ConfigConstants.TASK_MANAGER_LOG_PATH_KEY, logFile.toString());
    LocalFlinkMiniCluster cluster = new LocalFlinkMiniCluster(config, singleActorSystem);
    cluster.start();
    return cluster;
}
Example 14: testStartupWhenNetworkStackFailsToInitialize

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * Tests that the task manager start-up fails if the network stack cannot be initialized.
 *
 * @throws Exception
 */
@Test(expected = IOException.class)
public void testStartupWhenNetworkStackFailsToInitialize() throws Exception {
    ServerSocket blocker = null;
    try {
        blocker = new ServerSocket(0, 50, InetAddress.getByName("localhost"));
        final Configuration cfg = new Configuration();
        cfg.setString(ConfigConstants.TASK_MANAGER_HOSTNAME_KEY, "localhost");
        cfg.setInteger(ConfigConstants.TASK_MANAGER_DATA_PORT_KEY, blocker.getLocalPort());
        cfg.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 1L);
        TaskManager.startTaskManagerComponentsAndActor(
            cfg,
            ResourceID.generate(),
            null,
            highAvailabilityServices,
            NoOpMetricRegistry.INSTANCE,
            "localhost",
            Option.<String>empty(),
            false,
            TaskManager.class);
    }
    finally {
        if (blocker != null) {
            try {
                blocker.close();
            }
            catch (IOException e) {
                // ignore, best effort
            }
        }
    }
}
Example 15: testFatalErrorAfterUninterruptibleInvoke

import org.apache.flink.configuration.Configuration; // import the package/class this method depends on
/**
 * The invoke() method blocks infinitely, but cancel() does not block. Only
 * resolved by a fatal error.
 */
@Test
public void testFatalErrorAfterUninterruptibleInvoke() throws Exception {
    Configuration config = new Configuration();
    config.setLong(TaskManagerOptions.TASK_CANCELLATION_INTERVAL, 5);
    config.setLong(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT, 50);
    Task task = createTask(InvokableUninterruptibleBlockingInvoke.class, config);
    try {
        task.startTaskThread();
        awaitLatch.await();
        task.cancelExecution();
        for (int i = 0; i < 10; i++) {
            Object msg = taskManagerMessages.poll(1, TimeUnit.SECONDS);
            if (msg instanceof TaskManagerMessages.FatalError) {
                return; // success
            }
        }
        fail("Did not receive expected task manager message");
    } finally {
        // Interrupt again to clean up the thread
        cancelLatch.trigger();
        task.getExecutingThread().interrupt();
        task.getExecutingThread().join();
    }
}