本文整理汇总了Java中org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster.start方法的典型用法代码示例。如果您正苦于以下问题:Java LocalFlinkMiniCluster.start方法的具体用法?Java LocalFlinkMiniCluster.start怎么用?Java LocalFlinkMiniCluster.start使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster
的用法示例。
在下文中一共展示了LocalFlinkMiniCluster.start方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: initialize
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
@BeforeClass
public static void initialize() throws Exception {
	// Temp directory holding the job manager's log/out files for the web monitor.
	final File logDir = File.createTempFile("TestBaseUtils-logdir", null);
	assertTrue("Unable to delete temp file", logDir.delete());
	assertTrue("Unable to create temp directory", logDir.mkdir());

	final File logFile = new File(logDir, "jobmanager.log");
	final File outFile = new File(logDir, "jobmanager.out");
	Files.createFile(logFile.toPath());
	Files.createFile(outFile.toPath());

	// Mini-cluster configuration: fixed TM/slot counts, tiny managed memory,
	// and the web server enabled so the monitor port can be queried below.
	final Configuration config = new Configuration();
	config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TASK_MANAGERS);
	config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_SLOTS);
	config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 12L);
	config.setBoolean(ConfigConstants.LOCAL_START_WEBSERVER, true);
	config.setString(JobManagerOptions.WEB_LOG_PATH, logFile.getAbsolutePath());
	config.setString(ConfigConstants.TASK_MANAGER_LOG_PATH_KEY, logFile.getAbsolutePath());

	cluster = new LocalFlinkMiniCluster(config, false);
	cluster.start();
	port = cluster.webMonitor().get().getServerPort();
}
示例2: setup
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
@BeforeClass
public static void setup() throws Exception {
	// Let startup failures propagate with their full stack trace instead of the
	// original catch { printStackTrace(); fail(e.getMessage()); } pattern, which
	// loses the cause chain and can even call fail(null). JUnit reports any
	// exception thrown from a @BeforeClass method directly.
	Configuration config = new Configuration();
	config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 4L);
	config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 2);
	config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_SLOTS / 2);

	cluster = new LocalFlinkMiniCluster(config);
	cluster.start();

	// A trivially succeeding job graph used as the "known good" job by the tests.
	final JobVertex jobVertex = new JobVertex("Working job vertex.");
	jobVertex.setInvokableClass(NoOpInvokable.class);
	workingJobGraph = new JobGraph("Working testing job", jobVertex);
}
示例3: setupFlink
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
/**
 * Custom test-cluster start routine; works around not being able to set
 * TASK_MANAGER_MEMORY_SIZE through the standard test base utilities.
 *
 * <p>TODO: remove when the upstream issue is fixed. The original reference was a
 * flink-dev mailing-list thread from 2015-11; the archived URL was mangled by
 * e-mail-address obfuscation and could not be reconstructed — see the
 * flink-dev archive for November 2015.
 *
 * @throws Exception if the temp log directory or the mini cluster cannot be created
 */
@BeforeClass
public static void setupFlink() throws Exception {
// Web monitor requires existing jobmanager.log / jobmanager.out files.
File logDir = File.createTempFile("TestBaseUtils-logdir", null);
Assert.assertTrue("Unable to delete temp file", logDir.delete());
Assert.assertTrue("Unable to create temp directory", logDir.mkdir());
Files.createFile((new File(logDir, "jobmanager.out")).toPath());
Path logFile = Files
.createFile((new File(logDir, "jobmanager.log")).toPath());
// String config keys are used deliberately here (see method javadoc).
Configuration config = new Configuration();
config.setInteger("local.number-taskmanager", 1);
config.setInteger("taskmanager.numberOfTaskSlots", DEFAULT_PARALLELISM);
config.setBoolean("local.start-webserver", false);
config.setLong("taskmanager.memory.size", TASKMANAGER_MEMORY_SIZE_MB);
config.setBoolean("fs.overwrite-files", true);
// Generous Akka timeouts so slow CI machines do not fail cluster startup.
config.setString("akka.ask.timeout", "1000s");
config.setString("akka.startup-timeout", "60 s");
config.setInteger("jobmanager.web.port", 8081);
config.setString("jobmanager.web.log.path", logFile.toString());
CLUSTER = new LocalFlinkMiniCluster(config, true);
CLUSTER.start();
}
示例4: execute
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
/**
 * Translates the environment's streaming program into a job graph, starts a
 * dedicated local mini cluster sized to the job, and submits the job detached.
 * The caller owns the returned cluster and is responsible for stopping it.
 */
static LocalFlinkMiniCluster execute(LocalStreamEnvironment env,
		Configuration conf, String jobName) throws Exception {
	final StreamGraph graph = env.getStreamGraph();
	graph.setJobName(jobName);
	final JobGraph job = graph.getJobGraph();

	final Configuration clusterConfig = new Configuration(conf);
	clusterConfig.addAll(job.getJobConfiguration());
	clusterConfig.setLong("taskmanager.memory.size", -1L);
	// Enough slots on the single TM for every pipelined task of the job.
	clusterConfig.setInteger("taskmanager.numberOfTaskSlots", job.getMaximumParallelism());

	final LocalFlinkMiniCluster miniCluster = new LocalFlinkMiniCluster(clusterConfig, true);
	miniCluster.start();
	miniCluster.submitJobDetached(job);
	return miniCluster;
}
示例5: beforeClass
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
@BeforeClass
public static void beforeClass() {
	// Bring up a single-TM Flink mini cluster shared by all tests in this class.
	final Configuration cfg = new Configuration();
	cfg.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 1);
	cfg.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 8);
	cfg.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 16);
	cfg.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");

	flink = new LocalFlinkMiniCluster(cfg, false);
	flink.start();
	flinkPort = flink.getLeaderRPCPort();
}
示例6: startFlinkMiniCluster
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
/** Starts the local Flink mini cluster from {@code flinkConf}, wrapping any startup failure. */
private void startFlinkMiniCluster() {
	final LocalFlinkMiniCluster miniCluster = new LocalFlinkMiniCluster(flinkConf, false);
	localFlinkCluster = miniCluster;
	try {
		miniCluster.start(true);
	} catch (Exception e) {
		throw new RuntimeException("Could not start Flink mini cluster.", e);
	}
}
示例7: execute
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
/**
 * Executes the streaming program's JobGraph on a local embedded mini cluster
 * under a user-specified job name.
 *
 * @param jobName
 *            name of the job
 * @return The result of the job execution, containing elapsed time and accumulators.
 * @throws Exception if the mini cluster cannot be started or the job fails
 */
@Override
public JobExecutionResult execute(String jobName) throws Exception {
// transform the streaming program into a JobGraph
StreamGraph streamGraph = getStreamGraph();
streamGraph.setJobName(jobName);
JobGraph jobGraph = streamGraph.getJobGraph();
Configuration configuration = new Configuration();
configuration.addAll(jobGraph.getJobConfiguration());
// -1L presumably lets the cluster pick the managed memory size itself — TODO confirm
configuration.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, -1L);
// one slot per pipelined task so the whole job can be scheduled at once
configuration.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, jobGraph.getMaximumParallelism());
// add (and override) the settings with what the user defined
configuration.addAll(this.conf);
if (LOG.isInfoEnabled()) {
LOG.info("Running job on local embedded Flink mini cluster");
}
LocalFlinkMiniCluster exec = new LocalFlinkMiniCluster(configuration, true);
try {
exec.start();
return exec.submitJobAndWait(jobGraph, getConfig().isSysoutLoggingEnabled());
}
finally {
// always clear the recorded transformations and stop the cluster,
// so this environment can be reused even after a failed submission
transformations.clear();
exec.stop();
}
}
示例8: prepare
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
// Starts a Kafka test environment with very aggressive log retention
// (records expire after ~250 ms) plus a shared Flink mini cluster, and
// installs the cluster as the context for TestStreamEnvironment.
@BeforeClass
public static void prepare() throws IOException, ClassNotFoundException {
LOG.info("-------------------------------------------------------------------------");
LOG.info("    Starting KafkaShortRetentionTestBase ");
LOG.info("-------------------------------------------------------------------------");
Configuration flinkConfig = new Configuration();
// dynamically load the implementation for the test
Class<?> clazz = Class.forName("org.apache.flink.streaming.connectors.kafka.KafkaTestEnvironmentImpl");
kafkaServer = (KafkaTestEnvironment) InstantiationUtil.instantiate(clazz);
LOG.info("Starting KafkaTestBase.prepare() for Kafka " + kafkaServer.getVersion());
if (kafkaServer.isSecureRunSupported()) {
secureProps = kafkaServer.getSecureProperties();
}
// Force near-immediate log retention so short-retention behavior can be tested:
// hours/minutes set to 0, records kept for only 250 ms, checked every 100 ms.
Properties specificProperties = new Properties();
specificProperties.setProperty("log.retention.hours", "0");
specificProperties.setProperty("log.retention.minutes", "0");
specificProperties.setProperty("log.retention.ms", "250");
specificProperties.setProperty("log.retention.check.interval.ms", "100");
kafkaServer.prepare(kafkaServer.createConfig().setKafkaServerProperties(specificProperties));
standardProps = kafkaServer.getStandardProperties();
// start also a re-usable Flink mini cluster
flinkConfig.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TMS);
flinkConfig.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, TM_SLOTS);
flinkConfig.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 16L);
flinkConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");
flink = new LocalFlinkMiniCluster(flinkConfig, false);
flink.start();
TestStreamEnvironment.setAsContext(flink, PARALLELISM);
}
示例9: startCluster
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
/**
 * Starts a local Flink mini cluster with web monitor log files prepared in a
 * fresh temp directory. The caller owns the returned cluster.
 *
 * @param config base configuration, augmented in place with test defaults
 * @param singleActorSystem whether JM and TMs share one actor system
 */
public static LocalFlinkMiniCluster startCluster(
		Configuration config,
		boolean singleActorSystem) throws Exception {
	// The web monitor expects jobmanager.log / jobmanager.out to exist.
	logDir = File.createTempFile("TestBaseUtils-logdir", null);
	Assert.assertTrue("Unable to delete temp file", logDir.delete());
	Assert.assertTrue("Unable to create temp directory", logDir.mkdir());
	final Path logFile = Files.createFile(new File(logDir, "jobmanager.log").toPath());
	Files.createFile(new File(logDir, "jobmanager.out").toPath());

	config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, TASK_MANAGER_MEMORY_SIZE);
	config.setBoolean(ConfigConstants.FILESYSTEM_DEFAULT_OVERWRITE_KEY, true);
	config.setString(AkkaOptions.ASK_TIMEOUT, DEFAULT_AKKA_ASK_TIMEOUT + "s");
	config.setString(AkkaOptions.STARTUP_TIMEOUT, DEFAULT_AKKA_STARTUP_TIMEOUT);
	config.setInteger(WebOptions.PORT, 8081);
	config.setString(WebOptions.LOG_PATH, logFile.toString());
	config.setString(ConfigConstants.TASK_MANAGER_LOG_PATH_KEY, logFile.toString());

	final LocalFlinkMiniCluster miniCluster = new LocalFlinkMiniCluster(config, singleActorSystem);
	miniCluster.start();
	return miniCluster;
}
示例10: prepare
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
@BeforeClass
public static void prepare() throws IOException, ClassNotFoundException {
	LOG.info("-------------------------------------------------------------------------");
	LOG.info("    Starting KafkaShortRetentionTestBase ");
	LOG.info("-------------------------------------------------------------------------");

	// Load the Kafka test environment implementation reflectively so the base
	// class stays independent of the concrete Kafka version under test.
	final Class<?> implClass =
			Class.forName("org.apache.flink.streaming.connectors.kafka.KafkaTestEnvironmentImpl");
	kafkaServer = (KafkaTestEnvironment) InstantiationUtil.instantiate(implClass);
	LOG.info("Starting KafkaTestBase.prepare() for Kafka " + kafkaServer.getVersion());

	if (kafkaServer.isSecureRunSupported()) {
		secureProps = kafkaServer.getSecureProperties();
	}

	// Near-immediate log retention: records expire after 250 ms, checked every 100 ms.
	final Properties retentionProps = new Properties();
	retentionProps.setProperty("log.retention.hours", "0");
	retentionProps.setProperty("log.retention.minutes", "0");
	retentionProps.setProperty("log.retention.ms", "250");
	retentionProps.setProperty("log.retention.check.interval.ms", "100");
	kafkaServer.prepare(1, retentionProps, false);
	standardProps = kafkaServer.getStandardProperties();

	// start also a re-usable Flink mini cluster
	final Configuration flinkConfig = new Configuration();
	flinkConfig.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 1);
	flinkConfig.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 8);
	flinkConfig.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 16);
	flinkConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");
	flink = new LocalFlinkMiniCluster(flinkConfig, false);
	flink.start();
}
示例11: startCluster
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
@BeforeClass
public static void startCluster() {
	// 2 TMs x 3 slots = 6 total slots, matching the parallelism given to the
	// TestEnvironment below.
	final Configuration cfg = new Configuration();
	cfg.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 2);
	cfg.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 3);
	cfg.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 12L);

	cluster = new LocalFlinkMiniCluster(cfg, false);
	cluster.start();
	env = new TestEnvironment(cluster, 6, false);
}
示例12: testCollect
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
@Test
public void testCollect() throws Exception {
	final LocalFlinkMiniCluster cluster = new LocalFlinkMiniCluster(new Configuration(), false);
	try {
		cluster.start();
		TestStreamEnvironment.setAsContext(cluster, 1);

		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		final long n = 10;
		final DataStream<Long> stream = env.generateSequence(1, n);

		// Collect the stream back to the client and verify 1..n arrive in order.
		final Iterator<Long> results = DataStreamUtils.collect(stream);
		long expected = 1;
		while (results.hasNext()) {
			final long received = results.next();
			assertEquals("received wrong element", expected, received);
			expected++;
		}
		// After n elements the counter has advanced to n + 1.
		assertEquals("received wrong number of elements", n + 1, expected);
	}
	finally {
		TestStreamEnvironment.unsetAsContext();
		cluster.stop();
	}
}
示例13: startCluster
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
@BeforeClass
public static void startCluster() {
	// 2 TMs x 3 slots back the shared TestEnvironment created below.
	final Configuration cfg = new Configuration();
	cfg.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 2);
	cfg.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 3);
	cfg.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 12L);

	cluster = new LocalFlinkMiniCluster(cfg, false);
	cluster.start();
	env = new TestEnvironment(cluster, PARALLELISM, false);
}
示例14: setupCluster
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
@BeforeClass
public static void setupCluster() {
	// Single TM with one slot per degree of parallelism; installed as the
	// execution context for all tests in this class.
	final Configuration cfg = new Configuration();
	cfg.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, PARALLELISM);

	cluster = new LocalFlinkMiniCluster(cfg, false);
	cluster.start();
	TestEnvironment.setAsContext(cluster, PARALLELISM);
}
示例15: startCluster
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; //导入方法依赖的package包/类
@BeforeClass
public static void startCluster() throws Exception {
	// Shared mini cluster sized by the class constants for TM and slot counts.
	final Configuration cfg = new Configuration();
	cfg.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TASK_MANAGERS);
	cfg.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_TASK_SLOTS);
	cfg.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 12L);

	cluster = new LocalFlinkMiniCluster(cfg, false);
	cluster.start();
}