This article collects typical usage examples of the Java class org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster. If you are unsure what LocalFlinkMiniCluster is for or how to use it, the curated code examples below may help.
The LocalFlinkMiniCluster class belongs to the org.apache.flink.runtime.minicluster package. A total of 15 code examples of the class are shown below, ordered by popularity.
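All of the examples share the same lifecycle: build a Configuration, construct a LocalFlinkMiniCluster, call start(), and eventually tear the cluster down. As a minimal orientation sketch only (it assumes the pre-FLIP-6 test API, in particular that the mini cluster exposes a stop() method for teardown):

import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster;

public class LocalFlinkMiniClusterSketch {

    public static void main(String[] args) throws Exception {
        Configuration config = new Configuration();
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 1);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 4);

        // Second constructor argument selects whether a single shared actor
        // system is used (matching the boolean seen in the examples below).
        LocalFlinkMiniCluster cluster = new LocalFlinkMiniCluster(config, false);
        cluster.start();
        try {
            // submit a JobGraph via submitJobAndWait(...) or submitJobDetached(...)
        } finally {
            cluster.stop(); // assumption: stop() shuts the mini cluster down
        }
    }
}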
Example 1: startCluster
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
public static LocalFlinkMiniCluster startCluster(
        int numTaskManagers,
        int taskManagerNumSlots,
        boolean startWebserver,
        boolean startZooKeeper,
        boolean singleActorSystem) throws Exception {

    Configuration config = new Configuration();
    config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, numTaskManagers);
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, taskManagerNumSlots);
    config.setBoolean(ConfigConstants.LOCAL_START_WEBSERVER, startWebserver);

    if (startZooKeeper) {
        config.setInteger(ConfigConstants.LOCAL_NUMBER_JOB_MANAGER, 3);
        config.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");
    }

    return startCluster(config, singleActorSystem);
}
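The last line delegates to a startCluster(Configuration, boolean) overload that is not shown here. A hypothetical caller of this helper might look as follows; the overload and the stop() teardown are assumptions, not part of the example above:

// Hypothetical usage of the helper above; teardown via stop() is an assumption.
LocalFlinkMiniCluster miniCluster = startCluster(
    2,      // numTaskManagers
    4,      // taskManagerNumSlots
    false,  // startWebserver
    false,  // startZooKeeper
    true);  // singleActorSystem
try {
    // run test jobs against miniCluster, e.g. via submitJobAndWait(...)
} finally {
    miniCluster.stop();
}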
Example 2: setup
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
@BeforeClass
public static void setup() {
    try {
        Configuration config = new Configuration();
        config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 4L);
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 2);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_SLOTS / 2);

        cluster = new LocalFlinkMiniCluster(config);
        cluster.start();

        final JobVertex jobVertex = new JobVertex("Working job vertex.");
        jobVertex.setInvokableClass(NoOpInvokable.class);
        workingJobGraph = new JobGraph("Working testing job", jobVertex);
    }
    catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example 3: initialize
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
@BeforeClass
public static void initialize() throws Exception {
    Configuration config = new Configuration();
    config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TASK_MANAGERS);
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_SLOTS);
    config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 12L);
    config.setBoolean(ConfigConstants.LOCAL_START_WEBSERVER, true);

    File logDir = File.createTempFile("TestBaseUtils-logdir", null);
    assertTrue("Unable to delete temp file", logDir.delete());
    assertTrue("Unable to create temp directory", logDir.mkdir());

    File logFile = new File(logDir, "jobmanager.log");
    File outFile = new File(logDir, "jobmanager.out");
    Files.createFile(logFile.toPath());
    Files.createFile(outFile.toPath());

    config.setString(JobManagerOptions.WEB_LOG_PATH, logFile.getAbsolutePath());
    config.setString(ConfigConstants.TASK_MANAGER_LOG_PATH_KEY, logFile.getAbsolutePath());

    cluster = new LocalFlinkMiniCluster(config, false);
    cluster.start();

    port = cluster.webMonitor().get().getServerPort();
}
Example 4: setup
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
@BeforeClass
public static void setup() {
    try {
        Configuration config = new Configuration();
        config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 4L);
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 2);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_SLOTS / 2);

        cluster = new LocalFlinkMiniCluster(config);
        cluster.start();

        final JobVertex jobVertex = new JobVertex("Working job vertex.");
        jobVertex.setInvokableClass(NoOpInvokable.class);
        workingJobGraph = new JobGraph("Working testing job", jobVertex);
    }
    catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example 5: startCluster
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
@BeforeClass
public static void startCluster() {
    try {
        Configuration config = new Configuration();
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TASK_MANAGERS);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_TASK_SLOTS);
        config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 12L);

        cluster = new LocalFlinkMiniCluster(config, false);
        cluster.start();
    }
    catch (Exception e) {
        e.printStackTrace();
        fail("Failed to start test cluster: " + e.getMessage());
    }
}
Example 6: testProgram
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
private void testProgram(
        LocalFlinkMiniCluster localFlinkMiniCluster,
        final int dataVolumeGb,
        final boolean useForwarder,
        final boolean isSlowSender,
        final boolean isSlowReceiver,
        final int parallelism) throws Exception {

    JobExecutionResult jer = localFlinkMiniCluster.submitJobAndWait(
        createJobGraph(
            dataVolumeGb,
            useForwarder,
            isSlowSender,
            isSlowReceiver,
            parallelism),
        false);

    long dataVolumeMbit = dataVolumeGb * 8192;
    long runtimeSecs = jer.getNetRuntime(TimeUnit.SECONDS);

    int mbitPerSecond = (int) (((double) dataVolumeMbit) / runtimeSecs);

    LOG.info(String.format("Test finished with throughput of %d MBit/s (runtime [secs]: %d, " +
        "data volume [gb/mbits]: %d/%d)", mbitPerSecond, runtimeSecs, dataVolumeGb, dataVolumeMbit));
}
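The throughput arithmetic converts gigabytes to megabits before dividing by the net runtime: 1 GB = 1024 MB and 1 byte = 8 bits, so 1 GB = 8 * 1024 = 8192 Mbit, which is where the dataVolumeGb * 8192 factor comes from.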
Example 7: initialize
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
@BeforeClass
public static void initialize() throws Exception {
    Configuration config = new Configuration();
    config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TASK_MANAGERS);
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_SLOTS);
    config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 12L);
    config.setBoolean(ConfigConstants.LOCAL_START_WEBSERVER, true);

    File logDir = File.createTempFile("TestBaseUtils-logdir", null);
    assertTrue("Unable to delete temp file", logDir.delete());
    assertTrue("Unable to create temp directory", logDir.mkdir());

    File logFile = new File(logDir, "jobmanager.log");
    File outFile = new File(logDir, "jobmanager.out");
    Files.createFile(logFile.toPath());
    Files.createFile(outFile.toPath());

    config.setString(WebOptions.LOG_PATH, logFile.getAbsolutePath());
    config.setString(ConfigConstants.TASK_MANAGER_LOG_PATH_KEY, logFile.getAbsolutePath());

    cluster = new LocalFlinkMiniCluster(config, false);
    cluster.start();

    port = cluster.webMonitor().get().getServerPort();
}
Example 8: setupFlink
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
/**
 * Custom test cluster start routine,
 * workaround to set TASK_MANAGER_MEMORY_SIZE.
 *
 * TODO: remove, when future issue is fixed
 * {@see http://mail-archives.apache.org/mod_mbox/flink-dev/201511.mbox/%[email protected].com%3E}
 *
 * @throws Exception
 */
@BeforeClass
public static void setupFlink() throws Exception {
    File logDir = File.createTempFile("TestBaseUtils-logdir", null);
    Assert.assertTrue("Unable to delete temp file", logDir.delete());
    Assert.assertTrue("Unable to create temp directory", logDir.mkdir());

    Files.createFile((new File(logDir, "jobmanager.out")).toPath());
    Path logFile = Files
        .createFile((new File(logDir, "jobmanager.log")).toPath());

    Configuration config = new Configuration();
    config.setInteger("local.number-taskmanager", 1);
    config.setInteger("taskmanager.numberOfTaskSlots", DEFAULT_PARALLELISM);
    config.setBoolean("local.start-webserver", false);
    config.setLong("taskmanager.memory.size", TASKMANAGER_MEMORY_SIZE_MB);
    config.setBoolean("fs.overwrite-files", true);
    config.setString("akka.ask.timeout", "1000s");
    config.setString("akka.startup-timeout", "60 s");
    config.setInteger("jobmanager.web.port", 8081);
    config.setString("jobmanager.web.log.path", logFile.toString());

    CLUSTER = new LocalFlinkMiniCluster(config, true);
    CLUSTER.start();
}
Example 9: execute
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
static LocalFlinkMiniCluster execute(LocalStreamEnvironment env,
        Configuration conf, String jobName) throws Exception {

    StreamGraph streamGraph = env.getStreamGraph();
    streamGraph.setJobName(jobName);
    JobGraph jobGraph = streamGraph.getJobGraph();

    Configuration configuration = new Configuration(conf);
    configuration.addAll(jobGraph.getJobConfiguration());
    configuration.setLong("taskmanager.memory.size", -1L);
    configuration.setInteger("taskmanager.numberOfTaskSlots", jobGraph.getMaximumParallelism());

    LocalFlinkMiniCluster cluster = new LocalFlinkMiniCluster(configuration, true);
    cluster.start();
    cluster.submitJobDetached(jobGraph);
    return cluster;
}
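Because the job is submitted with submitJobDetached(...), the call returns immediately and the caller owns the lifecycle of the returned cluster. A hedged usage sketch follows; env, conf, and the stop() teardown are assumptions outside the example above:

// Hypothetical caller; env (LocalStreamEnvironment) and conf are assumed to exist.
LocalFlinkMiniCluster running = execute(env, conf, "detached-test-job");
try {
    // interact with the running job: poll sinks, check accumulators, etc.
} finally {
    running.stop(); // assumption: stop() tears the mini cluster down
}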
Example 10: beforeClass
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
@BeforeClass
public static void beforeClass() {
    // start also a re-usable Flink mini cluster
    Configuration flinkConfig = new Configuration();
    flinkConfig.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 1);
    flinkConfig.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 8);
    flinkConfig.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 16);
    flinkConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");

    flink = new LocalFlinkMiniCluster(flinkConfig, false);
    flink.start();

    flinkPort = flink.getLeaderRPCPort();
}
Example 11: startFlinkMiniCluster
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
private void startFlinkMiniCluster() {
    localFlinkCluster = new LocalFlinkMiniCluster(flinkConf, false);

    try {
        localFlinkCluster.start(true);
    } catch (Exception e) {
        throw new RuntimeException("Could not start Flink mini cluster.", e);
    }
}
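The start(true) variant presumably waits until the TaskManagers have registered before returning. A matching shutdown helper might look like the following sketch; the stop() call is an assumption about the mini cluster's teardown API, not part of the example:

// Hypothetical counterpart to startFlinkMiniCluster().
private void stopFlinkMiniCluster() {
    if (localFlinkCluster != null) {
        localFlinkCluster.stop(); // assumption: stop() shuts the cluster down
    }
}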
Example 12: startClusters
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
protected static void startClusters(boolean secureMode, boolean hideKafkaBehindProxy) throws ClassNotFoundException {
    // dynamically load the implementation for the test
    Class<?> clazz = Class.forName("org.apache.flink.streaming.connectors.kafka.KafkaTestEnvironmentImpl");
    kafkaServer = (KafkaTestEnvironment) InstantiationUtil.instantiate(clazz);

    LOG.info("Starting KafkaTestBase.prepare() for Kafka " + kafkaServer.getVersion());

    kafkaServer.prepare(kafkaServer.createConfig()
        .setKafkaServersNumber(NUMBER_OF_KAFKA_SERVERS)
        .setSecureMode(secureMode)
        .setHideKafkaBehindProxy(hideKafkaBehindProxy));

    standardProps = kafkaServer.getStandardProperties();
    brokerConnectionStrings = kafkaServer.getBrokerConnectionString();

    if (secureMode) {
        if (!kafkaServer.isSecureRunSupported()) {
            throw new IllegalStateException(
                "Attempting to test in secure mode but secure mode not supported by the KafkaTestEnvironment.");
        }
        secureProps = kafkaServer.getSecureProperties();
    }

    // start also a re-usable Flink mini cluster
    flink = new LocalFlinkMiniCluster(getFlinkConfiguration(), false);
    flink.start();
}
Example 13: prepare
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
@BeforeClass
public static void prepare() throws IOException, ClassNotFoundException {
    LOG.info("-------------------------------------------------------------------------");
    LOG.info(" Starting KafkaShortRetentionTestBase ");
    LOG.info("-------------------------------------------------------------------------");

    Configuration flinkConfig = new Configuration();

    // dynamically load the implementation for the test
    Class<?> clazz = Class.forName("org.apache.flink.streaming.connectors.kafka.KafkaTestEnvironmentImpl");
    kafkaServer = (KafkaTestEnvironment) InstantiationUtil.instantiate(clazz);

    LOG.info("Starting KafkaTestBase.prepare() for Kafka " + kafkaServer.getVersion());

    if (kafkaServer.isSecureRunSupported()) {
        secureProps = kafkaServer.getSecureProperties();
    }

    Properties specificProperties = new Properties();
    specificProperties.setProperty("log.retention.hours", "0");
    specificProperties.setProperty("log.retention.minutes", "0");
    specificProperties.setProperty("log.retention.ms", "250");
    specificProperties.setProperty("log.retention.check.interval.ms", "100");
    kafkaServer.prepare(kafkaServer.createConfig().setKafkaServerProperties(specificProperties));

    standardProps = kafkaServer.getStandardProperties();

    // start also a re-usable Flink mini cluster
    flinkConfig.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, NUM_TMS);
    flinkConfig.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, TM_SLOTS);
    flinkConfig.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 16L);
    flinkConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");

    flink = new LocalFlinkMiniCluster(flinkConfig, false);
    flink.start();

    TestStreamEnvironment.setAsContext(flink, PARALLELISM);
}
Example 14: startCluster
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
@Before
public void startCluster() throws Exception {
    verifyJvmOptions();

    Configuration config = new Configuration();
    config.setBoolean(ConfigConstants.FILESYSTEM_DEFAULT_OVERWRITE_KEY, true);
    config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 2);
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 4);
    config.setString(AkkaOptions.ASK_TIMEOUT, TestingUtils.DEFAULT_AKKA_ASK_TIMEOUT());
    config.setInteger(TaskManagerOptions.MEMORY_SEGMENT_SIZE, 4096);
    config.setInteger(TaskManagerOptions.NETWORK_NUM_BUFFERS, 2048);

    this.executor = new LocalFlinkMiniCluster(config, false);
    this.executor.start();
}
Example 15: startCluster
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster; // import the required package/class
@BeforeClass
public static void startCluster() {
    Configuration config = new Configuration();
    config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 2);
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 3);
    config.setLong(TaskManagerOptions.MANAGED_MEMORY_SIZE, 12L);

    cluster = new LocalFlinkMiniCluster(config, false);
    cluster.start();

    env = new TestEnvironment(cluster, 6, false);
}