This article collects typical usage examples of the Java method org.apache.flink.api.java.ExecutionEnvironment.createRemoteEnvironment. If you are wondering how to use ExecutionEnvironment.createRemoteEnvironment, what it does, or where to find examples of it, the curated code samples below should help. You can also look into the enclosing class, org.apache.flink.api.java.ExecutionEnvironment, for more context.
Six code examples of the ExecutionEnvironment.createRemoteEnvironment method are shown below, ordered by popularity.
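Before the extracted examples, a minimal sketch of the common call pattern may help. Everything in it is illustrative: the host name, port, and jar path are assumptions rather than values taken from the examples, and the variant shown is createRemoteEnvironment(String host, int port, String... jarFiles).

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

public class RemoteEnvSketch {
    public static void main(String[] args) throws Exception {
        // Connect to a remote Flink cluster. Host, port, and jar path are
        // hypothetical placeholders -- replace them with your deployment's
        // values. The jar must contain the program's user code so Flink
        // can ship it to the cluster.
        ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment(
            "flink-master.example.com", // JobManager host (assumed)
            6123,                       // JobManager RPC port (assumed)
            "/path/to/your-job.jar");   // jar with the job's classes (assumed)

        // A trivial job: count the even numbers in 1..100 on the remote cluster.
        DataSet<Long> numbers = env.generateSequence(1, 100);
        long evenCount = numbers.filter(value -> value % 2 == 0).count();
        System.out.println("Even numbers: " + evenCount);
    }
}

As the examples below show, there is also an overload that additionally accepts a Configuration object, which is useful when client-side options such as Akka timeouts need to be set.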
Example 1: testInvalidAkkaConfiguration
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class the method depends on
/**
 * Ensure that invalid Akka configuration parameters cause the remote job submission to fail.
 */
@Test(expected = FlinkException.class)
public void testInvalidAkkaConfiguration() throws Throwable {
    Configuration config = new Configuration();
    config.setString(AkkaOptions.STARTUP_TIMEOUT, INVALID_STARTUP_TIMEOUT);

    final ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment(
        cluster.getHostname(),
        cluster.getPort(),
        config);
    env.getConfig().disableSysoutLogging();

    DataSet<String> result = env.createInput(new TestNonRichInputFormat());
    result.output(new LocalCollectionOutputFormat<>(new ArrayList<String>()));

    try {
        env.execute();
        Assert.fail("Program should not run successfully because of invalid Akka settings.");
    } catch (ProgramInvocationException ex) {
        throw ex.getCause();
    }
}
Example 2: testUserSpecificParallelism
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class the method depends on
/**
 * Ensure that the program parallelism can be set even if the configuration is supplied.
 */
@Test
public void testUserSpecificParallelism() throws Exception {
    Configuration config = new Configuration();
    config.setString(AkkaOptions.STARTUP_TIMEOUT, VALID_STARTUP_TIMEOUT);

    final ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment(
        cluster.getHostname(),
        cluster.getPort(),
        config);
    env.setParallelism(USER_DOP);
    env.getConfig().disableSysoutLogging();

    DataSet<Integer> result = env.createInput(new ParallelismDependentInputFormat())
        .rebalance()
        .mapPartition(new RichMapPartitionFunction<Integer, Integer>() {
            @Override
            public void mapPartition(Iterable<Integer> values, Collector<Integer> out) throws Exception {
                out.collect(getRuntimeContext().getIndexOfThisSubtask());
            }
        });

    List<Integer> resultCollection = result.collect();
    assertEquals(USER_DOP, resultCollection.size());
}
Example 3: testRestart
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class the method depends on
@Test
public void testRestart() {
    try {
        List<Long> resultCollection = new ArrayList<Long>();

        ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment(
            "localhost", cluster.getLeaderRPCPort());
        env.setParallelism(4);
        // the default restart strategy should be taken
        env.getConfig().disableSysoutLogging();

        env.generateSequence(1, 10)
            .rebalance()
            .map(new FailingMapper2<Long>())
            .reduce(new ReduceFunction<Long>() {
                @Override
                public Long reduce(Long value1, Long value2) {
                    return value1 + value2;
                }
            })
            .output(new LocalCollectionOutputFormat<Long>(resultCollection));

        executeAndRunAssertions(env);

        long sum = 0;
        for (long l : resultCollection) {
            sum += l;
        }
        assertEquals(55, sum);
    }
    catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example 4: testRestartMultipleTimes
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class the method depends on
@Test
public void testRestartMultipleTimes() {
    try {
        List<Long> resultCollection = new ArrayList<Long>();

        ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment(
            "localhost", cluster.getLeaderRPCPort());
        env.setParallelism(4);
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, 100));
        env.getConfig().disableSysoutLogging();

        env.generateSequence(1, 10)
            .rebalance()
            .map(new FailingMapper3<Long>())
            .reduce(new ReduceFunction<Long>() {
                @Override
                public Long reduce(Long value1, Long value2) {
                    return value1 + value2;
                }
            })
            .output(new LocalCollectionOutputFormat<Long>(resultCollection));

        executeAndRunAssertions(env);

        long sum = 0;
        for (long l : resultCollection) {
            sum += l;
        }
        assertEquals(55, sum);
    }
    catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example 5: runConnectedComponents
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class the method depends on
private static void runConnectedComponents(int jmPort) throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment("localhost", jmPort);
    env.setParallelism(PARALLELISM);
    env.getConfig().disableSysoutLogging();

    // read vertex and edge data
    DataSet<Long> vertices = ConnectedComponentsData.getDefaultVertexDataSet(env)
        .rebalance();

    DataSet<Tuple2<Long, Long>> edges = ConnectedComponentsData.getDefaultEdgeDataSet(env)
        .rebalance()
        .flatMap(new ConnectedComponents.UndirectEdge());

    // assign the initial components (equal to the vertex id)
    DataSet<Tuple2<Long, Long>> verticesWithInitialId = vertices
        .map(new ConnectedComponents.DuplicateValue<Long>());

    // open a delta iteration
    DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration =
        verticesWithInitialId.iterateDelta(verticesWithInitialId, 100, 0);

    // apply the step logic: join with the edges, select the minimum neighbor,
    // update if the component of the candidate is smaller
    DataSet<Tuple2<Long, Long>> changes = iteration.getWorkset().join(edges)
        .where(0).equalTo(0)
        .with(new ConnectedComponents.NeighborWithComponentIDJoin())
        .groupBy(0).aggregate(Aggregations.MIN, 1)
        .join(iteration.getSolutionSet())
        .where(0).equalTo(0)
        .with(new ConnectedComponents.ComponentIdFilter());

    // close the delta iteration (delta and new workset are identical)
    DataSet<Tuple2<Long, Long>> result = iteration.closeWith(changes, changes);

    result.output(new DiscardingOutputFormat<Tuple2<Long, Long>>());
    env.execute();
}
Example 6: testTaskManagerFailure
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class the method depends on
@Override
public void testTaskManagerFailure(int jobManagerPort, final File coordinateDir) throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment("localhost", jobManagerPort);
    env.setParallelism(PARALLELISM);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 10000));
    env.getConfig().setExecutionMode(executionMode);
    env.getConfig().disableSysoutLogging();

    final long numElements = 100000L;
    final DataSet<Long> result = env.generateSequence(1, numElements)
        // make sure every mapper is involved (no one is skipped because of lazy split assignment)
        .rebalance()
        // the majority of the behavior is in the MapFunction
        .map(new RichMapFunction<Long, Long>() {

            private final File proceedFile = new File(coordinateDir, PROCEED_MARKER_FILE);

            private boolean markerCreated = false;
            private boolean checkForProceedFile = true;

            @Override
            public Long map(Long value) throws Exception {
                if (!markerCreated) {
                    int taskIndex = getRuntimeContext().getIndexOfThisSubtask();
                    touchFile(new File(coordinateDir, READY_MARKER_FILE_PREFIX + taskIndex));
                    markerCreated = true;
                }

                // check if the proceed file exists
                if (checkForProceedFile) {
                    if (proceedFile.exists()) {
                        checkForProceedFile = false;
                    } else {
                        // otherwise wait so that we make slow progress
                        Thread.sleep(100);
                    }
                }
                return value;
            }
        })
        .reduce(new ReduceFunction<Long>() {
            @Override
            public Long reduce(Long value1, Long value2) {
                return value1 + value2;
            }
        });

    long sum = result.collect().get(0);
    assertEquals(numElements * (numElements + 1L) / 2L, sum);
}