This article collects typical usage examples of the Java method org.apache.flink.api.java.ExecutionEnvironment.createLocalEnvironment. If you are wondering what ExecutionEnvironment.createLocalEnvironment does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.flink.api.java.ExecutionEnvironment.
The sections below show 15 code examples of the ExecutionEnvironment.createLocalEnvironment method, sorted by popularity by default.
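Before the examples, here is a minimal sketch of the method's basic usage: createLocalEnvironment creates an ExecutionEnvironment that executes the program in the local JVM, which is convenient for tests and experiments. This is an illustrative sketch assuming the DataSet API; the class name LocalEnvironmentSketch, the sample data, and the parallelism of 2 are assumptions, not taken from the examples that follow.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

public class LocalEnvironmentSketch {
    public static void main(String[] args) throws Exception {
        // create an environment that runs the program in the current JVM;
        // the parallelism of 2 is an arbitrary illustrative choice
        ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment(2);

        DataSet<String> words = env.fromElements("flink", "local", "environment");

        // print() is an eager sink: it triggers execution itself,
        // so no separate env.execute() call is needed here
        words.map(new MapFunction<String, String>() {
            @Override
            public String map(String value) {
                return value.toUpperCase();
            }
        }).print();
    }
}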
Example 1: checkJoinWithReplicatedSourceInputBehindReduce
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
/**
 * Tests that the compiler fails for a join program with a replicated data source behind a reduce.
 */
@Test(expected = CompilerException.class)
public void checkJoinWithReplicatedSourceInputBehindReduce() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    TupleTypeInfo<Tuple1<String>> typeInfo = TupleTypeInfo.getBasicTupleTypeInfo(String.class);
    ReplicatingInputFormat<Tuple1<String>, FileInputSplit> rif =
            new ReplicatingInputFormat<Tuple1<String>, FileInputSplit>(
                    new TupleCsvInputFormat<Tuple1<String>>(new Path("/some/path"), typeInfo));

    DataSet<Tuple1<String>> source1 = env.createInput(rif, new TupleTypeInfo<Tuple1<String>>(BasicTypeInfo.STRING_TYPE_INFO));
    DataSet<Tuple1<String>> source2 = env.readCsvFile("/some/otherpath").types(String.class);

    DataSink<Tuple2<Tuple1<String>, Tuple1<String>>> out = source1
            .reduce(new LastReduce())
            .join(source2).where("*").equalTo("*")
            .writeAsText("/some/newpath");

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler; the reduce breaks the replication
    // property of the source, so compilation is expected to fail
    OptimizedPlan oPlan = compileNoStats(plan);
}
Example 2: checkJoinWithReplicatedSourceInputBehindRebalance
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
/**
 * Tests that the compiler fails for a join program with a replicated data source behind a rebalance.
 */
@Test(expected = CompilerException.class)
public void checkJoinWithReplicatedSourceInputBehindRebalance() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    TupleTypeInfo<Tuple1<String>> typeInfo = TupleTypeInfo.getBasicTupleTypeInfo(String.class);
    ReplicatingInputFormat<Tuple1<String>, FileInputSplit> rif =
            new ReplicatingInputFormat<Tuple1<String>, FileInputSplit>(
                    new TupleCsvInputFormat<Tuple1<String>>(new Path("/some/path"), typeInfo));

    DataSet<Tuple1<String>> source1 = env.createInput(rif, new TupleTypeInfo<Tuple1<String>>(BasicTypeInfo.STRING_TYPE_INFO));
    DataSet<Tuple1<String>> source2 = env.readCsvFile("/some/otherpath").types(String.class);

    DataSink<Tuple2<Tuple1<String>, Tuple1<String>>> out = source1
            .rebalance()
            .join(source2).where("*").equalTo("*")
            .writeAsText("/some/newpath");

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler; the rebalance breaks the replication
    // property of the source, so compilation is expected to fail
    OptimizedPlan oPlan = compileNoStats(plan);
}
Example 3: main
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] argv) throws Exception {
    // BasicConfigurator.configure(new ConsoleAppender(new PatternLayout()));

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();

    OSMPBFWayInputFormat iformat = new OSMPBFWayInputFormat();
    iformat.setFilePath("C:/projets/OSMImport/france-latest.osm.pbf");
    // iformat.setFilePath("C:/projets/OSMImport/rhone-alpes-latest.osm.pbf");

    FileInputSplit[] s = iformat.createInputSplits(4);

    DataSource<WayEntity> r = env.createInput(iformat, new GenericTypeInfo<WayEntity>(WayEntity.class));
    r.flatMap(new FlatMapFunction<WayEntity, Tuple2<Long, String>>() {
        @Override
        public void flatMap(WayEntity value, Collector<Tuple2<Long, String>> out) throws Exception {
            // emit an (id, type) pair for every way that carries a "type" field
            if (value.fields != null && value.fields.containsKey("type")) {
                out.collect(new Tuple2<>(value.id, (String) value.fields.get("type")));
            }
        }
    }).writeAsCsv("test.csv");

    env.execute();
}
Example 4: checkSinglePartitionedOrderedSource3
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void checkSinglePartitionedOrderedSource3() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data =
            env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(0)
            .splitsOrderedBy(new int[]{1}, new Order[]{Order.ASCENDING});

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(0)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example 5: checkSinglePartitionedSource7
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void checkSinglePartitionedSource7() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data =
            env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy("byDate", 1, 0);

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(0, 1)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING);
    Assert.assertTrue(gprops.getCustomPartitioner() != null);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example 6: checkSinglePartitionedOrderedSource7
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void checkSinglePartitionedOrderedSource7() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple3<Long, SomePojo, String>> data = env.fromCollection(tuple3PojoData, tuple3PojoType);

    data.getSplitDataProperties()
            .splitsPartitionedBy("f1")
            .splitsOrderedBy("f1.stringField", new Order[]{Order.ASCENDING});

    data.output(new DiscardingOutputFormat<Tuple3<Long, SomePojo, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(1, 2, 3)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example 7: checkSinglePartitionedGroupedSource7
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void checkSinglePartitionedGroupedSource7() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple3<Long, SomePojo, String>> data = env.fromCollection(tuple3PojoData, tuple3PojoType);

    data.getSplitDataProperties()
            .splitsPartitionedBy("f1.intField")
            .splitsGroupedBy("f1");

    data.output(new DiscardingOutputFormat<Tuple3<Long, SomePojo, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(2)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(new FieldSet(lprops.getGroupedFields().toArray()).equals(new FieldSet(1, 2, 3)));
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example 8: checkSinglePartitionedGroupedSource1
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void checkSinglePartitionedGroupedSource1() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data =
            env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(0)
            .splitsGroupedBy(0);

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(0)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(new FieldSet(lprops.getGroupedFields().toArray()).equals(new FieldSet(0)));
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example 9: checkSinglePartitionedOrderedSource1
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void checkSinglePartitionedOrderedSource1() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data =
            env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(1)
            .splitsOrderedBy(new int[]{1}, new Order[]{Order.ASCENDING});

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(1)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue((new FieldSet(lprops.getGroupedFields().toArray())).equals(new FieldSet(1)));
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example 10: checkSinglePartitionedSource1
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void checkSinglePartitionedSource1() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data =
            env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(0);

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(0)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example 11: main
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] argv) throws Exception {
    // BasicConfigurator.configure(new ConsoleAppender(new PatternLayout()));

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();

    OSMPBFRelationInputFormat iformat = new OSMPBFRelationInputFormat();
    // iformat.setFilePath("C:/projets/OSMImport/france-latest.osm.pbf");
    iformat.setFilePath("C:/projets/OSMImport/rhone-alpes-latest.osm.pbf");

    FileInputSplit[] s = iformat.createInputSplits(4);

    DataSource<Relation> r = env.createInput(iformat, new GenericTypeInfo<Relation>(Relation.class));
    r.flatMap(new FlatMapFunction<Relation, Tuple2<Long, String>>() {
        @Override
        public void flatMap(Relation value, Collector<Tuple2<Long, String>> out) throws Exception {
            // emit an (id, type) pair for every relation that carries a "type" field
            if (value.fields != null && value.fields.containsKey("type")) {
                out.collect(new Tuple2<>(value.id, (String) value.fields.get("type")));
            }
        }
    }).writeAsCsv("test.csv");

    env.execute();
}
Example 12: checkSinglePartitionedGroupedSource4
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void checkSinglePartitionedGroupedSource4() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data =
            env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(0, 1)
            .splitsGroupedBy(0);

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(0, 1)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example 13: checkJoinWithReplicatedSourceInputBehindFilter
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
/**
 * Tests a join program with a replicated data source behind a filter.
 */
@Test
public void checkJoinWithReplicatedSourceInputBehindFilter() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    TupleTypeInfo<Tuple1<String>> typeInfo = TupleTypeInfo.getBasicTupleTypeInfo(String.class);
    ReplicatingInputFormat<Tuple1<String>, FileInputSplit> rif =
            new ReplicatingInputFormat<Tuple1<String>, FileInputSplit>(
                    new TupleCsvInputFormat<Tuple1<String>>(new Path("/some/path"), typeInfo));

    DataSet<Tuple1<String>> source1 = env.createInput(rif, new TupleTypeInfo<Tuple1<String>>(BasicTypeInfo.STRING_TYPE_INFO));
    DataSet<Tuple1<String>> source2 = env.readCsvFile("/some/otherpath").types(String.class);

    DataSink<Tuple2<Tuple1<String>, Tuple1<String>>> out = source1
            .filter(new NoFilter())
            .join(source2).where("*").equalTo("*")
            .writeAsText("/some/newpath");

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan: the join should have a forward ship strategy on both inputs
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    DualInputPlanNode joinNode = (DualInputPlanNode) sinkNode.getPredecessor();

    ShipStrategyType joinIn1 = joinNode.getInput1().getShipStrategy();
    ShipStrategyType joinIn2 = joinNode.getInput2().getShipStrategy();

    Assert.assertEquals("Invalid ship strategy for an operator.", ShipStrategyType.FORWARD, joinIn1);
    Assert.assertEquals("Invalid ship strategy for an operator.", ShipStrategyType.FORWARD, joinIn2);
}
Example 14: checkJoinWithReplicatedSourceInputBehindFlatMap
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
/**
 * Tests a join program with a replicated data source behind a flatMap.
 */
@Test
public void checkJoinWithReplicatedSourceInputBehindFlatMap() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    TupleTypeInfo<Tuple1<String>> typeInfo = TupleTypeInfo.getBasicTupleTypeInfo(String.class);
    ReplicatingInputFormat<Tuple1<String>, FileInputSplit> rif =
            new ReplicatingInputFormat<Tuple1<String>, FileInputSplit>(
                    new TupleCsvInputFormat<Tuple1<String>>(new Path("/some/path"), typeInfo));

    DataSet<Tuple1<String>> source1 = env.createInput(rif, new TupleTypeInfo<Tuple1<String>>(BasicTypeInfo.STRING_TYPE_INFO));
    DataSet<Tuple1<String>> source2 = env.readCsvFile("/some/otherpath").types(String.class);

    DataSink<Tuple2<Tuple1<String>, Tuple1<String>>> out = source1
            .flatMap(new IdFlatMap())
            .join(source2).where("*").equalTo("*")
            .writeAsText("/some/newpath");

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan: the join should have a forward ship strategy on both inputs
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    DualInputPlanNode joinNode = (DualInputPlanNode) sinkNode.getPredecessor();

    ShipStrategyType joinIn1 = joinNode.getInput1().getShipStrategy();
    ShipStrategyType joinIn2 = joinNode.getInput2().getShipStrategy();

    Assert.assertEquals("Invalid ship strategy for an operator.", ShipStrategyType.FORWARD, joinIn1);
    Assert.assertEquals("Invalid ship strategy for an operator.", ShipStrategyType.FORWARD, joinIn2);
}
Example 15: checkJoinWithReplicatedSourceInputBehindMapPartition
import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
/**
 * Tests a join program with a replicated data source behind a mapPartition.
 */
@Test
public void checkJoinWithReplicatedSourceInputBehindMapPartition() {
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    TupleTypeInfo<Tuple1<String>> typeInfo = TupleTypeInfo.getBasicTupleTypeInfo(String.class);
    ReplicatingInputFormat<Tuple1<String>, FileInputSplit> rif =
            new ReplicatingInputFormat<Tuple1<String>, FileInputSplit>(
                    new TupleCsvInputFormat<Tuple1<String>>(new Path("/some/path"), typeInfo));

    DataSet<Tuple1<String>> source1 = env.createInput(rif, new TupleTypeInfo<Tuple1<String>>(BasicTypeInfo.STRING_TYPE_INFO));
    DataSet<Tuple1<String>> source2 = env.readCsvFile("/some/otherpath").types(String.class);

    DataSink<Tuple2<Tuple1<String>, Tuple1<String>>> out = source1
            .mapPartition(new IdPMap())
            .join(source2).where("*").equalTo("*")
            .writeAsText("/some/newpath");

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan: the join should have a forward ship strategy on both inputs
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    DualInputPlanNode joinNode = (DualInputPlanNode) sinkNode.getPredecessor();

    ShipStrategyType joinIn1 = joinNode.getInput1().getShipStrategy();
    ShipStrategyType joinIn2 = joinNode.getInput2().getShipStrategy();

    Assert.assertEquals("Invalid ship strategy for an operator.", ShipStrategyType.FORWARD, joinIn1);
    Assert.assertEquals("Invalid ship strategy for an operator.", ShipStrategyType.FORWARD, joinIn2);
}