

Java ExecutionEnvironment.execute Method Code Examples

This article collects typical usage examples of the execute method of the Java class org.apache.flink.api.java.ExecutionEnvironment. If you are wondering what ExecutionEnvironment.execute does, how to use it, or where to find usage examples, the curated code samples below should help. You can also explore further usage examples of org.apache.flink.api.java.ExecutionEnvironment.


Below are 15 code examples of the ExecutionEnvironment.execute method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
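Before turning to the collected examples, a minimal sketch of the basic call pattern may help orient the reader. The output path and job name below are illustrative assumptions, not taken from any of the examples:

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

public class ExecuteMinimalSketch {
    public static void main(String[] args) throws Exception {
        // Obtain an execution environment (local in the IDE, remote on a cluster).
        final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // DataSet transformations are lazy: they only assemble an execution plan.
        DataSet<String> words = env.fromElements("to", "be", "or", "not", "to", "be");
        words.writeAsText("/tmp/execute-sketch-out"); // assumed output path

        // execute() translates the plan, submits the job, and blocks until it
        // finishes; the optional argument is the job name shown in the web UI.
        env.execute("execute-minimal-sketch"); // assumed job name
    }
}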

Example 1: main

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
public static void main(String... args) throws Exception {
    File txtFile = new File("/tmp/test/file.txt");
    File csvFile = new File("/tmp/test/file.csv");
    File binFile = new File("/tmp/test/file.bin");

    writeToFile(txtFile, "txt");
    writeToFile(csvFile, "csv");
    writeToFile(binFile, "bin");

    final ExecutionEnvironment env =
            ExecutionEnvironment.getExecutionEnvironment();
    final TextInputFormat format = new TextInputFormat(new Path("/tmp/test"));

    // Accept every file, then exclude anything matching **/file.bin.
    GlobFilePathFilter filesFilter = new GlobFilePathFilter(
            Collections.singletonList("**"),
            Arrays.asList("**/file.bin")
    );
    format.setFilesFilter(filesFilter);

    DataSet<String> result = env.readFile(format, "/tmp/test");
    result.writeAsText("/tmp/out");
    env.execute("GlobFilePathFilter-Test");
}
 
Developer: mushketyk, Project: flink-examples, Lines: 25, Source: GlobExample.java
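A side note on GlobFilePathFilter semantics, since this example relies on them: the first constructor argument lists include patterns, the second lists exclude patterns, and filterPath(...) returns true for paths that should be filtered out. A small standalone sketch, with assumed paths:

import java.util.Collections;

import org.apache.flink.api.common.io.GlobFilePathFilter;
import org.apache.flink.core.fs.Path;

public class GlobFilterSketch {
    public static void main(String[] args) {
        GlobFilePathFilter filter = new GlobFilePathFilter(
                Collections.singletonList("**"),           // include everything ...
                Collections.singletonList("**/file.bin")); // ... except file.bin
        // filterPath(...) returns true when a path should be excluded.
        System.out.println(filter.filterPath(new Path("/tmp/test/file.bin"))); // true
        System.out.println(filter.filterPath(new Path("/tmp/test/file.txt"))); // false
    }
}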

Example 2: testProgram

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Override
protected void testProgram() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	DataSet<Long> vertexIds = env.generateSequence(1, NUM_VERTICES);
	DataSet<String> edgeString = env.fromElements(ConnectedComponentsData.getRandomOddEvenEdges(NUM_EDGES, NUM_VERTICES, SEED).split("\n"));

	DataSet<Edge<Long, NullValue>> edges = edgeString.map(new EdgeParser());

	DataSet<Vertex<Long, Long>> initialVertices = vertexIds.map(new IdAssigner());

	Graph<Long, Long, NullValue> graph = Graph.fromDataSet(initialVertices, edges, env);

	DataSet<Vertex<Long, Long>> result = graph.run(new ConnectedComponents<>(100));

	result.writeAsCsv(resultPath, "\n", " ");
	env.execute();
}
 
Developer: axbaretto, Project: flink, Lines: 18, Source: ConnectedComponentsWithRandomisedEdgesITCase.java

Example 3: testGroupReduceOnNeighborsWithVVInvalidEdgeTrgId

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
/**
 * Tests groupReduceOnNeighbors() with a NeighborsFunctionWithVertexValue
 * and an edge whose trgId does not exist in the vertex DataSet.
 */
@Test
public void testGroupReduceOnNeighborsWithVVInvalidEdgeTrgId() throws Exception {

	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(PARALLELISM);
	env.getConfig().disableSysoutLogging();

	Graph<Long, Long, Long> graph = Graph.fromDataSet(TestGraphUtils.getLongLongVertexData(env),
			TestGraphUtils.getLongLongEdgeInvalidTrgData(env), env);

	try {
		DataSet<Tuple2<Long, Long>> verticesWithSumOfOutNeighborValues =
				graph.groupReduceOnNeighbors(new SumAllNeighbors(), EdgeDirection.ALL);

		verticesWithSumOfOutNeighborValues.output(new DiscardingOutputFormat<>());
		env.execute();

		fail("Expected an exception.");
	} catch (Exception e) {
		// We expect the job to fail with an exception
	}
}
 
Developer: axbaretto, Project: flink, Lines: 27, Source: ReduceOnNeighborsWithExceptionITCase.java

Example 4: main

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	env.getConfig().disableSysoutLogging();

	DataSet<Integer> data = env.createInput(new CustomInputFormat());

	data
		.map(new MapFunction<Integer, Tuple2<Integer, Double>>() {
			@Override
			public Tuple2<Integer, Double> map(Integer value) {
				return new Tuple2<Integer, Double>(value, value * 0.5);
			}
		})
		.output(new DiscardingOutputFormat<Tuple2<Integer, Double>>());

	env.execute();
}
 
Developer: axbaretto, Project: flink, Lines: 19, Source: CustomInputSplitProgram.java

Example 5: testPojoSortingNestedParallelism1

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void testPojoSortingNestedParallelism1() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<CollectionDataSets.POJO> ds = CollectionDataSets.getMixedPojoDataSet(env);
	ds.writeAsText(resultPath)
		.sortLocalOutput("nestedTupleWithCustom.f0", Order.ASCENDING)
		.sortLocalOutput("nestedTupleWithCustom.f1.myInt", Order.DESCENDING)
		.sortLocalOutput("nestedPojo.longNumber", Order.ASCENDING)
		.setParallelism(1);

	env.execute();

	String expected =
			"2 First_ (10,105,1000,One) 10200\n" +
			"1 First (10,100,1000,One) 10100\n" +
			"4 First_ (11,106,1000,One) 10300\n" +
			"5 First (11,102,2000,One) 10100\n" +
			"3 First (11,102,3000,One) 10200\n" +
			"6 Second_ (20,200,2000,Two) 10100\n" +
			"8 Third_ (30,300,1000,Three) 10100\n" +
			"7 Third (31,301,2000,Three) 10200\n";

	compareResultsByLinesInMemoryWithStrictOrder(expected, resultPath);
}
 
Developer: axbaretto, Project: flink, Lines: 26, Source: DataSinkITCase.java

Example 6: testStandardCountingWithCombiner

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void testStandardCountingWithCombiner() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<Tuple2<IntWritable, IntWritable>> ds = HadoopTestData.getKVPairDataSet(env).
			map(new Mapper1());

	DataSet<Tuple2<IntWritable, IntWritable>> counts = ds.
			groupBy(0).
			reduceGroup(new HadoopReduceCombineFunction<IntWritable, IntWritable, IntWritable, IntWritable>(
					new SumReducer(), new SumReducer()));

	String resultPath = tempFolder.newFile().toURI().toString();

	counts.writeAsText(resultPath);
	env.execute();

	String expected = "(0,5)\n" +
			"(1,6)\n" +
			"(2,6)\n" +
			"(3,4)\n";

	compareResultsByLinesInMemory(expected, resultPath);
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: HadoopReduceCombineFunctionITCase.java

Example 7: testProgram

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Override
protected void testProgram() throws Exception {
	try {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(1);

		DataSet<Tuple2<Long, Long>> input = env.generateSequence(0, 9).map(new Duplicator<Long>());

		DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration = input.iterateDelta(input, 5, 1);

		iteration.closeWith(iteration.getWorkset(), iteration.getWorkset().map(new TestMapper()))
				.output(new LocalCollectionOutputFormat<Tuple2<Long, Long>>(result));

		env.execute();
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Developer: axbaretto, Project: flink, Lines: 21, Source: DeltaIterationNotDependingOnSolutionSetITCase.java

Example 8: main

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	// set up the batch execution environment
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	/**
	 * Here, you can start creating your execution plan for Flink.
	 *
	 * Start with getting some data from the environment, like
	 * 	env.readTextFile(textPath);
	 *
	 * then, transform the resulting DataSet<String> using operations
	 * like
	 * 	.filter()
	 * 	.flatMap()
	 * 	.join()
	 * 	.coGroup()
	 *
	 * and many more.
	 * Have a look at the programming guide for the Java API:
	 *
	 * http://flink.apache.org/docs/latest/apis/batch/index.html
	 *
	 * and the examples
	 *
	 * http://flink.apache.org/docs/latest/apis/batch/examples.html
	 *
	 */

	// execute program
	env.execute("Flink Batch Java API Skeleton");
}
 
Developer: dineshtrivedi, Project: flink-java-project, Lines: 32, Source: BatchJob.java
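To make the skeleton's comments concrete, here is one hypothetical way to fill it in. The input and output paths are assumptions for illustration:

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.util.Collector;

public class BatchJobSketch {
    public static void main(String[] args) throws Exception {
        final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // Get some data from the environment ...
        DataSet<String> text = env.readTextFile("/tmp/input.txt"); // assumed path

        // ... transform the resulting DataSet<String> ...
        text.flatMap((String line, Collector<String> out) -> {
                for (String token : line.split("\\s+")) {
                    out.collect(token);
                }
            })
            .returns(String.class) // type hint required for the lambda
            .filter(token -> !token.isEmpty())
            .writeAsText("/tmp/output"); // assumed path

        // ... and only execute() actually runs the assembled plan.
        env.execute("Flink Batch Java API Skeleton");
    }
}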

Example 9: main

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    String inputPath = args[0];
    String outputPath = args[1] + "_" + System.currentTimeMillis();

    // set up the execution environment
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    // get input data
    DataSet<String> text = env.readTextFile(inputPath);
    DataSet<Tuple2<String, Long>> counts = text
            .<Tuple2<String, Long>>flatMap((line, out) -> {
                StringTokenizer tokenizer = new StringTokenizer(line);
                while (tokenizer.hasMoreTokens()) {
                    out.collect(new Tuple2<>(tokenizer.nextToken(), 1L));
                }
            })
            .returns(new TypeHint<Tuple2<String, Long>>() {
            })
            // group by the tuple field "0" and sum up tuple field "1"
            .groupBy(0)
            .sum(1);

    // emit result
    counts.writeAsCsv(outputPath);
    // execute program
    long t = System.currentTimeMillis();
    env.execute("Streaming WordCount Example");
    System.out.println("Time=" + (System.currentTimeMillis() - t));
}
 
Developer: hazelcast, Project: big-data-benchmark, Lines: 29, Source: FlinkWordCount.java
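A side note on the manual timing in this example: env.execute() returns a JobExecutionResult, so the job's runtime can also be read from the result instead of measuring wall-clock time around the call. A minimal sketch of that variant, reusing the job name from the example:

import org.apache.flink.api.common.JobExecutionResult;

// Inside main(), the last three statements could become:
JobExecutionResult result = env.execute("Streaming WordCount Example");
System.out.println("Time=" + result.getNetRuntime()); // net runtime in milliseconds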

Example 10: testAccumulatorCountDistinctLinearCounting

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void testAccumulatorCountDistinctLinearCounting() throws Exception {

	String input = "";

	Random rand = new Random();

	for (int i = 1; i < 1000; i++) {
		if (rand.nextDouble() < 0.2) {
			input += String.valueOf(rand.nextInt(4)) + "\n";
		} else {
			input += String.valueOf(rand.nextInt(100)) + "\n";
		}
	}

	String inputFile = createTempFile("datapoints.txt", input);

	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	env.getConfig().disableSysoutLogging();

	OperatorStatisticsConfig operatorStatisticsConfig =
			new OperatorStatisticsConfig(false);
	operatorStatisticsConfig.collectCountDistinct = true;
	operatorStatisticsConfig.countDistinctAlgorithm = OperatorStatisticsConfig.CountDistinctAlgorithm.LINEAR_COUNTING;
	operatorStatisticsConfig.setCountDbitmap(10000);

	env.readTextFile(inputFile).
			flatMap(new StringToInt(operatorStatisticsConfig)).
			output(new DiscardingOutputFormat<Tuple1<Integer>>());

	JobExecutionResult result = env.execute();

	OperatorStatistics globalStats = result.getAccumulatorResult(ACCUMULATOR_NAME);

	Assert.assertTrue("Count Distinct for accumulator should not be null", globalStats.countDistinct != null);
}
 
Developer: axbaretto, Project: flink, Lines: 39, Source: OperatorStatsAccumulatorTest.java

Example 11: testTupleSortingDualParallelism1

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void testTupleSortingDualParallelism1() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.get3TupleDataSet(env);
	ds.writeAsCsv(resultPath)
		.sortLocalOutput(1, Order.DESCENDING).sortLocalOutput(0, Order.ASCENDING)
		.setParallelism(1);

	env.execute();

	String expected = "16,6,Comment#10\n" +
			"17,6,Comment#11\n" +
			"18,6,Comment#12\n" +
			"19,6,Comment#13\n" +
			"20,6,Comment#14\n" +
			"21,6,Comment#15\n" +
			"11,5,Comment#5\n" +
			"12,5,Comment#6\n" +
			"13,5,Comment#7\n" +
			"14,5,Comment#8\n" +
			"15,5,Comment#9\n" +
			"7,4,Comment#1\n" +
			"8,4,Comment#2\n" +
			"9,4,Comment#3\n" +
			"10,4,Comment#4\n" +
			"4,3,Hello world, how are you?\n" +
			"5,3,I am fine.\n" +
			"6,3,Luke Skywalker\n" +
			"2,2,Hello\n" +
			"3,2,Hello world\n" +
			"1,1,Hi\n";

	compareResultsByLinesInMemoryWithStrictOrder(expected, resultPath);
}
 
Developer: axbaretto, Project: flink, Lines: 36, Source: DataSinkITCase.java

Example 12: testField

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
private void testField(final String fieldName) throws Exception {
	before();

	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	Path in = new Path(inFile.getAbsoluteFile().toURI());

	AvroInputFormat<User> users = new AvroInputFormat<User>(in, User.class);
	DataSet<User> usersDS = env.createInput(users);

	DataSet<Object> res = usersDS.groupBy(fieldName).reduceGroup(new GroupReduceFunction<User, Object>() {
		@Override
		public void reduce(Iterable<User> values, Collector<Object> out) throws Exception {
			for(User u : values) {
				out.collect(u.get(fieldName));
			}
		}
	});
	res.writeAsText(resultPath);
	env.execute("Simple Avro read job");

	// test if automatic registration of the Types worked
	ExecutionConfig ec = env.getConfig();
	Assert.assertTrue(ec.getRegisteredKryoTypes().contains(org.apache.flink.api.io.avro.generated.Fixed16.class));

	if(fieldName.equals("name")) {
		expected = "Alyssa\nCharlie";
	} else if(fieldName.equals("type_enum")) {
		expected = "GREEN\nRED\n";
	} else if(fieldName.equals("type_double_test")) {
		expected = "123.45\n1.337\n";
	} else {
		Assert.fail("Unknown field");
	}

	after();
}
 
Developer: axbaretto, Project: flink, Lines: 37, Source: AvroPojoTest.java

Example 13: testConfigurationViaJobConf

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Test
public void testConfigurationViaJobConf() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	JobConf conf = new JobConf();
	conf.set("my.cntPrefix", "Hello");

	DataSet<Tuple2<IntWritable, Text>> ds = HadoopTestData.getKVPairDataSet(env).
			map(new Mapper2());

	DataSet<Tuple2<IntWritable, IntWritable>> helloCnts = ds.
			groupBy(0).
			reduceGroup(new HadoopReduceFunction<IntWritable, Text, IntWritable, IntWritable>(
					new ConfigurableCntReducer(), conf));

	String resultPath = tempFolder.newFile().toURI().toString();

	helloCnts.writeAsText(resultPath);
	env.execute();

	String expected = "(0,0)\n"+
			"(1,0)\n" +
			"(2,1)\n" +
			"(3,1)\n" +
			"(4,1)\n";

	compareResultsByLinesInMemory(expected, resultPath);
}
 
Developer: axbaretto, Project: flink, Lines: 29, Source: HadoopReduceFunctionITCase.java

Example 14: testProgram

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Override
protected void testProgram() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	IterativeDataSet<Long> iteration = env.generateSequence(1, 10).iterate(100);
	iteration.closeWith(iteration)
		.output(new LocalCollectionOutputFormat<Long>(result));

	env.execute();
}
 
Developer: axbaretto, Project: flink, Lines: 11, Source: IdentityIterationITCase.java

Example 15: testProgram

import org.apache.flink.api.java.ExecutionEnvironment; // import the package/class this method depends on
@Override
protected void testProgram() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<String> stringDs = env.fromElements("aa", "ab", "ac", "ad");
	DataSet<String> flatMappedDs = stringDs.flatMap((s, out) -> out.collect(s.replace("a", "b")));
	flatMappedDs.writeAsText(resultPath);
	env.execute();
}
 
Developer: axbaretto, Project: flink, Lines: 10, Source: FlatMapITCase.java
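A caveat about this example: when compiled with plain javac, Flink generally cannot infer the output type of a flatMap lambda that emits only through a Collector, and fails with a type-extraction error when the plan is built; Flink's test modules work around this with a compiler that preserves generic signatures. A sketch of the same transformation with an explicit type hint (stringDs as in the example above):

import org.apache.flink.api.java.DataSet;
import org.apache.flink.util.Collector;

// Same transformation, with an explicit result-type hint for the lambda:
DataSet<String> flatMappedDs = stringDs
		.flatMap((String s, Collector<String> out) -> out.collect(s.replace("a", "b")))
		.returns(String.class);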


Note: The org.apache.flink.api.java.ExecutionEnvironment.execute method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. Refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.