

Java JobExecutionResult.getAllAccumulatorResults Method Code Examples

This article collects typical usages of the Java method org.apache.flink.api.common.JobExecutionResult.getAllAccumulatorResults. If you are wondering what JobExecutionResult.getAllAccumulatorResults does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore other usage examples of org.apache.flink.api.common.JobExecutionResult.


Six code examples of JobExecutionResult.getAllAccumulatorResults are shown below, ordered by popularity.
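
Before the project examples, here is a minimal, self-contained sketch of the basic pattern (not taken from any of the projects below; the class name AccumulatorExample, the accumulator name "line-count", and the sample data are illustrative): register an accumulator inside a rich function, execute the job, then read every accumulator value from the JobExecutionResult.

import java.util.Map;

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.configuration.Configuration;

public class AccumulatorExample {

	public static void main(String[] args) throws Exception {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		env.fromElements("a", "b", "c")
			.map(new RichMapFunction<String, String>() {
				private final IntCounter counter = new IntCounter();

				@Override
				public void open(Configuration parameters) {
					// register under a name that is unique within the job
					getRuntimeContext().addAccumulator("line-count", counter);
				}

				@Override
				public String map(String value) {
					counter.add(1);
					return value;
				}
			})
			.output(new DiscardingOutputFormat<String>());

		JobExecutionResult result = env.execute("accumulator example");

		// all accumulators of the job, keyed by their registration name
		Map<String, Object> accumulators = result.getAllAccumulatorResults();
		for (Map.Entry<String, Object> entry : accumulators.entrySet()) {
			System.out.println(entry.getKey() + " = " + entry.getValue());
		}
	}
}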

Example 1: executeProgram

import org.apache.flink.api.common.JobExecutionResult; // import the class that provides the method
protected void executeProgram(PackagedProgram program, ClusterClient<?> client, int parallelism) throws ProgramMissingJobException, ProgramInvocationException {
	logAndSysout("Starting execution of program");

	final JobSubmissionResult result = client.run(program, parallelism);

	if (null == result) {
		throw new ProgramMissingJobException("No JobSubmissionResult returned, please make sure you called " +
			"ExecutionEnvironment.execute()");
	}

	if (result.isJobExecutionResult()) {
		logAndSysout("Program execution finished");
		JobExecutionResult execResult = result.getJobExecutionResult();
		System.out.println("Job with JobID " + execResult.getJobID() + " has finished.");
		System.out.println("Job Runtime: " + execResult.getNetRuntime() + " ms");
		Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults();
		if (accumulatorsResult.size() > 0) {
			System.out.println("Accumulator Results: ");
			System.out.println(AccumulatorHelper.getResultsFormatted(accumulatorsResult));
		}
	} else {
		logAndSysout("Job has been submitted with JobID " + result.getJobID());
	}
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: CliFrontend.java
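
Note the isJobExecutionResult() check: when a job is submitted in detached mode, the client only gets back a JobSubmissionResult carrying the JobID, and accumulator values are not available. Only an attached (blocking) submission yields a JobExecutionResult on which getAllAccumulatorResults can safely be called.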

Example 2: executeProgram

import org.apache.flink.api.common.JobExecutionResult; // import the class that provides the method
protected int executeProgram(PackagedProgram program, Client client, int parallelism) {
	JobExecutionResult execResult;
	try {
		client.setPrintStatusDuringExecution(true);
		execResult = client.run(program, parallelism, true);
	}
	catch (ProgramInvocationException e) {
		return handleError(e);
	}
	finally {
		program.deleteExtractedLibraries();
	}
	
	// we come here after the job has finished
	if (execResult != null) {
		System.out.println("Job Runtime: " + execResult.getNetRuntime());
		Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults();
		if (accumulatorsResult.size() > 0) {
			System.out.println("Accumulator Results: ");
			System.out.println(AccumulatorHelper.getResultsFormated(accumulatorsResult));
		}
	}
	return 0;
}
 
Developer: citlab, Project: vs.msc.ws14, Lines: 25, Source: CliFrontend.java
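
This variant comes from a fork of a much older Flink codebase: the helper is spelled AccumulatorHelper.getResultsFormated, which appears to be the method's original (misspelled) name; later Flink versions renamed it to getResultsFormatted, as seen in Example 1. When compiling against a modern Flink, use the corrected spelling.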

Example 3: run

import org.apache.flink.api.common.JobExecutionResult; // import the class that provides the method
@Override
public PipelineResult run(Pipeline pipeline) {
  logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);

  MetricsEnvironment.setMetricsSupported(true);

  LOG.info("Executing pipeline using FlinkRunner.");

  FlinkPipelineExecutionEnvironment env = new FlinkPipelineExecutionEnvironment(options);

  LOG.info("Translating pipeline to Flink program.");
  env.translate(this, pipeline);

  JobExecutionResult result;
  try {
    LOG.info("Starting execution of Flink program.");
    result = env.executePipeline();
  } catch (Exception e) {
    LOG.error("Pipeline execution failed", e);
    throw new RuntimeException("Pipeline execution failed", e);
  }

  if (result instanceof DetachedEnvironment.DetachedJobExecutionResult) {
    LOG.info("Pipeline submitted in Detached mode");
    return new FlinkDetachedRunnerResult();
  } else {
    LOG.info("Execution finished in {} msecs", result.getNetRuntime());
    Map<String, Object> accumulators = result.getAllAccumulatorResults();
    if (accumulators != null && !accumulators.isEmpty()) {
      LOG.info("Final accumulator values:");
      for (Map.Entry<String, Object> entry : accumulators.entrySet()) {
        LOG.info("{} : {}", entry.getKey(), entry.getValue());
      }
    }

    return new FlinkRunnerResult(accumulators, result.getNetRuntime());
  }
}
 
Developer: apache, Project: beam, Lines: 39, Source: FlinkRunner.java
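
The instanceof check matters here: for a detached submission, Flink hands back a DetachedJobExecutionResult whose accumulator accessors throw an exception, so the runner must return a FlinkDetachedRunnerResult before ever touching getAllAccumulatorResults.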

Example 4: run

import org.apache.flink.api.common.JobExecutionResult; // import the class that provides the method
public FlinkRunnerResult run(Pipeline pipeline, int parallelism) {
	if (parallelism <= 0 && parallelism != -1) {
		throw new IllegalArgumentException("Parallelism must be positive or -1 for default");
	}
	
	LOG.info("Executing pipeline using the FlinkLocalPipelineRunner.");
	
	ExecutionEnvironment env = parallelism == -1 ?
			ExecutionEnvironment.createLocalEnvironment() :
			ExecutionEnvironment.createLocalEnvironment(parallelism);
	
	LOG.info("Translating pipeline to Flink program.");
	
	FlinkTranslator translator = new FlinkTranslator(env);
	translator.translate(pipeline);
	
	LOG.info("Starting execution of Flink program.");
	
	JobExecutionResult result;
	try {
		result = env.execute();
	}
	catch (Exception e) {
		LOG.error("Pipeline execution failed", e);
		throw new RuntimeException("Pipeline execution failed", e);
	}
	
	LOG.info("Execution finished in {} msecs", result.getNetRuntime());
	
	Map<String, Object> accumulators = result.getAllAccumulatorResults();
	if (accumulators != null && !accumulators.isEmpty()) {
		LOG.info("Final aggregator values:");
		
		for (Map.Entry<String, Object> entry : accumulators.entrySet()) {
			LOG.info("{} : {}", entry.getKey(), entry.getValue());
		}
	}

	return new ExecutionRunnerResult(accumulators, result.getNetRuntime());
}
 
Developer: StephanEwen, Project: flink-dataflow, Lines: 41, Source: FlinkLocalPipelineRunner.java
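
The null check before iterating is defensive: depending on the Flink version, getAllAccumulatorResults may return an empty map rather than null when no accumulators were registered, so the isEmpty() test usually suffices, but guarding against null costs nothing here.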

Example 5: testAccumulatorAllStatistics

import org.apache.flink.api.common.JobExecutionResult; // import the class that provides the method
@Test
public void testAccumulatorAllStatistics() throws Exception {

	StringBuilder input = new StringBuilder();

	Random rand = new Random();

	for (int i = 1; i < 1000; i++) {
		if (rand.nextDouble() < 0.2) {
			input.append(rand.nextInt(4)).append("\n");
		} else {
			input.append(rand.nextInt(100)).append("\n");
		}
	}

	String inputFile = createTempFile("datapoints.txt", input.toString());

	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	env.getConfig().disableSysoutLogging();

	OperatorStatisticsConfig operatorStatisticsConfig =
			new OperatorStatisticsConfig(OperatorStatisticsConfig.CountDistinctAlgorithm.HYPERLOGLOG,
										OperatorStatisticsConfig.HeavyHitterAlgorithm.LOSSY_COUNTING);

	env.readTextFile(inputFile)
			.flatMap(new StringToInt(operatorStatisticsConfig))
			.output(new DiscardingOutputFormat<Tuple1<Integer>>());

	JobExecutionResult result = env.execute();

	OperatorStatistics globalStats = result.getAccumulatorResult(ACCUMULATOR_NAME);
//	System.out.println("Global Stats");
//	System.out.println(globalStats.toString());

	OperatorStatistics merged = null;

	Map<String, Object> accResults = result.getAllAccumulatorResults();
	for (String accumulatorName : accResults.keySet()) {
		if (accumulatorName.contains(ACCUMULATOR_NAME + "-")) {
			OperatorStatistics localStats = (OperatorStatistics) accResults.get(accumulatorName);
			LOG.debug("Local Stats: " + accumulatorName);
			LOG.debug(localStats.toString());
			if (merged == null) {
				merged = localStats.clone();
			} else {
				merged.merge(localStats);
			}
		}
	}

	LOG.debug("Local Stats Merged: \n");
	LOG.debug(merged.toString());

	Assert.assertEquals("Global cardinality should be 999", 999, globalStats.cardinality);
	Assert.assertEquals("Count distinct estimate should be around 100 and is " + globalStats.estimateCountDistinct(),
			100.0, (double) globalStats.estimateCountDistinct(), 5.0);
	Assert.assertTrue("The total number of heavy hitters should be between 0 and 5.",
			globalStats.getHeavyHitters().size() > 0 && globalStats.getHeavyHitters().size() <= 5);
	Assert.assertEquals("Min when merging the local accumulators should correspond to the min " +
			"of the global accumulator", merged.getMin(), globalStats.getMin());
	Assert.assertEquals("Max resulting from merging the local accumulators should correspond to the " +
			"max of the global accumulator", merged.getMax(), globalStats.getMax());
	Assert.assertEquals("Count distinct when merging the local accumulators should correspond to " +
			"count distinct in the global accumulator", merged.estimateCountDistinct(), globalStats.estimateCountDistinct());
	Assert.assertEquals("The number of heavy hitters when merging the local accumulators should correspond " +
			"to the number of heavy hitters in the global accumulator", merged.getHeavyHitters().size(), globalStats.getHeavyHitters().size());
}
 
Developer: axbaretto, Project: flink, Lines: 68, Source: OperatorStatsAccumulatorTest.java
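
The filter on ACCUMULATOR_NAME + "-" works because the StringToInt function (not shown here) evidently registers one statistics accumulator per parallel subtask in addition to the global one. A hypothetical sketch of that naming scheme, using a standard IntCounter instead of the project-specific OperatorStatistics type (the class name PerSubtaskCounter and the accumulator name "op-stats" are illustrative):

import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

// Hypothetical reconstruction of the per-subtask naming convention; the real
// StringToInt registers OperatorStatistics accumulators instead of counters.
public class PerSubtaskCounter extends RichFlatMapFunction<String, Integer> {
	private static final String ACCUMULATOR_NAME = "op-stats"; // illustrative name
	private final IntCounter localCount = new IntCounter();

	@Override
	public void open(Configuration parameters) {
		int subtask = getRuntimeContext().getIndexOfThisSubtask();
		// one accumulator per subtask, found later via name.contains(ACCUMULATOR_NAME + "-")
		getRuntimeContext().addAccumulator(ACCUMULATOR_NAME + "-" + subtask, localCount);
	}

	@Override
	public void flatMap(String value, Collector<Integer> out) {
		localCount.add(1);
		out.collect(Integer.parseInt(value.trim()));
	}
}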

Example 6: main

import org.apache.flink.api.common.JobExecutionResult; // import the class that provides the method
public static void main(String[] args) throws Exception {

	long numVertices = 41652230;

	double threshold = 0.005 / numVertices;
	double dampeningFactor = 0.85;

	String adjacencyPath = args.length > 1 ? args[0] : "/data/demodata/pagerank/edges/edges.csv";
	String outpath = args.length > 2 ? args[1] : "/data/demodata/pagerank/adacency_comp";
	int numIterations = args.length > 3 ? Integer.valueOf(args[2]) : 100;

	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
//	env.setDegreeOfParallelism(4);

	DataSet<Tuple2<Long, long[]>> adjacency = env.readTextFile(adjacencyPath).map(new AdjacencyBuilder());
	DataSet<Tuple2<Long, long[]>> adjacency2 = env.readTextFile(adjacencyPath).map(new AdjacencyBuilder());

	DataSet<Tuple2<Long, Double>> initialRanks = adjacency
			.flatMap(new InitialMessageBuilder(numVertices, dampeningFactor))
			.groupBy(0)
			.reduceGroup(new Agg());

	DataSet<Tuple2<Long, Double>> initialDeltas = initialRanks.map(new InitialDeltaBuilder(numVertices));

	// ---------- iterative part ----------

	DeltaIteration<Tuple2<Long, Double>, Tuple2<Long, Double>> adaptiveIteration =
			initialRanks.iterateDelta(initialDeltas, numIterations, 0);

	DataSet<Tuple2<Long, Double>> deltas = adaptiveIteration.getWorkset()
			.join(adjacency2).where(0).equalTo(0).with(new DeltaDistributor(0.85))
			.groupBy(0)
			.reduceGroup(new AggAndFilter(threshold));

	DataSet<Tuple2<Long, Double>> rankUpdates = adaptiveIteration.getSolutionSet()
			.join(deltas).where(0).equalTo(0).with(new SolutionJoin());

	adaptiveIteration.closeWith(rankUpdates, deltas)
			.writeAsCsv(outpath + "_adapt", WriteMode.OVERWRITE);

//	System.out.println(env.getExecutionPlan());
	JobExecutionResult result = env.execute("Adaptive Page Rank");

	Map<String, Object> accumulators = result.getAllAccumulatorResults();
	List<String> keys = new ArrayList<String>(accumulators.keySet());
	Collections.sort(keys);
	for (String key : keys) {
		System.out.println(key + " : " + accumulators.get(key));
	}
}
 
Developer: project-flink, Project: flink-perf, Lines: 53, Source: AdaptivePageRank.java
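
Sorting the accumulator names before printing is a small but deliberate choice: getAllAccumulatorResults returns a map with no ordering guarantee, so sorting the keys makes the console output deterministic and easy to compare across runs.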


Note: The org.apache.flink.api.common.JobExecutionResult.getAllAccumulatorResults examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects and the copyright remains with their original authors; consult each project's license before redistributing or reusing the code. Do not repost without permission.