Java HadoopReduceCombineFunction Class Code Examples

This article collects typical usage examples of the Java class org.apache.flink.hadoopcompatibility.mapred.HadoopReduceCombineFunction. If you are wondering what the HadoopReduceCombineFunction class does or how to use it, the curated examples below should help.


The HadoopReduceCombineFunction class belongs to the org.apache.flink.hadoopcompatibility.mapred package. Four distinct code examples of the class are shown below, ordered by popularity.
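
All of the test snippets below reference helper classes (Mapper1, Mapper2, Mapper3, SumReducer, KeyChangingReducer) that are defined elsewhere in Flink's HadoopReduceCombineFunctionITCase and are not reproduced on this page. For orientation, here is a minimal summing reducer against Hadoop's old mapred API, which is what HadoopReduceCombineFunction wraps. This is only a sketch; the actual SumReducer in Flink's test suite may differ in detail.

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

// Sketch: sums all values of a group and emits (key, sum).
public class SumReducer implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {

	@Override
	public void reduce(IntWritable key, Iterator<IntWritable> values,
			OutputCollector<IntWritable, IntWritable> out, Reporter reporter) throws IOException {
		int sum = 0;
		while (values.hasNext()) {
			sum += values.next().get();
		}
		out.collect(key, new IntWritable(sum));
	}

	@Override
	public void configure(JobConf conf) {
		// no configuration needed for this sketch
	}

	@Override
	public void close() throws IOException {
		// nothing to release
	}
}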

Example 1: testStandardCountingWithCombiner

import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceCombineFunction; // import of the featured package/class
@Test
public void testStandardCountingWithCombiner() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<Tuple2<IntWritable, IntWritable>> ds = HadoopTestData.getKVPairDataSet(env).
			map(new Mapper1());

	DataSet<Tuple2<IntWritable, IntWritable>> counts = ds.
			groupBy(0).
			reduceGroup(new HadoopReduceCombineFunction<IntWritable, IntWritable, IntWritable, IntWritable>(
					new SumReducer(), new SumReducer()));

	String resultPath = tempFolder.newFile().toURI().toString();

	counts.writeAsText(resultPath);
	env.execute();

	String expected = "(0,5)\n" +
			"(1,6)\n" +
			"(2,6)\n" +
			"(3,4)\n";

	compareResultsByLinesInMemory(expected, resultPath);
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: HadoopReduceCombineFunctionITCase.java
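
Mapper1 above is an ordinary Flink MapFunction that turns each (IntWritable, Text) test pair into a countable (bucket, 1) pair. Judging from the expected output (four buckets of sizes 5, 6, 6, and 4), and assuming HadoopTestData emits 21 pairs keyed 1 through 21, a plausible sketch is:

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

// Sketch: buckets keys into four groups and pairs each with a count of 1.
public class Mapper1 implements MapFunction<Tuple2<IntWritable, Text>, Tuple2<IntWritable, IntWritable>> {

	@Override
	public Tuple2<IntWritable, IntWritable> map(Tuple2<IntWritable, Text> v) {
		// key / 6 maps keys 1..21 onto buckets 0..3 of sizes 5, 6, 6, 4
		return new Tuple2<>(new IntWritable(v.f0.get() / 6), new IntWritable(1));
	}
}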

Example 2: testUngroupedHadoopReducer

import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceCombineFunction; // import of the featured package/class
@Test
public void testUngroupedHadoopReducer() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<Tuple2<IntWritable, IntWritable>> ds = HadoopTestData.getKVPairDataSet(env).
			map(new Mapper2());

	DataSet<Tuple2<IntWritable, IntWritable>> sum = ds.
			reduceGroup(new HadoopReduceCombineFunction<IntWritable, IntWritable, IntWritable, IntWritable>(
					new SumReducer(), new SumReducer()));

	String resultPath = tempFolder.newFile().toURI().toString();

	sum.writeAsText(resultPath);
	env.execute();

	String expected = "(0,231)\n";

	compareResultsByLinesInMemory(expected, resultPath);
}
 
Developer: axbaretto, Project: flink, Lines: 21, Source: HadoopReduceCombineFunctionITCase.java
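
Because the DataSet is not grouped here, the wrapped Hadoop reducer sees all records as a single group. The expected result (0,231), where 231 = 1 + 2 + … + 21, suggests Mapper2 zeroes out the key and moves the original key into the value position. A hypothetical sketch:

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

// Sketch: collapses everything onto key 0 so the reducer sums the original keys.
public class Mapper2 implements MapFunction<Tuple2<IntWritable, Text>, Tuple2<IntWritable, IntWritable>> {

	@Override
	public Tuple2<IntWritable, IntWritable> map(Tuple2<IntWritable, Text> v) {
		return new Tuple2<>(new IntWritable(0), v.f0);
	}
}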

Example 3: testCombiner

import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceCombineFunction; // import of the featured package/class
@Test
public void testCombiner() throws Exception {
	org.junit.Assume.assumeThat(mode, new IsEqual<TestExecutionMode>(TestExecutionMode.CLUSTER));
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<Tuple2<IntWritable, IntWritable>> ds = HadoopTestData.getKVPairDataSet(env).
			map(new Mapper3());

	DataSet<Tuple2<IntWritable, IntWritable>> counts = ds.
			groupBy(0).
			reduceGroup(new HadoopReduceCombineFunction<IntWritable, IntWritable, IntWritable, IntWritable>(
					new SumReducer(), new KeyChangingReducer()));

	String resultPath = tempFolder.newFile().toURI().toString();

	counts.writeAsText(resultPath);
	env.execute();

	String expected = "(0,5)\n" +
			"(1,6)\n" +
			"(2,5)\n" +
			"(3,5)\n";

	compareResultsByLinesInMemory(expected, resultPath);
}
 
Developer: axbaretto, Project: flink, Lines: 26, Source: HadoopReduceCombineFunctionITCase.java
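
This test is restricted to cluster mode, where Flink actually runs the combine phase. It makes combiner execution observable by using a combiner that rewrites keys, so the final counts differ from what a plain SumReducer combiner would produce (compare Example 1). Matching the expected output, and assuming Mapper3 pairs each original key with a count of 1, the combiner plausibly re-keys each value with key % 4:

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

// Sketch: a combiner that deliberately rewrites each key to key % 4,
// so the final output reveals whether the combine phase actually ran.
public class KeyChangingReducer implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {

	@Override
	public void reduce(IntWritable key, Iterator<IntWritable> values,
			OutputCollector<IntWritable, IntWritable> out, Reporter reporter) throws IOException {
		while (values.hasNext()) {
			out.collect(new IntWritable(key.get() % 4), values.next());
		}
	}

	@Override
	public void configure(JobConf conf) { }

	@Override
	public void close() throws IOException { }
}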

Example 4: main

import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceCombineFunction; // import of the featured package/class
public static void main(String[] args) throws Exception {
	if (args.length < 2) {
		System.err.println("Usage: WordCount <input path> <result path>");
		return;
	}

	final String inputPath = args[0];
	final String outputPath = args[1];

	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	// Set up the Hadoop Input Format
	HadoopInputFormat<LongWritable, Text> hadoopInputFormat = new HadoopInputFormat<LongWritable, Text>(new TextInputFormat(), LongWritable.class, Text.class, new JobConf());
	TextInputFormat.addInputPath(hadoopInputFormat.getJobConf(), new Path(inputPath));

	// Create a Flink job with it
	DataSet<Tuple2<LongWritable, Text>> text = env.createInput(hadoopInputFormat);

	DataSet<Tuple2<Text, LongWritable>> words =
			text.flatMap(new HadoopMapFunction<LongWritable, Text, Text, LongWritable>(new Tokenizer()))
				.groupBy(0).reduceGroup(new HadoopReduceCombineFunction<Text, LongWritable, Text, LongWritable>(new Counter(), new Counter()));

	// Set up Hadoop Output Format
	HadoopOutputFormat<Text, LongWritable> hadoopOutputFormat =
			new HadoopOutputFormat<Text, LongWritable>(new TextOutputFormat<Text, LongWritable>(), new JobConf());
	hadoopOutputFormat.getJobConf().set("mapred.textoutputformat.separator", " ");
	TextOutputFormat.setOutputPath(hadoopOutputFormat.getJobConf(), new Path(outputPath));

	// Output & Execute
	words.output(hadoopOutputFormat).setParallelism(1);
	env.execute("Hadoop Compat WordCount");
}
 
Developer: axbaretto, Project: flink, Lines: 33, Source: HadoopMapredCompatWordCount.java
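
The job above plugs a Hadoop Mapper (Tokenizer) and Reducer (Counter) into Flink through HadoopMapFunction and HadoopReduceCombineFunction; neither class is reproduced on this page. A sketch of the conventional word-count pair they presumably implement, assuming the standard mapred interfaces:

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

// Sketch: splits each line into lowercase words and emits (word, 1).
class Tokenizer implements Mapper<LongWritable, Text, Text, LongWritable> {

	@Override
	public void map(LongWritable offset, Text line,
			OutputCollector<Text, LongWritable> out, Reporter reporter) throws IOException {
		for (String word : line.toString().toLowerCase().split("\\W+")) {
			if (!word.isEmpty()) {
				out.collect(new Text(word), new LongWritable(1L));
			}
		}
	}

	@Override
	public void configure(JobConf conf) { }

	@Override
	public void close() throws IOException { }
}

// Sketch: sums the counts per word; usable as both reducer and combiner.
class Counter implements Reducer<Text, LongWritable, Text, LongWritable> {

	@Override
	public void reduce(Text word, Iterator<LongWritable> counts,
			OutputCollector<Text, LongWritable> out, Reporter reporter) throws IOException {
		long sum = 0;
		while (counts.hasNext()) {
			sum += counts.next().get();
		}
		out.collect(word, new LongWritable(sum));
	}

	@Override
	public void configure(JobConf conf) { }

	@Override
	public void close() throws IOException { }
}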


Note: the org.apache.flink.hadoopcompatibility.mapred.HadoopReduceCombineFunction class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.