Java HadoopReduceFunction Class Code Examples

This article collects typical usage examples of the Java class org.apache.flink.hadoopcompatibility.mapred.HadoopReduceFunction, gathered from open-source projects. If you are unsure what the HadoopReduceFunction class does or how to use it, the selected examples below should help.


HadoopReduceFunction belongs to the org.apache.flink.hadoopcompatibility.mapred package. The sections below show four representative code examples of the class, taken from the project's integration tests.
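Before turning to the test cases, a quick orientation: HadoopReduceFunction wraps a standard org.apache.hadoop.mapred.Reducer so that it can run as a Flink GroupReduceFunction on a grouped DataSet. The following minimal sketch illustrates the pattern; SumReducer and wordCounts are hypothetical names introduced for illustration, not part of the Flink test suite.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceFunction;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

import java.io.IOException;
import java.util.Iterator;

// Hypothetical Hadoop reducer that sums the IntWritable values per key.
public class SumReducer implements Reducer<Text, IntWritable, Text, IntWritable> {

	@Override
	public void reduce(Text key, Iterator<IntWritable> values,
			OutputCollector<Text, IntWritable> out, Reporter reporter) throws IOException {
		int sum = 0;
		while (values.hasNext()) {
			sum += values.next().get();
		}
		out.collect(key, new IntWritable(sum));
	}

	@Override
	public void configure(JobConf conf) { /* nothing to configure */ }

	@Override
	public void close() throws IOException { /* nothing to close */ }
}

The wrapped reducer is then applied like any other group-reduce function (wordCounts stands for any DataSet<Tuple2<Text, IntWritable>>):

DataSet<Tuple2<Text, IntWritable>> sums = wordCounts.
		groupBy(0).
		reduceGroup(new HadoopReduceFunction<Text, IntWritable, Text, IntWritable>(new SumReducer()));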

Example 1: testStandardGrouping

import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceFunction; // import the required class
@Test
public void testStandardGrouping() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<Tuple2<IntWritable, Text>> ds = HadoopTestData.getKVPairDataSet(env).
			map(new Mapper1());

	// Group by the integer key and count comments per group with the wrapped Hadoop reducer.
	DataSet<Tuple2<IntWritable, IntWritable>> commentCnts = ds.
			groupBy(0).
			reduceGroup(new HadoopReduceFunction<IntWritable, Text, IntWritable, IntWritable>(new CommentCntReducer()));

	String resultPath = tempFolder.newFile().toURI().toString();

	commentCnts.writeAsText(resultPath);
	env.execute();

	String expected = "(0,0)\n" +
			"(1,3)\n" +
			"(2,5)\n" +
			"(3,5)\n" +
			"(4,2)\n";

	compareResultsByLinesInMemory(expected, resultPath);
}
 
Developer: axbaretto | Project: flink | Source: HadoopReduceFunctionITCase.java
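The test relies on a CommentCntReducer helper defined elsewhere in the test class. Judging from the test data and expected output, it counts, per key, the Text values that represent comments. A plausible sketch follows; the startsWith("Comment") condition is an assumption based on the expected counts, not code copied from the Flink sources (imports as in the orientation sketch above).

public static class CommentCntReducer implements Reducer<IntWritable, Text, IntWritable, IntWritable> {

	@Override
	public void reduce(IntWritable key, Iterator<Text> values,
			OutputCollector<IntWritable, IntWritable> out, Reporter reporter) throws IOException {
		int commentCnt = 0;
		while (values.hasNext()) {
			// Assumption: comment records start with the literal "Comment".
			if (values.next().toString().startsWith("Comment")) {
				commentCnt++;
			}
		}
		out.collect(key, new IntWritable(commentCnt));
	}

	@Override
	public void configure(JobConf conf) { /* no configuration needed */ }

	@Override
	public void close() throws IOException { /* nothing to close */ }
}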

Example 2: testConfigurationViaJobConf

import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceFunction; // import the required class
@Test
public void testConfigurationViaJobConf() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	// Put a custom parameter into the JobConf; the wrapped reducer reads it in configure().
	JobConf conf = new JobConf();
	conf.set("my.cntPrefix", "Hello");

	DataSet<Tuple2<IntWritable, Text>> ds = HadoopTestData.getKVPairDataSet(env).
			map(new Mapper2());

	DataSet<Tuple2<IntWritable, IntWritable>> helloCnts = ds.
			groupBy(0).
			reduceGroup(new HadoopReduceFunction<IntWritable, Text, IntWritable, IntWritable>(
					new ConfigurableCntReducer(), conf));

	String resultPath = tempFolder.newFile().toURI().toString();

	helloCnts.writeAsText(resultPath);
	env.execute();

	String expected = "(0,0)\n" +
			"(1,0)\n" +
			"(2,1)\n" +
			"(3,1)\n" +
			"(4,1)\n";

	compareResultsByLinesInMemory(expected, resultPath);
}
 
Developer: axbaretto | Project: flink | Source: HadoopReduceFunctionITCase.java
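This test (and the next one, which differs only in its mapper and source file) uses the second constructor of HadoopReduceFunction, which additionally takes a JobConf. The wrapper forwards that JobConf to the reducer's configure() method, which is where "my.cntPrefix" is read. A plausible sketch of the ConfigurableCntReducer helper follows; the exact counting logic is an assumption, not code copied from the Flink sources.

public static class ConfigurableCntReducer implements Reducer<IntWritable, Text, IntWritable, IntWritable> {

	private String countPrefix;

	@Override
	public void configure(JobConf conf) {
		// The JobConf passed to the HadoopReduceFunction constructor arrives here.
		this.countPrefix = conf.get("my.cntPrefix");
	}

	@Override
	public void reduce(IntWritable key, Iterator<Text> values,
			OutputCollector<IntWritable, IntWritable> out, Reporter reporter) throws IOException {
		int cnt = 0;
		while (values.hasNext()) {
			// Count only values that start with the configured prefix ("Hello" in the test).
			if (values.next().toString().startsWith(this.countPrefix)) {
				cnt++;
			}
		}
		out.collect(key, new IntWritable(cnt));
	}

	@Override
	public void close() throws IOException { /* nothing to close */ }
}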

Example 3: testConfigurationViaJobConf

import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceFunction; // import the required class
@Test
public void testConfigurationViaJobConf() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	JobConf conf = new JobConf();
	conf.set("my.cntPrefix", "Hello");

	DataSet<Tuple2<IntWritable, Text>> ds = HadoopTestData.getKVPairDataSet(env).
			map(new Mapper4());

	DataSet<Tuple2<IntWritable, IntWritable>> hellos = ds.
			groupBy(0).
			reduceGroup(new HadoopReduceFunction<IntWritable, Text, IntWritable, IntWritable>(
					new ConfigurableCntReducer(), conf));

	String resultPath = tempFolder.newFile().toURI().toString();

	hellos.writeAsText(resultPath);
	env.execute();

	// expected result
	String expected = "(0,0)\n" +
			"(1,0)\n" +
			"(2,1)\n" +
			"(3,1)\n" +
			"(4,1)\n";

	compareResultsByLinesInMemory(expected, resultPath);
}
 
Developer: axbaretto | Project: flink | Source: HadoopReduceCombineFunctionITCase.java
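Note the source file: this variant is taken from HadoopReduceCombineFunctionITCase, the test suite for the sibling wrapper HadoopReduceCombineFunction, which accepts a Hadoop combiner in addition to the reducer. A hedged sketch of that usage follows, reusing the hypothetical SumReducer and wordCounts from the orientation example; check the Flink sources for the authoritative constructor signatures.

import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceCombineFunction;

// The combiner must map KEYIN/VALUEIN back to KEYIN/VALUEIN, so a sum
// reducer can double as its own combiner.
DataSet<Tuple2<Text, IntWritable>> sums = wordCounts.
		groupBy(0).
		reduceGroup(new HadoopReduceCombineFunction<Text, IntWritable, Text, IntWritable>(
				new SumReducer(), new SumReducer()));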

Example 4: testUngroupedHadoopReducer

import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceFunction; // import the required class
@Test
public void testUngroupedHadoopReducer() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<Tuple2<IntWritable, Text>> ds = HadoopTestData.getKVPairDataSet(env);

	// Without a preceding groupBy(), the whole DataSet is passed to the reducer as one group.
	DataSet<Tuple2<IntWritable, IntWritable>> commentCnts = ds.
			reduceGroup(new HadoopReduceFunction<IntWritable, Text, IntWritable, IntWritable>(new AllCommentCntReducer()));

	String resultPath = tempFolder.newFile().toURI().toString();

	commentCnts.writeAsText(resultPath);
	env.execute();

	String expected = "(42,15)\n";

	compareResultsByLinesInMemory(expected, resultPath);
}
 
Developer: axbaretto | Project: flink | Source: HadoopReduceFunctionITCase.java
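Since there is no groupBy(), the entire DataSet forms a single group and the result is a single tuple. Judging from the expected output (42,15), the AllCommentCntReducer helper emits its total under a fixed key. A plausible sketch follows; the fixed key 42 and the "Comment" match are assumptions derived from the expected output, not code copied from the Flink sources.

public static class AllCommentCntReducer implements Reducer<IntWritable, Text, IntWritable, IntWritable> {

	@Override
	public void reduce(IntWritable key, Iterator<Text> values,
			OutputCollector<IntWritable, IntWritable> out, Reporter reporter) throws IOException {
		int commentCnt = 0;
		while (values.hasNext()) {
			// Assumption: comment records start with the literal "Comment".
			if (values.next().toString().startsWith("Comment")) {
				commentCnt++;
			}
		}
		// Assumption: the total is emitted under the fixed key 42.
		out.collect(new IntWritable(42), new IntWritable(commentCnt));
	}

	@Override
	public void configure(JobConf conf) { /* no configuration needed */ }

	@Override
	public void close() throws IOException { /* nothing to close */ }
}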


Note: The org.apache.flink.hadoopcompatibility.mapred.HadoopReduceFunction examples in this article were collected from open-source code hosted on GitHub and similar platforms. The copyright of each snippet remains with its original author, and any use or redistribution is subject to the corresponding project's license.