Java ReduceFunction Class Code Examples

This article collects typical usage examples of the Java class org.apache.flink.api.common.functions.ReduceFunction. If you are wondering what the ReduceFunction class does, or how to use it in your own code, the curated examples below should help.


The ReduceFunction class belongs to the org.apache.flink.api.common.functions package. The sections below present 14 code examples of the class, ordered by popularity.
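
For orientation, here is a minimal, self-contained sketch of the ReduceFunction contract: a single reduce(T, T) method that combines two values of the same type into one, which Flink applies repeatedly until one value remains. The class name and sample data are illustrative, not taken from the examples below.

import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

public class ReduceFunctionSketch {
	public static void main(String[] args) throws Exception {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		// Sum all elements of the DataSet with a full (non-grouped) reduce.
		DataSet<Integer> sum = env.fromElements(1, 2, 3, 4)
			.reduce(new ReduceFunction<Integer>() {
				@Override
				public Integer reduce(Integer a, Integer b) {
					return a + b;
				}
			});

		sum.print(); // prints 10
	}
}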

Example 1: testTableInputFormat

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
@Test
public void testTableInputFormat() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(4);

	DataSet<Tuple1<Integer>> result = env
		.createInput(new InputFormatForTestTable())
		.reduce(new ReduceFunction<Tuple1<Integer>>(){

			@Override
			public Tuple1<Integer> reduce(Tuple1<Integer> v1, Tuple1<Integer> v2) throws Exception {
				return Tuple1.of(v1.f0 + v2.f0);
			}
		});

	List<Tuple1<Integer>> resultSet = result.collect();

	assertEquals(1, resultSet.size());
	assertEquals(360, (int) resultSet.get(0).f0);
}
 
Developer: axbaretto, Project: flink, Lines: 21, Source: HBaseConnectorITCase.java

Example 2: testProgram

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
@Override
protected void testProgram() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	DataSet<String> text = env.readTextFile(textPath);

	DataSet<WC> counts = text
			.flatMap(new Tokenizer())
			.groupBy("complex.someTest")
			.reduce(new ReduceFunction<WC>() {
				private static final long serialVersionUID = 1L;
				@Override
				public WC reduce(WC value1, WC value2) {
					return new WC(value1.complex.someTest, value1.count + value2.count);
				}
			});

	counts.writeAsText(resultPath);

	env.execute("WordCount with custom data types example");
}
 
Developer: axbaretto, Project: flink, Lines: 20, Source: WordCountNestedPOJOITCase.java

Example 3: testProgram

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
@Override
protected void testProgram() throws Exception {
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

	DataSet<String> text = env.readTextFile(textPath);

	DataSet<WC> counts = text
			.flatMap(new Tokenizer())
			.groupBy("word")
			.reduce(new ReduceFunction<WC>() {
				private static final long serialVersionUID = 1L;

				@Override
				public WC reduce(WC value1, WC value2) {
					return new WC(value1.word, value1.count + value2.count);
				}
			});

	counts.writeAsText(resultPath);

	env.execute("WordCount with custom data types example");
}
 
Developer: axbaretto, Project: flink, Lines: 22, Source: WordCountSimplePOJOITCase.java

Example 4: testKeyedReduce

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
@Test
public void testKeyedReduce() throws Exception {
	
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	if (objectReuse) {
		env.getConfig().enableObjectReuse();
	} else {
		env.getConfig().disableObjectReuse();
	}

	DataSet<Tuple2<String, Integer>> input = env.fromCollection(REDUCE_DATA);
	
	DataSet<Tuple2<String, Integer>> result = input
		.groupBy(0)
		.reduce(new ReduceFunction<Tuple2<String, Integer>>() {

			@Override
			public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1, Tuple2<String, Integer> value2) {
				value2.f1 += value1.f1;
				return value2;
			}
		});

	Tuple2<String, Integer> res = result.collect().get(0);
	assertEquals(new Tuple2<>("a", 60), res);
}
 
Developer: axbaretto, Project: flink, Lines: 27, Source: ObjectReuseITCase.java

Example 5: testProgram

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
@Override
protected void testProgram() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(4);

	DataSet<String> initialInput = env.fromElements("1", "1", "1", "1", "1", "1", "1", "1");

	IterativeDataSet<String> iteration = initialInput.iterate(5).name("Loop");

	DataSet<String> sumReduce = iteration.reduce(new ReduceFunction<String>(){
		@Override
		public String reduce(String value1, String value2) throws Exception {
			return value1;
		}
	}).name("Compute sum (Reduce)");

	List<String> result = iteration.closeWith(sumReduce).collect();

	compareResultAsText(result, EXPECTED);
}
 
Developer: axbaretto, Project: flink, Lines: 21, Source: IterationWithAllReducerITCase.java

Example 6: summarize

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
/**
 * Summarize a DataSet of Tuples by collecting single pass statistics for all columns.
 *
 * <p>Example usage:
 * <pre>
 * {@code
 * DataSet<Tuple3<Double, String, Boolean>> input = // [...]
 * Tuple3<NumericColumnSummary, StringColumnSummary, BooleanColumnSummary> summary = DataSetUtils.summarize(input);
 *
 * summary.f0.getStandardDeviation()
 * summary.f1.getMaxLength()
 * }
 * </pre>
 * @return the summary as a Tuple the same width as input rows
 */
public static <R extends Tuple, T extends Tuple> R summarize(DataSet<T> input) throws Exception {
	if (!input.getType().isTupleType()) {
		throw new IllegalArgumentException("summarize() is only implemented for DataSet's of Tuples");
	}
	final TupleTypeInfoBase<?> inType = (TupleTypeInfoBase<?>) input.getType();
	DataSet<TupleSummaryAggregator<R>> result = input.mapPartition(new MapPartitionFunction<T, TupleSummaryAggregator<R>>() {
		@Override
		public void mapPartition(Iterable<T> values, Collector<TupleSummaryAggregator<R>> out) throws Exception {
			TupleSummaryAggregator<R> aggregator = SummaryAggregatorFactory.create(inType);
			for (Tuple value : values) {
				aggregator.aggregate(value);
			}
			out.collect(aggregator);
		}
	}).reduce(new ReduceFunction<TupleSummaryAggregator<R>>() {
		@Override
		public TupleSummaryAggregator<R> reduce(TupleSummaryAggregator<R> agg1, TupleSummaryAggregator<R> agg2) throws Exception {
			agg1.combine(agg2);
			return agg1;
		}
	});
	return result.collect().get(0).result();
}
 
Developer: axbaretto, Project: flink, Lines: 39, Source: DataSetUtils.java
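
For context, a hedged sketch of how summarize might be called from user code, following the javadoc above; the sample data and printed fields are illustrative, and the column-summary types are assumed to live in org.apache.flink.api.java.summarize.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.summarize.NumericColumnSummary;
import org.apache.flink.api.java.summarize.StringColumnSummary;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.DataSetUtils;

public class SummarizeSketch {
	public static void main(String[] args) throws Exception {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		DataSet<Tuple2<Double, String>> input = env.fromElements(
				Tuple2.of(1.0, "a"), Tuple2.of(2.0, "bb"), Tuple2.of(3.0, "ccc"));

		// One column summary per tuple field, returned as a tuple of the same width.
		Tuple2<NumericColumnSummary<Double>, StringColumnSummary> summary =
				DataSetUtils.summarize(input);

		System.out.println(summary.f0.getStandardDeviation());
		System.out.println(summary.f1.getMaxLength());
	}
}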

Example 7: translateSelectorFunctionReducer

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
private static <T, K> org.apache.flink.api.common.operators.SingleInputOperator<?, T, ?> translateSelectorFunctionReducer(
	SelectorFunctionKeys<T, ?> rawKeys,
	ReduceFunction<T> function,
	TypeInformation<T> inputType,
	String name,
	Operator<T> input,
	int parallelism,
	CombineHint hint) {
	@SuppressWarnings("unchecked")
	final SelectorFunctionKeys<T, K> keys = (SelectorFunctionKeys<T, K>) rawKeys;

	TypeInformation<Tuple2<K, T>> typeInfoWithKey = KeyFunctions.createTypeWithKey(keys);
	Operator<Tuple2<K, T>> keyedInput = KeyFunctions.appendKeyExtractor(input, keys);

	PlanUnwrappingReduceOperator<T, K> reducer = new PlanUnwrappingReduceOperator<>(function, keys, name, inputType, typeInfoWithKey);
	reducer.setInput(keyedInput);
	reducer.setParallelism(parallelism);
	reducer.setCombineHint(hint);

	return KeyFunctions.appendKeyRemover(reducer, keys);
}
 
Developer: axbaretto, Project: flink, Lines: 22, Source: ReduceOperator.java

Example 8: translateSelectorFunctionDistinct

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
private static <IN, K> org.apache.flink.api.common.operators.SingleInputOperator<?, IN, ?> translateSelectorFunctionDistinct(
		SelectorFunctionKeys<IN, ?> rawKeys,
		ReduceFunction<IN> function,
		TypeInformation<IN> outputType,
		String name,
		Operator<IN> input,
		int parallelism,
		CombineHint hint) {
	@SuppressWarnings("unchecked")
	final SelectorFunctionKeys<IN, K> keys = (SelectorFunctionKeys<IN, K>) rawKeys;

	TypeInformation<Tuple2<K, IN>> typeInfoWithKey = KeyFunctions.createTypeWithKey(keys);
	Operator<Tuple2<K, IN>> keyedInput = KeyFunctions.appendKeyExtractor(input, keys);

	PlanUnwrappingReduceOperator<IN, K> reducer =
			new PlanUnwrappingReduceOperator<>(function, keys, name, outputType, typeInfoWithKey);
	reducer.setInput(keyedInput);
	reducer.setCombineHint(hint);
	reducer.setParallelism(parallelism);

	return KeyFunctions.appendKeyRemover(reducer, keys);
}
 
Developer: axbaretto, Project: flink, Lines: 23, Source: DistinctOperator.java

Example 9: testValueStateDescriptorEagerSerializer

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
@Test
public void testValueStateDescriptorEagerSerializer() throws Exception {

	@SuppressWarnings("unchecked")
	ReduceFunction<String> reducer = mock(ReduceFunction.class); 
	
	TypeSerializer<String> serializer = new KryoSerializer<>(String.class, new ExecutionConfig());
	
	ReducingStateDescriptor<String> descr = 
			new ReducingStateDescriptor<String>("testName", reducer, serializer);
	
	assertEquals("testName", descr.getName());
	assertNotNull(descr.getSerializer());
	assertEquals(serializer, descr.getSerializer());

	ReducingStateDescriptor<String> copy = CommonTestUtils.createCopySerializable(descr);

	assertEquals("testName", copy.getName());
	assertNotNull(copy.getSerializer());
	assertEquals(serializer, copy.getSerializer());
}
 
Developer: axbaretto, Project: flink, Lines: 22, Source: ReducingStateDescriptorTest.java

Example 10: testValueStateDescriptorLazySerializer

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
@Test
public void testValueStateDescriptorLazySerializer() throws Exception {

	@SuppressWarnings("unchecked")
	ReduceFunction<Path> reducer = mock(ReduceFunction.class);
	
	// some different registered value
	ExecutionConfig cfg = new ExecutionConfig();
	cfg.registerKryoType(TaskInfo.class);

	ReducingStateDescriptor<Path> descr =
			new ReducingStateDescriptor<Path>("testName", reducer, Path.class);

	try {
		descr.getSerializer();
		fail("should cause an exception");
	} catch (IllegalStateException ignored) {}

	descr.initializeSerializerUnlessSet(cfg);
	
	assertNotNull(descr.getSerializer());
	assertTrue(descr.getSerializer() instanceof KryoSerializer);

	assertTrue(((KryoSerializer<?>) descr.getSerializer()).getKryo().getRegistration(TaskInfo.class).getId() > 0);
}
 
Developer: axbaretto, Project: flink, Lines: 26, Source: ReducingStateDescriptorTest.java

Example 11: testSerializerDuplication

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
/**
 * FLINK-6775
 *
 * Tests that the returned serializer is duplicated. This allows the
 * state descriptor to be shared.
 */
@SuppressWarnings("unchecked")
@Test
public void testSerializerDuplication() {
	TypeSerializer<String> statefulSerializer = mock(TypeSerializer.class);
	when(statefulSerializer.duplicate()).thenAnswer(new Answer<TypeSerializer<String>>() {
		@Override
		public TypeSerializer<String> answer(InvocationOnMock invocation) throws Throwable {
			return mock(TypeSerializer.class);
		}
	});

	ReduceFunction<String> reducer = mock(ReduceFunction.class);

	ReducingStateDescriptor<String> descr = new ReducingStateDescriptor<>("foobar", reducer, statefulSerializer);

	TypeSerializer<String> serializerA = descr.getSerializer();
	TypeSerializer<String> serializerB = descr.getSerializer();

	// check that the retrieved serializers are not the same
	assertNotSame(serializerA, serializerB);
}
 
Developer: axbaretto, Project: flink, Lines: 28, Source: ReducingStateDescriptorTest.java

Example 12: ReduceFacade

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
public ReduceFacade(ReduceFunction<T> reducer, Collector<T> outputCollector, boolean objectReuseEnabled) {
	this.reducer = reducer;
	this.outputCollector = outputCollector;
	this.objectReuseEnabled = objectReuseEnabled;
	this.prober = getProber(buildSideComparator, new SameTypePairComparator<>(buildSideComparator));
	this.reuse = buildSideSerializer.createInstance();
}
 
Developer: axbaretto, Project: flink, Lines: 8, Source: InPlaceMutableHashTable.java

Example 13: testAllReduceDriverImmutableEmpty

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
@Test
public void testAllReduceDriverImmutableEmpty() {
	try {
		TestTaskContext<ReduceFunction<Tuple2<String, Integer>>, Tuple2<String, Integer>> context =
				new TestTaskContext<ReduceFunction<Tuple2<String,Integer>>, Tuple2<String,Integer>>();
		
		List<Tuple2<String, Integer>> data = DriverTestData.createReduceImmutableData();
		TypeInformation<Tuple2<String, Integer>> typeInfo = TypeExtractor.getForObject(data.get(0));
		MutableObjectIterator<Tuple2<String, Integer>> input = EmptyMutableObjectIterator.get();
		context.setDriverStrategy(DriverStrategy.ALL_REDUCE);
		
		context.setInput1(input, typeInfo.createSerializer(new ExecutionConfig()));
		context.setCollector(new DiscardingOutputCollector<Tuple2<String, Integer>>());
		
		AllReduceDriver<Tuple2<String, Integer>> driver = new AllReduceDriver<Tuple2<String,Integer>>();
		driver.setup(context);
		driver.prepare();
		driver.run();
	}
	catch (Exception e) {
		System.err.println(e.getMessage());
		e.printStackTrace();
		Assert.fail(e.getMessage());
	}
}
 
Developer: axbaretto, Project: flink, Lines: 26, Source: AllReduceDriverTest.java

Example 14: reduce

import org.apache.flink.api.common.functions.ReduceFunction; // import the required package/class
/**
 * Applies a reduce function to the window. The window function is called for each evaluation
 * of the window for each key individually. The output of the reduce function is interpreted
 * as a regular non-windowed stream.
 *
 * <p>This window will try to incrementally aggregate data as much as the window policies permit.
 * For example, tumbling time windows can aggregate the data, meaning that only one element per
 * key is stored. Sliding time windows will aggregate on the granularity of the slide interval,
 * so a few elements are stored per key (one per slide interval).
 * Custom windows may not be able to incrementally aggregate, or may need to store extra values
 * in an aggregation tree.
 *
 * @param function The reduce function.
 * @return The data stream that is the result of applying the reduce function to the window.
 */
@SuppressWarnings("unchecked")
public SingleOutputStreamOperator<T> reduce(ReduceFunction<T> function) {
	if (function instanceof RichFunction) {
		throw new UnsupportedOperationException("ReduceFunction of reduce can not be a RichFunction. " +
				"Please use reduce(ReduceFunction, WindowFunction) instead.");
	}

	//clean the closure
	function = input.getExecutionEnvironment().clean(function);

	String callLocation = Utils.getCallLocationName();
	String udfName = "AllWindowedStream." + callLocation;

	return reduce(function, new PassThroughAllWindowFunction<W, T>());
}
 
Developer: axbaretto, Project: flink, Lines: 31, Source: AllWindowedStream.java
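
A hedged usage sketch for the method above: a non-keyed stream reduced over tumbling processing-time windows, so each window emits one incrementally aggregated sum. The window size and sample data are illustrative.

import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class WindowReduceSketch {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		DataStream<Integer> nums = env.fromElements(1, 2, 3, 4, 5);

		// Incrementally sum each window; only the running sum is kept, not the elements.
		nums.windowAll(TumblingProcessingTimeWindows.of(Time.seconds(5)))
			.reduce(new ReduceFunction<Integer>() {
				@Override
				public Integer reduce(Integer a, Integer b) {
					return a + b;
				}
			})
			.print();

		env.execute("windowed reduce sketch");
	}
}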


Note: The org.apache.flink.api.common.functions.ReduceFunction examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Please do not republish without permission.