本文整理汇总了Java中org.apache.beam.sdk.transforms.Combine.GroupedValues方法的典型用法代码示例。如果您正苦于以下问题:Java Combine.GroupedValues方法的具体用法?Java Combine.GroupedValues怎么用?Java Combine.GroupedValues使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.beam.sdk.transforms.Combine
的用法示例。
在下文中一共展示了Combine.GroupedValues方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: translateHelper
import org.apache.beam.sdk.transforms.Combine; //导入方法依赖的package包/类
/**
 * Translates a {@code CombineGroupedValues} primitive into a Dataflow "CombineValues" step.
 *
 * <p>Registers the step with the translation context, wires up the main input and any side
 * inputs of the wrapped {@link Combine.GroupedValues}, records the accumulator coder, attaches
 * the serialized combine fn, and declares the step's output.
 *
 * @param primitiveTransform the runner-internal primitive wrapping the user's combine
 * @param context translation context supplying inputs, outputs, and step registration
 */
private <K, InputT, OutputT> void translateHelper(
    final CombineGroupedValues<K, InputT, OutputT> primitiveTransform,
    TranslationContext context) {
  // Recover the user-level Combine.GroupedValues that this primitive wraps.
  Combine.GroupedValues<K, InputT, OutputT> combine = primitiveTransform.getOriginalCombine();

  StepTranslationContext step = context.addStep(primitiveTransform, "CombineValues");
  translateInputs(step, context.getInput(primitiveTransform), combine.getSideInputs(), context);

  // Resolve the combine fn against the input's coder and windowing so the accumulator
  // coder below is the one the service will actually use.
  AppliedCombineFn<? super K, ? super InputT, ?, OutputT> appliedFn =
      combine.getAppliedFn(
          context.getInput(primitiveTransform).getPipeline().getCoderRegistry(),
          context.getInput(primitiveTransform).getCoder(),
          context.getInput(primitiveTransform).getWindowingStrategy());
  step.addEncodingInput(appliedFn.getAccumulatorCoder());

  // Ship the fn to the service as a JSON-encoded byte array.
  String serializedFn = byteArrayToJsonString(serializeToByteArray(appliedFn));
  step.addInput(PropertyNames.SERIALIZED_FN, serializedFn);

  step.addOutput(context.getOutput(primitiveTransform));
}
示例2: combineGrouped
import org.apache.beam.sdk.transforms.Combine; //导入方法依赖的package包/类
/**
 * Builds the batch (bounded) Spark evaluator for {@link Combine.GroupedValues}: each windowed
 * {@code KV<K, Iterable<InputT>>} element is mapped to a windowed {@code KV<K, OutputT>} by
 * applying the combine fn per key, preserving timestamp, windows, and pane.
 */
private static <K, InputT, OutputT> TransformEvaluator<Combine.GroupedValues<K, InputT, OutputT>>
    combineGrouped() {
  return new TransformEvaluator<Combine.GroupedValues<K, InputT, OutputT>>() {
    @Override
    public void evaluate(
        Combine.GroupedValues<K, InputT, OutputT> transform, EvaluationContext context) {
      // Normalize the user's fn to the with-context form so side inputs can be supplied.
      @SuppressWarnings("unchecked")
      CombineWithContext.CombineFnWithContext<InputT, ?, OutputT> fnWithContext =
          (CombineWithContext.CombineFnWithContext<InputT, ?, OutputT>)
              CombineFnUtil.toFnWithContext(transform.getFn());
      final SparkKeyedCombineFn<K, InputT, ?, OutputT> keyedCombineFn =
          new SparkKeyedCombineFn<>(
              fnWithContext,
              context.getSerializableOptions(),
              TranslationUtils.getSideInputs(transform.getSideInputs(), context),
              context.getInput(transform).getWindowingStrategy());

      @SuppressWarnings("unchecked")
      JavaRDD<WindowedValue<KV<K, Iterable<InputT>>>> input =
          ((BoundedDataset<KV<K, Iterable<InputT>>>) context.borrowDataset(transform)).getRDD();

      JavaRDD<WindowedValue<KV<K, OutputT>>> output =
          input.map(
              new Function<
                  WindowedValue<KV<K, Iterable<InputT>>>, WindowedValue<KV<K, OutputT>>>() {
                @Override
                public WindowedValue<KV<K, OutputT>> call(
                    WindowedValue<KV<K, Iterable<InputT>>> windowedKv) throws Exception {
                  // Combine the grouped values for this key; keep the element's
                  // windowing metadata intact.
                  K key = windowedKv.getValue().getKey();
                  OutputT combined = keyedCombineFn.apply(windowedKv);
                  return WindowedValue.of(
                      KV.of(key, combined),
                      windowedKv.getTimestamp(),
                      windowedKv.getWindows(),
                      windowedKv.getPane());
                }
              });
      context.putDataset(transform, new BoundedDataset<>(output));
    }

    @Override
    public String toNativeString() {
      return "map(new <fn>())";
    }
  };
}
示例3: combineGrouped
import org.apache.beam.sdk.transforms.Combine; //导入方法依赖的package包/类
/**
 * Builds the streaming (unbounded) Spark evaluator for {@link Combine.GroupedValues}: each RDD
 * in the incoming DStream is mapped per micro-batch with a combine fn constructed against the
 * batch's Spark context, so side-input views can be resolved at execution time.
 */
private static <K, InputT, OutputT> TransformEvaluator<Combine.GroupedValues<K, InputT, OutputT>>
    combineGrouped() {
  return new TransformEvaluator<Combine.GroupedValues<K, InputT, OutputT>>() {
    @Override
    public void evaluate(
        final Combine.GroupedValues<K, InputT, OutputT> transform, EvaluationContext context) {
      PCollection<? extends KV<K, ? extends Iterable<InputT>>> input =
          context.getInput(transform);
      final WindowingStrategy<?, ?> windowingStrategy = input.getWindowingStrategy();

      // Normalize the user's fn to the with-context form so side inputs can be supplied.
      @SuppressWarnings("unchecked")
      final CombineWithContext.CombineFnWithContext<InputT, ?, OutputT> fnWithContext =
          (CombineWithContext.CombineFnWithContext<InputT, ?, OutputT>)
              CombineFnUtil.toFnWithContext(transform.getFn());

      @SuppressWarnings("unchecked")
      UnboundedDataset<KV<K, Iterable<InputT>>> dataset =
          ((UnboundedDataset<KV<K, Iterable<InputT>>>) context.borrowDataset(transform));
      JavaDStream<WindowedValue<KV<K, Iterable<InputT>>>> inputStream = dataset.getDStream();

      // Captured by the transform function below; must be effectively final/serializable.
      final SerializablePipelineOptions pipelineOptions = context.getSerializableOptions();
      final SparkPCollectionView views = context.getPViews();

      JavaDStream<WindowedValue<KV<K, OutputT>>> outputStream =
          inputStream.transform(
              new Function<
                  JavaRDD<WindowedValue<KV<K, Iterable<InputT>>>>,
                  JavaRDD<WindowedValue<KV<K, OutputT>>>>() {
                @Override
                public JavaRDD<WindowedValue<KV<K, OutputT>>> call(
                    JavaRDD<WindowedValue<KV<K, Iterable<InputT>>>> rdd) throws Exception {
                  // Side inputs are materialized against this micro-batch's Spark context.
                  SparkKeyedCombineFn<K, InputT, ?, OutputT> keyedCombineFn =
                      new SparkKeyedCombineFn<>(
                          fnWithContext,
                          pipelineOptions,
                          TranslationUtils.getSideInputs(
                              transform.getSideInputs(),
                              new JavaSparkContext(rdd.context()),
                              views),
                          windowingStrategy);
                  return rdd.map(new TranslationUtils.CombineGroupedValues<>(keyedCombineFn));
                }
              });

      context.putDataset(
          transform, new UnboundedDataset<>(outputStream, dataset.getStreamSources()));
    }

    @Override
    public String toNativeString() {
      return "map(new <fn>())";
    }
  };
}
示例4: getOriginalCombine
import org.apache.beam.sdk.transforms.Combine; //导入方法依赖的package包/类
/**
 * Returns the user-level {@link Combine.GroupedValues} transform that this primitive wraps.
 *
 * @return the original {@code Combine.GroupedValues} stored in {@code original}
 */
public Combine.GroupedValues<K, InputT, OutputT> getOriginalCombine() {
return original;
}