本文整理匯總了Java中org.apache.spark.streaming.api.java.JavaStreamingContext.ssc方法的典型用法代碼示例。如果您正苦於以下問題:Java JavaStreamingContext.ssc方法的具體用法?Java JavaStreamingContext.ssc怎麽用?Java JavaStreamingContext.ssc使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.spark.streaming.api.java.JavaStreamingContext的用法示例。
在下文中一共展示了JavaStreamingContext.ssc方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: read
import org.apache.spark.streaming.api.java.JavaStreamingContext; //導入方法依賴的package包/類
/**
 * Builds an {@link UnboundedDataset} that reads from the given {@link UnboundedSource}
 * as a Spark DStream, carrying the source's {@link CheckpointMark} across micro-batches
 * via {@code mapWithState}.
 *
 * @param jssc the Java streaming context; its underlying Scala context hosts the stream
 * @param rc serialized pipeline options (read lazily on workers)
 * @param source the unbounded source to read from
 * @param stepName the pipeline step name, used for state functions and reporting
 * @return the deserialized stream wrapped as an {@link UnboundedDataset}
 */
public static <T, CheckpointMarkT extends CheckpointMark> UnboundedDataset<T> read(
JavaStreamingContext jssc,
SerializablePipelineOptions rc,
UnboundedSource<T, CheckpointMarkT> source,
String stepName) {
  final SparkPipelineOptions pipelineOptions = rc.get().as(SparkPipelineOptions.class);

  // Wrap the source as a DStream of (source, checkpoint-mark) pairs on the underlying
  // Scala streaming context, honoring the configured per-batch record cap.
  final SourceDStream<T, CheckpointMarkT> sourceDStream =
      new SourceDStream<>(jssc.ssc(), source, rc, pipelineOptions.getMaxRecordsPerBatch());
  final JavaPairInputDStream<Source<T>, CheckpointMarkT> inputDStream =
      JavaPairInputDStream$.MODULE$.fromInputDStream(
          sourceDStream,
          JavaSparkContext$.MODULE$.<Source<T>>fakeClassTag(),
          JavaSparkContext$.MODULE$.<CheckpointMarkT>fakeClassTag());

  // mapWithState makes the read checkpointable: the CheckpointMark is kept as Spark
  // state between micro-batches. Partition count mirrors the source stream's.
  final JavaMapWithStateDStream<Source<T>, CheckpointMarkT, Tuple2<byte[], Instant>,
      Tuple2<Iterable<byte[]>, Metadata>> mapWithStateDStream =
          inputDStream.mapWithState(
              StateSpec
                  .function(StateSpecFunctions.<T, CheckpointMarkT>mapSourceFunction(rc, stepName))
                  .numPartitions(sourceDStream.getNumPartitions()));

  // Apply the checkpoint duration to the stateful read stream, if one was configured.
  checkpointStream(mapWithStateDStream, pipelineOptions);

  // Report this InputDStream's element counts to Spark's InputInfoTracker.
  final int id = inputDStream.inputDStream().id();
  final JavaDStream<Metadata> metadataDStream =
      mapWithStateDStream.map(new Tuple2MetadataFunction());
  new ReadReportDStream(metadataDStream.dstream(), id, getSourceName(source, id), stepName)
      .register();

  // Decode the raw byte payloads back into windowed values using the source's
  // output coder in the global window.
  final WindowedValue.FullWindowedValueCoder<T> outputCoder =
      WindowedValue.FullWindowedValueCoder.of(
          source.getOutputCoder(), GlobalWindow.Coder.INSTANCE);
  final JavaDStream<WindowedValue<T>> readUnboundedStream =
      mapWithStateDStream
          .flatMap(new Tuple2byteFlatMapFunction())
          .map(CoderHelpers.fromByteFunction(outputCoder));

  return new UnboundedDataset<>(readUnboundedStream, Collections.singletonList(id));
}
示例2: PubsubInputDStream
import org.apache.spark.streaming.api.java.JavaStreamingContext; //導入方法依賴的package包/類
/**
 * Constructs a Pub/Sub input DStream on the given streaming context's underlying
 * Scala {@code StreamingContext}, backed by a {@link PubsubReceiver}.
 *
 * @param _jssc the Java streaming context; {@code ssc()} unwraps the Scala context the superclass needs
 * @param _subscription the Pub/Sub subscription the receiver pulls from
 * @param _batchSize number of messages the receiver requests per pull — TODO confirm against PubsubReceiver
 * @param _decodeData whether the receiver should decode message payloads — presumably base64; verify in PubsubReceiver
 */
public PubsubInputDStream(final JavaStreamingContext _jssc, final String _subscription, final Integer _batchSize,
final boolean _decodeData) {
super(_jssc.ssc(), new PubsubReceiver(_subscription, _batchSize, _decodeData), STRING_CLASS_TAG);
}
示例3: createStream
import org.apache.spark.streaming.api.java.JavaStreamingContext; //導入方法依賴的package包/類
/**
 * Creates a receiver-based input DStream of Facebook API responses.
 *
 * @param jssc the Java streaming context; its underlying Scala context hosts the stream
 * @param accessToken Facebook Graph API access token
 * @param batchRequestBuilders the batch requests to execute per receiver cycle
 * @return a {@code ReceiverInputDStream} of response strings, stored at MEMORY_AND_DISK_2
 */
public static ReceiverInputDStream<String> createStream(JavaStreamingContext jssc, String accessToken,
BatchRequestBuilder[] batchRequestBuilders) {
  // Replicated memory+disk storage so receiver data survives a single executor loss.
  final FacebookInputDStream facebookStream = new FacebookInputDStream(
      jssc.ssc(), accessToken, batchRequestBuilders, StorageLevel.MEMORY_AND_DISK_2());
  return facebookStream;
}