This article collects typical usage examples of the Java method org.apache.spark.streaming.api.java.JavaStreamingContext.addStreamingListener. If you are unsure what JavaStreamingContext.addStreamingListener does or how to call it, the curated code examples below should help; you can also read further about the enclosing class, org.apache.spark.streaming.api.java.JavaStreamingContext.
The following shows 4 code examples of JavaStreamingContext.addStreamingListener, ordered by popularity by default.
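For reference, a listener registered through addStreamingListener implements org.apache.spark.streaming.scheduler.StreamingListener. Below is a minimal sketch of such a listener; the class name and log output are illustrative, not taken from the examples. On Spark 1.x every callback of the trait must be overridden explicitly from Java, and later Spark versions add further callbacks (such as onStreamingStarted) that may also need overrides.

import org.apache.spark.streaming.scheduler.*;

public class LoggingStreamingListener implements StreamingListener {
    @Override
    public void onBatchCompleted(StreamingListenerBatchCompleted completed) {
        // Log the batch time and record count after each completed batch
        System.out.println("Completed batch " + completed.batchInfo().batchTime()
                + ", records: " + completed.batchInfo().numRecords());
    }
    // The remaining callbacks are left as no-ops
    @Override public void onReceiverStarted(StreamingListenerReceiverStarted started) {}
    @Override public void onReceiverError(StreamingListenerReceiverError error) {}
    @Override public void onReceiverStopped(StreamingListenerReceiverStopped stopped) {}
    @Override public void onBatchSubmitted(StreamingListenerBatchSubmitted submitted) {}
    @Override public void onBatchStarted(StreamingListenerBatchStarted started) {}
    @Override public void onOutputOperationStarted(StreamingListenerOutputOperationStarted started) {}
    @Override public void onOutputOperationCompleted(StreamingListenerOutputOperationCompleted completed) {}
}

Such a listener is registered before the context starts, e.g. jssc.addStreamingListener(new LoggingStreamingListener());, as each of the examples below does with its own listener class.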
Example 1: main
import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on
public static void main(String[] args) {
    // String inputFile = StreamKMeans.class.getClassLoader().getResource("centroids.txt").getFile();
    SparkConf sparkConf = new SparkConf().setMaster("spark://master:7077").setAppName("JavaKMeans");
    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.milliseconds(1000));

    HashSet<String> topicsSet = new HashSet<>();
    topicsSet.add("KMeans");
    HashMap<String, String> kafkaParams = new HashMap<>();
    // kafkaParams.put("metadata.broker.list", "kafka1:9092,kafka2:9092,kafka3:9092");
    kafkaParams.put("metadata.broker.list", "localhost:9092");
    kafkaParams.put("auto.offset.reset", "largest");
    kafkaParams.put("zookeeper.connect", "zoo1:2181");
    kafkaParams.put("group.id", "spark");

    // Create a direct Kafka stream with the configured brokers and topics
    JavaPairInputDStream<String, String> lines = KafkaUtils.createDirectStream(
            jssc,
            String.class,
            String.class,
            StringDecoder.class,
            StringDecoder.class,
            kafkaParams,
            topicsSet
    );

    JavaDStream<Vector> points = lines.map(new ParseKafkaString()).map(new ParsePoint());

    // 96 clusters with uniformly weighted initial centers
    Vector[] initCentroids = loadInitCentroids();
    double[] weights = new double[96];
    for (int i = 0; i < 96; i++) {
        weights[i] = 1.0 / 96;
    }
    final StreamingKMeans model = new StreamingKMeans()
            .setK(96)
            .setDecayFactor(0)
            .setInitialCenters(initCentroids, weights);
    model.trainOn(points);

    // Log the first two coordinates of each cluster center after every batch
    points.foreachRDD(new Function2<JavaRDD<Vector>, Time, Void>() {
        @Override
        public Void call(JavaRDD<Vector> vectorJavaRDD, Time time) throws Exception {
            Vector[] centers = model.latestModel().clusterCenters();
            for (int i = 0; i < centers.length; i++) {
                logger.warn(centers[i].toArray()[0] + "\t" + centers[i].toArray()[1]);
            }
            return null;
        }
    });

    jssc.addStreamingListener(new PerformanceStreamingListener());
    jssc.start();
    jssc.awaitTermination();
}
Example 2: main
import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("Stateful Network Word Count");
    JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(1));
    // updateStateByKey requires a checkpoint directory
    ssc.checkpoint("checkpoint");
    ssc.addStreamingListener(new PerformanceStreamingListener());

    // Feed this locally with e.g.: nc -lk 9999
    JavaReceiverInputDStream<String> lines = ssc.socketTextStream("127.0.0.1", 9999);
    JavaPairDStream<String, Long> wordCounts = lines
            .flatMap(new FlatMapFunction<String, String>() {
                public Iterable<String> call(String l) throws Exception {
                    return Arrays.asList(l.split(" "));
                }
            })
            .mapToPair(new PairFunction<String, String, Long>() {
                public Tuple2<String, Long> call(String w) throws Exception {
                    return new Tuple2<>(w, 1L);
                }
            })
            .reduceByKey(new Function2<Long, Long, Long>() {
                @Override
                public Long call(Long aLong, Long aLong2) throws Exception {
                    return aLong + aLong2;
                }
            })
            // Fold each batch's counts into the running total kept per key
            .updateStateByKey(new Function2<List<Long>, Optional<Long>, Optional<Long>>() {
                public Optional<Long> call(List<Long> values, Optional<Long> state) throws Exception {
                    if (values == null || values.isEmpty()) {
                        return state;
                    }
                    long sum = 0L;
                    for (Long v : values) {
                        sum += v;
                    }
                    return Optional.of(state.or(0L) + sum);
                }
            });
    // An alternative state update over Iterable values, kept from the original:
    // .updateStateByKey(new Function2<List<Iterable<Long>>, Optional<Long>, Optional<Long>>() {
    //     @Override
    //     public Optional<Long> call(List<Iterable<Long>> iterables, Optional<Long> longOptional) throws Exception {
    //         if (iterables == null || iterables.isEmpty()) {
    //             return longOptional;
    //         }
    //         long sum = 0L;
    //         for (Iterable<Long> iterable : iterables) {
    //             for (Long l : iterable) {
    //                 sum += l;
    //             }
    //         }
    //         return Optional.of(longOptional.or(0L) + sum);
    //     }
    // });

    wordCounts.print();
    // No-op output operation, kept from the original example
    // (foreachRDD is the non-deprecated equivalent of the original foreach)
    wordCounts.foreachRDD(new Function2<JavaPairRDD<String, Long>, Time, Void>() {
        @Override
        public Void call(JavaPairRDD<String, Long> stringLongJavaPairRDD, Time time) throws Exception {
            return null;
        }
    });

    ssc.start();
    ssc.awaitTermination();
}
Example 3: createStream
import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on
private static <E> JavaDStream<MessageAndMetadata<E>> createStream(
        JavaStreamingContext jsc, Properties props, int numberOfReceivers,
        StorageLevel storageLevel, KafkaMessageHandler<E> messageHandler) {
    AtomicBoolean terminateOnFailure = new AtomicBoolean(false);
    List<JavaDStream<MessageAndMetadata<E>>> streamsList = new ArrayList<>();
    JavaDStream<MessageAndMetadata<E>> unionStreams;
    int numberOfPartition;
    KafkaConfig kafkaConfig = new KafkaConfig(props);
    ZkState zkState = new ZkState(kafkaConfig);

    // Determine the partition count: from configuration if given, else from ZooKeeper
    String numberOfPartitionStr = props.getProperty(Config.KAFKA_PARTITIONS_NUMBER);
    if (numberOfPartitionStr != null) {
        numberOfPartition = Integer.parseInt(numberOfPartitionStr);
    } else {
        _zkPath = (String) kafkaConfig._stateConf.get(Config.ZOOKEEPER_BROKER_PATH);
        String _topic = (String) kafkaConfig._stateConf.get(Config.KAFKA_TOPIC);
        numberOfPartition = getNumPartitions(zkState, _topic);
    }

    if (numberOfReceivers >= numberOfPartition) {
        // Create one receiver per partition
        for (int i = 0; i < numberOfPartition; i++) {
            streamsList.add(jsc.receiverStream(new KafkaReceiver(
                    props, i, storageLevel, messageHandler)));
        }
    } else {
        // Fewer receivers than partitions: assign partitions to receivers round-robin
        Map<Integer, Set<Integer>> rMap = new HashMap<Integer, Set<Integer>>();
        for (int i = 0; i < numberOfPartition; i++) {
            int j = i % numberOfReceivers;
            Set<Integer> pSet = rMap.get(j);
            if (pSet == null) {
                pSet = new HashSet<Integer>();
                rMap.put(j, pSet);
            }
            pSet.add(i);
        }
        for (int i = 0; i < numberOfReceivers; i++) {
            streamsList.add(jsc.receiverStream(new KafkaRangeReceiver(
                    props, rMap.get(i), storageLevel, messageHandler)));
        }
    }

    // Union all the streams if there is more than one; otherwise use the single stream
    if (streamsList.size() > 1) {
        unionStreams = jsc.union(
                streamsList.get(0), streamsList.subList(1, streamsList.size()));
    } else {
        unionStreams = streamsList.get(0);
    }

    final long batchDuration = jsc.ssc().graph().batchDuration().milliseconds();
    ReceiverStreamListener listener = new ReceiverStreamListener(kafkaConfig, batchDuration);
    jsc.addStreamingListener(listener);

    // Reset the fetch size
    Utils.setFetchRate(kafkaConfig, kafkaConfig._pollRecords);
    zkState.close();
    return unionStreams;
}
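The round-robin assignment in the else branch is worth isolating: partition i goes to receiver i % numberOfReceivers, so each receiver ends up with at most ceil(partitions / receivers) partitions. A standalone sketch of just that mapping (class and method names are illustrative, not from the library above):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PartitionAssignmentSketch {
    static Map<Integer, Set<Integer>> assign(int partitions, int receivers) {
        Map<Integer, Set<Integer>> byReceiver = new HashMap<>();
        for (int p = 0; p < partitions; p++) {
            // Same rule as in createStream: partition p goes to receiver p % receivers
            byReceiver.computeIfAbsent(p % receivers, k -> new HashSet<>()).add(p);
        }
        return byReceiver;
    }

    public static void main(String[] args) {
        // 5 partitions over 2 receivers -> {0=[0, 2, 4], 1=[1, 3]}
        System.out.println(assign(5, 2));
    }
}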
Example 4: from
import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on
public ExpectingToThrow from(JavaStreamingContext ssc) {
    // Register this helper itself as a streaming listener and return it for chaining
    ssc.addStreamingListener(this);
    return this;
}
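Because from registers this as the listener and returns it, test code can chain construction, registration, and a later assertion. A hypothetical usage, assuming ExpectingToThrow takes the expected exception type in its constructor and exposes an assertion method (both are assumptions for illustration, not shown in the snippet above):

// Hypothetical: the constructor argument and verify() are assumed for illustration
ExpectingToThrow expectation = new ExpectingToThrow(IllegalStateException.class).from(ssc);
ssc.start();
ssc.awaitTerminationOrTimeout(5000); // standard JavaStreamingContext API
expectation.verify(); // hypothetical assertion helper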