This page collects typical usage examples of the Java method org.apache.spark.streaming.api.java.JavaStreamingContext.awaitTermination. If you have been wondering what JavaStreamingContext.awaitTermination does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage of the enclosing class, org.apache.spark.streaming.api.java.JavaStreamingContext.
The following 15 code examples of JavaStreamingContext.awaitTermination are sorted by popularity by default.
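Before the examples, a minimal sketch of the overall pattern may help: build a JavaStreamingContext, wire up the stream, call start(), then block on awaitTermination() until the computation is stopped or fails. This sketch is illustrative only; it assumes Spark 2.x (where awaitTermination() declares InterruptedException) and a socket source on port 9999.

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("AwaitTerminationSketch");
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(1));

        // A trivial pipeline, purely for illustration
        jssc.socketTextStream("localhost", 9999).print();

        jssc.start();            // begin receiving and processing data
        jssc.awaitTermination(); // block until stop() is called or the job fails
    }
}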
Example 1: main

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

public static void main(String[] args) {
    SparkConf conf = new SparkConf()
            .setAppName("kafka-sandbox")
            .setMaster("local[*]");
    JavaSparkContext sc = new JavaSparkContext(conf);
    JavaStreamingContext ssc = new JavaStreamingContext(sc, new Duration(2000));

    Set<String> topics = Collections.singleton("mytopic");
    Map<String, String> kafkaParams = new HashMap<>();
    kafkaParams.put("metadata.broker.list", "localhost:9092");

    JavaPairInputDStream<String, String> directKafkaStream = KafkaUtils.createDirectStream(ssc,
            String.class, String.class, StringDecoder.class, StringDecoder.class, kafkaParams, topics);

    directKafkaStream.foreachRDD(rdd -> {
        System.out.println("--- New RDD with " + rdd.partitions().size()
                + " partitions and " + rdd.count() + " records");
        rdd.foreach(record -> System.out.println(record._2));
    });

    ssc.start();
    ssc.awaitTermination();
}
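awaitTermination() blocks indefinitely unless something stops the context. A common companion pattern, not part of the original example, is a JVM shutdown hook that stops the context gracefully so in-flight batches can finish; a sketch, assuming the same ssc variable:

// Hypothetical addition: stop gracefully on JVM shutdown (e.g. SIGTERM or Ctrl-C)
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    // stopSparkContext = true, stopGracefully = true
    ssc.stop(true, true);
}));
ssc.start();
ssc.awaitTermination(); // returns once the shutdown hook stops the context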
Example 2: run

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

private void run(CompositeConfiguration conf) {
    // Spark conf
    SparkConf sparkConf = new SparkConf().setAppName("TwitterSparkCrawler")
            .setMaster(conf.getString("spark.master"))
            .set("spark.serializer", conf.getString("spark.serializer"));
    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf,
            Durations.seconds(conf.getLong("stream.duration")));

    // Twitter4J
    // IMPORTANT: put keys in twitter4J.properties
    Configuration twitterConf = ConfigurationContext.getInstance();
    Authorization twitterAuth = AuthorizationFactory.getInstance(twitterConf);

    // Create the Twitter stream
    String[] filters = { "#Car" };
    TwitterUtils.createStream(jssc, twitterAuth, filters).print();

    // Start the computation
    jssc.start();
    jssc.awaitTermination();
}
Example 3: main

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

public static void main(String[] args) throws Exception {
    System.setProperty("hadoop.home.dir", "E:\\hadoop");
    final String ip = "10.0.75.1";
    final int port = Integer.parseInt("9000");
    final String checkpointDirectory = "E:\\hadoop\\checkpoint";

    // Function to create a JavaStreamingContext without any output operations
    // (used to detect a new context)
    Function0<JavaStreamingContext> createContextFunc = new Function0<JavaStreamingContext>() {
        @Override
        public JavaStreamingContext call() {
            return createContext(ip, port, checkpointDirectory);
        }
    };

    JavaStreamingContext ssc = JavaStreamingContext.getOrCreate(checkpointDirectory, createContextFunc);
    ssc.start();
    ssc.awaitTermination();
}

Developer: PacktPublishing, Project: Apache-Spark-2x-for-Java-Developers, Lines: 20, Source: WordCountRecoverableEx.java
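The createContext factory called above is defined elsewhere in WordCountRecoverableEx and is not shown. A plausible reconstruction, following the standard checkpoint-recovery pattern used with getOrCreate; the word-count logic and batch interval here are assumptions made to keep the sketch self-contained:

// Hypothetical sketch of the factory used by getOrCreate
private static JavaStreamingContext createContext(String ip, int port, String checkpointDirectory) {
    SparkConf conf = new SparkConf().setAppName("WordCountRecoverableEx").setMaster("local[*]");
    JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(2));
    ssc.checkpoint(checkpointDirectory); // enables recovery via getOrCreate

    JavaReceiverInputDStream<String> lines = ssc.socketTextStream(ip, port);
    lines.flatMap(line -> Arrays.asList(line.split(" ")).iterator())
         .mapToPair(word -> new Tuple2<>(word, 1))
         .reduceByKey(Integer::sum)
         .print();
    return ssc;
}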
Example 4: main

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

public static void main(String[] args) throws IOException {
    Flags.setFromCommandLineArgs(THE_OPTIONS, args);

    // Initialize the Spark conf
    SparkConf conf = new SparkConf().setAppName("A SECTONG Application: Apache Log Analysis with Spark");
    JavaSparkContext sc = new JavaSparkContext(conf);
    JavaStreamingContext jssc = new JavaStreamingContext(sc, Flags.getInstance().getSlideInterval());
    SQLContext sqlContext = new SQLContext(sc);

    // Initialize parameters
    HashSet<String> topicsSet = new HashSet<String>(Arrays.asList(Flags.getInstance().getKafka_topic().split(",")));
    HashMap<String, String> kafkaParams = new HashMap<String, String>();
    kafkaParams.put("metadata.broker.list", Flags.getInstance().getKafka_broker());

    // Read data from the Kafka stream
    JavaPairInputDStream<String, String> messages = KafkaUtils.createDirectStream(jssc, String.class, String.class,
            StringDecoder.class, StringDecoder.class, kafkaParams, topicsSet);

    JavaDStream<String> lines = messages.map(new Function<Tuple2<String, String>, String>() {
        private static final long serialVersionUID = 5266880065425088203L;

        public String call(Tuple2<String, String> tuple2) {
            return tuple2._2();
        }
    });

    JavaDStream<ApacheAccessLog> accessLogsDStream = lines.flatMap(line -> {
        List<ApacheAccessLog> list = new ArrayList<>();
        try {
            // Parse each line; lines that fail to parse are skipped
            list.add(ApacheAccessLog.parseFromLogLine(line));
            return list;
        } catch (RuntimeException e) {
            return list;
        }
    }).cache();

    accessLogsDStream.foreachRDD(rdd -> {
        // Convert the RDD to a DataFrame
        DataFrame df = sqlContext.createDataFrame(rdd, ApacheAccessLog.class);
        // Write out as Parquet files
        df.write().partitionBy("ipAddress", "method", "responseCode").mode(SaveMode.Append)
                .parquet(Flags.getInstance().getParquetFile());
        return null;
    });

    // Start the streaming computation
    jssc.start();
    jssc.awaitTermination(); // wait for termination
}
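ApacheAccessLog.parseFromLogLine is project-specific and not shown. For orientation, here is a minimal sketch of what such a parser typically looks like for the Apache common log format; the regex, constructor, and field choice are assumptions (the fields mirror the partitionBy columns above), and a parse failure throws so the flatMap above can skip the line:

// Hypothetical parser sketch for the Apache common log format
private static final Pattern LOG_PATTERN = Pattern.compile(
        "^(\\S+) (\\S+) (\\S+) \\[([^\\]]+)\\] \"(\\S+) (\\S+) (\\S+)\" (\\d{3}) (\\S+)");

public static ApacheAccessLog parseFromLogLine(String line) {
    Matcher m = LOG_PATTERN.matcher(line);
    if (!m.find()) {
        // The flatMap in the example catches this and drops the line
        throw new RuntimeException("Cannot parse log line: " + line);
    }
    // Hypothetical constructor: (ipAddress, method, responseCode)
    return new ApacheAccessLog(m.group(1), m.group(5), Integer.parseInt(m.group(8)));
}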
Example 5: main

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

public static void main(String[] args)
{
    SparkConf conf = new SparkConf();
    conf.setAppName("Wordcount Background");
    conf.setMaster("local");

    JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(15));
    JavaDStream<String> lines = ssc.textFileStream("/home/rahul/DATASET");
    JavaDStream<String> words = lines.flatMap(WORDS_EXTRACTOR);
    JavaPairDStream<String, Integer> pairs = words.mapToPair(WORDS_MAPPER);
    JavaPairDStream<String, Integer> counter = pairs.reduceByKey(WORDS_REDUCER);
    counter.print();

    ssc.start();
    ssc.awaitTermination();

    /* Equivalent batch version, kept for reference:
    JavaRDD<String> file = context.textFile("/home/rahul/Desktop/palestine.txt");
    JavaRDD<String> words = file.flatMap(WORDS_EXTRACTOR);
    JavaPairRDD<String, Integer> pairs = words.mapToPair(WORDS_MAPPER);
    JavaPairRDD<String, Integer> counter = pairs.reduceByKey(WORDS_REDUCER);
    counter.saveAsTextFile("/home/rahul/Desktop/wc");
    context.close(); */
}
Example 6: run

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

public void run() throws IOException {
    SparkConf conf = new SparkConf();
    conf.setAppName(getAppName());
    conf.set(SPARK_SERIALIZER, ORG_APACHE_SPARK_SERIALIZER_KRYO_SERIALIZER);
    JavaSparkUtil.packProjectJars(conf);
    setupSparkConf(conf);

    JavaStreamingContext ssc = new JavaStreamingContext(conf, getDuration());
    List<JavaDStream<T>> streamsList = getStreamsList(ssc);

    // Union all the streams if there is more than one
    JavaDStream<T> streams = unionStreams(ssc, streamsList);

    JavaPairDStream<String, RowMutation> pairDStream =
            streams.mapToPair(new PairFunction<T, String, RowMutation>() {
                public Tuple2<String, RowMutation> call(T t) {
                    RowMutation rowMutation = convert(t);
                    return new Tuple2<String, RowMutation>(rowMutation.getRowId(), rowMutation);
                }
            });

    pairDStream.foreachRDD(getFunction());

    ssc.start();
    ssc.awaitTermination();
}
Example 7: main

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

public static void main(String[] args) throws InterruptedException {
    String messagingServiceHost = System.getenv("MESSAGING_SERVICE_HOST");
    if (messagingServiceHost != null) {
        host = messagingServiceHost;
    }
    LOG.info("host = {}", host);

    String messagingServicePort = System.getenv("MESSAGING_SERVICE_PORT");
    if (messagingServicePort != null) {
        port = Integer.valueOf(messagingServicePort);
    }
    LOG.info("port = {}", port);

    JavaStreamingContext ssc = JavaStreamingContext.getOrCreate(CHECKPOINT_DIR, AMQPTemperature::createStreamingContext);
    ssc.start();
    ssc.awaitTermination();
}
Example 8: main

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

public static void main(String[] args) throws InterruptedException {
    // Get AMQP messaging service connection information
    String messagingServiceHost = System.getenv("MESSAGING_SERVICE_HOST");
    if (messagingServiceHost != null) {
        host = messagingServiceHost;
    }
    String messagingServicePort = System.getenv("MESSAGING_SERVICE_PORT");
    if (messagingServicePort != null) {
        port = Integer.valueOf(messagingServicePort);
    }
    log.info("AMQP messaging service hostname {}:{}", host, port);

    // Get credentials for authentication
    username = System.getenv("SPARK_DRIVER_USERNAME");
    password = System.getenv("SPARK_DRIVER_PASSWORD");
    log.info("Credentials {}/{}", username, password);

    JavaStreamingContext ssc = JavaStreamingContext.getOrCreate(CHECKPOINT_DIR, TemperatureAnalyzer::createStreamingContext);
    ssc.start();
    ssc.awaitTermination();
}
Example 9: processMQTT

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

/**
 * Starts the Spark stream that reads from the MQTT queue.
 *
 * @param broker     MQTT broker URL
 * @param topic      MQTT topic name
 * @param numSeconds batch interval in seconds
 */
public void processMQTT(final String broker, final String topic, final int numSeconds) {
    LOG.info("************ SparkStreamingMQTTOutside.processMQTT start");

    // Create the Spark application and set the name to MQTT
    SparkConf sparkConf = new SparkConf().setAppName("MQTT");

    // Create the Spark streaming context with a 'numSeconds' second batch size
    jssc = new JavaStreamingContext(sparkConf, Durations.seconds(numSeconds));
    jssc.checkpoint(checkpointDirectory);

    LOG.info("************ SparkStreamingMQTTOutside.processMQTT about to read the MQTTUtils.createStream");
    // 2. Use MQTTUtils to collect MQTT messages
    JavaReceiverInputDStream<String> messages = MQTTUtils.createStream(jssc, broker, topic);

    LOG.info("************ SparkStreamingMQTTOutside.processMQTT about to do foreachRDD");
    // Process the messages on the queue and save them to the database
    messages.foreachRDD(new SaveRDD());

    LOG.info("************ SparkStreamingMQTTOutside.processMQTT prior to context.start");
    // Start the context
    jssc.start();
    jssc.awaitTermination();
}
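SaveRDD is an application class that is not part of this snippet. A minimal sketch of what such a handler could look like, assuming Spark 1.6+/2.x where foreachRDD accepts a VoidFunction; the persistence call is a placeholder:

// Hypothetical sketch of the SaveRDD handler passed to foreachRDD above
public class SaveRDD implements VoidFunction<JavaRDD<String>> {
    @Override
    public void call(JavaRDD<String> rdd) {
        rdd.foreachPartition(messages -> {
            // Typically: open one DB connection per partition here, then persist each message
            while (messages.hasNext()) {
                String message = messages.next();
                // database.save(message); // hypothetical persistence call
            }
        });
    }
}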
Example 10: start

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

private void start() {
    // Create a local StreamingContext with two working threads and a batch interval of 5 seconds
    SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount");
    JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

    JavaDStream<String> msgDataStream = jssc.textFileStream(StreamingUtils.getInputDirectory());
    msgDataStream.print();

    jssc.start();
    try {
        jssc.awaitTermination();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
Example 11: start

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

private void start() {
    // Create a local StreamingContext with two working threads and a batch interval of 5 seconds
    SparkConf conf = new SparkConf().setMaster("local[2]")
            .setAppName("Streaming Ingestion File System Text File to Dataframe");
    JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

    JavaDStream<String> msgDataStream = jssc.textFileStream(StreamingUtils.getInputDirectory());
    msgDataStream.print();

    // Create JavaRDD<Row>
    msgDataStream.foreachRDD(new RowProcessor());

    jssc.start();
    try {
        jssc.awaitTermination();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}

Developer: jgperrin, Project: net.jgp.labs.spark, Lines: 21, Source: StreamingIngestionFileSystemTextFileToDataframeMultipleClassesApp.java
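RowProcessor lives in a sibling class of this project and is not shown. A hedged sketch of what it plausibly does, converting each micro-batch of lines into a one-column DataFrame using the Spark 2.x SparkSession API (the schema and show() call are assumptions):

// Hypothetical sketch of RowProcessor: turns each RDD of lines into a DataFrame
public class RowProcessor implements VoidFunction<JavaRDD<String>> {
    private static final long serialVersionUID = 1L;

    @Override
    public void call(JavaRDD<String> rdd) {
        if (rdd.isEmpty()) {
            return; // nothing arrived during this batch interval
        }
        SparkSession spark = SparkSession.builder().getOrCreate();
        JavaRDD<Row> rows = rdd.map(RowFactory::create);
        StructType schema = DataTypes.createStructType(new StructField[] {
                DataTypes.createStructField("line", DataTypes.StringType, false) });
        Dataset<Row> df = spark.createDataFrame(rows, schema);
        df.show();
    }
}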
Example 12: main

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

public static void main(String[] args) {
    if (args.length < 4) {
        System.err.println("Usage: JavaKafkaWordCount <zkQuorum> <group> <topics> <numThreads>");
        System.exit(1);
    }

    SparkConf sparkConf = new SparkConf().setAppName("JavaKafkaWordCount");
    // Create the context with a 2 second batch interval
    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));

    int numThreads = Integer.parseInt(args[3]);
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    String[] topics = args[2].split(",");
    for (String topic : topics) {
        topicMap.put(topic, numThreads);
    }

    JavaPairReceiverInputDStream<String, String> messages = KafkaUtils.createStream(jssc, args[0], args[1],
            topicMap);

    JavaDStream<String> lines = messages.map(tuple2 -> tuple2._2());
    JavaDStream<String> words = lines.flatMap(x -> Lists.newArrayList(SPACE.split(x)));
    JavaPairDStream<String, Integer> wordCounts = words
            .mapToPair(s -> new Tuple2<String, Integer>(s, 1))
            .reduceByKey((i1, i2) -> i1 + i2);
    wordCounts.print();

    jssc.start();
    jssc.awaitTermination();
}
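Note the contrast with Example 1: createStream here is the older receiver-based Kafka API, where a receiver consumes via the ZooKeeper quorum and offsets are tracked by the consumer group, while createDirectStream in Example 1 queries the Kafka brokers directly, with no receiver, and offers stronger delivery guarantees.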
Example 13: run

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

private void run(CompositeConfiguration conf) {
    // Spark conf
    SparkConf sparkConf = new SparkConf().setAppName("TwitterSparkCrawler")
            .setMaster(conf.getString("spark.master"))
            .set("spark.serializer", conf.getString("spark.serializer"))
            .registerKryoClasses(new Class<?>[] { Parameter.class, BatchRequestBuilder.class, BatchRequest.class });
    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf,
            Durations.seconds(conf.getLong("stream.duration")));

    // Create the Facebook stream
    Parameter typeParam = Parameter.with("type", "event");
    FacebookUtils
            .createStream(jssc, conf.getString("access.token"),
                    new BatchRequestBuilder[] {
                            new BatchRequestBuilder("search").parameters(new Parameter[] { Parameter.with("q", "car"), typeParam }) })
            .print();

    // Start the computation
    jssc.start();
    jssc.awaitTermination();
}
Example 14: main

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

public static void main(String[] args) throws Exception {
    String zkQuorum = args[0];
    String group = args[1];
    SparkConf conf = new SparkConf().setAppName("KafkaInput");
    // Create a StreamingContext with a 1 second batch size
    JavaStreamingContext jssc = new JavaStreamingContext(conf, new Duration(1000));

    Map<String, Integer> topics = new HashMap<String, Integer>();
    topics.put("pandas", 1);
    JavaPairDStream<String, String> input = KafkaUtils.createStream(jssc, zkQuorum, group, topics);
    input.print();

    // Start our streaming context and wait for it to "finish"
    jssc.start();
    // Wait for 10 seconds, then exit; to run forever, call awaitTermination() without a timeout
    jssc.awaitTermination(10000);
    // Stop the streaming context
    jssc.stop();
}
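This is the only example so far that passes a timeout to awaitTermination. Worth knowing: the awaitTermination(long) overload was deprecated in Spark 1.3 and removed in later releases; on newer APIs the equivalent is awaitTerminationOrTimeout, which also reports whether the context actually stopped. A sketch of the modern form:

// Equivalent on Spark 1.3+ (awaitTermination(long) was later removed)
boolean stopped = jssc.awaitTerminationOrTimeout(10000);
if (!stopped) {
    jssc.stop(); // timed out: stop the streaming context ourselves
}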
Example 15: main

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import the package/class the method depends on

public static void main(String[] args) throws Exception {
    String master = args[0];
    JavaSparkContext sc = new JavaSparkContext(master, "StreamingLogInput");
    // Create a StreamingContext with a 1 second batch size
    JavaStreamingContext jssc = new JavaStreamingContext(sc, new Duration(1000));

    // Create a DStream from all the input on port 7777
    JavaDStream<String> lines = jssc.socketTextStream("localhost", 7777);

    // Filter our DStream for lines with "error"
    JavaDStream<String> errorLines = lines.filter(new Function<String, Boolean>() {
        public Boolean call(String line) {
            return line.contains("error");
        }
    });

    // Print out the lines with errors, which causes this DStream to be evaluated
    errorLines.print();

    // Start our streaming context and wait for it to "finish"
    jssc.start();
    // Wait for 10 seconds, then exit; to run forever, call awaitTermination() without a timeout
    jssc.awaitTermination(10000);
    // Stop the streaming context
    jssc.stop();
}