

Java JavaStreamingContext.stop Method Code Examples

This article collects typical usage examples of the Java method org.apache.spark.streaming.api.java.JavaStreamingContext.stop. If you are wondering how JavaStreamingContext.stop is used in practice, or looking for concrete examples of calling it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.spark.streaming.api.java.JavaStreamingContext.


The sections below present 6 code examples of the JavaStreamingContext.stop method, sorted by popularity by default.
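Before the examples, here is a minimal, self-contained sketch of the common stop variants. It is illustrative only: the application name, the local[2] master, and the socket source on port 9999 are assumptions, not taken from any of the examples below.

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class StopVariantsSketch {
  public static void main(String[] args) throws InterruptedException {
    // Assumed local setup for illustration only.
    SparkConf conf = new SparkConf().setAppName("StopVariantsSketch").setMaster("local[2]");
    JavaStreamingContext jssc = new JavaStreamingContext(conf, new Duration(1000));

    // Any output operation will do; here we simply print lines arriving on port 9999.
    JavaDStream<String> lines = jssc.socketTextStream("localhost", 9999);
    lines.print();

    jssc.start();
    // Run for at most 10 seconds, then shut down.
    jssc.awaitTerminationOrTimeout(10000);

    // stop() also stops the underlying SparkContext by default;
    // stop(false) keeps the SparkContext alive for reuse;
    // stop(true, true) stops gracefully, letting in-flight batches finish.
    jssc.stop(true, true);
  }
}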

Example 1: main

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import of the package/class this method depends on
public static void main(String[] args) throws Exception {
   String zkQuorum = args[0];
   String group = args[1];
   SparkConf conf = new SparkConf().setAppName("KafkaInput");
   // Create a StreamingContext with a 1 second batch size
   JavaStreamingContext jssc = new JavaStreamingContext(conf, new Duration(1000));
   Map<String, Integer> topics = new HashMap<String, Integer>();
   topics.put("pandas", 1);
   JavaPairDStream<String, String> input = KafkaUtils.createStream(jssc, zkQuorum, group, topics);
   input.print();
   // start our streaming context and wait for it to "finish"
   jssc.start();
   // Wait for 10 seconds then exit. To run forever call without a timeout
   jssc.awaitTermination(10000);
   // Stop the streaming context
   jssc.stop();
}
 
Author: holdenk, Project: learning-spark-examples, Lines: 18, Source: KafkaInput.java

Example 2: main

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import of the package/class this method depends on
public static void main(String[] args) throws Exception {
   String master = args[0];
   JavaSparkContext sc = new JavaSparkContext(master, "StreamingLogInput");
   // Create a StreamingContext with a 1 second batch size
   JavaStreamingContext jssc = new JavaStreamingContext(sc, new Duration(1000));
   // Create a DStream from all the input on port 7777
   JavaDStream<String> lines = jssc.socketTextStream("localhost", 7777);
   // Filter our DStream for lines with "error"
   JavaDStream<String> errorLines = lines.filter(new Function<String, Boolean>() {
       public Boolean call(String line) {
         return line.contains("error");
       }});
   // Print out the lines with errors, which causes this DStream to be evaluated
   errorLines.print();
   // start our streaming context and wait for it to "finish"
   jssc.start();
   // Wait for 10 seconds then exit. To run forever call without a timeout
   jssc.awaitTermination(10000);
   // Stop the streaming context
   jssc.stop();
}
 
Author: holdenk, Project: learning-spark-examples, Lines: 22, Source: StreamingLogInput.java

Example 3: create

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import of the package/class this method depends on
public static <A extends JavaRDDLike<?, ?>> VoidFunction<A> create(JavaStreamingContext jsc, long amount, String printf) {
  final LongAccumulator stopAcc = jsc.ssc().sc().longAccumulator();
  return rdd -> {
    if (printf != null)
      System.out.printf(printf, rdd.count());
    if (rdd.count() == 0L) {
      stopAcc.add(1L);
      if (stopAcc.value() >= amount)
        jsc.stop();
    } else
      stopAcc.reset();
  };
}
 
Author: ciandt-dev, Project: gcp, Lines: 14, Source: IdleStop.java
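A hypothetical driver wiring for the helper above, stopping the context after three consecutive empty batches. The socket source, port, batch interval, and format string are assumptions for illustration, not part of the original project.

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class IdleStopUsageSketch {
  public static void main(String[] args) throws InterruptedException {
    // Assumed local setup for illustration only.
    JavaStreamingContext jssc = new JavaStreamingContext(
        new SparkConf().setAppName("IdleStopUsageSketch").setMaster("local[2]"),
        Durations.seconds(1));

    JavaDStream<String> lines = jssc.socketTextStream("localhost", 9999);
    lines.print();
    // Stop the streaming context once three consecutive batches arrive empty,
    // printing the record count of every batch along the way.
    lines.foreachRDD(IdleStop.create(jssc, 3L, "Batch contained %d records%n"));

    jssc.start();
    jssc.awaitTermination();
  }
}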

Example 4: shutdownGracefully

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import of the package/class this method depends on
/**
 * Gracefully shut down a Spark Streaming job.
 *
 * @param jssc the streaming context to stop
 * @param checkIntervalMillis how often to check for a shutdown request, in milliseconds
 * @throws InterruptedException if the waiting thread is interrupted
 */
public static void shutdownGracefully(JavaStreamingContext jssc, int checkIntervalMillis)
    throws InterruptedException {
  boolean isStopped = false;
  while (!isStopped) {
    isStopped = jssc.awaitTerminationOrTimeout(checkIntervalMillis);
    if (!isStopped && sparkInfo.isShutdownRequested()) {
      LOG.info("Marker file has been removed, will attempt to stop gracefully the spark streaming context");
      jssc.stop(true, true);
    }
  }
}
 
Author: hopshadoop, Project: hops-util, Lines: 19, Source: HopsUtil.java
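A hypothetical driver sketch showing how the helper above might be wired in: start the context, then let shutdownGracefully poll until an external shutdown is requested. The 10-second check interval, the socket/print pipeline, and the local master are assumptions for illustration; only the HopsUtil.shutdownGracefully call comes from the snippet above.

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class GracefulShutdownDriverSketch {
  public static void main(String[] args) throws InterruptedException {
    // Assumed local setup for illustration only.
    JavaStreamingContext jssc = new JavaStreamingContext(
        new SparkConf().setAppName("GracefulShutdownDriverSketch").setMaster("local[2]"),
        Durations.seconds(5));

    JavaDStream<String> lines = jssc.socketTextStream("localhost", 9999);
    lines.print();

    jssc.start();
    // Poll every 10 seconds; once a shutdown is requested, the helper calls
    // jssc.stop(true, true) so in-flight batches can finish cleanly.
    HopsUtil.shutdownGracefully(jssc, 10000);
  }
}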

Example 5: run

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import of the package/class this method depends on
private void run(CompositeConfiguration conf) {
        // Kafka props
        String kafkaBrokers = conf.getString("metadata.broker.list");
        String topics = conf.getString("consumer.topic");
        String fromOffset = conf.getString("auto.offset.reset");

        // Spark props
        String sparkMaster = conf.getString("spark.master");
        String sparkSerDe = conf.getString("spark.serializer");
        long sparkStreamDuration = conf.getLong("stream.duration");

        SparkConf sparkConf = new SparkConf().setAppName("Kafka Spark ES Flow with Java API").setMaster(sparkMaster).set("spark.serializer",
                sparkSerDe);

        JavaSparkContext sp = new JavaSparkContext(sparkConf);
        JavaStreamingContext jssc = new JavaStreamingContext(sp, Durations.seconds(sparkStreamDuration));
        SQLContext sqlContext = new SQLContext(sp);
        H2OContext h2oContext = new H2OContext(sp.sc());
        h2oContext.start();

        HashSet<String> topicsSet = new HashSet<>(Arrays.asList(topics.split(",")));
        HashMap<String, String> kafkaParams = new HashMap<>();
        kafkaParams.put("metadata.broker.list", kafkaBrokers);
        kafkaParams.put("auto.offset.reset", fromOffset);

        CraigslistJobTitlesApp staticApp = new CraigslistJobTitlesApp(craigslistJobTitles, sp.sc(), sqlContext, h2oContext);
        try {
             final Tuple2<Model<?, ?, ?>, Word2VecModel> tModel = staticApp.buildModels(craigslistJobTitles, "initialModel");
//            final Tuple2<Model<?, ?, ?>, Word2VecModel> tModel = importModels(h2oModelFolder, word2VecModelFolder, sp.sc());
//            final Model<?, ?, ?> tModel1 = importH2OModel(h2oModelFolder1);

            final String modelId = tModel._1()._key.toString();
            final Word2VecModel w2vModel = tModel._2();
            // exportModels(tModel._1(), w2vModel, sp.sc());

            // Create direct kafka stream with brokers and topics
            JavaPairInputDStream<String, String> messages = KafkaUtils.createDirectStream(jssc, String.class, String.class,
                    StringDecoder.class, StringDecoder.class, kafkaParams, topicsSet);

            // Classify incoming messages
            messages.map(mesage -> mesage._2()).filter(str -> !str.isEmpty())
                    .map(jobTitle -> staticApp.classify(jobTitle, modelId, w2vModel))
                    .map(pred -> new StringBuilder(100).append('\"').append(pred._1()).append("\" = ").append(Arrays.toString(pred._2())))
                    .print();

//            messages.map(mesage -> mesage._2()).filter(str -> !str.isEmpty())
//                    .map(jobTitle -> tModel1.score(new H2OFrame(jobTitle)))
//                    .map(pred -> pred._names)
//                    .print();

            jssc.start();
            jssc.awaitTermination();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            jssc.stop();
            staticApp.shutdown();
        }
}
 
Author: ogidogi, Project: laughing-octo-sansa, Lines: 60, Source: StreamingUserTypeClassification.java

Example 6: run

import org.apache.spark.streaming.api.java.JavaStreamingContext; // import of the package/class this method depends on
@SuppressWarnings("deprecation")
private void run() {

  Properties props = new Properties();
  props.put("zookeeper.hosts", "localhost");
  props.put("zookeeper.port", "2181");
  props.put("kafka.topic", "mytopic");
  props.put("kafka.consumer.id", "kafka-consumer");
  // Optional Properties
  props.put("consumer.forcefromstart", "true");
  props.put("max.poll.records", "100");
  props.put("consumer.fillfreqms", "1000");
  props.put("consumer.backpressure.enabled", "true");
  //Kafka properties
  props.put("bootstrap.servers", "localhost:9093");
  props.put("security.protocol", "SSL");
  props.put("ssl.truststore.location","~/kafka-securitykafka.server.truststore.jks");
  props.put("ssl.truststore.password", "test1234");

  SparkConf _sparkConf = new SparkConf();
  JavaStreamingContext jsc = new JavaStreamingContext(_sparkConf, Durations.seconds(30));
  // Specify number of Receivers you need.
  int numberOfReceivers = 1;

  JavaDStream<MessageAndMetadata<byte[]>> unionStreams = ReceiverLauncher.launch(
      jsc, props, numberOfReceivers, StorageLevel.MEMORY_ONLY());

  //Get the Max offset from each RDD Partitions. Each RDD Partition belongs to One Kafka Partition
  JavaPairDStream<Integer, Iterable<Long>> partitonOffset = ProcessedOffsetManager
      .getPartitionOffset(unionStreams, props);
  

  //Start Application Logic
  unionStreams.foreachRDD(new VoidFunction<JavaRDD<MessageAndMetadata<byte[]>>>() {
    @Override
    public void call(JavaRDD<MessageAndMetadata<byte[]>> rdd) throws Exception {

      rdd.foreachPartition(new VoidFunction<Iterator<MessageAndMetadata<byte[]>>>() {
        @Override
        public void call(Iterator<MessageAndMetadata<byte[]>> mmItr) throws Exception {
          while (mmItr.hasNext()) {
            MessageAndMetadata<byte[]> mm = mmItr.next();
            byte[] key = mm.getKey();
            byte[] value = mm.getPayload();
            Headers headers = mm.getHeaders();
            System.out.println("Key :" + new String(key) + " Value :" + new String(value));
            if (headers != null) {
              Header[] harry = headers.toArray();
              for (Header header : harry) {
                String hkey = header.key();
                byte[] hvalue = header.value();
                System.out.println("Header Key :" + hkey + " Header Value :" + new String(hvalue));
              }
            }
          }
        }
      });
    }
  });
  //End Application Logic

  //Persists the Max Offset of given Kafka Partition to ZK
  ProcessedOffsetManager.persists(partitonOffset, props);

  try {
    jsc.start();
    jsc.awaitTermination();
  }catch (Exception ex ) {
    jsc.ssc().sc().cancelAllJobs();
    jsc.stop(true, false);
    System.exit(-1);
  }
}
 
Author: dibbhatt, Project: kafka-spark-consumer, Lines: 78, Source: SampleConsumer.java


Note: The org.apache.spark.streaming.api.java.JavaStreamingContext.stop method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please follow the corresponding project's license when redistributing or using the code, and do not republish without permission.