

Java Duration Class Code Examples

This article collects typical usage examples of the Java class org.apache.spark.streaming.Duration. If you are unsure what the Duration class does, how it is used, or simply want to see it in real code, the curated class code examples below should help.


The Duration class belongs to the org.apache.spark.streaming package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
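Before the examples, here is a minimal standalone sketch of the Duration API itself, using only the public helpers documented for org.apache.spark.streaming (the class name DurationBasics is invented for illustration):

import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.Durations;

public class DurationBasics {
    public static void main(String[] args) {
        Duration batch  = new Duration(2000);        // constructor takes milliseconds: a 2-second batch interval
        Duration window = Durations.seconds(30);     // factory method, equivalent to new Duration(30000)
        Duration slide  = Durations.seconds(5);

        Duration widened = window.plus(slide);       // simple arithmetic: 35 000 ms
        System.out.println(widened.milliseconds());  // 35000
        System.out.println(window.isMultipleOf(slide)); // true: 30 s is a multiple of 5 s
    }
}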

Example 1: main

import org.apache.spark.streaming.Duration; // import the required package/class
public static void main(String[] args) {
    SparkConf conf = new SparkConf()
            .setAppName("kafka-sandbox")
            .setMaster("local[*]");
    JavaSparkContext sc = new JavaSparkContext(conf);
    JavaStreamingContext ssc = new JavaStreamingContext(sc, new Duration(2000));

    Set<String> topics = Collections.singleton("mytopic");
    Map<String, String> kafkaParams = new HashMap<>();
    kafkaParams.put("metadata.broker.list", "localhost:9092");

    JavaPairInputDStream<String, String> directKafkaStream = KafkaUtils.createDirectStream(ssc,
            String.class, String.class, StringDecoder.class, StringDecoder.class, kafkaParams, topics);

    directKafkaStream.foreachRDD(rdd -> {
        System.out.println("--- New RDD with " + rdd.partitions().size()
                + " partitions and " + rdd.count() + " records");
        rdd.foreach(record -> System.out.println(record._2()));
    });

    ssc.start();
    ssc.awaitTermination();
}
 
Developer: aseigneurin | Project: kafka-sandbox | Lines: 25 | Source: SparkStringConsumer.java
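Note: createDirectStream with StringDecoder belongs to the legacy spark-streaming-kafka (Kafka 0.8) connector; newer Spark releases use the spark-streaming-kafka-0-10 module with ConsumerStrategies instead, so this example assumes the older artifact is on the classpath.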

Example 2: main

import org.apache.spark.streaming.Duration; // import the required package/class
public static void main(String[] args) throws InterruptedException {
  SparkConf sc = new SparkConf().setAppName("POC-Kafka-New");
  
  try(JavaStreamingContext jsc = new JavaStreamingContext(sc, new Duration(2000))) {
    
    JavaPairInputDStream<String, String> stream = KafkaUtils.createDirectStream(
        jsc, String.class, String.class, StringDecoder.class, StringDecoder.class,
        Collections.singletonMap("metadata.broker.list", KAFKA_HOST_PORT),
        Collections.singleton(EXAMPLE_TOPIC));

    JavaDStream<ExampleXML> records = stream.map(t -> t._2()).map(new ParseXML());
    records.foreachRDD(rdd -> System.out.printf("Amount of XMLs: %d\n", rdd.count()));

    jsc.start();
    jsc.awaitTermination();
  }
}
 
Developer: ciandt-dev | Project: gcp | Lines: 18 | Source: Spark4KafkaNew.java

Example 3: main

import org.apache.spark.streaming.Duration; // import the required package/class
public static void main(String[] args) throws InterruptedException {
  SparkConf sc = new SparkConf().setAppName("POC-Streaming");
  try(JavaStreamingContext jsc = new JavaStreamingContext(sc, new Duration(2000))) {
    // Alternative: JavaDStream<SampleXML> records = jsc.textFileStream("input/").map(new ParseXML());
    // textFileStream processes files line by line, so each XML document would have to fit on one line;
    // the queue-based approach below avoids that limitation.

    JavaRDD<String> files = jsc.sparkContext().wholeTextFiles("input/").map(tuple -> tuple._2());
    Queue<JavaRDD<String>> rddQueue = new LinkedList<>();
    rddQueue.add(files);
    JavaDStream<String> records = jsc.queueStream(rddQueue);

    records.foreachRDD(rdd -> System.out.printf("Amount of XMLs: %d\n", rdd.count()));

    jsc.start();
    jsc.awaitTermination();
  }
}
 
Developer: ciandt-dev | Project: gcp | Lines: 18 | Source: Spark2Streaming.java

Example 4: main

import org.apache.spark.streaming.Duration; // import the required package/class
public static void main(String[] args) throws InterruptedException, IOException {
  SparkConf sc = new SparkConf().setAppName("POC-BigQuery");
  
  try(JavaStreamingContext jsc = new JavaStreamingContext(sc, new Duration(60000))) {
    JavaPairInputDStream<String, String> stream = KafkaUtils.createDirectStream(
        jsc, String.class, String.class, StringDecoder.class, StringDecoder.class,
        Collections.singletonMap("metadata.broker.list", KAFKA_HOST_PORT), Collections.singleton(EXAMPLE_TOPIC));

    Configuration conf = new Configuration();
    BigQueryConfiguration.configureBigQueryOutput(conf, BQ_EXAMPLE_TABLE, BQ_EXAMPLE_SCHEMA);
    conf.set("mapreduce.job.outputformat.class", BigQueryOutputFormat.class.getName());

    JavaDStream<ExampleXML> records = stream.map(t -> t._2()).map(new ParseXML());
    records.foreachRDD(rdd -> {
      System.out.printf("Amount of XMLs: %d\n", rdd.count());
      long time = System.currentTimeMillis();
      rdd.mapToPair(new PrepToBQ()).saveAsNewAPIHadoopDataset(conf);
      System.out.printf("Sent to BQ in %fs\n", (System.currentTimeMillis()-time)/1000f);
    });
    
    jsc.start();
    jsc.awaitTermination();
  }
}
 
Developer: ciandt-dev | Project: gcp | Lines: 25 | Source: Spark6BigQuery.java

Example 5: main

import org.apache.spark.streaming.Duration; // import the required package/class
public static void main(String[] args) throws InterruptedException, IOException, JAXBException {
  SparkConf sc = new SparkConf().setAppName("Receiving-KafkaToBQ");

  try (JavaStreamingContext jsc = new JavaStreamingContext(sc, new Duration(60000))) {

    JavaPairDStream<String, String> stream = new KafkaInputWithOffsets(
        KAFKA_HOST_PORT, EXAMPLE_TOPIC, ZOOKEEPER_HOST, ZK_PATH).createResumableStream(jsc);

    stream.foreachRDD(IdleStop.create(jsc, 2, "XMLs count: %d\n"));

    stream
        .mapToPair(parseXml())
        .filter(t -> t != null)
        .mapToPair(prepToBq())
        .foreachRDD(BigQueryHelper.outputTo(BQ_EXAMPLE_TABLE, BQ_EXAMPLE_SCHEMA));

    jsc.start();
    jsc.awaitTermination();
  }
}
 
Developer: ciandt-dev | Project: gcp | Lines: 21 | Source: Spark8Organized.java

Example 6: setFromCommandLineArgs

import org.apache.spark.streaming.Duration; // import the required package/class
public static void setFromCommandLineArgs(Options options, String[] args) {
	CommandLineParser parser = new PosixParser();
	try {
		CommandLine cl = parser.parse(options, args);
		// default values applied when an option is absent
		THE_INSTANCE.windowLength = new Duration(
				Integer.parseInt(cl.getOptionValue(AppMain.WINDOW_LENGTH, "30")) * 1000);
		THE_INSTANCE.slideInterval = new Duration(
				Integer.parseInt(cl.getOptionValue(AppMain.SLIDE_INTERVAL, "5")) * 1000);
		THE_INSTANCE.kafka_broker = cl.getOptionValue(AppMain.KAFKA_BROKER, "kafka:9092");
		THE_INSTANCE.kafka_topic = cl.getOptionValue(AppMain.KAFKA_TOPIC, "apache");
		THE_INSTANCE.parquet_file = cl.getOptionValue(AppMain.PARQUET_FILE, "/user/spark/");
		THE_INSTANCE.initialized = true;
	} catch (ParseException e) {
		THE_INSTANCE.initialized = false;
		System.err.println("Parsing failed.  Reason: " + e.getMessage());
	}
}
 
Developer: sectong | Project: SparkToParquet | Lines: 19 | Source: Flags.java
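The two Durations parsed above are typically handed to a windowed transformation. Below is a minimal sketch of that hand-off against the Spark Streaming API; the helper method and its parameters are illustrative assumptions, not part of the Flags class:

import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaPairDStream;

// Recompute the per-key sum over a sliding window, e.g. a 30 s window sliding every 5 s.
public static JavaPairDStream<String, Long> windowedCounts(
        JavaPairDStream<String, Long> counts, Duration windowLength, Duration slideInterval) {
    return counts.reduceByKeyAndWindow((a, b) -> a + b, windowLength, slideInterval);
}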

Example 7: testNatsToSparkConnectorWithAdditionalPropertiesAndSubjects

import org.apache.spark.streaming.Duration; // import the required package/class
@Test(timeout=6000)
public void testNatsToSparkConnectorWithAdditionalPropertiesAndSubjects() throws InterruptedException {
	
	JavaStreamingContext ssc = new JavaStreamingContext(sc, new Duration(200));

	final Properties properties = new Properties();
	properties.setProperty(PROP_URL, NATS_SERVER_URL);
	final JavaReceiverInputDStream<String> messages =  
			NatsToSparkConnector
				.receiveFromNats(String.class, StorageLevel.MEMORY_ONLY())
				.withProperties(properties)
				.withSubjects(DEFAULT_SUBJECT)
				.asStreamOf(ssc);

	validateTheReceptionOfMessages(ssc, messages);
}
 
Developer: Logimethods | Project: nats-connector-spark | Lines: 17 | Source: StandardNatsToSparkConnectorTest.java

Example 8: testNatsToSparkConnectorWithAdditionalPropertiesAndMultipleSubjects

import org.apache.spark.streaming.Duration; // import the required package/class
@Test(timeout=6000)
public void testNatsToSparkConnectorWithAdditionalPropertiesAndMultipleSubjects() throws InterruptedException {
	
	JavaStreamingContext ssc = new JavaStreamingContext(sc, new Duration(200));

	final Properties properties = new Properties();
	final JavaReceiverInputDStream<String> messages = 
			NatsToSparkConnector
				.receiveFromNats(String.class, StorageLevel.MEMORY_ONLY())
				.withNatsURL(NATS_SERVER_URL)
				.withProperties(properties)
				.withSubjects(DEFAULT_SUBJECT, "EXTRA_SUBJECT")
				.asStreamOf(ssc);

	validateTheReceptionOfMessages(ssc, messages);
}
 
Developer: Logimethods | Project: nats-connector-spark | Lines: 17 | Source: StandardNatsToSparkConnectorTest.java

Example 9: testNatsToSparkConnectorWithAdditionalProperties

import org.apache.spark.streaming.Duration; // import the required package/class
@Test(timeout=6000)
public void testNatsToSparkConnectorWithAdditionalProperties() throws InterruptedException {
	
	JavaStreamingContext ssc = new JavaStreamingContext(sc, new Duration(200));

	final Properties properties = new Properties();
	properties.setProperty(PROP_SUBJECTS, "sub1,"+DEFAULT_SUBJECT+" , sub2");
	properties.setProperty(PROP_URL, NATS_SERVER_URL);
	final JavaReceiverInputDStream<String> messages = 
			NatsToSparkConnector
				.receiveFromNats(String.class, StorageLevel.MEMORY_ONLY())
				.withProperties(properties)
				.asStreamOf(ssc);

	validateTheReceptionOfMessages(ssc, messages);
}
 
Developer: Logimethods | Project: nats-connector-spark | Lines: 17 | Source: StandardNatsToSparkConnectorTest.java

Example 10: testNatsToSparkConnectorWithAdditionalPropertiesAndSubjects

import org.apache.spark.streaming.Duration; // import the required package/class
@Test(timeout=6000)
public void testNatsToSparkConnectorWithAdditionalPropertiesAndSubjects() throws InterruptedException {
	
	JavaStreamingContext ssc = new JavaStreamingContext(sc, new Duration(200));

	final Properties properties = new Properties();
	properties.setProperty(PROP_URL, NATS_SERVER_URL);

	final JavaPairDStream<String, String> messages = 
			NatsToSparkConnector
				.receiveFromNats(String.class, StorageLevel.MEMORY_ONLY())
				.withProperties(properties)
				.withSubjects(DEFAULT_SUBJECT)
				.asStreamOfKeyValue(ssc);

	validateTheReceptionOfMessages(ssc, messages);
}
 
Developer: Logimethods | Project: nats-connector-spark | Lines: 18 | Source: StandardNatsToSparkKeyValueConnectorTest.java

Example 11: testNatsToSparkConnectorWithAdditionalPropertiesAndMultipleSubjects

import org.apache.spark.streaming.Duration; // import the required package/class
@Test(timeout=6000)
public void testNatsToSparkConnectorWithAdditionalPropertiesAndMultipleSubjects() throws InterruptedException {
	
	JavaStreamingContext ssc = new JavaStreamingContext(sc, new Duration(200));

	final Properties properties = new Properties();
	final JavaPairDStream<String, String> messages = 
			NatsToSparkConnector
				.receiveFromNats(String.class, StorageLevel.MEMORY_ONLY())
				.withNatsURL(NATS_SERVER_URL)
				.withProperties(properties)
				.withSubjects(DEFAULT_SUBJECT, "EXTRA_SUBJECT")
				.asStreamOfKeyValue(ssc);

	validateTheReceptionOfMessages(ssc, messages);
}
 
Developer: Logimethods | Project: nats-connector-spark | Lines: 17 | Source: StandardNatsToSparkKeyValueConnectorTest.java

Example 12: testNatsToSparkConnectorWithAdditionalProperties

import org.apache.spark.streaming.Duration; // import the required package/class
@Test(timeout=6000)
public void testNatsToSparkConnectorWithAdditionalProperties() throws InterruptedException {
	
	JavaStreamingContext ssc = new JavaStreamingContext(sc, new Duration(200));

	final Properties properties = new Properties();
	properties.setProperty(PROP_SUBJECTS, "sub1,"+DEFAULT_SUBJECT+" , sub2");
	properties.setProperty(PROP_URL, NATS_SERVER_URL);
	final JavaPairDStream<String, String> messages = 
			NatsToSparkConnector
				.receiveFromNats(String.class, StorageLevel.MEMORY_ONLY())
				.withProperties(properties)
				.asStreamOfKeyValue(ssc);

	validateTheReceptionOfMessages(ssc, messages);
}
 
Developer: Logimethods | Project: nats-connector-spark | Lines: 17 | Source: StandardNatsToSparkKeyValueConnectorTest.java

Example 13: testNatsToSparkConnectorWithAdditionalPropertiesAndSubjects

import org.apache.spark.streaming.Duration; // import the required package/class
@Test(timeout=6000)
public void testNatsToSparkConnectorWithAdditionalPropertiesAndSubjects() throws InterruptedException {
	
	JavaStreamingContext ssc = new JavaStreamingContext(sc, new Duration(200));

	final Properties properties = new Properties();
	properties.setProperty(PROP_URL, NATS_SERVER_URL);
	final JavaReceiverInputDStream<Integer> messages =  
			NatsToSparkConnector
				.receiveFromNats(Integer.class, StorageLevel.MEMORY_ONLY())
				.withProperties(properties)
				.withSubjects(DEFAULT_SUBJECT)
				.asStreamOf(ssc);

	validateTheReceptionOfIntegerMessages(ssc, messages);
}
 
Developer: Logimethods | Project: nats-connector-spark | Lines: 17 | Source: IntegerNatsToSparkConnectorTest.java

Example 14: join

import org.apache.spark.streaming.Duration; // import the required package/class
/**
 * Join two streams based on processing time.
 *
 * @param componentId        identifier of this operator
 * @param joinStream         the other stream<K,R>
 * @param windowDuration     window length of this stream
 * @param joinWindowDuration window length of joinStream
 * @param <R>                value type of joinStream
 * @return the joined stream of <K, Tuple2<V, R>>
 * @throws WorkloadException if the window lengths are incompatible
 */
@Override
public <R> PairWorkloadOperator<K, Tuple2<V, R>> join(String componentId,
                                                      PairWorkloadOperator<K, R> joinStream,
                                                      TimeDurations windowDuration,
                                                      TimeDurations joinWindowDuration) throws WorkloadException {
    if (windowDuration.toMilliSeconds() % joinWindowDuration.toMilliSeconds() != 0) {
        throw new WorkloadException("windowDuration must be a multiple of joinWindowDuration");
    }
    Duration windowDurations = Utils.timeDurationsToSparkDuration(windowDuration);
    Duration windowDurations2 = Utils.timeDurationsToSparkDuration(joinWindowDuration);

    if (joinStream instanceof SparkPairWorkloadOperator) {
        SparkPairWorkloadOperator<K, R> joinSparkStream = ((SparkPairWorkloadOperator<K, R>) joinStream);
        JavaPairDStream<K, Tuple2<V, R>> joinedStream = pairDStream
                .window(windowDurations.plus(windowDurations2), windowDurations2)
                .join(joinSparkStream.pairDStream.window(windowDurations2, windowDurations2));
        // TODO: filter out illegally joined data

        return new SparkPairWorkloadOperator<>(joinedStream, parallelism);
    }
    throw new WorkloadException("Cast joinStream to SparkPairWorkloadOperator failed");
}
 
Developer: wangyangjun | Project: StreamBench | Lines: 34 | Source: SparkPairWorkloadOperator.java
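For orientation, the underlying Spark Streaming pattern that this operator wraps can be sketched directly against the public API; the durations and the method name below are illustrative assumptions, not part of StreamBench:

import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import scala.Tuple2;

// Join a widened window of 'left' (window + joinWindow, sliding every joinWindow)
// against back-to-back joinWindow-sized windows of 'right'. Both durations must be
// multiples of the streaming batch interval.
public static <K, V, R> JavaPairDStream<K, Tuple2<V, R>> windowedJoin(
        JavaPairDStream<K, V> left, JavaPairDStream<K, R> right) {
    Duration window = new Duration(30000);     // 30 s
    Duration joinWindow = new Duration(5000);  // 5 s
    return left.window(window.plus(joinWindow), joinWindow)
               .join(right.window(joinWindow, joinWindow));
}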

Example 15: JKinesisReceiver

import org.apache.spark.streaming.Duration; // import the required package/class
public JKinesisReceiver(String applicationName, String streamName,
                        String endpointUrl, String regionName,
                        Duration checkpoint, InitialPositionInStream position) {
    super(StorageLevel.MEMORY_ONLY_SER());

    this.workerId = getHostname() + ":" + String.valueOf(UUID.randomUUID());
    this.checkpointInterval = checkpoint;
    this.initialPosition = position;

    Region region = RegionUtils.getRegion(regionName);

    try {
        this.kclConfig = new KinesisClientLibConfiguration(applicationName, streamName,
                                                  getCredsProvider(),
                                                  workerId)
                        .withCommonClientConfig(CLIENT_CONF)
                        .withRegionName(region.getName())
                        .withKinesisEndpoint(endpointUrl)
                        .withInitialPositionInStream(InitialPositionInStream.LATEST) // note: hardcoded to LATEST; the 'position' constructor argument is stored but never applied here
                        .withTaskBackoffTimeMillis(500);
    } catch (Exception ex) {
        // Deliberately swallowed in this demo/canary context;
        // a production implementation would log and handle the failure.
    }
}
 
Developer: lenards | Project: spark-cstar-canaries | Lines: 27 | Source: JKinesisReceiver.java


Note: The org.apache.spark.streaming.Duration class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects by their respective contributors; copyright remains with the original authors, and any use or redistribution must follow each project's license. Please do not republish without permission.