

Java VoidFunction Class Code Examples

This article collects typical usage examples of the Java class org.apache.spark.api.java.function.VoidFunction. If you are struggling with questions such as: What exactly does the Java VoidFunction class do? How do I use VoidFunction? Where can I find usage examples? Then the curated class code examples below may help.


The VoidFunction class belongs to the org.apache.spark.api.java.function package. Fifteen code examples of the VoidFunction class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
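
Before the examples, here is a minimal, self-contained sketch of the interface itself. It is not taken from any of the projects below: the VoidFunctionDemo class name, the local[2] master, and the sample data are illustrative assumptions. VoidFunction<T> declares a single method, void call(T t) throws Exception, and is the callback type accepted by side-effecting actions such as JavaRDD.foreach and JavaRDD.foreachPartition; on Java 8 it can also be written as a lambda, as several examples below do.

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;

public class VoidFunctionDemo {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("VoidFunctionDemo").setMaster("local[2]");
    try (JavaSparkContext sc = new JavaSparkContext(conf)) {
      // foreach ships this VoidFunction to the executors and invokes call(...)
      // once per element; any return value would be discarded, hence "void".
      sc.parallelize(Arrays.asList("a", "b", "c")).foreach(new VoidFunction<String>() {
        @Override
        public void call(String s) {
          System.out.println(s);
        }
      });
    }
  }
}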

Example 1: combineShortSessionsInParallel

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
public void combineShortSessionsInParallel(int timeThres) throws InterruptedException, IOException {

    JavaRDD<String> userRDD = getUserRDD(this.cleanupType);

    userRDD.foreachPartition(new VoidFunction<Iterator<String>>() {
      private static final long serialVersionUID = 1L;

      @Override
      public void call(Iterator<String> arg0) throws Exception {
        ESDriver tmpES = new ESDriver(props);
        tmpES.createBulkProcessor();
        while (arg0.hasNext()) {
          String s = arg0.next();
          combineShortSessions(tmpES, s, timeThres);
        }
        tmpES.destroyBulkProcessor();
        tmpES.close();
      }
    });
  }
 
Developer ID: apache, Project: incubator-sdap-mudrod, Lines of code: 24, Source file: SessionGenerator.java

Example 2: outputTo

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
public static <X> VoidFunction<JavaPairRDD<X, JsonObject>> outputTo(String table, String schema) throws IOException {
  Configuration conf = new Configuration();
  conf.set("mapreduce.job.outputformat.class", BigQueryOutputFormat.class.getName());
  BigQueryConfiguration.configureBigQueryOutput(conf, table, schema);

  return rdd -> {
    // Compute the batch size once up front; each count() call triggers a separate Spark job.
    long count = rdd.count();
    if (count > 0L) {
      long time = System.currentTimeMillis();
      /* This was only required the first time on a fresh table: it seems the _PARTITIONTIME pseudo-column
       * had to be kick-started, but now rows are automatically added to the proper table using ingestion time.
       * Using the decorator would only be required if we were to place the entries using their "event timestamp",
       * e.g. loading rows onto old partitions. Implementing that would be much harder though, since we'd have to
       * check each message, or each date-based "partition":
      if (partitioned) {
        String today = ZonedDateTime.now(ZoneOffset.UTC).format(DateTimeFormatter.ofPattern("yyyyMMdd"));
        BigQueryConfiguration.configureBigQueryOutput(conf, table + "$" + today, schema);
      }*/
      rdd.saveAsNewAPIHadoopDataset(conf);
      System.out.printf("Sent %d rows to BQ in %.1fs\n", count, (System.currentTimeMillis() - time) / 1000f);
    }
  };
}
 
Developer ID: ciandt-dev, Project: gcp, Lines of code: 22, Source file: BigQueryHelper.java
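
As a hedged usage sketch, outputTo plugs directly into JavaPairDStream.foreachRDD. Everything below other than that call is an assumption for illustration: the queue-backed input, the dataset/table name, and the one-column schema JSON are placeholders, and BigQueryHelper is assumed to be on the classpath with GCP credentials configured in the environment.

import java.util.Arrays;
import java.util.LinkedList;
import java.util.Queue;

import com.google.gson.JsonObject;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import scala.Tuple2;

public class OutputToDemo {
  public static void main(String[] args) throws Exception {
    JavaStreamingContext jsc = new JavaStreamingContext(
        new SparkConf().setAppName("OutputToDemo").setMaster("local[2]"),
        Durations.seconds(10));

    // Hypothetical input: a one-batch queue stream standing in for a real source.
    Queue<JavaRDD<String>> queue = new LinkedList<>();
    queue.add(jsc.sparkContext().parallelize(Arrays.asList("a", "b")));
    JavaPairDStream<String, JsonObject> pairs = jsc.queueStream(queue).mapToPair(s -> {
      JsonObject json = new JsonObject();
      json.addProperty("value", s);
      return new Tuple2<>(s, json);
    });

    // Placeholder table and schema; the VoidFunction returned by outputTo
    // saves each non-empty batch via BigQueryOutputFormat.
    pairs.foreachRDD(BigQueryHelper.outputTo("mydataset.mytable",
        "[{\"name\":\"value\",\"type\":\"STRING\"}]"));

    jsc.start();
    jsc.awaitTermination();
  }
}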

Example 3: publishToNats

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
/**
 * @param stream the Spark stream to publish to NATS
 * @param dataEncoder the function used to encode the Spark stream records into the NATS message payloads
 */
public <V extends Object> void publishToNats(final JavaDStream<V> stream, final Function<V, byte[]> dataEncoder) {
	logger.trace("publishToNats(JavaDStream<String> stream)");
	stream.foreachRDD((VoidFunction<JavaRDD<V>>) rdd -> {
		logger.trace("stream.foreachRDD");
		rdd.foreachPartitionAsync(objects -> {
			logger.trace("rdd.foreachPartition");
			final SparkToNatsConnector<?> connector = getConnector();
			while(objects.hasNext()) {
				final V obj = objects.next();
				logger.trace("Will publish {}", obj);
				connector.publishToNats(dataEncoder.apply(obj));
			}
			returnConnector(connector);  // return to the pool for future reuse
		});
	});
}
 
Developer ID: Logimethods, Project: nats-connector-spark, Lines of code: 21, Source file: SparkToNatsConnectorPool.java

Example 4: publishToNatsAsKeyValue

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
/**
 * @param stream the Spark stream (composed of key/value records) to publish to NATS
 * @param dataEncoder the function used to encode the Spark stream records into the NATS message payloads
 */
public <K extends Object, V extends Object> void publishToNatsAsKeyValue(final JavaPairDStream<K, V> stream, final Function<V, byte[]> dataEncoder) {
	logger.trace("publishToNats(JavaPairDStream<String, String> stream)");
	setStoredAsKeyValue(true);
	
	stream.foreachRDD((VoidFunction<JavaPairRDD<K, V>>) rdd -> {
		logger.trace("stream.foreachRDD");
		rdd.foreachPartitionAsync((VoidFunction<Iterator<Tuple2<K,V>>>) tuples -> {
			logger.trace("rdd.foreachPartition");
			final SparkToNatsConnector<?> connector = getConnector();
			while(tuples.hasNext()) {
				final Tuple2<K,V> tuple = tuples.next();
				logger.trace("Will publish {}", tuple);
				connector.publishToNats(tuple._1.toString(), dataEncoder.apply(tuple._2));
			}
			returnConnector(connector);  // return to the pool for future reuse
		});
	});
}
 
Developer ID: Logimethods, Project: nats-connector-spark, Lines of code: 23, Source file: SparkToNatsConnectorPool.java

Example 5: main

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
public static void main(String[] args) {
	SparkConf conf = new SparkConf().setAppName("VideoInput").setMaster("local[2]");
	JavaSparkContext sc = new JavaSparkContext(conf);
	
	Configuration hc = new org.apache.hadoop.conf.Configuration();
	JavaPairRDD<Text, HBMat> video = sc.newAPIHadoopFile("data/bike.avi", VideoInputFormat.class, Text.class, HBMat.class,hc);
	
	video.foreach(new VoidFunction<Tuple2<Text,HBMat>>() {	
		@Override
		public void call(Tuple2<Text, HBMat> tuple) throws Exception {
			HBMat image = (HBMat)tuple._2;
			System.out.print(image.getBmat().dump());
		}
	});
	
	System.out.print(video.count());
}
 
Developer ID: OpenVMC, Project: HadoopCV, Lines of code: 18, Source file: InputFormatTest.java

Example 6: SaveRasterImageAsLocalFile

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
/**
 * Save raster image as a local file.
 *
 * @param distributedImage the distributed image
 * @param outputPath the output path
 * @param imageType the image type
 * @param zoomLevel the zoom level
 * @param partitionOnX the partition on X
 * @param partitionOnY the partition on Y
 * @return true, if successful
 * @throws Exception the exception
 */
public boolean SaveRasterImageAsLocalFile(JavaPairRDD<Integer,ImageSerializableWrapper> distributedImage, final String outputPath, final ImageType imageType, final int zoomLevel, final int partitionOnX, final int partitionOnY) throws Exception
{
	logger.info("[GeoSparkViz][SaveRasterImageAsLocalFile][Start]");
	for(int i=0;i<partitionOnX*partitionOnY;i++) {
		deleteLocalFile(outputPath+"-"+ RasterizationUtils.getImageTileName(zoomLevel,partitionOnX, partitionOnY,i),imageType);
	}
	distributedImage.foreach(new VoidFunction<Tuple2<Integer, ImageSerializableWrapper>>() {
		@Override
		public void call(Tuple2<Integer, ImageSerializableWrapper> integerImageSerializableWrapperTuple2) throws Exception {
			SaveRasterImageAsLocalFile(integerImageSerializableWrapperTuple2._2.getImage(), outputPath+"-"+RasterizationUtils.getImageTileName(zoomLevel,partitionOnX, partitionOnY,integerImageSerializableWrapperTuple2._1), imageType);
		}
	});
	logger.info("[GeoSparkViz][SaveRasterImageAsLocalFile][Stop]");
	return true;
}
 
Developer ID: DataSystemsLab, Project: GeoSpark, Lines of code: 28, Source file: GeoSparkVizImageGenerator.java

Example 7: SaveRasterImageAsHadoopFile

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
/**
 * Save raster image as a Hadoop file.
 *
 * @param distributedImage the distributed image
 * @param outputPath the output path
 * @param imageType the image type
 * @param zoomLevel the zoom level
 * @param partitionOnX the partition on X
 * @param partitionOnY the partition on Y
 * @return true, if successful
 * @throws Exception the exception
 */
public boolean SaveRasterImageAsHadoopFile(JavaPairRDD<Integer,ImageSerializableWrapper> distributedImage, final String outputPath, final ImageType imageType, final int zoomLevel, final int partitionOnX, final int partitionOnY) throws Exception
{
	logger.info("[GeoSparkViz][SaveRasterImageAsHadoopFile][Start]");
	for(int i=0;i<partitionOnX*partitionOnY;i++) {
		deleteHadoopFile(outputPath+"-"+RasterizationUtils.getImageTileName(zoomLevel,partitionOnX, partitionOnY,i)+".", imageType);
	}
	distributedImage.foreach(new VoidFunction<Tuple2<Integer, ImageSerializableWrapper>>() {
		@Override
		public void call(Tuple2<Integer, ImageSerializableWrapper> integerImageSerializableWrapperTuple2) throws Exception {
			SaveRasterImageAsHadoopFile(integerImageSerializableWrapperTuple2._2.getImage(), outputPath+"-"+RasterizationUtils.getImageTileName(zoomLevel,partitionOnX, partitionOnY,integerImageSerializableWrapperTuple2._1), imageType);
		}
	});
	logger.info("[GeoSparkViz][SaveRasterImageAsHadoopFile][Stop]");
	return true;
}
 
Developer ID: DataSystemsLab, Project: GeoSpark, Lines of code: 28, Source file: GeoSparkVizImageGenerator.java

Example 8: SaveRasterImageAsS3File

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
/**
 * Save raster image as an S3 file.
 *
 * @param distributedImage the distributed image
 * @param regionName the region name
 * @param accessKey the access key
 * @param secretKey the secret key
 * @param bucketName the bucket name
 * @param path the path
 * @param imageType the image type
 * @param zoomLevel the zoom level
 * @param partitionOnX the partition on X
 * @param partitionOnY the partition on Y
 * @return true, if successful
 */
public boolean SaveRasterImageAsS3File(JavaPairRDD<Integer,ImageSerializableWrapper> distributedImage,
									   final String regionName, final String accessKey, final String secretKey,
									   final String bucketName, final String path, final ImageType imageType, final int zoomLevel, final int partitionOnX, final int partitionOnY)
{
	logger.info("[GeoSparkViz][SaveRasterImageAsS3File][Start]");
	S3Operator s3Operator = new S3Operator(regionName, accessKey, secretKey);
	for(int i=0;i<partitionOnX*partitionOnY;i++) {
		s3Operator.deleteImage(bucketName, path+"-"+RasterizationUtils.getImageTileName(zoomLevel,partitionOnX, partitionOnY,i)+"."+imageType.getTypeName());
	}
	distributedImage.foreach(new VoidFunction<Tuple2<Integer, ImageSerializableWrapper>>() {
		@Override
		public void call(Tuple2<Integer, ImageSerializableWrapper> integerImageSerializableWrapperTuple2) throws Exception {
			SaveRasterImageAsS3File(integerImageSerializableWrapperTuple2._2.getImage(), regionName, accessKey, secretKey, bucketName, path+"-"+RasterizationUtils.getImageTileName(zoomLevel,partitionOnX, partitionOnY,integerImageSerializableWrapperTuple2._1), imageType);
		}
	});
	logger.info("[GeoSparkViz][SaveRasterImageAsS3File][Stop]");
	return true;
}
 
Developer ID: DataSystemsLab, Project: GeoSpark, Lines of code: 34, Source file: GeoSparkVizImageGenerator.java

Example 9: SaveRasterImageAsLocalFile

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
/**
 * Save raster image as a local file.
 *
 * @param distributedImage the distributed image
 * @param outputPath the output path
 * @param imageType the image type
 * @param zoomLevel the zoom level
 * @param partitionOnX the partition on X
 * @param partitionOnY the partition on Y
 * @return true, if successful
 * @throws Exception the exception
 */
public boolean SaveRasterImageAsLocalFile(JavaPairRDD<Integer,ImageSerializableWrapper> distributedImage, final String outputPath, final ImageType imageType, final int zoomLevel, final int partitionOnX, final int partitionOnY) throws Exception
{
	logger.info("[GeoSparkViz][SaveRasterImageAsLocalFile][Start]");
	for(int i=0;i<partitionOnX*partitionOnY;i++) {
		deleteLocalFile(outputPath+"-"+ RasterizationUtils.getImageTileName(zoomLevel,partitionOnX, partitionOnY,i),imageType);
	}
	distributedImage.foreach(new VoidFunction<Tuple2<Integer, ImageSerializableWrapper>>() {
		@Override
		public void call(Tuple2<Integer, ImageSerializableWrapper> integerImageSerializableWrapperTuple2) throws Exception {
			SaveRasterImageAsLocalFile(integerImageSerializableWrapperTuple2._2.image, outputPath+"-"+RasterizationUtils.getImageTileName(zoomLevel,partitionOnX, partitionOnY,integerImageSerializableWrapperTuple2._1), imageType);
		}
	});
	logger.info("[GeoSparkViz][SaveRasterImageAsLocalFile][Stop]");
	return true;
}
 
Developer ID: DataSystemsLab, Project: GeoSpark, Lines of code: 28, Source file: ImageGenerator.java

Example 10: SaveRasterImageAsHadoopFile

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
/**
 * Save raster image as a Hadoop file.
 *
 * @param distributedImage the distributed image
 * @param outputPath the output path
 * @param imageType the image type
 * @param zoomLevel the zoom level
 * @param partitionOnX the partition on X
 * @param partitionOnY the partition on Y
 * @return true, if successful
 * @throws Exception the exception
 */
public boolean SaveRasterImageAsHadoopFile(JavaPairRDD<Integer,ImageSerializableWrapper> distributedImage, final String outputPath, final ImageType imageType, final int zoomLevel, final int partitionOnX, final int partitionOnY) throws Exception
{
	logger.info("[GeoSparkViz][SaveRasterImageAsHadoopFile][Start]");
	for(int i=0;i<partitionOnX*partitionOnY;i++) {
		deleteHadoopFile(outputPath+"-"+RasterizationUtils.getImageTileName(zoomLevel,partitionOnX, partitionOnY,i)+".", imageType);
	}
	distributedImage.foreach(new VoidFunction<Tuple2<Integer, ImageSerializableWrapper>>() {
		@Override
		public void call(Tuple2<Integer, ImageSerializableWrapper> integerImageSerializableWrapperTuple2) throws Exception {
			SaveRasterImageAsHadoopFile(integerImageSerializableWrapperTuple2._2.image, outputPath+"-"+RasterizationUtils.getImageTileName(zoomLevel,partitionOnX, partitionOnY,integerImageSerializableWrapperTuple2._1), imageType);
		}
	});
	logger.info("[GeoSparkViz][SaveRasterImageAsHadoopFile][Stop]");
	return true;
}
 
Developer ID: DataSystemsLab, Project: GeoSpark, Lines of code: 28, Source file: ImageGenerator.java

Example 11: SaveRasterImageAsS3File

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
/**
 * Save raster image as an S3 file.
 *
 * @param distributedImage the distributed image
 * @param regionName the region name
 * @param accessKey the access key
 * @param secretKey the secret key
 * @param bucketName the bucket name
 * @param path the path
 * @param imageType the image type
 * @param zoomLevel the zoom level
 * @param partitionOnX the partition on X
 * @param partitionOnY the partition on Y
 * @return true, if successful
 */
public boolean SaveRasterImageAsS3File(JavaPairRDD<Integer,ImageSerializableWrapper> distributedImage,
									   final String regionName, final String accessKey, final String secretKey,
									   final String bucketName, final String path, final ImageType imageType, final int zoomLevel, final int partitionOnX, final int partitionOnY)
{
	logger.info("[GeoSparkViz][SaveRasterImageAsS3File][Start]");
	S3Operator s3Operator = new S3Operator(regionName, accessKey, secretKey);
	for(int i=0;i<partitionOnX*partitionOnY;i++) {
		s3Operator.deleteImage(bucketName, path+"-"+RasterizationUtils.getImageTileName(zoomLevel,partitionOnX, partitionOnY,i)+"."+imageType.getTypeName());
	}
	distributedImage.foreach(new VoidFunction<Tuple2<Integer, ImageSerializableWrapper>>() {
		@Override
		public void call(Tuple2<Integer, ImageSerializableWrapper> integerImageSerializableWrapperTuple2) throws Exception {
			SaveRasterImageAsS3File(integerImageSerializableWrapperTuple2._2.image, regionName, accessKey, secretKey, bucketName, path+"-"+RasterizationUtils.getImageTileName(zoomLevel,partitionOnX, partitionOnY,integerImageSerializableWrapperTuple2._1), imageType);
		}
	});
	logger.info("[GeoSparkViz][SaveRasterImageAsS3File][Stop]");
	return true;
}
 
Developer ID: DataSystemsLab, Project: GeoSpark, Lines of code: 34, Source file: ImageGenerator.java

Example 12: publishTriples

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
public void publishTriples(JavaRDD<String> datasetRDD) throws IOException {
		logger.debug("Initiating publication of triples on the queue...");

		datasetRDD.foreach(new VoidFunction<String>() {
			private static final long serialVersionUID = 7603190977649586962L;

			@Override
			public void call(String stmt) throws Exception {
				// publish triple (statement) into the exchange 
				if(stmt != null) {
					if(channel == null) {
						logger.warn("Channel was found to be null attempting to publish, reconnecting...");
						connect();
					}
					channel.basicPublish(EXCHANGE_NAME, "", null, stmt.getBytes());
				}
			}
		});
		
		logger.debug("All triples published on the queue. Processing metrics...");
}
 
Developer ID: EIS-Bonn, Project: Luzzu, Lines of code: 22, Source file: TriplePublisher.java

Example 13: create

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
public static <A extends JavaRDDLike<?, ?>> VoidFunction<A> create(JavaStreamingContext jsc, long amount, String printf) {
  final LongAccumulator stopAcc = jsc.ssc().sc().longAccumulator();
  return rdd -> {
    // Compute the batch size once; each count() call triggers a separate Spark job.
    long count = rdd.count();
    if (printf != null)
      System.out.printf(printf, count);
    if (count == 0L) {
      stopAcc.add(1L);
      if (stopAcc.value() >= amount)
        jsc.stop();
    } else
      stopAcc.reset();
  };
}
 
Developer ID: ciandt-dev, Project: gcp, Lines of code: 14, Source file: IdleStop.java
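
A hedged sketch of how create might be wired (the queue-backed stream, batch interval, and format string are illustrative assumptions; IdleStop is assumed to be on the classpath). The accumulator counts consecutive empty batches, stops the streaming context once amount of them have been seen, and resets whenever a batch carries data.

import java.util.Arrays;
import java.util.LinkedList;
import java.util.Queue;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class IdleStopDemo {
  public static void main(String[] args) throws InterruptedException {
    JavaStreamingContext jsc = new JavaStreamingContext(
        new SparkConf().setAppName("IdleStopDemo").setMaster("local[2]"),
        Durations.seconds(1));

    // Hypothetical input: the queue drains after one batch, so later batches are empty.
    Queue<JavaRDD<String>> queue = new LinkedList<>();
    queue.add(jsc.sparkContext().parallelize(Arrays.asList("a", "b")));
    JavaDStream<String> stream = jsc.queueStream(queue);

    // Stop the context after 3 consecutive empty batches, printing a count per batch.
    stream.foreachRDD(IdleStop.create(jsc, 3, "Batch contained %d records%n"));

    jsc.start();
    jsc.awaitTermination();
  }
}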

Example 14: noOp

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
public static <T> VoidFunction<T> noOp() {
    return new VoidFunction<T>() {
        @Override
        public void call(T t) {
            // do nothing
        }
    };
}
 
Developer ID: ameyamk, Project: spark-streaming-direct-kafka, Lines of code: 9, Source file: Functions.java
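
One plausible use of such a no-op (an assumption about intent, not stated in the source) is to force a lazily evaluated RDD to be computed for its side effects, for example to populate a cache, without doing any per-element work. A minimal runnable sketch, assuming the Functions class above is on the classpath:

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.storage.StorageLevel;

public class NoOpDemo {
  public static void main(String[] args) {
    try (JavaSparkContext sc = new JavaSparkContext(
        new SparkConf().setAppName("NoOpDemo").setMaster("local[2]"))) {
      // foreach(noOp()) triggers the computation (and here, the caching)
      // without performing any per-element action.
      sc.parallelize(Arrays.asList(1, 2, 3))
        .persist(StorageLevel.MEMORY_ONLY())
        .foreach(Functions.noOp());
    }
  }
}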

Example 15: main

import org.apache.spark.api.java.function.VoidFunction; // import the required package/class
public static void main(String[] args) {
    SparkConf sparkConf = new SparkConf().setMaster("local[1]").setAppName("StreamMultiTopic");
    JavaSparkContext sc = new JavaSparkContext(sparkConf);

    CuratorFramework curator = OffsetManager.createCurator("127.0.0.1:2181");
    KafkaConsumerPoolFactory<String,String> poolFactory = new KafkaConsumerPoolFactory<>("127.0.0.1:9092", StringDecoder.class, StringDecoder.class);

    ControllerKafkaTopics<String,String> topics = new ControllerKafkaTopics<>(sc.sc(), curator, poolFactory);
    topics.registerTopic("test_multi", "test");
    topics.registerTopic("test_multi", "test2");

    new StreamProcessor<String,String>(topics) {
        @Override
        public final void process() {
            JavaRDD<Tuple2<String,String>> rdd = fetch().toJavaRDD();

            rdd.foreachPartition(new VoidFunction<Iterator<Tuple2<String,String>>>() {
                @Override
                public final void call(final Iterator<Tuple2<String,String>> it) {
                    while (it.hasNext()) {
                        Tuple2<String,String> e = it.next();
                        LOG.info("key=" + e._1 + " message=" + e._2());
                    }
                }
            });

            commit();
        }
    }.run();

    sc.sc().stop();
}
 
Developer ID: jeoffreylim, Project: maelstrom, Lines of code: 33, Source file: StreamMultiTopic.java


Note: The org.apache.spark.api.java.function.VoidFunction class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not republish without permission.