

Java JavaRDD.foreach Method Code Examples

This article collects typical usages of the Java method org.apache.spark.api.java.JavaRDD.foreach. If you are wondering what JavaRDD.foreach does, how to call it, or want real-world examples of it, the curated method examples below may help. You can also browse further usage examples of its enclosing class, org.apache.spark.api.java.JavaRDD.


Eight code examples of the JavaRDD.foreach method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
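Before the project-specific examples, here is a minimal, self-contained sketch of the method (the class name, app name, and sample data are placeholders, not taken from any project below). Note that the function passed to foreach runs on the executors, so in cluster mode its println output lands in the executor logs rather than the driver console:

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class ForeachSketch {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("ForeachSketch").setMaster("local[2]");
        // JavaSparkContext is Closeable, so try-with-resources cleans it up.
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {
            JavaRDD<String> words = sc.parallelize(Arrays.asList("a", "b", "c"));
            // foreach is an action: it triggers evaluation and applies the
            // function once per element, on whichever executor holds it.
            words.foreach(word -> System.out.println(word));
        }
    }
}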

Example 1: test

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
@Test
public void test() {
    String hdfsPath = "hdfs://10.196.83.90:9000/stonk/spark/aa/spark-task--aa-b5x59zpv/out3";

    SparkConf conf = new SparkConf().setAppName("111").setMaster("local[3]");
    JavaSparkContext context = new JavaSparkContext(conf);
    JavaRDD<String> rdd = context.textFile(hdfsPath);
    rdd.foreach(str -> System.out.println(str));
    context.close(); // release the context so repeated test runs don't collide
}
 
Developer ID: hays2hong | Project: stonk | Lines of code: 10 | Source: SparkHDFSTest.java
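Because the master above is local[3], the println output is visible in the test console; on a real cluster it would go to the executor logs instead. To inspect a small RDD from the driver, a collect-based variant (a sketch, suitable only for data that fits in driver memory) can replace the foreach call:

// Pulls the elements to the driver first, so the printing happens locally.
rdd.collect().forEach(System.out::println);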

Example 2: assertRDDEquals

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
public <T> void assertRDDEquals(String message, JavaRDD<T> expected, JavaRDD<T> result) {
    Option<Tuple3<T, Integer, Integer>> diff = JavaRDDComparisons.compareRDD(expected, result);
    if (diff.isDefined()) {
        log.error("EXPECTED");
        expected.foreach(row -> log.error(row.toString()));
        log.error("RESULT");
        result.foreach(row -> log.error(row.toString()));
        log.error("FIRST DIFF");
        Tuple3<T, Integer, Integer> diffTriple = diff.get();
        log.error(diffTriple.toString());
        if (diffTriple._2() == 0) {
            log.error("(row not expected but present in result {} times)", diffTriple._3());
        }
        if (diffTriple._3() == 0) {
            log.error("(row expected {} times but not present)", diffTriple._2());
        }
        throw new AssertionError(message);
    }
}
 
Developer ID: Merck | Project: rdf2x | Lines of code: 20 | Source: TestSparkContextProvider.java
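A possible usage of this helper from a test body, assuming the provider exposes a JavaSparkContext jsc (the variable name and sample data are illustrative, not from the rdf2x project):

JavaRDD<Integer> expected = jsc.parallelize(Arrays.asList(1, 2, 3));
JavaRDD<Integer> result = jsc.parallelize(Arrays.asList(1, 2, 3));
// Passes silently when the RDDs are equal; otherwise it logs both RDDs
// plus the first difference and throws an AssertionError with the message.
assertRDDEquals("parsed RDD should match the expected rows", expected, result);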

Example 3: splitFastq

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
private static void splitFastq(FileStatus fst, String fqPath, String splitDir, int splitlen, JavaSparkContext sc) throws IOException {
  Path fqpath = new Path(fqPath);
  String fqname = fqpath.getName();
  String[] ns = fqname.split("\\.");
  //TODO: Handle also compressed files
  List<FileSplit> nlif = NLineInputFormat.getSplitsForFile(fst, sc.hadoopConfiguration(), splitlen);

  JavaRDD<FileSplit> splitRDD = sc.parallelize(nlif);

  splitRDD.foreach(split -> {
    FastqRecordReader fqreader = new FastqRecordReader(new Configuration(), split);
    writeFastqFile(fqreader, new Configuration(), splitDir + "/split_" + split.getStart() + "." + ns[1]);
  });
}
 
Developer ID: NGSeq | Project: ViraPipe | Lines of code: 17 | Source: InterleaveMulti.java

Example 4: main

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
public static void main(String[] args) {
    System.setProperty("hadoop.home.dir", "E:\\sumitK\\Hadoop");
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.WARN);

    SparkSession sparkSession = SparkSession
        .builder()
        .master("local")
        .config("spark.sql.warehouse.dir", "file:///E:/sumitK/Hadoop/warehouse")
        .appName("JavaALSExample")
        .getOrCreate();

    RDD<String> textFile = sparkSession.sparkContext().textFile("C:/Users/sumit.kumar/git/learning/src/main/resources/pep_json.json", 2);

    JavaRDD<PersonDetails> mapParser = textFile.toJavaRDD().map(v1 -> new ObjectMapper().readValue(v1, PersonDetails.class));

    mapParser.foreach(t -> System.out.println(t));

    Dataset<Row> anotherPeople = sparkSession.read().json(textFile);
    anotherPeople.printSchema();
    anotherPeople.show();

    Dataset<Row> json_rec = sparkSession.read().json("C:/Users/sumit.kumar/git/learning/src/main/resources/pep_json.json");
    json_rec.printSchema();
    json_rec.show();

    StructType schema = new StructType(new StructField[] {
        DataTypes.createStructField("cid", DataTypes.IntegerType, true),
        DataTypes.createStructField("county", DataTypes.StringType, true),
        DataTypes.createStructField("firstName", DataTypes.StringType, true),
        DataTypes.createStructField("sex", DataTypes.StringType, true),
        DataTypes.createStructField("year", DataTypes.StringType, true),
        DataTypes.createStructField("dateOfBirth", DataTypes.TimestampType, true) });

    /* StructType pep = new StructType(new StructField[] {
        new StructField("Count", DataTypes.StringType, true, Metadata.empty()),
        new StructField("County", DataTypes.StringType, true, Metadata.empty()),
        new StructField("First Name", DataTypes.StringType, true, Metadata.empty()),
        new StructField("Sex", DataTypes.StringType, true, Metadata.empty()),
        new StructField("Year", DataTypes.StringType, true, Metadata.empty()),
        new StructField("timestamp", DataTypes.TimestampType, true, Metadata.empty()) }); */

    Dataset<Row> person_mod = sparkSession.read().schema(schema).json(textFile);
    person_mod.printSchema();
    person_mod.show();

    person_mod.write().format("json").mode("overwrite").save("C:/Users/sumit.kumar/git/learning/src/main/resources/pep_out.json");
}
 
Developer ID: PacktPublishing | Project: Apache-Spark-2x-for-Java-Developers | Lines of code: 53 | Source: JsonFileOperations.java

Example 5: splitFastq

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
private static void splitFastq(FileStatus fst, String fqPath, String splitDir, int splitlen, JavaSparkContext sc) throws IOException {
  Path fqpath = new Path(fqPath);
  String fqname = fqpath.getName();
  String[] ns = fqname.split("\\.");
  List<FileSplit> nlif = NLineInputFormat.getSplitsForFile(fst, sc.hadoopConfiguration(), splitlen);

  JavaRDD<FileSplit> splitRDD = sc.parallelize(nlif);

  splitRDD.foreach(split -> {
    FastqRecordReader fqreader = new FastqRecordReader(new Configuration(), split);
    writeFastqFile(fqreader, new Configuration(), splitDir + "/split_" + split.getStart() + "." + ns[1]);
  });
}
 
Developer ID: NGSeq | Project: ViraPipe | Lines of code: 16 | Source: Decompress.java

Example 6: main

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
public static void main(String[] args) {
    System.setProperty("hadoop.home.dir", "E:\\sumitK\\Hadoop");

    SparkSession sparkSession = SparkSession
        .builder()
        .master("local")
        .config("spark.sql.warehouse.dir", "file:///E:/sumitK/Hadoop/warehouse")
        .appName("JavaALSExample")
        .getOrCreate();
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.WARN);

    JavaRDD<Movie> moviesRDD = sparkSession
        .read().textFile("C:/Users/sumit.kumar/git/learning/src/main/resources/movies.csv")
        .javaRDD()
        .filter(str -> str != null)
        .filter(str -> str.length() != 0)
        .filter(str -> !str.contains("movieId"))
        .map(str -> Movie.parseRating(str));

    moviesRDD.foreach(m -> System.out.println(m));

    Dataset<Row> csv_read = sparkSession.read().format("com.databricks.spark.csv")
        .option("header", "true")
        .option("inferSchema", "true")
        .load("C:/Users/sumit.kumar/git/learning/src/main/resources/movies.csv");

    csv_read.printSchema();
    csv_read.show();

    StructType customSchema = new StructType(new StructField[] {
        new StructField("movieId", DataTypes.LongType, true, Metadata.empty()),
        new StructField("title", DataTypes.StringType, true, Metadata.empty()),
        new StructField("genres", DataTypes.StringType, true, Metadata.empty())
    });

    Dataset<Row> csv_custom_read = sparkSession.read().format("com.databricks.spark.csv")
        .option("header", "true")
        .schema(customSchema)
        .load("C:/Users/sumit.kumar/git/learning/src/main/resources/movies.csv");

    csv_custom_read.printSchema();
    csv_custom_read.show();

    csv_custom_read.write()
        .format("com.databricks.spark.csv")
        .option("header", "true")
        .option("codec", "org.apache.hadoop.io.compress.GzipCodec")
        .save("C:/Users/sumit.kumar/git/learning/src/main/resources/newMovies.csv");
}
 
Developer ID: PacktPublishing | Project: Apache-Spark-2x-for-Java-Developers | Lines of code: 55 | Source: CSVFileOperations.java

Example 7: main

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
/**
 * @param args
 */
public static void main(String[] args) {
    //C:\Users\sumit.kumar\Downloads\bin\warehouse
    //System.setProperty("hadoop.home.dir", "C:\\Users\\sumit.kumar\\Downloads");
    String logFile = "src/main/resources/Apology_by_Plato.txt"; // Should be some file on your system
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.WARN);
    SparkConf conf = new SparkConf().setMaster("local").setAppName("ActionExamples").set("spark.hadoop.validateOutputSpecs", "false");
    JavaSparkContext sparkContext = new JavaSparkContext(conf);
    JavaRDD<Integer> rdd = sparkContext.parallelize(Arrays.asList(1, 2, 3, 4, 5), 3).cache();
    JavaRDD<Integer> evenRDD = rdd.filter(new org.apache.spark.api.java.function.Function<Integer, Boolean>() {
        @Override
        public Boolean call(Integer v1) throws Exception {
            return (v1 % 2) == 0;
        }
    });

    evenRDD.persist(StorageLevel.MEMORY_AND_DISK());
    evenRDD.foreach(new VoidFunction<Integer>() {
        @Override
        public void call(Integer t) throws Exception {
            System.out.println("The value of RDD are :" + t);
        }
    });
    // unpersisting the RDDs
    evenRDD.unpersist();
    rdd.unpersist();

    /* JavaRDD<String> lines = spark.read().textFile(logFile).javaRDD().cache();
    System.out.println("DEBUG: \n" + lines.toDebugString());
    long word = lines.count();
    JavaRDD<String> distinctLines = lines.distinct();
    System.out.println("DEBUG: \n" + distinctLines.toDebugString());
    JavaRDD<String> finalRdd = lines.subtract(distinctLines);

    System.out.println("DEBUG: \n" + finalRdd.toDebugString());
    System.out.println("The count is " + word);
    System.out.println("The count is " + distinctLines.count());
    System.out.println("The count is " + finalRdd.count());

    finalRdd.foreach(new VoidFunction<String>() {
        @Override
        public void call(String t) throws Exception {
            System.out.println(t);
        }
    }); */

    /* SparkConf conf = new SparkConf().setAppName("Simple Application");
    JavaSparkContext sc = new JavaSparkContext(conf);
    StorageLevel newLevel;
    JavaRDD<String> logData = sc.textFile(logFile).cache();

    long numAs = logData.filter(new Function(logFile, logFile, logFile, logFile, false) {
        public Boolean call(String s) { return s.contains("a"); }
    }).count();

    long numBs = logData.filter(new Function(logFile, logFile, logFile, logFile, false) {
        public Boolean call(String s) { return s.contains("b"); }
    }).count();

    System.out.println("Lines with a: " + numAs + ", lines with b: " + numBs);

    sc.stop(); */
}
 
Developer ID: PacktPublishing | Project: Apache-Spark-2x-for-Java-Developers | Lines of code: 70 | Source: PersistExample.java
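Since these examples target Java 8, the anonymous Function and VoidFunction classes above can be collapsed into lambdas; a behavior-equivalent sketch of the live portion:

JavaRDD<Integer> evenRDD = rdd.filter(v1 -> v1 % 2 == 0);
evenRDD.persist(StorageLevel.MEMORY_AND_DISK());
evenRDD.foreach(t -> System.out.println("The value of RDD are :" + t));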

Example 8: splitFastq

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
private static void splitFastq(FileStatus fst, String splitDir, int splitlen, JavaSparkContext sc) throws IOException {
  //TODO: Handle also compressed files
  List<FileSplit> nlif = NLineInputFormat.getSplitsForFile(fst, new Configuration(), splitlen);

  JavaRDD<FileSplit> splitRDD = sc.parallelize(nlif);

  splitRDD.foreach(split -> {
    FastqRecordReader fqreader = new FastqRecordReader(new Configuration(), split);
    writeFastqFile(fqreader, new Configuration(), splitDir + "/" + split.getPath().getName() + "_" + split.getStart() + ".fq");
  });
}
 
Developer ID: NGSeq | Project: ViraPipe | Lines of code: 15 | Source: DecompressInterleave.java


Note: The org.apache.spark.api.java.JavaRDD.foreach method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their developers, and copyright remains with the original authors. For distribution and use, please refer to each project's license; do not reproduce without permission.