

Java JavaSparkContext.fromSparkContext Method Code Examples

This article collects typical usage examples of the Java method org.apache.spark.api.java.JavaSparkContext.fromSparkContext. If you are asking yourself how JavaSparkContext.fromSparkContext works, how to call it, or what real-world uses of it look like, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.spark.api.java.JavaSparkContext.


The following shows 5 code examples of the JavaSparkContext.fromSparkContext method, ordered by popularity by default.
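
Before diving into the examples, here is a minimal, self-contained sketch of what fromSparkContext does: it wraps an already-running Scala SparkContext in a JavaSparkContext for use with the Java API, instead of creating a second context. The class name and settings below are placeholders, not taken from the examples:

import java.util.Arrays;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaSparkContext;

public class FromSparkContextSketch {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("fromSparkContext-sketch");
        SparkContext sc = SparkContext.getOrCreate(conf);             // obtain (or reuse) the underlying Scala context
        JavaSparkContext jsc = JavaSparkContext.fromSparkContext(sc); // wrap it for the Java API; no second context is created
        System.out.println(jsc.parallelize(Arrays.asList(1, 2, 3)).count()); // prints 3
        jsc.stop(); // stopping the wrapper also stops the shared underlying context
    }
}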

Example 1: setUp

import org.apache.spark.api.java.JavaSparkContext; // import the class this method belongs to
@Before
public void setUp() {
    System.setProperty("hadoop.home.dir", "C:\\Users\\VASILIS\\Documents\\hadoop_home"); //only for local mode
    
    spark = SparkSession.builder()
        .appName("test")
        .config("spark.sql.warehouse.dir", "file:///tmp") // local warehouse directory
        .config("spark.executor.instances", 1)
        .config("spark.executor.cores", 1)
        .config("spark.executor.memory", "1G")
        .config("spark.driver.maxResultSize", "1g")
        .config("spark.master", "local")
        .getOrCreate();

    jsc = JavaSparkContext.fromSparkContext(spark.sparkContext()); // wrap the session's underlying SparkContext
}
 
Developer: vefthym, Project: MinoanER, Lines: 19, Source: EntityBasedCNPMapPhaseTest.java
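
Since fromSparkContext only wraps the session's existing context, tests like this normally release resources by stopping the SparkSession itself rather than the wrapper. A hypothetical @After counterpart to the setUp above (spark and jsc are the test class's fields; the original source is not shown stopping them):

@After
public void tearDown() {
    if (spark != null) {
        spark.stop(); // also stops the SparkContext that jsc wraps
    }
}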

Example 2: setUp

import org.apache.spark.api.java.JavaSparkContext; // import the class this method belongs to
@Before
public void setUp() {        
    System.setProperty("hadoop.home.dir", "C:\\Users\\VASILIS\\Documents\\hadoop_home"); //only for local mode
    
    spark = SparkSession.builder()
        .appName("test")
        .config("spark.sql.warehouse.dir", "file:///tmp") // local warehouse directory
        .config("spark.executor.instances", 1)
        .config("spark.executor.cores", 1)
        .config("spark.executor.memory", "1G")
        .config("spark.driver.maxResultSize", "1g")
        .config("spark.master", "local")
        .getOrCreate();

    jsc = JavaSparkContext.fromSparkContext(spark.sparkContext()); // wrap the session's underlying SparkContext
}
 
Developer: vefthym, Project: MinoanER, Lines: 19, Source: BlockFilteringAdvancedTest.java

Example 3: buildStreamingContext

import org.apache.spark.api.java.JavaSparkContext; // import the class this method belongs to
protected final JavaStreamingContext buildStreamingContext() {
  log.info("Starting SparkContext with interval {} seconds", generationIntervalSec);

  SparkConf sparkConf = new SparkConf();

  // Only for tests, really
  if (sparkConf.getOption("spark.master").isEmpty()) {
    log.info("Overriding master to {} for tests", streamingMaster);
    sparkConf.setMaster(streamingMaster);
  }
  // Only for tests, really
  if (sparkConf.getOption("spark.app.name").isEmpty()) {
    String appName = "Oryx" + getLayerName();
    if (id != null) {
      appName = appName + "-" + id;
    }
    log.info("Overriding app name to {} for tests", appName);
    sparkConf.setAppName(appName);
  }
  extraSparkConfig.forEach((key, value) -> sparkConf.setIfMissing(key, value.toString()));

  // Turn this down to prevent long blocking at shutdown
  sparkConf.setIfMissing(
      "spark.streaming.gracefulStopTimeout",
      Long.toString(TimeUnit.MILLISECONDS.convert(generationIntervalSec, TimeUnit.SECONDS)));
  sparkConf.setIfMissing("spark.cleaner.ttl", Integer.toString(20 * generationIntervalSec));
  long generationIntervalMS =
      TimeUnit.MILLISECONDS.convert(generationIntervalSec, TimeUnit.SECONDS);

  JavaSparkContext jsc = JavaSparkContext.fromSparkContext(SparkContext.getOrCreate(sparkConf));
  return new JavaStreamingContext(jsc, new Duration(generationIntervalMS));
}
 
Developer: oncewang, Project: oryx2, Lines: 33, Source: AbstractSparkLayer.java
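
For context, a caller of buildStreamingContext would typically start the returned context and then block until shutdown. The sketch below is an assumed call site, not part of the Oryx code shown here:

void runLayer() throws InterruptedException {
    JavaStreamingContext streamingContext = buildStreamingContext();
    // ... attach the layer's streaming transformations here ...
    streamingContext.start();            // begin scheduling micro-batches
    streamingContext.awaitTermination(); // block until stop() is called or an error occurs
}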

Example 4: main

import org.apache.spark.api.java.JavaSparkContext; // import the class this method belongs to
public static void main(String[] args) {
    String tmpPath;
    String master;
    String inputPath;        
    String outputPath;
    
    if (args.length == 0) {
        System.setProperty("hadoop.home.dir", "C:\\Users\\VASILIS\\Documents\\hadoop_home"); //only for local mode
        
        tmpPath = "/file:C:\\tmp";
        master = "local[2]";
        inputPath = "/file:C:\\Users\\VASILIS\\Documents\\OAEI_Datasets\\exportedBlocks\\testInput";            
        outputPath = "/file:C:\\Users\\VASILIS\\Documents\\OAEI_Datasets\\exportedBlocks\\testOutput";            
    } else {            
        tmpPath = "/file:/tmp/";
        //master = "spark://master:7077";
        inputPath = args[0];            
        outputPath = args[1];
        // delete existing output directories
        try {                                
            Utils.deleteHDFSPath(outputPath);
        } catch (IOException | URISyntaxException ex) {
            Logger.getLogger(MetaBlockingOnlyValuesCBS.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    String appName = "MetaBlocking CBS only values on "+inputPath.substring(inputPath.lastIndexOf("/", inputPath.length()-2)+1);
    SparkSession spark = Utils.setUpSpark(appName, 288, 8, 3, tmpPath);
    int PARALLELISM = spark.sparkContext().getConf().getInt("spark.default.parallelism", 152);        
    JavaSparkContext jsc = JavaSparkContext.fromSparkContext(spark.sparkContext()); 
    
    
    ////////////////////////
    //start the processing//
    ////////////////////////
    
    //Block Filtering
    System.out.println("\n\nStarting BlockFiltering, reading from "+inputPath);
    LongAccumulator BLOCK_ASSIGNMENTS_ACCUM = jsc.sc().longAccumulator();
    BlockFilteringAdvanced bf = new BlockFilteringAdvanced();
    JavaPairRDD<Integer,IntArrayList> entityIndex = bf.run(jsc.textFile(inputPath), BLOCK_ASSIGNMENTS_ACCUM); 
    entityIndex.cache();        
            
    //Blocks From Entity Index
    System.out.println("\n\nStarting BlocksFromEntityIndex...");
            
    LongAccumulator CLEAN_BLOCK_ACCUM = jsc.sc().longAccumulator();
    LongAccumulator NUM_COMPARISONS_ACCUM = jsc.sc().longAccumulator();
    
    BlocksFromEntityIndex bFromEI = new BlocksFromEntityIndex();
    JavaPairRDD<Integer, IntArrayList> blocksFromEI = bFromEI.run(entityIndex, CLEAN_BLOCK_ACCUM, NUM_COMPARISONS_ACCUM);
    blocksFromEI.persist(StorageLevel.DISK_ONLY());
    
    blocksFromEI.count(); //the simplest action just to run blocksFromEI and get the actual value for the counters below
    
    double BCin = (double) BLOCK_ASSIGNMENTS_ACCUM.value() / entityIndex.count(); // BCin = average number of block assignments per entity
    final int K = Math.max(1, ((Double) Math.floor(BCin)).intValue()); // K = max(1, ⌊BCin⌋)
    System.out.println(BLOCK_ASSIGNMENTS_ACCUM.value()+" block assignments");
    System.out.println(CLEAN_BLOCK_ACCUM.value()+" clean blocks");
    System.out.println(NUM_COMPARISONS_ACCUM.value()+" comparisons");
    System.out.println("BCin = "+BCin);
    System.out.println("K = "+K);
    
    entityIndex.unpersist();
    
    //CNP
    System.out.println("\n\nStarting CNP...");
    CNPCBSValuesOnly cnp = new CNPCBSValuesOnly();
    JavaPairRDD<Integer,IntArrayList> metablockingResults = cnp.run(blocksFromEI, K);
    
    metablockingResults
            .mapValues(x -> x.toString()).saveAsTextFile(outputPath); //only to see the output and add an action (saving to file may not be needed)
    System.out.println("Job finished successfully. Output written in "+outputPath);
}
 
Developer: vefthym, Project: MinoanER, Lines: 74, Source: MetaBlockingOnlyValuesCBS.java
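
One detail worth highlighting in this example: the count() on blocksFromEI exists because accumulators are only updated when an action actually executes the lineage. A minimal illustration of that behavior (the input path and variable names here are hypothetical):

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.util.LongAccumulator;

LongAccumulator seen = jsc.sc().longAccumulator();
JavaRDD<String> lines = jsc.textFile("input");                       // hypothetical input path
JavaRDD<String> tagged = lines.map(s -> { seen.add(1); return s; }); // lazy: seen is still 0 here
tagged.count();                                                      // the action actually runs the map
System.out.println(seen.value());                                    // now equals the number of lines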

Example 5: setUp

import org.apache.spark.api.java.JavaSparkContext; // import the class this method belongs to
@BeforeClass
public static void setUp() {
  SparkConf sparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkIT");
  javaSparkContext = JavaSparkContext.fromSparkContext(SparkContext.getOrCreate(sparkConf));
}
 
Developer: oncewang, Project: oryx2, Lines: 6, Source: AbstractSparkIT.java
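
A @BeforeClass setup like this is usually paired with an @AfterClass that releases the context; a hypothetical counterpart (javaSparkContext being the test class's static field):

@AfterClass
public static void tearDown() {
    if (javaSparkContext != null) {
        javaSparkContext.stop(); // release the shared local[*] context
        javaSparkContext = null;
    }
}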


Note: the org.apache.spark.api.java.JavaSparkContext.fromSparkContext examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow each project's License. Do not repost without permission.