

Java SparkContext.getOrCreate Method Code Examples

This article collects typical usage examples of the Java method org.apache.spark.SparkContext.getOrCreate, drawn from open-source projects. If you are wondering what SparkContext.getOrCreate does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples for the enclosing class, org.apache.spark.SparkContext.


The sections below present 7 code examples of SparkContext.getOrCreate, sorted by popularity by default. You can upvote any example you find useful; your votes help the system recommend better Java code examples.
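
Before the examples, here is a minimal, self-contained sketch of the two overloads this page covers: getOrCreate(SparkConf) applies the given configuration only when no context exists yet, while the no-argument getOrCreate() returns the already-active singleton. The app name and master below are illustrative placeholders, not values taken from the examples.

import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;

public class GetOrCreateSketch {

    public static void main(String[] args) {
        // The configuration is only applied if no SparkContext is active yet
        SparkConf conf = new SparkConf()
                .setAppName("getOrCreate-sketch") // illustrative name
                .setMaster("local[2]");           // illustrative master
        SparkContext first = SparkContext.getOrCreate(conf);

        // Subsequent calls return the same singleton instance
        SparkContext second = SparkContext.getOrCreate();
        System.out.println(first == second); // prints: true

        first.stop();
    }
}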

Example 1: checkVersion

import org.apache.spark.SparkContext; // import the package/class this method depends on

static
public void checkVersion(){
	SparkContext sparkContext = SparkContext.getOrCreate();

	// Compare the running Spark version against the version this converter was built for
	int[] version = parseVersion(sparkContext.version());

	if(!Arrays.equals(ConverterUtil.VERSION, version)){
		throw new IllegalArgumentException("Expected Apache Spark ML version " + formatVersion(ConverterUtil.VERSION) + ", got version " + formatVersion(version) + " (" + sparkContext.version() + ")");
	}
}
 
Author: jpmml, Project: jpmml-sparkml, Lines: 11, Source: ConverterUtil.java
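
Example 1 calls two helpers, parseVersion and formatVersion, that are not shown in the snippet. The following is a hypothetical sketch of what they might look like; the actual jpmml-sparkml implementations may differ.

// Hypothetical helpers, for illustration only; not the actual jpmml-sparkml code
static
private int[] parseVersion(String version){
	// "2.0.1" -> {2, 0}: only the major and minor components matter for the check
	String[] parts = version.split("\\.");
	return new int[]{Integer.parseInt(parts[0]), Integer.parseInt(parts[1])};
}

static
private String formatVersion(int[] version){
	return version[0] + "." + version[1];
}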

Example 2: setupTest

import org.apache.spark.SparkContext; // import the package/class this method depends on

@After
@Before
public void setupTest() {
    SparkConf sparkConfiguration = new SparkConf();
    sparkConfiguration.setAppName(this.getClass().getCanonicalName() + "-setupTest");
    sparkConfiguration.set("spark.master", "local[4]");
    // Obtain (or create) the singleton context, then tear everything down so each test starts clean
    JavaSparkContext sparkContext = new JavaSparkContext(SparkContext.getOrCreate(sparkConfiguration));
    sparkContext.close();
    Spark.create(sparkContext.sc());
    Spark.close();
    logger.info("SparkContext has been closed for " + this.getClass().getCanonicalName() + "-setupTest");
}
 
Author: PKUSilvester, Project: LiteGraph, Lines: 13, Source: AbstractSparkTest.java

Example 3: create

import org.apache.spark.SparkContext; // import the package/class this method depends on

public static SparkContext create(final SparkConf sparkConf) {
    // Create a new context only if none exists yet or the existing one has been stopped
    if (null == CONTEXT || CONTEXT.isStopped()) {
        sparkConf.setAppName("Apache TinkerPop's Spark-Gremlin");
        CONTEXT = SparkContext.getOrCreate(sparkConf);
    }
    return CONTEXT;
}
 
Author: apache, Project: tinkerpop, Lines: 8, Source: Spark.java

Example 4: recreateStopped

import org.apache.spark.SparkContext; // import the package/class this method depends on

public static SparkContext recreateStopped() {
    if (null == CONTEXT)
        throw new IllegalStateException("The Spark context has not been created.");
    if (!CONTEXT.isStopped())
        throw new IllegalStateException("The Spark context is not stopped.");
    // Rebuild the singleton from the stopped context's original configuration
    CONTEXT = SparkContext.getOrCreate(CONTEXT.getConf());
    return CONTEXT;
}
 
Author: apache, Project: tinkerpop, Lines: 9, Source: Spark.java
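
Taken together, Examples 3 and 4 implement a singleton lifecycle around getOrCreate: create hands out a shared context, and recreateStopped revives it after it has been stopped. A minimal usage sketch, assuming the Spark utility class from these examples is on the classpath:

// Assumes the TinkerPop Spark utility class shown in Examples 3 and 4
SparkConf conf = new SparkConf().setMaster("local[4]"); // illustrative master
SparkContext context = Spark.create(conf);  // creates the singleton, or reuses a live one

context.stop();                             // stop it, e.g. between test suites

context = Spark.recreateStopped();          // rebuild it from the stopped context's conf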

Example 5: create

import org.apache.spark.SparkContext; // import the package/class this method depends on

public static void create(final Configuration configuration) {
    final SparkConf sparkConf = new SparkConf();
    // Copy every Commons Configuration key into the SparkConf before creating the context
    configuration.getKeys().forEachRemaining(key -> sparkConf.set(key, configuration.getProperty(key).toString()));
    sparkConf.setAppName("Apache TinkerPop's Spark-Gremlin");
    CONTEXT = SparkContext.getOrCreate(sparkConf);
}
 
Author: PKUSilvester, Project: LiteGraph, Lines: 7, Source: Spark.java
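
A caller might drive Example 5 with an Apache Commons Configuration, much like the tests in Examples 6 and 7 below do; this is a minimal sketch, not code from the project:

// Every key set here is copied into the SparkConf before the context is created
Configuration configuration = new BaseConfiguration();
configuration.setProperty("spark.master", "local[4]");
Spark.create(configuration);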

Example 6: shouldSetThreadLocalProperties

import org.apache.spark.SparkContext; // import the package/class this method depends on
@Test
public void shouldSetThreadLocalProperties() throws Exception {
    final String testName = "ThreadLocalProperties";
    final String rddName = TestHelper.makeTestDataDirectory(LocalPropertyTest.class) + UUID.randomUUID().toString();
    final Configuration configuration = new BaseConfiguration();
    configuration.setProperty("spark.master", "local[4]");
    configuration.setProperty("spark.serializer", GryoSerializer.class.getCanonicalName());
    configuration.setProperty(Graph.GRAPH, HadoopGraph.class.getName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, SparkHadoopGraphProvider.PATHS.get("tinkerpop-modern.kryo"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, GryoInputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, PersistedOutputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, rddName);
    configuration.setProperty(Constants.GREMLIN_HADOOP_JARS_IN_DISTRIBUTED_CACHE, false);
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, true);
    configuration.setProperty("spark.jobGroup.id", "22");
    Graph graph = GraphFactory.open(configuration);
    graph.compute(SparkGraphComputer.class)
            .result(GraphComputer.ResultGraph.NEW)
            .persist(GraphComputer.Persist.EDGES)
            .program(TraversalVertexProgram.build()
                    .traversal(graph.traversal().withComputer(Computer.compute(SparkGraphComputer.class)),
                            "gremlin-groovy",
                            "g.V()").create(graph)).submit().get();
    ////////
    SparkConf sparkConfiguration = new SparkConf();
    sparkConfiguration.setAppName(testName);
    ConfUtil.makeHadoopConfiguration(configuration).forEach(entry -> sparkConfiguration.set(entry.getKey(), entry.getValue()));
    // getOrCreate returns the still-live context persisted by the job above (GREMLIN_SPARK_PERSIST_CONTEXT=true)
    JavaSparkContext sparkContext = new JavaSparkContext(SparkContext.getOrCreate(sparkConfiguration));
    JavaSparkStatusTracker statusTracker = sparkContext.statusTracker();
    assertTrue(statusTracker.getJobIdsForGroup("22").length >= 1);
    assertTrue(Spark.hasRDD(Constants.getGraphLocation(rddName)));
    ///////
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, PersistedInputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, rddName);
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, null);
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, null);
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, false);
    configuration.setProperty("spark.jobGroup.id", "44");
    graph = GraphFactory.open(configuration);
    graph.compute(SparkGraphComputer.class)
            .result(GraphComputer.ResultGraph.NEW)
            .persist(GraphComputer.Persist.NOTHING)
            .program(TraversalVertexProgram.build()
                    .traversal(graph.traversal().withComputer(SparkGraphComputer.class),
                            "gremlin-groovy",
                            "g.V()").create(graph)).submit().get();
    ///////
    assertTrue(statusTracker.getJobIdsForGroup("44").length >= 1);
}
 
Author: PKUSilvester, Project: LiteGraph, Lines: 50, Source: LocalPropertyTest.java

Example 7: shouldSetThreadLocalProperties

import org.apache.spark.SparkContext; // import the package/class this method depends on
@Test
public void shouldSetThreadLocalProperties() throws Exception {
    final String testName = "ThreadLocalProperties";
    final String rddName = TestHelper.makeTestDataDirectory(LocalPropertyTest.class) + UUID.randomUUID().toString();
    final Configuration configuration = new BaseConfiguration();
    configuration.setProperty("spark.master", "local[4]");
    configuration.setProperty("spark.serializer", GryoSerializer.class.getCanonicalName());
    configuration.setProperty(Graph.GRAPH, HadoopGraph.class.getName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, SparkHadoopGraphProvider.PATHS.get("tinkerpop-modern-v3d0.kryo"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, GryoInputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, PersistedOutputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, rddName);
    configuration.setProperty(Constants.GREMLIN_HADOOP_JARS_IN_DISTRIBUTED_CACHE, false);
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, true);
    configuration.setProperty("spark.jobGroup.id", "22");
    Graph graph = GraphFactory.open(configuration);
    graph.compute(SparkGraphComputer.class)
            .result(GraphComputer.ResultGraph.NEW)
            .persist(GraphComputer.Persist.EDGES)
            .program(TraversalVertexProgram.build()
                    .traversal(graph.traversal().withComputer(Computer.compute(SparkGraphComputer.class)),
                            "gremlin-groovy",
                            "g.V()").create(graph)).submit().get();
    ////////
    SparkConf sparkConfiguration = new SparkConf();
    sparkConfiguration.setAppName(testName);
    ConfUtil.makeHadoopConfiguration(configuration).forEach(entry -> sparkConfiguration.set(entry.getKey(), entry.getValue()));
    // getOrCreate returns the still-live context persisted by the job above (GREMLIN_SPARK_PERSIST_CONTEXT=true)
    JavaSparkContext sparkContext = new JavaSparkContext(SparkContext.getOrCreate(sparkConfiguration));
    JavaSparkStatusTracker statusTracker = sparkContext.statusTracker();
    assertTrue(statusTracker.getJobIdsForGroup("22").length >= 1);
    assertTrue(Spark.hasRDD(Constants.getGraphLocation(rddName)));
    ///////
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, PersistedInputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, rddName);
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, null);
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, null);
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, false);
    configuration.setProperty("spark.jobGroup.id", "44");
    graph = GraphFactory.open(configuration);
    graph.compute(SparkGraphComputer.class)
            .result(GraphComputer.ResultGraph.NEW)
            .persist(GraphComputer.Persist.NOTHING)
            .program(TraversalVertexProgram.build()
                    .traversal(graph.traversal().withComputer(SparkGraphComputer.class),
                            "gremlin-groovy",
                            "g.V()").create(graph)).submit().get();
    ///////
    assertTrue(statusTracker.getJobIdsForGroup("44").length >= 1);
}
 
Author: apache, Project: tinkerpop, Lines: 50, Source: LocalPropertyTest.java


Note: the org.apache.spark.SparkContext.getOrCreate examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.