This article collects typical usage examples of the Java method org.apache.spark.SparkContext.getOrCreate. If you have been wondering what SparkContext.getOrCreate does, how to call it, or where to find working code, the curated samples below should help. You can also explore the containing class, org.apache.spark.SparkContext, for more background.
The following examples of SparkContext.getOrCreate are ordered by popularity by default.
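
Before the collected examples, here is a minimal, self-contained sketch of the method itself: getOrCreate() returns the already-running SparkContext if one exists, otherwise it creates a new one from the supplied SparkConf. The app name and master URL below are illustrative placeholders, not values taken from the examples.

import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;

public class GetOrCreateExample {
    public static void main(String[] args) {
        // Illustrative configuration; app name and master URL are placeholders
        SparkConf conf = new SparkConf()
                .setAppName("getOrCreate-demo")
                .setMaster("local[2]");

        // Returns the active SparkContext if one exists, otherwise creates one from conf
        SparkContext sc = SparkContext.getOrCreate(conf);

        // A second call (with or without a conf) yields the same cached instance
        SparkContext same = SparkContext.getOrCreate();
        System.out.println(sc == same); // prints: true

        sc.stop();
    }
}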
Example 1: checkVersion
import org.apache.spark.SparkContext; // import the package/class this method depends on

public static void checkVersion() {
    SparkContext sparkContext = SparkContext.getOrCreate();

    // Compare the running Spark version against the version the converter was built for
    int[] version = parseVersion(sparkContext.version());
    if (!Arrays.equals(ConverterUtil.VERSION, version)) {
        throw new IllegalArgumentException("Expected Apache Spark ML version " + formatVersion(ConverterUtil.VERSION) + ", got version " + formatVersion(version) + " (" + sparkContext.version() + ")");
    }
}
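
The parseVersion and formatVersion calls above are helpers defined elsewhere in the converter class and are not part of the snippet. A plausible sketch of what they might do follows; the implementations are assumptions for illustration, not the library's actual code.

// Hypothetical helper: splits a version string such as "2.3.0" into {2, 3, 0};
// real Spark versions may carry suffixes (e.g. "-SNAPSHOT") that would need stripping first
static int[] parseVersion(String version) {
    String[] parts = version.split("\\.");
    int[] result = new int[parts.length];
    for (int i = 0; i < parts.length; i++) {
        result[i] = Integer.parseInt(parts[i]);
    }
    return result;
}

// Hypothetical helper: formats {2, 3, 0} back into "2.3.0"
static String formatVersion(int[] version) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < version.length; i++) {
        if (i > 0) {
            sb.append('.');
        }
        sb.append(version[i]);
    }
    return sb.toString();
}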
Example 2: setupTest
import org.apache.spark.SparkContext; // import the package/class this method depends on

@After
@Before
public void setupTest() {
    SparkConf sparkConfiguration = new SparkConf();
    sparkConfiguration.setAppName(this.getClass().getCanonicalName() + "-setupTest");
    sparkConfiguration.set("spark.master", "local[4]");
    // getOrCreate() returns the active context (or builds one), so closing it here
    // guarantees each test starts and ends without a lingering SparkContext
    JavaSparkContext sparkContext = new JavaSparkContext(SparkContext.getOrCreate(sparkConfiguration));
    sparkContext.close();
    Spark.create(sparkContext.sc());
    Spark.close();
    logger.info("SparkContext has been closed for " + this.getClass().getCanonicalName() + "-setupTest");
}
Example 3: create
import org.apache.spark.SparkContext; // import the package/class this method depends on

public static SparkContext create(final SparkConf sparkConf) {
    if (null == CONTEXT || CONTEXT.isStopped()) {
        sparkConf.setAppName("Apache TinkerPop's Spark-Gremlin");
        // Reuse the active context if one exists; otherwise create it from sparkConf
        CONTEXT = SparkContext.getOrCreate(sparkConf);
    }
    return CONTEXT;
}
Example 4: recreateStopped
import org.apache.spark.SparkContext; // import the package/class this method depends on

public static SparkContext recreateStopped() {
    if (null == CONTEXT)
        throw new IllegalStateException("The Spark context has not been created.");
    if (!CONTEXT.isStopped())
        throw new IllegalStateException("The Spark context is not stopped.");
    // Rebuild the context from the stopped context's own configuration
    CONTEXT = SparkContext.getOrCreate(CONTEXT.getConf());
    return CONTEXT;
}
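
Examples 3 and 4 belong to a holder that caches the context in a static CONTEXT field: create() builds it lazily, while recreateStopped() rebuilds it from the stopped context's SparkConf. A minimal usage sketch, assuming these methods live on the Spark helper class referenced in Example 2 and with a placeholder master URL:

// Illustrative flow only
SparkConf conf = new SparkConf().setMaster("local[2]");
SparkContext sc = Spark.create(conf);   // creates and caches the context
sc.stop();                              // stop it, e.g. at the end of a test
sc = Spark.recreateStopped();           // rebuild a fresh context from the old SparkConf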
Example 5: create
import org.apache.spark.SparkContext; // import the package/class this method depends on

public static void create(final Configuration configuration) {
    final SparkConf sparkConf = new SparkConf();
    // Copy every key/value pair from the Commons Configuration into the SparkConf
    configuration.getKeys().forEachRemaining(key -> sparkConf.set(key, configuration.getProperty(key).toString()));
    sparkConf.setAppName("Apache TinkerPop's Spark-Gremlin");
    CONTEXT = SparkContext.getOrCreate(sparkConf);
}
Example 6: shouldSetThreadLocalProperties
import org.apache.spark.SparkContext; // import the package/class this method depends on

@Test
public void shouldSetThreadLocalProperties() throws Exception {
    final String testName = "ThreadLocalProperties";
    final String rddName = TestHelper.makeTestDataDirectory(LocalPropertyTest.class) + UUID.randomUUID().toString();
    final Configuration configuration = new BaseConfiguration();
    configuration.setProperty("spark.master", "local[4]");
    configuration.setProperty("spark.serializer", GryoSerializer.class.getCanonicalName());
    configuration.setProperty(Graph.GRAPH, HadoopGraph.class.getName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, SparkHadoopGraphProvider.PATHS.get("tinkerpop-modern.kryo"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, GryoInputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, PersistedOutputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, rddName);
    configuration.setProperty(Constants.GREMLIN_HADOOP_JARS_IN_DISTRIBUTED_CACHE, false);
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, true);
    configuration.setProperty("spark.jobGroup.id", "22");
    Graph graph = GraphFactory.open(configuration);
    // First run: persist the result graph as an RDD under job group "22"
    graph.compute(SparkGraphComputer.class)
            .result(GraphComputer.ResultGraph.NEW)
            .persist(GraphComputer.Persist.EDGES)
            .program(TraversalVertexProgram.build()
                    .traversal(graph.traversal().withComputer(Computer.compute(SparkGraphComputer.class)),
                            "gremlin-groovy",
                            "g.V()").create(graph)).submit().get();
    ////////
    SparkConf sparkConfiguration = new SparkConf();
    sparkConfiguration.setAppName(testName);
    ConfUtil.makeHadoopConfiguration(configuration).forEach(entry -> sparkConfiguration.set(entry.getKey(), entry.getValue()));
    JavaSparkContext sparkContext = new JavaSparkContext(SparkContext.getOrCreate(sparkConfiguration));
    JavaSparkStatusTracker statusTracker = sparkContext.statusTracker();
    assertTrue(statusTracker.getJobIdsForGroup("22").length >= 1);
    assertTrue(Spark.hasRDD(Constants.getGraphLocation(rddName)));
    ///////
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, PersistedInputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, rddName);
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, null);
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, null);
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, false);
    configuration.setProperty("spark.jobGroup.id", "44");
    graph = GraphFactory.open(configuration);
    // Second run: read the persisted RDD back under job group "44"
    graph.compute(SparkGraphComputer.class)
            .result(GraphComputer.ResultGraph.NEW)
            .persist(GraphComputer.Persist.NOTHING)
            .program(TraversalVertexProgram.build()
                    .traversal(graph.traversal().withComputer(SparkGraphComputer.class),
                            "gremlin-groovy",
                            "g.V()").create(graph)).submit().get();
    ///////
    assertTrue(statusTracker.getJobIdsForGroup("44").length >= 1);
}
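
The job-group assertions in this test rely on Spark's thread-local properties: "spark.jobGroup.id" is the local property that SparkContext.setJobGroup sets under the hood. A minimal sketch of that mechanism in isolation; the app name, master URL, and group id are placeholders:

import java.util.Arrays;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaSparkContext;

public class JobGroupDemo {
    public static void main(String[] args) {
        JavaSparkContext jsc = new JavaSparkContext(SparkContext.getOrCreate(
                new SparkConf().setAppName("job-group-demo").setMaster("local[2]")));

        // Job groups are thread-local: jobs submitted from this thread are tagged "demo-group"
        jsc.setJobGroup("demo-group", "jobs submitted by the demo thread");
        jsc.parallelize(Arrays.asList(1, 2, 3)).count();

        // The status tracker can then look jobs up by group id, as the test above does
        System.out.println(jsc.statusTracker().getJobIdsForGroup("demo-group").length >= 1); // true

        jsc.close();
    }
}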