This article collects typical usage examples of the Java method org.apache.tinkerpop.gremlin.structure.util.GraphFactory.open. If you are wondering what GraphFactory.open does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also read further into the enclosing class, org.apache.tinkerpop.gremlin.structure.util.GraphFactory.
The following shows 15 code examples of the GraphFactory.open method, sorted by popularity by default.
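Before diving into the examples, here is a minimal sketch of the two most common call forms. The properties-file path conf/graph.properties and its contents are hypothetical, for illustration only; the file must at least set the gremlin.graph key to a Graph implementation class:

import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.tinkerpop.gremlin.structure.Graph;
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory;

public class GraphFactoryOpenSketch {
    public static void main(String[] args) throws Exception {
        // Form 1: open from a properties file (hypothetical path).
        // The file must contain: gremlin.graph=<Graph implementation class>
        Graph fromFile = GraphFactory.open("conf/graph.properties");
        fromFile.close();

        // Form 2: open from an in-memory Configuration object.
        Configuration conf = new BaseConfiguration();
        conf.setProperty(Graph.GRAPH,
                "org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerGraph");
        Graph fromConfig = GraphFactory.open(conf);
        fromConfig.close();
    }
}

GraphFactory reflectively locates the class named by gremlin.graph and invokes its static open(Configuration) method, which is why the examples below work the same way regardless of the backing graph implementation.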
Example 1: openGraph
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
private Graph openGraph() throws Exception {
    Graph graph = GraphFactory.open(configuration);
    if (clean) {
        if (graph instanceof TinkerGraph) {
            ((TinkerGraph) graph).clear();
        } else if (graph instanceof SqlgGraph) {
            // Sqlg has no clear(); drop the backing database and reopen the graph
            SqlgUtil.dropDb((SqlgGraph) graph);
            graph.tx().commit();
            graph.close();
            graph = GraphFactory.open(configuration);
        } else {
            throw new UnsupportedOperationException("unable to clean graph: " + graph.getClass().getSimpleName());
        }
    }
    return graph;
}
Example 2: setup
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
@Override
public void setup(final Map<String,Object> config) {
    logger.info("Initializing authentication with the {}", SimpleAuthenticator.class.getName());
    if (null == config) {
        throw new IllegalArgumentException(String.format(
                "Could not configure a %s - provide a 'config' in the 'authentication' settings",
                SimpleAuthenticator.class.getName()));
    }
    if (!config.containsKey(CONFIG_CREDENTIALS_DB)) {
        throw new IllegalStateException(String.format(
                "Credentials configuration missing the %s key that points to a graph config file", CONFIG_CREDENTIALS_DB));
    }
    final Graph graph = GraphFactory.open((String) config.get(CONFIG_CREDENTIALS_DB));
    if (graph instanceof TinkerGraph) {
        // have to create the indices because they are not stored in gryo
        final TinkerGraph tinkerGraph = (TinkerGraph) graph;
        tinkerGraph.createIndex(PROPERTY_USERNAME, Vertex.class);
    }
    credentialStore = CredentialGraph.credentials(graph);
    logger.info("CredentialGraph initialized at {}", credentialStore);
}
Example 3: main
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.out.println("Usage: hdt2gremlin <file.hdt> <Gremlin Graph Config File>");
        System.out.println("  The config follows the syntax of Gremlin's GraphFactory.open().");
        System.exit(-1);
    }
    // Create Graph
    Configuration p = new PropertiesConfiguration(args[1]);
    try (Graph gremlinGraph = GraphFactory.open(p)) {
        // Open HDT (map the file given as the first argument)
        try (HDT hdt = HDTManager.mapHDT(args[0])) {
            // Import HDT into Graph
            StopWatch st = new StopWatch();
            importGraph(hdt, gremlinGraph);
            System.out.println("Took " + st.stopAndShow());
        }
        // smallTest(gremlinGraph);
    }
    System.exit(0);
}
Example 4: workerIterationStart
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
@Override
public void workerIterationStart(final Memory memory) {
    if (null == graph) {
        graph = GraphFactory.open(configuration.subset(WRITE_GRAPH_CFG_KEY));
        LOGGER.info("Opened Graph instance: {}", graph);
        try {
            listener = new BulkLoadingListener();
            g = graph.traversal().withStrategies(EventStrategy.build().addListener(listener).create());
        } catch (Exception e) {
            try {
                graph.close();
            } catch (Exception e2) {
                LOGGER.warn("Failed to close Graph instance", e2);
            }
            throw e;
        }
    } else {
        LOGGER.warn("Leaked Graph instance: {}", graph);
    }
}
Example 5: shouldMaintainOriginalConfigurationObjectGivenToFactory
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
/**
 * A {@link Graph} should maintain the original {@code Configuration} object passed to it via {@link GraphFactory}.
 */
@Test
public void shouldMaintainOriginalConfigurationObjectGivenToFactory() throws Exception {
    final Configuration originalConfig = graphProvider.newGraphConfiguration("temp2", this.getClass(), name.getMethodName(), null);
    final Graph createdGraph = GraphFactory.open(originalConfig);
    final Configuration configInGraph = createdGraph.configuration();
    final AtomicInteger keyCount = new AtomicInteger(0);
    originalConfig.getKeys().forEachRemaining(k -> {
        assertTrue(configInGraph.containsKey(k));
        keyCount.incrementAndGet();
    });
    // need some keys in the originalConfig for this test to be meaningful
    assertTrue(keyCount.get() > 0);
    assertEquals(keyCount.get(), IteratorUtils.count(configInGraph.getKeys()));
    graphProvider.clear(createdGraph, originalConfig);
}
Example 6: shouldSuccessfullyEvaluateSingleIterationTraversals
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
@Test
public void shouldSuccessfullyEvaluateSingleIterationTraversals() throws Exception {
    final String outputLocation = TestHelper.makeTestDataDirectory(SparkSingleIterationStrategyTest.class, UUID.randomUUID().toString());
    Configuration configuration = getBaseConfiguration();
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, SparkHadoopGraphProvider.PATHS.get("tinkerpop-modern.kryo"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, GryoInputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, PersistedOutputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, outputLocation);
    configuration.setProperty(Constants.GREMLIN_HADOOP_DEFAULT_GRAPH_COMPUTER, SparkGraphComputer.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, true);
    Graph graph = GraphFactory.open(configuration);
    GraphTraversalSource g = graph.traversal().withComputer().withoutStrategies(SparkInterceptorStrategy.class);
    assertFalse(g.getStrategies().toList().contains(SparkInterceptorStrategy.instance()));
    assertFalse(g.V().count().explain().toString().contains(SparkInterceptorStrategy.class.getSimpleName()));
    assertTrue(g.getStrategies().toList().contains(SparkSingleIterationStrategy.instance()));
    assertTrue(g.V().count().explain().toString().contains(SparkSingleIterationStrategy.class.getSimpleName()));
    // traversals expected to evaluate in a single Spark iteration
    test(true, g.V().limit(10));
    test(true, g.V().values("age").groupCount());
    test(true, g.V().groupCount().by(__.out().count()));
    test(true, g.V().outE());
    test(true, 6L, g.V().count());
    test(true, 6L, g.V().out().count());
    test(true, 6L, g.V().local(__.inE()).count());
    test(true, 6L, g.V().outE().inV().count());
    // traversals expected to require more than one iteration
    test(false, g.V().outE().inV());
    test(false, g.V().both());
    test(false, 12L, g.V().both().count());
    test(false, g.V().out().id());
    test(false, 2L, g.V().out().out().count());
    test(false, 6L, g.V().in().count());
    test(false, 6L, g.V().inE().count());
}
Example 7: shouldNotHaveDanglingPersistedComputeRDDs
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
@Test
public void shouldNotHaveDanglingPersistedComputeRDDs() throws Exception {
    Spark.create("local[4]");
    final String rddName = TestHelper.makeTestDataDirectory(PersistedInputOutputRDDTest.class, UUID.randomUUID().toString());
    final Configuration configuration = super.getBaseConfiguration();
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, SparkHadoopGraphProvider.PATHS.get("tinkerpop-modern.kryo"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, GryoInputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, GryoOutputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, rddName);
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, true);
    Graph graph = GraphFactory.open(configuration);
    // the first OLAP job should not leave a persisted RDD behind
    assertEquals(6, graph.traversal().withComputer(Computer.compute(SparkGraphComputer.class)).V().out().count().next().longValue());
    assertFalse(Spark.hasRDD(Constants.getGraphLocation(rddName)));
    assertEquals(0, Spark.getContext().getPersistentRDDs().size());
    // neither should a second job on the same graph
    assertEquals(2, graph.traversal().withComputer(Computer.compute(SparkGraphComputer.class)).V().out().out().count().next().longValue());
    assertFalse(Spark.hasRDD(Constants.getGraphLocation(rddName)));
    assertEquals(0, Spark.getContext().getPersistentRDDs().size());
    Spark.close();
}
Example 8: shouldNotPersistRDDAcrossJobs
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
@Test
public void shouldNotPersistRDDAcrossJobs() throws Exception {
    Spark.create("local[4]");
    final String rddName = TestHelper.makeTestDataDirectory(PersistedInputOutputRDDTest.class, UUID.randomUUID().toString());
    final Configuration configuration = super.getBaseConfiguration();
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, SparkHadoopGraphProvider.PATHS.get("tinkerpop-modern.kryo"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, GryoInputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, PersistedOutputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, rddName);
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, false); // because the Spark context is NOT persisted, neither is the RDD
    Graph graph = GraphFactory.open(configuration);
    graph.compute(SparkGraphComputer.class)
            .result(GraphComputer.ResultGraph.NEW)
            .persist(GraphComputer.Persist.EDGES)
            .program(TraversalVertexProgram.build()
                    .traversal(graph.traversal().withComputer(SparkGraphComputer.class),
                            "gremlin-groovy",
                            "g.V()").create(graph)).submit().get();
    // a fresh context should not see the RDD from the previous job
    Spark.create("local[4]");
    assertFalse(Spark.hasRDD(Constants.getGraphLocation(rddName)));
    assertEquals(0, Spark.getContext().getPersistentRDDs().size());
    Spark.close();
}
Example 9: shouldReadFromWriteToArbitraryRDD
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
@Test
public void shouldReadFromWriteToArbitraryRDD() throws Exception {
    final Configuration configuration = new BaseConfiguration();
    configuration.setProperty("spark.master", "local[4]");
    configuration.setProperty("spark.serializer", GryoSerializer.class.getCanonicalName());
    configuration.setProperty(Graph.GRAPH, HadoopGraph.class.getName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, ExampleInputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, ExampleOutputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, TestHelper.makeTestDataDirectory(this.getClass(), "shouldReadFromWriteToArbitraryRDD"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_JARS_IN_DISTRIBUTED_CACHE, false);
    Graph graph = GraphFactory.open(configuration);
    graph.compute(SparkGraphComputer.class)
            .result(GraphComputer.ResultGraph.NEW)
            .persist(GraphComputer.Persist.EDGES)
            .program(TraversalVertexProgram.build()
                    .traversal(graph.traversal().withComputer(SparkGraphComputer.class),
                            "gremlin-groovy",
                            "g.V()").create(graph)).submit().get();
}
Example 10: shouldSupportHadoopGraphOLTP
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
@Test
public void shouldSupportHadoopGraphOLTP() {
    final Configuration configuration = new BaseConfiguration();
    configuration.setProperty("spark.master", "local[4]");
    configuration.setProperty("spark.serializer", GryoSerializer.class.getCanonicalName());
    configuration.setProperty(Graph.GRAPH, HadoopGraph.class.getName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, ExampleInputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, GryoOutputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, TestHelper.makeTestDataDirectory(this.getClass(), "shouldSupportHadoopGraphOLTP"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_JARS_IN_DISTRIBUTED_CACHE, false);
    Graph graph = GraphFactory.open(configuration);
    GraphTraversalSource g = graph.traversal(); // OLTP
    assertEquals("person", g.V().has("age", 29).next().label());
    assertEquals(Long.valueOf(4), g.V().count().next());
    assertEquals(Long.valueOf(0), g.E().count().next());
    assertEquals(Long.valueOf(2), g.V().has("age", P.gt(30)).count().next());
}
Example 11: shouldWriteToArbitraryRDD
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
@Test
public void shouldWriteToArbitraryRDD() throws Exception {
    final Configuration configuration = new BaseConfiguration();
    configuration.setProperty("spark.master", "local[4]");
    configuration.setProperty("spark.serializer", GryoSerializer.class.getCanonicalName());
    configuration.setProperty(Graph.GRAPH, HadoopGraph.class.getName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, SparkHadoopGraphProvider.PATHS.get("tinkerpop-modern-v3d0.kryo"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, GryoInputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, ExampleOutputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, TestHelper.makeTestDataDirectory(this.getClass(), "shouldWriteToArbitraryRDD"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_JARS_IN_DISTRIBUTED_CACHE, false);
    Graph graph = GraphFactory.open(configuration);
    graph.compute(SparkGraphComputer.class)
            .result(GraphComputer.ResultGraph.NEW)
            .persist(GraphComputer.Persist.EDGES)
            .program(TraversalVertexProgram.build()
                    .traversal(graph.traversal().withComputer(Computer.compute(SparkGraphComputer.class)),
                            "gremlin-groovy",
                            "g.V()").create(graph)).submit().get();
}
Example 12: shouldNotHaveDanglingPersistedComputeRDDs
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
@Test
public void shouldNotHaveDanglingPersistedComputeRDDs() throws Exception {
    Spark.create("local[4]");
    final String rddName = TestHelper.makeTestDataDirectory(PersistedInputOutputRDDIntegrateTest.class, UUID.randomUUID().toString());
    final Configuration configuration = super.getBaseConfiguration();
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, SparkHadoopGraphProvider.PATHS.get("tinkerpop-modern-v3d0.kryo"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, GryoInputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, GryoOutputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, rddName);
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, true);
    Graph graph = GraphFactory.open(configuration);
    // the first OLAP job should not leave a persisted RDD behind
    assertEquals(6, graph.traversal().withComputer(Computer.compute(SparkGraphComputer.class)).V().out().count().next().longValue());
    assertFalse(Spark.hasRDD(Constants.getGraphLocation(rddName)));
    assertEquals(0, Spark.getContext().getPersistentRDDs().size());
    // neither should a second job on the same graph
    assertEquals(2, graph.traversal().withComputer(Computer.compute(SparkGraphComputer.class)).V().out().out().count().next().longValue());
    assertFalse(Spark.hasRDD(Constants.getGraphLocation(rddName)));
    assertEquals(0, Spark.getContext().getPersistentRDDs().size());
    Spark.close();
}
Example 13: shouldNotPersistRDDAcrossJobs
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
@Test
public void shouldNotPersistRDDAcrossJobs() throws Exception {
    Spark.create("local[4]");
    final String rddName = TestHelper.makeTestDataDirectory(PersistedInputOutputRDDIntegrateTest.class, UUID.randomUUID().toString());
    final Configuration configuration = super.getBaseConfiguration();
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, SparkHadoopGraphProvider.PATHS.get("tinkerpop-modern-v3d0.kryo"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, GryoInputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, PersistedOutputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, rddName);
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, false); // because the Spark context is NOT persisted, neither is the RDD
    Graph graph = GraphFactory.open(configuration);
    graph.compute(SparkGraphComputer.class)
            .result(GraphComputer.ResultGraph.NEW)
            .persist(GraphComputer.Persist.EDGES)
            .program(TraversalVertexProgram.build()
                    .traversal(graph.traversal().withComputer(SparkGraphComputer.class),
                            "gremlin-groovy",
                            "g.V()").create(graph)).submit().get();
    // a fresh context should not see the RDD from the previous job
    Spark.create("local[4]");
    assertFalse(Spark.hasRDD(Constants.getGraphLocation(rddName)));
    assertEquals(0, Spark.getContext().getPersistentRDDs().size());
    Spark.close();
}
Example 14: main
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
public static void main(String[] argv) throws InterruptedException, IOException {
    Graph hadoopGraph = null;
    try {
        LOGGER.info("Connect to the hadoop graph");
        hadoopGraph = GraphFactory.open(new PropertiesConfiguration(HADOOP_CONFIG_FILE));
        ComputeWeightVertexProgram.Builder builder = ComputeWeightVertexProgram.build().withRwGraphConfig(Schema.CONFIG_FILE);
        ComputerResult result = hadoopGraph.
                compute().
                program(
                        builder.create(hadoopGraph)
                ).
                vertices(hasLabel(Schema.USER)).
                submit().get();
        result.close();
        hadoopGraph.close();
        Spark.close();
        hadoopGraph = null;
    } catch (Exception e) {
        e.printStackTrace();
        try {
            if (hadoopGraph != null) {
                hadoopGraph.close();
                Spark.close();
            }
        } catch (Exception e1) {
            System.err.println("Couldn't close graph or spark...");
        }
    }
    LOGGER.info("bye bye");
    // we need to call System.exit or else the program will wait forever
    System.exit(0);
}
Example 15: openGraph
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory; //import the required package/class
@Before
public final void openGraph() throws Exception {
    // Open a new graph
    graph = GraphFactory.open(configuration);
    // Hard reset the Sqlg graph database into a clean state
    if (graph instanceof SqlgGraph) {
        SqlgUtil.dropDb((SqlgGraph) graph);
        graph.tx().commit();
        graph.close();
        graph = GraphFactory.open(configuration);
    }
}