This article collects typical usage examples of the Java class org.apache.commons.configuration.BaseConfiguration. If you are wondering what the BaseConfiguration class is for, how to use it, or what real-world usage looks like, the curated class code examples below may help.
The BaseConfiguration class belongs to the org.apache.commons.configuration package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
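
Before the project-specific examples below, here is a minimal stand-alone sketch of the basic BaseConfiguration property API (setProperty, addProperty, and the typed getters). The class name and property keys in this sketch are illustrative only and are not taken from the examples that follow.

import java.util.List;
import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.Configuration;

public class BaseConfigurationBasics {
    public static void main(String[] args) {
        // BaseConfiguration is an in-memory, map-backed Configuration implementation.
        final Configuration conf = new BaseConfiguration();
        conf.setProperty("instance.name", "dev");   // illustrative keys, not from the examples below
        conf.setProperty("query.timeout.ms", 5000);
        conf.addProperty("hosts", "zk1");           // addProperty accumulates multiple values under one key
        conf.addProperty("hosts", "zk2");

        final String name = conf.getString("instance.name");
        final int timeout = conf.getInt("query.timeout.ms");
        final List<Object> hosts = conf.getList("hosts");
        System.out.println(name + " " + timeout + " " + hosts);
    }
}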
Example 1: deserialize
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Override
public TinkerGraph deserialize(final JsonParser jsonParser, final DeserializationContext deserializationContext) throws IOException, JsonProcessingException {
    final Configuration conf = new BaseConfiguration();
    // use list cardinality so repeated vertex property keys in the stream are preserved
    conf.setProperty("gremlin.tinkergraph.defaultVertexPropertyCardinality", "list");
    final TinkerGraph graph = TinkerGraph.open(conf);
    // walk the JSON object and re-attach each detached vertex/edge to the fresh graph
    while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
        if (jsonParser.getCurrentName().equals("vertices")) {
            while (jsonParser.nextToken() != JsonToken.END_ARRAY) {
                if (jsonParser.currentToken() == JsonToken.START_OBJECT) {
                    final DetachedVertex v = (DetachedVertex) deserializationContext.readValue(jsonParser, Vertex.class);
                    v.attach(Attachable.Method.getOrCreate(graph));
                }
            }
        } else if (jsonParser.getCurrentName().equals("edges")) {
            while (jsonParser.nextToken() != JsonToken.END_ARRAY) {
                if (jsonParser.currentToken() == JsonToken.START_OBJECT) {
                    final DetachedEdge e = (DetachedEdge) deserializationContext.readValue(jsonParser, Edge.class);
                    e.attach(Attachable.Method.getOrCreate(graph));
                }
            }
        }
    }
    return graph;
}
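
The key move in Example 1 is re-attaching each DetachedVertex/DetachedEdge to the freshly opened TinkerGraph via Attachable.Method.getOrCreate. A minimal stand-alone sketch of that attach mechanism follows; the class name AttachDemo and the "marko" lookup are illustrative additions, not part of the original example, and assume tinkergraph-gremlin is on the classpath.

import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.apache.tinkerpop.gremlin.structure.util.Attachable;
import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedFactory;
import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertex;
import org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerFactory;
import org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerGraph;

public class AttachDemo {
    public static void main(String[] args) {
        // Detach a vertex from one graph and re-attach it to an empty graph,
        // mirroring what the deserializer above does for every parsed vertex and edge.
        final TinkerGraph source = TinkerFactory.createModern();
        final Vertex marko = source.traversal().V().has("name", "marko").next();
        final DetachedVertex detached = DetachedFactory.detach(marko, true);

        final TinkerGraph target = TinkerGraph.open();
        final Vertex attached = detached.attach(Attachable.Method.getOrCreate(target));
        System.out.println(attached.id() + " attached; target now has "
                + target.traversal().V().count().next() + " vertex");
    }
}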
Example 2: shouldPersistToGraphML
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Test
public void shouldPersistToGraphML() {
    final String graphLocation = TestHelper.makeTestDataDirectory(TinkerGraphTest.class) + "shouldPersistToGraphML.xml";
    final File f = new File(graphLocation);
    if (f.exists() && f.isFile()) f.delete();
    final Configuration conf = new BaseConfiguration();
    conf.setProperty(TinkerGraph.GREMLIN_TINKERGRAPH_GRAPH_FORMAT, "graphml");
    conf.setProperty(TinkerGraph.GREMLIN_TINKERGRAPH_GRAPH_LOCATION, graphLocation);
    final TinkerGraph graph = TinkerGraph.open(conf);
    TinkerFactory.generateModern(graph);
    graph.close();
    final TinkerGraph reloadedGraph = TinkerGraph.open(conf);
    IoTest.assertModernGraph(reloadedGraph, true, true);
    reloadedGraph.close();
}
Example 3: shouldPersistToGraphSON
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Test
public void shouldPersistToGraphSON() {
    final String graphLocation = TestHelper.makeTestDataDirectory(TinkerGraphTest.class) + "shouldPersistToGraphSON.json";
    final File f = new File(graphLocation);
    if (f.exists() && f.isFile()) f.delete();
    final Configuration conf = new BaseConfiguration();
    conf.setProperty(TinkerGraph.GREMLIN_TINKERGRAPH_GRAPH_FORMAT, "graphson");
    conf.setProperty(TinkerGraph.GREMLIN_TINKERGRAPH_GRAPH_LOCATION, graphLocation);
    final TinkerGraph graph = TinkerGraph.open(conf);
    TinkerFactory.generateModern(graph);
    graph.close();
    final TinkerGraph reloadedGraph = TinkerGraph.open(conf);
    IoTest.assertModernGraph(reloadedGraph, true, false);
    reloadedGraph.close();
}
Example 4: shouldPersistToGryo
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Test
public void shouldPersistToGryo() {
    final String graphLocation = TestHelper.makeTestDataDirectory(TinkerGraphTest.class) + "shouldPersistToGryo.kryo";
    final File f = new File(graphLocation);
    if (f.exists() && f.isFile()) f.delete();
    final Configuration conf = new BaseConfiguration();
    conf.setProperty(TinkerGraph.GREMLIN_TINKERGRAPH_GRAPH_FORMAT, "gryo");
    conf.setProperty(TinkerGraph.GREMLIN_TINKERGRAPH_GRAPH_LOCATION, graphLocation);
    final TinkerGraph graph = TinkerGraph.open(conf);
    TinkerFactory.generateModern(graph);
    graph.close();
    final TinkerGraph reloadedGraph = TinkerGraph.open(conf);
    IoTest.assertModernGraph(reloadedGraph, true, false);
    reloadedGraph.close();
}
Example 5: shouldPersistToGryoAndHandleMultiProperties
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Test
public void shouldPersistToGryoAndHandleMultiProperties() {
    final String graphLocation = TestHelper.makeTestDataDirectory(TinkerGraphTest.class) + "shouldPersistToGryoMulti.kryo";
    final File f = new File(graphLocation);
    if (f.exists() && f.isFile()) f.delete();
    final Configuration conf = new BaseConfiguration();
    conf.setProperty(TinkerGraph.GREMLIN_TINKERGRAPH_GRAPH_FORMAT, "gryo");
    conf.setProperty(TinkerGraph.GREMLIN_TINKERGRAPH_GRAPH_LOCATION, graphLocation);
    final TinkerGraph graph = TinkerGraph.open(conf);
    TinkerFactory.generateTheCrew(graph);
    graph.close();
    // the crew graph uses multi-properties, so reload with list cardinality rather than the default single
    conf.setProperty(TinkerGraph.GREMLIN_TINKERGRAPH_DEFAULT_VERTEX_PROPERTY_CARDINALITY, VertexProperty.Cardinality.list.toString());
    final TinkerGraph reloadedGraph = TinkerGraph.open(conf);
    IoTest.assertCrewGraph(reloadedGraph, false);
    reloadedGraph.close();
}
Example 6: shouldPersistWithRelativePath
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Test
public void shouldPersistWithRelativePath() {
    final String graphLocation = TestHelper.convertToRelative(TinkerGraphTest.class,
            new File(TestHelper.makeTestDataDirectory(TinkerGraphTest.class))) + "shouldPersistToGryoRelative.kryo";
    final File f = new File(graphLocation);
    if (f.exists() && f.isFile()) f.delete();
    final Configuration conf = new BaseConfiguration();
    conf.setProperty(TinkerGraph.GREMLIN_TINKERGRAPH_GRAPH_FORMAT, "gryo");
    conf.setProperty(TinkerGraph.GREMLIN_TINKERGRAPH_GRAPH_LOCATION, graphLocation);
    final TinkerGraph graph = TinkerGraph.open(conf);
    TinkerFactory.generateModern(graph);
    graph.close();
    final TinkerGraph reloadedGraph = TinkerGraph.open(conf);
    IoTest.assertModernGraph(reloadedGraph, true, false);
    reloadedGraph.close();
}
Example 7: DataStore
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

public DataStore(Configuration conf) throws QonduitException {
    try {
        // translate the application's Accumulo settings into a commons-configuration object
        final BaseConfiguration apacheConf = new BaseConfiguration();
        Configuration.Accumulo accumuloConf = conf.getAccumulo();
        apacheConf.setProperty("instance.name", accumuloConf.getInstanceName());
        apacheConf.setProperty("instance.zookeeper.host", accumuloConf.getZookeepers());
        final ClientConfiguration aconf = new ClientConfiguration(Collections.singletonList(apacheConf));
        final Instance instance = new ZooKeeperInstance(aconf);
        connector = instance
                .getConnector(accumuloConf.getUsername(), new PasswordToken(accumuloConf.getPassword()));
    } catch (Exception e) {
        throw new QonduitException(HttpResponseStatus.INTERNAL_SERVER_ERROR.code(), "Error creating DataStoreImpl",
                e.getMessage(), e);
    }
}
Example 8: configuration
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Override
public Configuration configuration() {
    if (this.origConfig != null) {
        return this.origConfig;
    } else {
        Configuration ans = new BaseConfiguration();
        ans.setProperty(DB_PATH_KEY, dbPath.toString());
        ans.setProperty(ALLOW_FULL_GRAPH_SCANS_KEY, allowFullGraphScans);
        ans.setProperty(DEFAULT_ISOLATION_LEVEL_KEY, defaultIsolationLevel.toString());
        ans.setProperty(TX_LOG_THRESHOLD_KEY, getTxLogThreshold());
        ans.setProperty(REORG_FACTOR_KEY, getReorgFactor());
        ans.setProperty(CREATE_DIR_IF_MISSING_KEY, createDirIfMissing);
        ans.setProperty(VERTEX_INDICES_KEY, String.join(",", getIndexedKeys(Vertex.class)));
        ans.setProperty(EDGE_INDICES_KEY, String.join(",", getIndexedKeys(Edge.class)));
        return ans;
    }
}
Example 9: main
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

public static void main(String[] args) throws Exception {
    try (ConfigurableApplicationContext ctx = new SpringApplicationBuilder(SpringBootstrap.class)
            .bannerMode(Mode.OFF).web(false).run(args)) {
        Configuration conf = ctx.getBean(Configuration.class);
        final BaseConfiguration apacheConf = new BaseConfiguration();
        Configuration.Accumulo accumuloConf = conf.getAccumulo();
        apacheConf.setProperty("instance.name", accumuloConf.getInstanceName());
        apacheConf.setProperty("instance.zookeeper.host", accumuloConf.getZookeepers());
        final ClientConfiguration aconf = new ClientConfiguration(Collections.singletonList(apacheConf));
        final Instance instance = new ZooKeeperInstance(aconf);
        Connector con = instance.getConnector(accumuloConf.getUsername(),
                new PasswordToken(accumuloConf.getPassword()));
        Scanner s = con.createScanner(conf.getMetaTable(),
                con.securityOperations().getUserAuthorizations(con.whoami()));
        try {
            // scan the metadata table and print every metric name, stripping the row prefix
            s.setRange(new Range(Meta.METRIC_PREFIX, true, Meta.TAG_PREFIX, false));
            for (Entry<Key, Value> e : s) {
                System.out.println(e.getKey().getRow().toString().substring(Meta.METRIC_PREFIX.length()));
            }
        } finally {
            s.close();
        }
    }
}
Example 10: shouldConfigPoolOnConstructionWithPoolSizeOneAndNoIoRegistry
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Test
public void shouldConfigPoolOnConstructionWithPoolSizeOneAndNoIoRegistry() throws Exception {
    final Configuration conf = new BaseConfiguration();
    final GryoPool pool = GryoPool.build().poolSize(1).ioRegistries(conf.getList(GryoPool.CONFIG_IO_REGISTRY, Collections.emptyList())).create();
    final GryoReader reader = pool.takeReader();
    final GryoWriter writer = pool.takeWriter();
    pool.offerReader(reader);
    pool.offerWriter(writer);
    for (int ix = 0; ix < 100; ix++) {
        final GryoReader r = pool.takeReader();
        final GryoWriter w = pool.takeWriter();
        assertReaderWriter(w, r, 1, Integer.class);
        // should always return the same original instance
        assertEquals(reader, r);
        assertEquals(writer, w);
        pool.offerReader(r);
        pool.offerWriter(w);
    }
}
Example 11: newGraphConfiguration
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Override
public Configuration newGraphConfiguration(final String graphName, final Class<?> test,
                                           final String testMethodName,
                                           final Map<String, Object> configurationOverrides,
                                           final LoadGraphWith.GraphData loadGraphWith) {
    final Configuration conf = new BaseConfiguration();
    getBaseConfiguration(graphName, test, testMethodName, loadGraphWith).entrySet().stream()
            .forEach(e -> conf.setProperty(e.getKey(), e.getValue()));

    // assign overrides but don't allow the gremlin.graph setting to be overridden; the test suite should
    // not be able to override that.
    configurationOverrides.entrySet().stream()
            .filter(c -> !c.getKey().equals(Graph.GRAPH))
            .forEach(e -> conf.setProperty(e.getKey(), e.getValue()));
    return conf;
}
Example 12: head
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Override
public Iterator<Vertex> head(final String location, final Class readerClass, final int totalLines) {
    final Configuration configuration = new BaseConfiguration();
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, location);
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, readerClass.getCanonicalName());
    try {
        if (InputRDD.class.isAssignableFrom(readerClass)) {
            return IteratorUtils.map(((InputRDD) readerClass.getConstructor().newInstance()).readGraphRDD(configuration, new JavaSparkContext(Spark.getContext())).take(totalLines).iterator(), tuple -> tuple._2().get());
        } else if (InputFormat.class.isAssignableFrom(readerClass)) {
            return IteratorUtils.map(new InputFormatRDD().readGraphRDD(configuration, new JavaSparkContext(Spark.getContext())).take(totalLines).iterator(), tuple -> tuple._2().get());
        }
    } catch (final Exception e) {
        throw new IllegalArgumentException(e.getMessage(), e);
    }
    throw new IllegalArgumentException("The provided parserClass must be an " + InputFormat.class.getCanonicalName() + " or an " + InputRDD.class.getCanonicalName() + ": " + readerClass.getCanonicalName());
}
Example 13: shouldWriteToArbitraryRDD
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Test
public void shouldWriteToArbitraryRDD() throws Exception {
    final Configuration configuration = new BaseConfiguration();
    configuration.setProperty("spark.master", "local[4]");
    configuration.setProperty("spark.serializer", GryoSerializer.class.getCanonicalName());
    configuration.setProperty(Graph.GRAPH, HadoopGraph.class.getName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, SparkHadoopGraphProvider.PATHS.get("tinkerpop-modern.kryo"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, GryoInputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, ExampleOutputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, TestHelper.makeTestDataDirectory(this.getClass(), "shouldWriteToArbitraryRDD"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_JARS_IN_DISTRIBUTED_CACHE, false);
    ////////
    Graph graph = GraphFactory.open(configuration);
    graph.compute(SparkGraphComputer.class)
            .result(GraphComputer.ResultGraph.NEW)
            .persist(GraphComputer.Persist.EDGES)
            .program(TraversalVertexProgram.build()
                    .traversal(graph.traversal().withComputer(Computer.compute(SparkGraphComputer.class)),
                            "gremlin-groovy",
                            "g.V()").create(graph)).submit().get();
}
Example 14: shouldSupportHadoopGraphOLTP
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Test
public void shouldSupportHadoopGraphOLTP() {
    final Configuration configuration = new BaseConfiguration();
    configuration.setProperty("spark.master", "local[4]");
    configuration.setProperty("spark.serializer", GryoSerializer.class.getCanonicalName());
    configuration.setProperty(Graph.GRAPH, HadoopGraph.class.getName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, ExampleInputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, GryoOutputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, TestHelper.makeTestDataDirectory(this.getClass(), "shouldSupportHadoopGraphOLTP"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_JARS_IN_DISTRIBUTED_CACHE, false);
    ////////
    Graph graph = GraphFactory.open(configuration);
    GraphTraversalSource g = graph.traversal(); // OLTP
    assertEquals("person", g.V().has("age", 29).next().label());
    assertEquals(Long.valueOf(4), g.V().count().next());
    assertEquals(Long.valueOf(0), g.E().count().next());
    assertEquals(Long.valueOf(2), g.V().has("age", P.gt(30)).count().next());
}
Example 15: shouldReadFromWriteToArbitraryRDD
import org.apache.commons.configuration.BaseConfiguration; // import the required package/class

@Test
public void shouldReadFromWriteToArbitraryRDD() throws Exception {
    final Configuration configuration = new BaseConfiguration();
    configuration.setProperty("spark.master", "local[4]");
    configuration.setProperty("spark.serializer", GryoSerializer.class.getCanonicalName());
    configuration.setProperty(Graph.GRAPH, HadoopGraph.class.getName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, ExampleInputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, ExampleOutputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, TestHelper.makeTestDataDirectory(this.getClass(), "shouldReadFromWriteToArbitraryRDD"));
    configuration.setProperty(Constants.GREMLIN_HADOOP_JARS_IN_DISTRIBUTED_CACHE, false);
    ////////
    Graph graph = GraphFactory.open(configuration);
    graph.compute(SparkGraphComputer.class)
            .result(GraphComputer.ResultGraph.NEW)
            .persist(GraphComputer.Persist.EDGES)
            .program(TraversalVertexProgram.build()
                    .traversal(graph.traversal().withComputer(SparkGraphComputer.class),
                            "gremlin-groovy",
                            "g.V()").create(graph)).submit().get();
}