

Java SPARQLParser.parseQuery Method Code Examples

This article collects typical usage examples of the Java method org.openrdf.query.parser.sparql.SPARQLParser.parseQuery. If you are wondering what SPARQLParser.parseQuery does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples for the enclosing class, org.openrdf.query.parser.sparql.SPARQLParser.


The following sections show 15 code examples of the SPARQLParser.parseQuery method, sorted by popularity by default.
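Before the examples, here is a minimal, self-contained sketch of the call pattern they all share: construct a SPARQLParser, call parseQuery with a query string and a base URI (the examples below pass null), and work with the resulting ParsedQuery's algebra tree. The query string here is a hypothetical placeholder; the API calls are the openrdf/Sesame ones used throughout this article.

import org.openrdf.query.MalformedQueryException;
import org.openrdf.query.algebra.TupleExpr;
import org.openrdf.query.parser.ParsedQuery;
import org.openrdf.query.parser.sparql.SPARQLParser;

public class ParseQueryExample {

    public static void main(String[] args) throws MalformedQueryException {
        // Hypothetical query string, for illustration only.
        final String sparql = "SELECT ?s ?o WHERE { ?s <uri:talksTo> ?o }";

        // Parse the query; the second argument is the base URI (null here,
        // as in the examples below).
        final SPARQLParser parser = new SPARQLParser();
        final ParsedQuery parsedQuery = parser.parseQuery(sparql, null);

        // The parsed algebra tree is what gets handed to the optimizers,
        // evaluators, and index matchers in the examples that follow.
        final TupleExpr tupleExpr = parsedQuery.getTupleExpr();
        System.out.println(tupleExpr);
    }
}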

Example 1: run

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
public void run() throws MalformedQueryException, QueryEvaluationException, NotEnoughResultsException, SailException {
    CloseableIteration<? extends BindingSet, QueryEvaluationException> it = null;

    try {
        // Execute the query.
        final SPARQLParser sparqlParser = new SPARQLParser();
        final ParsedQuery parsedQuery = sparqlParser.parseQuery(sparql, null);
        it = sailConn.evaluate(parsedQuery.getTupleExpr(), null, null, false);

        // Perform the reads.
        if(numReads.isPresent()) {
            read(it, numReads.get() );
        } else {
            readAll(it);
        }
    } finally {
        if(it != null) {
            it.close();
        }
    }
}
 
Developer: apache, Project: incubator-rya, Lines: 22, Source: QueryBenchmark.java

Example 2: testSingleIndexLargeQuery

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
@Test
public void testSingleIndexLargeQuery() throws Exception {

    // Parse the main query (q8) and the query that defines the precomputed join (PCJ) index (q7).
    final SPARQLParser parser1 = new SPARQLParser();
    final SPARQLParser parser2 = new SPARQLParser();

    final ParsedQuery pq1 = parser1.parseQuery(q8, null);
    final ParsedQuery pq2 = parser2.parseQuery(q7, null);

    // Wrap the index query's algebra in an external tuple set the optimizer can match against.
    final SimpleExternalTupleSet extTup = new SimpleExternalTupleSet(
            new Projection(pq2.getTupleExpr()));

    final List<ExternalTupleSet> list = new ArrayList<ExternalTupleSet>();
    list.add(extTup);

    // Optimize a copy of the main query so that matched sections are replaced by the index.
    final TupleExpr tup = pq1.getTupleExpr().clone();
    provider.setIndices(list);
    final PCJOptimizer pcj = new PCJOptimizer(list, false, provider);
    pcj.optimize(tup, null, null);

    // Collect the statement patterns of the original query and the external
    // tuple sets that the optimizer placed into the optimized plan.
    final Set<StatementPattern> qSet = Sets.newHashSet(StatementPatternCollector
            .process(pq1.getTupleExpr()));
    final Set<QueryModelNode> eTupSet = PcjIntegrationTestingUtil
            .getTupleSets(tup);

    final Set<StatementPattern> set = Sets.newHashSet();
    for (final QueryModelNode s : eTupSet) {
        set.addAll(StatementPatternCollector.process(((ExternalTupleSet) s)
                .getTupleExpr()));
    }

    // The substituted indices should cover exactly the statement patterns of the original query.
    Assert.assertTrue(set.equals(qSet));

}
 
Developer: apache, Project: incubator-rya, Lines: 35, Source: PrecompJoinOptimizerTest2.java

Example 3: testVarRelableIndexSameSize

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
@Test
public void testVarRelableIndexSameSize() throws Exception {

    final SPARQLParser parser1 = new SPARQLParser();
    final SPARQLParser parser2 = new SPARQLParser();

    final ParsedQuery pq1 = parser1.parseQuery(q1, null);
    final ParsedQuery pq2 = parser2.parseQuery(q2, null);

    final SimpleExternalTupleSet extTup = new SimpleExternalTupleSet(
            new Projection(pq2.getTupleExpr()));

    final List<ExternalTupleSet> list = new ArrayList<ExternalTupleSet>();
    list.add(extTup);

    final TupleExpr tup = pq1.getTupleExpr().clone();
    provider.setIndices(list);
    final PCJOptimizer pcj = new PCJOptimizer(list, false, provider);
    pcj.optimize(tup, null, null);

    final Set<StatementPattern> qSet = Sets
            .newHashSet(StatementPatternCollector.process(pq1
                    .getTupleExpr()));
    final Set<QueryModelNode> eTupSet = PcjIntegrationTestingUtil
            .getTupleSets(tup);

    final Set<StatementPattern> set = Sets.newHashSet();
    for (final QueryModelNode s : eTupSet) {
        set.addAll(StatementPatternCollector.process(((ExternalTupleSet) s)
                .getTupleExpr()));
    }

    Assert.assertTrue(set.equals(qSet));

}
 
Developer: apache, Project: incubator-rya, Lines: 36, Source: PrecompJoinOptimizerTest2.java

Example 4: simpleQueryWithoutBindingSetInvalidProperty

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
/**
 * Tests if results are filtered correctly using the metadata properties. In
 * this case, the date for the ingested RyaStatement differs from the date
 * specified in the query.
 */
@Test
public void simpleQueryWithoutBindingSetInvalidProperty() throws Exception {
    MongoDBRyaDAO dao = new MongoDBRyaDAO();
    try {
        dao.setConf(conf);
        dao.init();	

        StatementMetadata metadata = new StatementMetadata();
        metadata.addMetadata(new RyaURI("http://createdBy"), new RyaType("Doug"));
        metadata.addMetadata(new RyaURI("http://createdOn"), new RyaType(XMLSchema.DATE, "2017-02-15"));

        RyaStatement statement = new RyaStatement(new RyaURI("http://Joe"), new RyaURI("http://worksAt"),
                new RyaType("CoffeeShop"), new RyaURI("http://context"), "", metadata);
        dao.add(statement);

        SPARQLParser parser = new SPARQLParser();
        ParsedQuery pq = parser.parseQuery(query, null);
        List<StatementPattern> spList = StatementPatternCollector.process(pq.getTupleExpr());
        StatementMetadataNode<MongoDBRdfConfiguration> node = new StatementMetadataNode<>(spList, conf);
        CloseableIteration<BindingSet, QueryEvaluationException> iteration = node.evaluate(new QueryBindingSet());

        List<BindingSet> bsList = new ArrayList<>();
        while (iteration.hasNext()) {
            bsList.add(iteration.next());
        }
        Assert.assertEquals(0, bsList.size());
        dao.delete(statement, conf);
    } finally {
        dao.destroy();
    }
}
 
Developer: apache, Project: incubator-rya, Lines: 37, Source: MongoStatementMetadataNodeIT.java

Example 5: testGeoIndexFunction

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
@Test
public void testGeoIndexFunction() throws Exception {

    final SPARQLParser parser1 = new SPARQLParser();
    final SPARQLParser parser2 = new SPARQLParser();

    final ParsedQuery pq1 = parser1.parseQuery(q21, null);
    final ParsedQuery pq2 = parser2.parseQuery(q23, null);
    System.out.println("Query is " + pq1.getTupleExpr());
    System.out.println("Index is " + pq2.getTupleExpr());

    final SimpleExternalTupleSet extTup = new SimpleExternalTupleSet(new Projection(pq2.getTupleExpr()));

    final List<ExternalTupleSet> list = new ArrayList<ExternalTupleSet>();
    list.add(extTup);

    final TupleExpr tup = pq1.getTupleExpr().clone();
    final PCJOptimizer pcj = new PCJOptimizer(list, false, new AccumuloIndexSetProvider(new Configuration(), list));
    pcj.optimize(tup, null, null);

    System.out.println("Processed query is " + tup);

    final Set<StatementPattern> qSet = Sets.newHashSet(StatementPatternCollector.process(pq1.getTupleExpr()));
    final Set<QueryModelNode> eTupSet =  PcjIntegrationTestingUtil.getTupleSets(tup);
    final Set<StatementPattern> set = Sets.newHashSet();

    Assert.assertEquals(1, eTupSet.size());

    for (final QueryModelNode s : eTupSet) {
        final Set<StatementPattern> tempSet = Sets.newHashSet(StatementPatternCollector.process(((ExternalTupleSet) s)
                .getTupleExpr()));
        set.addAll(tempSet);

    }

    Assert.assertTrue(qSet.containsAll(set));
}
 
Developer: apache, Project: incubator-rya, Lines: 38, Source: PrecompJoinOptimizerVarToConstTest.java

Example 6: testBasicSegment

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
@Test
public void testBasicSegment() throws MalformedQueryException {

	String query1 = ""//
			+ "SELECT ?e ?c ?l" //
			+ "{" //
			+ "  ?e a ?c . "//
			+ "  ?e <uri:talksTo> ?l  . "//
			+ "  ?e <http://www.w3.org/2000/01/rdf-schema#label> ?l "//
			+ "}";//

	String query2 = ""//
			+ "SELECT ?e ?c ?l" //
			+ "{" //
			+ "  ?e a ?c . "//
			+ "  ?e <uri:talksTo> ?l  . "//
			+ "}";//

	SPARQLParser parser = new SPARQLParser();
	ParsedQuery pq1 = parser.parseQuery(query1, null);
	ParsedQuery pq2 = parser.parseQuery(query2, null);
	TupleExpr te1 = pq1.getTupleExpr();
	TupleExpr te2 = pq2.getTupleExpr();
	Projection proj = (Projection) te1;
	Join join = (Join) proj.getArg();

	ExternalSetMatcher<ExternalTupleSet> jsm = pcjFactory.getMatcher(qFactory.getQuerySegment(join));
	SimpleExternalTupleSet pcj = new SimpleExternalTupleSet((Projection)te2);
	Assert.assertEquals(true, jsm.match(pcj));
	TupleExpr te = jsm.getQuery();
	Assert.assertEquals(new HashSet<QueryModelNode>(), jsm.getUnmatchedArgNodes());

	Set<QueryModelNode> qNodes = QueryNodeGatherer.getNodes(te);
	List<QueryModelNode> nodes = jsm.getOrderedNodes();
	Set<QueryModelNode> nodeSet = new HashSet<>();
	nodeSet.add(nodes.get(0));
	nodeSet.add(pcj);

	Assert.assertEquals(nodeSet, new HashSet<QueryModelNode>(nodes));
	Assert.assertEquals(nodeSet, qNodes);

}
 
Developer: apache, Project: incubator-rya, Lines: 43, Source: JoinSegmentPCJMatcherTest.java

Example 7: testEqThreePermuteVars

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
/**
 * Tests QueryVariableNormalizerContext on queries q1 and q14, which are the
 * same up to the permutation of their variables.
 *
 * @throws Exception
 */
@Test
public void testEqThreePermuteVars() throws Exception {

	SPARQLParser parser1 = new SPARQLParser();
	SPARQLParser parser2 = new SPARQLParser();

	ParsedQuery pq1 = parser1.parseQuery(q1, null);
	ParsedQuery pq2 = parser2.parseQuery(q14, null);


	List<TupleExpr> normalize = QueryVariableNormalizer.getNormalizedIndex(pq1.getTupleExpr(),
			pq2.getTupleExpr());

	Assert.assertEquals(1, normalize.size());

	for (TupleExpr s : normalize) {
		Assert.assertTrue(tupleEquals(s, pq1.getTupleExpr()));
	}

}
 
Developer: apache, Project: incubator-rya, Lines: 26, Source: QueryVariableNormalizerTest.java

Example 8: simpleQueryWithoutBindingSets

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
@Test
public void simpleQueryWithoutBindingSets()
        throws MalformedQueryException, QueryEvaluationException, RyaDAOException {
    //query is used to build statement that will be evaluated
    String query = "select ?x ?c where{ graph ?c  {?x <uri:talksTo> <uri:Bob>. }}";
    SPARQLParser parser = new SPARQLParser();
    ParsedQuery pq = parser.parseQuery(query, null);
    List<StatementPattern> spList = StatementPatternCollector.process(pq.getTupleExpr());
    
    RyaStatement statement1 = new RyaStatement(new RyaURI("uri:Joe"), new RyaURI("uri:talksTo"),
            new RyaType("uri:Bob"), new RyaURI("uri:context1"), "", new StatementMetadata());
    dao.add(statement1);
    
    RyaStatement statement2 = new RyaStatement(new RyaURI("uri:Doug"), new RyaURI("uri:talksTo"),
            new RyaType("uri:Bob"), new RyaURI("uri:context2"), "", new StatementMetadata());
    dao.add(statement2);
    
    RyaStatement statement3 = new RyaStatement(new RyaURI("uri:Eric"), new RyaURI("uri:talksTo"),
            new RyaType("uri:Bob"), new RyaURI("uri:context3"), "", new StatementMetadata());
    dao.add(statement3);

    QueryBindingSet bsConstraint1 = new QueryBindingSet();
    
    CloseableIteration<BindingSet, QueryEvaluationException> iteration = eval.evaluate(spList.get(0), Arrays.asList(bsConstraint1));

    List<BindingSet> bsList = new ArrayList<>();
    while (iteration.hasNext()) {
        bsList.add(iteration.next());
    }

    Assert.assertEquals(3, bsList.size());
    
    QueryBindingSet expected1 = new QueryBindingSet();
    expected1.addBinding("x", new URIImpl("uri:Joe"));
    expected1.addBinding("c", new URIImpl("uri:context1"));

    QueryBindingSet expected2 = new QueryBindingSet();
    expected2.addBinding("x", new URIImpl("uri:Doug"));
    expected2.addBinding("c", new URIImpl("uri:context2"));
    
    QueryBindingSet expected3 = new QueryBindingSet();
    expected3.addBinding("x", new URIImpl("uri:Eric"));
    expected3.addBinding("c", new URIImpl("uri:context3"));
    
    Set<BindingSet> expected = new HashSet<>(Arrays.asList(expected1, expected2, expected3));
    Set<BindingSet> actual = new HashSet<>(bsList);
    
    Assert.assertEquals(expected, actual);
    dao.delete(Arrays.asList(statement1, statement2, statement3).iterator(), conf);
}
 
Developer: apache, Project: incubator-rya, Lines: 51, Source: StatementPatternEvalTest.java

Example 9: testMoveAcrossMultipleLeftJoins

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
@Test
public void testMoveAcrossMultipleLeftJoins() throws Exception {

	String query1 = ""//
			+ "SELECT ?a ?b ?c ?e ?f" //
			+ "{" //
			+ "  ?c <uri:p5> <uri:o2> ." //
			+ "  ?a <uri:p4> <uri:o1> . "//
			+ "  OPTIONAL{?a <uri:p3> ?b} . " //
			+ "  OPTIONAL{<uri:s2> <uri:p2> ?e} . "//
			+ "  OPTIONAL{<uri:s2> <uri:p2> ?f} . "//
			+ "  <uri:s1> <uri:p1> ?f "
			+ "}";//

	String query2 = ""//
			+ "SELECT ?f ?c" //
			+ "{" //
			+ "  ?c <uri:p5> <uri:o2> . "//
			+ "  <uri:s1> <uri:p1> ?f "
			+ "}";//

	SPARQLParser parser = new SPARQLParser();
	ParsedQuery pq1 = parser.parseQuery(query1, null);
	ParsedQuery pq2 = parser.parseQuery(query2, null);
	TupleExpr te1 = pq1.getTupleExpr();
	TupleExpr te2 = pq2.getTupleExpr();
	Join join1 = (Join) ((Projection) te1).getArg();
	Join join2 = (Join) ((Projection) te2).getArg();

	QuerySegment<ExternalTupleSet> seg1 = qFactory.getQuerySegment(join1);
	QuerySegment<ExternalTupleSet> seg2 = qFactory.getQuerySegment(join2);

	QueryNodeConsolidator consolidator = new QueryNodeConsolidator(seg1.getOrderedNodes(), seg2.getOrderedNodes());

	List<QueryModelNode> queryNodes = new ArrayList<>(seg1.getOrderedNodes());
	QueryModelNode node = queryNodes.remove(5);
	queryNodes.add(1,node);

	Assert.assertTrue(consolidator.consolidateNodes());
	Assert.assertEquals(consolidator.getQueryNodes(), queryNodes);
}
 
Developer: apache, Project: incubator-rya, Lines: 42, Source: PCJNodeConsolidatorTest.java

Example 10: simpleQueryWithConstantContext

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
/**
 * Tests if StatementMetadataNode joins BindingSet values correctly for
 * variables appearing as the object in one of the StatementPattern
 * statements (in this case, ?x appears as the Object in the statement
 * _:blankNode rdf:object ?x). StatementPattern statements have either
 * rdf:subject, rdf:predicate, or rdf:object as the predicate. Additionally,
 * this test also determines whether the node uses the specified context as a
 * query constraint.
 * 
 * @throws MalformedQueryException
 * @throws QueryEvaluationException
 * @throws RyaDAOException
 */
@Test
public void simpleQueryWithConstantContext()
        throws MalformedQueryException, QueryEvaluationException, RyaDAOException {

    // query used to create StatementPatternMetadataNode
    String contextQuery = "prefix owl: <http://www.w3.org/2002/07/owl#> prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> select ?x ?y where { graph <http://context_1> {_:blankNode rdf:type owl:Annotation; owl:annotatedSource <http://Joe>; "
            + "owl:annotatedProperty <http://worksAt>; owl:annotatedTarget ?x; <http://createdBy> ?y; <http://createdOn> \'2017-01-04\'^^xsd:date }}";

    StatementMetadata metadata = new StatementMetadata();
    metadata.addMetadata(new RyaURI("http://createdBy"), new RyaType("Joe"));
    metadata.addMetadata(new RyaURI("http://createdOn"), new RyaType(XMLSchema.DATE, "2017-01-04"));

    RyaStatement statement1 = new RyaStatement(new RyaURI("http://Joe"), new RyaURI("http://worksAt"),
            new RyaType("CoffeeShop"), new RyaURI("http://context_1"), "", metadata);
    RyaStatement statement2 = new RyaStatement(new RyaURI("http://Joe"), new RyaURI("http://worksAt"),
            new RyaType("HardwareStore"), new RyaURI("http://context_2"), "", metadata);
    dao.add(statement1);
    dao.add(statement2);

    SPARQLParser parser = new SPARQLParser();
    ParsedQuery pq = parser.parseQuery(contextQuery, null);
    List<StatementPattern> spList = StatementPatternCollector.process(pq.getTupleExpr());
    StatementMetadataNode<AccumuloRdfConfiguration> node = new StatementMetadataNode<>(spList, conf);

    List<BindingSet> bsCollection = new ArrayList<>();
    QueryBindingSet bsConstraint1 = new QueryBindingSet();
    bsConstraint1.addBinding("x", new LiteralImpl("CoffeeShop"));
    bsConstraint1.addBinding("z", new LiteralImpl("Virginia"));

    QueryBindingSet bsConstraint2 = new QueryBindingSet();
    bsConstraint2.addBinding("x", new LiteralImpl("HardwareStore"));
    bsConstraint2.addBinding("z", new LiteralImpl("Maryland"));

    QueryBindingSet bsConstraint3 = new QueryBindingSet();
    bsConstraint3.addBinding("x", new LiteralImpl("BurgerShack"));
    bsConstraint3.addBinding("z", new LiteralImpl("Delaware"));
    bsCollection.add(bsConstraint1);
    bsCollection.add(bsConstraint2);
    bsCollection.add(bsConstraint3);

    CloseableIteration<BindingSet, QueryEvaluationException> iteration = node.evaluate(bsCollection);

    QueryBindingSet expected1 = new QueryBindingSet();
    expected1.addBinding("x", new LiteralImpl("CoffeeShop"));
    expected1.addBinding("y", new LiteralImpl("Joe"));
    expected1.addBinding("z", new LiteralImpl("Virginia"));

    List<BindingSet> bsList = new ArrayList<>();
    while (iteration.hasNext()) {
        bsList.add(iteration.next());
    }

    Assert.assertEquals(1, bsList.size());
    Assert.assertEquals(expected1, bsList.get(0));

    dao.delete(statement1, conf);
    dao.delete(statement2, conf);
}
 
Developer: apache, Project: incubator-rya, Lines: 72, Source: AccumuloStatementMetadataNodeTest.java

Example 11: testNeq1

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
/**
 * Tests QueryVariableNormalizerContext to see if it recognizes that no
 * substitution exists for two small, similar queries q3 and q4 that are
 * structurally different.
 *
 * @throws Exception
 */
@Test
public void testNeq1() throws Exception {

	SPARQLParser parser1 = new SPARQLParser();
	SPARQLParser parser2 = new SPARQLParser();

	ParsedQuery pq1 = parser1.parseQuery(q3, null);
	ParsedQuery pq2 = parser2.parseQuery(q4, null);

	List<TupleExpr> normalize = QueryVariableNormalizer.getNormalizedIndex(pq1.getTupleExpr(),
			pq2.getTupleExpr());

	Assert.assertTrue(normalize.size() == 0);

}
 
Developer: apache, Project: incubator-rya, Lines: 22, Source: QueryVariableNormalizerTest.java

Example 12: testThreeIndexLargeQuery

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
@Test
public void testThreeIndexLargeQuery() throws Exception {

    final SPARQLParser parser1 = new SPARQLParser();
    final SPARQLParser parser2 = new SPARQLParser();
    final SPARQLParser parser3 = new SPARQLParser();
    final SPARQLParser parser4 = new SPARQLParser();

    final ParsedQuery pq1 = parser1.parseQuery(q13, null);
    final ParsedQuery pq2 = parser2.parseQuery(q5, null);
    final ParsedQuery pq3 = parser3.parseQuery(q12, null);
    final ParsedQuery pq4 = parser4.parseQuery(q14, null);

    System.out.println("Query is " + pq1.getTupleExpr());
    System.out.println("Indexes are " + pq2.getTupleExpr() + " , "
            + pq3.getTupleExpr() + " , " + pq4.getTupleExpr());

    final SimpleExternalTupleSet extTup1 = new SimpleExternalTupleSet(
            (Projection) pq2.getTupleExpr());
    final SimpleExternalTupleSet extTup2 = new SimpleExternalTupleSet(
            (Projection) pq3.getTupleExpr());
    final SimpleExternalTupleSet extTup3 = new SimpleExternalTupleSet(
            (Projection) pq4.getTupleExpr());

    final List<ExternalTupleSet> list = new ArrayList<ExternalTupleSet>();
    list.add(extTup1);
    list.add(extTup2);
    list.add(extTup3);

    final TupleExpr tup = pq1.getTupleExpr().clone();
    provider.setIndices(list);
    final PCJOptimizer pcj = new PCJOptimizer(list, false, provider);
    pcj.optimize(tup, null, null);

    final Set<StatementPattern> qSet = Sets.newHashSet(StatementPatternCollector
            .process(pq1.getTupleExpr()));
    final Set<QueryModelNode> eTupSet = PcjIntegrationTestingUtil
            .getTupleSets(tup);

    final Set<StatementPattern> set = Sets.newHashSet();
    for (final QueryModelNode s : eTupSet) {
        set.addAll(StatementPatternCollector.process(((ExternalTupleSet) s)
                .getTupleExpr()));
    }

    Assert.assertTrue(set.equals(qSet));

}
 
Developer: apache, Project: incubator-rya, Lines: 49, Source: PrecompJoinOptimizerTest2.java

Example 13: testEvaluateTwoIndexCrossProduct2

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
@Test
public void testEvaluateTwoIndexCrossProduct2()
        throws Exception {


    final String indexSparqlString = ""//
            + "SELECT ?e ?l ?c  " //
            + "{" //
            + "  ?e a ?c . "//
            + "  ?e <http://www.w3.org/2000/01/rdf-schema#label> ?l "//
            + "}";//

    final String indexSparqlString2 = ""//
            + "SELECT ?e ?l ?o " //
            + "{" //
            + "  ?e <uri:talksTo> ?o . "//
            + "  ?o <http://www.w3.org/2000/01/rdf-schema#label> ?l "//
            + "}";//

    final String queryString = ""//
            + "SELECT ?e ?c ?l ?o ?f ?g " //
            + "{" //
            + "  ?e a ?c . "//
            + "  ?e <http://www.w3.org/2000/01/rdf-schema#label> ?l . "//
            + "  ?e <uri:talksTo> ?o . "//
            + "  ?o <http://www.w3.org/2000/01/rdf-schema#label> ?l . "//
            + "  ?f <uri:talksTo> ?g . " //
            + "}";//

    final SPARQLParser sp = new SPARQLParser();
    final ParsedQuery index1 = sp.parseQuery(indexSparqlString, null);
    final ParsedQuery index2 = sp.parseQuery(indexSparqlString2, null);

    final List<ExternalTupleSet> index = Lists.newArrayList();

    final SimpleExternalTupleSet ais1 = new SimpleExternalTupleSet(
            (Projection) index1.getTupleExpr());
    final SimpleExternalTupleSet ais2 = new SimpleExternalTupleSet(
            (Projection) index2.getTupleExpr());

    index.add(ais1);
    index.add(ais2);

    final ParsedQuery pq = sp.parseQuery(queryString, null);
    final TupleExpr tup = pq.getTupleExpr().clone();
    provider.setIndices(index);
    final PCJOptimizer pcj = new PCJOptimizer(index, false, provider);
    pcj.optimize(tup, null, null);

    final IndexPlanValidator ipv = new IndexPlanValidator(true);
    Assert.assertEquals(false, ipv.isValid(tup));

}
 
Developer: apache, Project: incubator-rya, Lines: 54, Source: IndexPlanValidatorTest.java

Example 14: testSubsetMatchLargeReOrdered

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
@Test
public void testSubsetMatchLargeReOrdered() throws Exception {

    final String query1 = ""//
            + "SELECT ?a ?b ?c ?d ?e ?f ?g ?h" //
            + "{" //
            + "  ?a <uri:p0> ?b ." //
            + "  OPTIONAL{?b <uri:p2> ?c. ?c <uri:p1> ?d} . " //
            + "  OPTIONAL{?b <uri:p3> ?e. ?e <uri:p1> ?f} . "//
            + "  OPTIONAL{?b <uri:p4> ?g. ?g <uri:p1> ?h} . "//
            + "  OPTIONAL{?b <uri:p5> ?i. ?i <uri:p6> ?j} . "//
            + "  OPTIONAL{?b <uri:p5> ?k. ?k <uri:p6> ?l} . "//
            + "  OPTIONAL{?b <uri:p5> ?m. ?m <uri:p6> ?n} . "//
            + "  OPTIONAL{?b <uri:p4> ?o. ?o <uri:p1> ?p} . "//
            + "}";//

    final String query2 = ""//
            + "SELECT ?a ?b ?c ?d ?e ?f ?g ?h" //
            + "{" //
            + "  ?a <uri:p0> ?b ." //
            + "  OPTIONAL{?b <uri:p4> ?o. ?o <uri:p1> ?p} . "//
            + "  OPTIONAL{?b <uri:p4> ?g. ?g <uri:p1> ?h} . "//
            + "  OPTIONAL{?b <uri:p2> ?c. ?c <uri:p1> ?d} . " //
            + "  OPTIONAL{?b <uri:p3> ?e. ?e <uri:p1> ?f} . "//
            + "}";//

    final SPARQLParser parser = new SPARQLParser();
    final ParsedQuery pq1 = parser.parseQuery(query1, null);
    final ParsedQuery pq2 = parser.parseQuery(query2, null);
    final TupleExpr te1 = pq1.getTupleExpr();
    final TupleExpr te2 = pq2.getTupleExpr();

    final TupleExpr unOpt = te1.clone();
    final List<QueryModelNode> remainingNodes = getNodes(te1);
    final Set<QueryModelNode> unMatchedNodes = new HashSet<>();
    unMatchedNodes.add(remainingNodes.get(8));
    unMatchedNodes.add(remainingNodes.get(9));
    unMatchedNodes.add(remainingNodes.get(10));
    unMatchedNodes.add(remainingNodes.get(11));
    unMatchedNodes.add(remainingNodes.get(12));
    unMatchedNodes.add(remainingNodes.get(7));

    final SimpleExternalTupleSet pcj = new SimpleExternalTupleSet((Projection) te2);
    final List<ExternalTupleSet> externalList = new ArrayList<>();
    externalList.add(pcj);

    provider.setIndices(externalList);
    final PCJOptimizer optimizer = new PCJOptimizer(externalList, false, provider);
    optimizer.optimize(te1, null, null);

    Assert.assertEquals(true, validatePcj(te1, unOpt, externalList, unMatchedNodes));
}
 
Developer: apache, Project: incubator-rya, Lines: 53, Source: PCJOptimizerTest.java

Example 15: simpleQueryWithBindingSet

import org.openrdf.query.parser.sparql.SPARQLParser; // import the package/class this method depends on
@Test
public void simpleQueryWithBindingSet() throws Exception {
    MongoDBRyaDAO dao = new MongoDBRyaDAO();
    try {
        dao.setConf(conf);
        dao.init();
        StatementMetadata metadata = new StatementMetadata();
        metadata.addMetadata(new RyaURI("http://createdBy"), new RyaType("Joe"));
        metadata.addMetadata(new RyaURI("http://createdOn"), new RyaType(XMLSchema.DATE, "2017-01-04"));

        RyaStatement statement1 = new RyaStatement(new RyaURI("http://Joe"), new RyaURI("http://worksAt"),
                new RyaType("CoffeeShop"), new RyaURI("http://context"), "", metadata);
        RyaStatement statement2 = new RyaStatement(new RyaURI("http://Joe"), new RyaURI("http://worksAt"),
                new RyaType("HardwareStore"), new RyaURI("http://context"), "", metadata);
        dao.add(statement1);
        dao.add(statement2);

        SPARQLParser parser = new SPARQLParser();
        ParsedQuery pq = parser.parseQuery(query, null);
        List<StatementPattern> spList = StatementPatternCollector.process(pq.getTupleExpr());
        StatementMetadataNode<MongoDBRdfConfiguration> node = new StatementMetadataNode<>(spList, conf);

        QueryBindingSet bsConstraint = new QueryBindingSet();
        bsConstraint.addBinding("x", new LiteralImpl("CoffeeShop"));
        bsConstraint.addBinding("z", new LiteralImpl("Virginia"));

        CloseableIteration<BindingSet, QueryEvaluationException> iteration = node.evaluate(bsConstraint);

        QueryBindingSet expected = new QueryBindingSet();
        expected.addBinding("x", new LiteralImpl("CoffeeShop"));
        expected.addBinding("y", new LiteralImpl("Joe"));
        expected.addBinding("z", new LiteralImpl("Virginia"));

        List<BindingSet> bsList = new ArrayList<>();
        while (iteration.hasNext()) {
            bsList.add(iteration.next());
        }

        Assert.assertEquals(1, bsList.size());
        Assert.assertEquals(expected, bsList.get(0));

        dao.delete(statement1, conf);
        dao.delete(statement2, conf);
    } finally {
        dao.destroy();
    }
}
 
Developer: apache, Project: incubator-rya, Lines: 48, Source: MongoStatementMetadataNodeIT.java


Note: The org.openrdf.query.parser.sparql.SPARQLParser.parseQuery method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and the source code copyright remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.