本文整理汇总了Python中rdflib.query.Result.parse方法的典型用法代码示例。如果您正苦于以下问题:Python Result.parse方法的具体用法?Python Result.parse怎么用?Python Result.parse使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类rdflib.query.Result
的用法示例。
在下文中一共展示了Result.parse方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: query
# 需要导入模块: from rdflib.query import Result [as 别名]
# 或者: from rdflib.query.Result import parse [as 别名]
def query(self, query,
          initNs=None,
          initBindings=None,
          queryGraph=None,
          DEBUG=False):
    """Execute a SPARQL query against the wrapped endpoint.

    :param query: the SPARQL query string.
    :param initNs: optional mapping of prefix -> namespace bound before
        execution (default: no extra bindings).
    :param initBindings: optional mapping of variable -> RDF term, injected
        into the query via a SPARQL 1.1 ``VALUES`` clause.
    :param queryGraph: graph URI sent as ``default-graph-uri`` when the
        store is contextual.
    :param DEBUG: enable debug output on the wrapper.
    :returns: the parsed :class:`rdflib.query.Result`.
    :raises Exception: if ``initBindings`` is given but the endpoint only
        supports SPARQL 1.0.
    """
    # BUG FIX: the defaults were mutable dicts ({}), shared across every
    # call of the method; use None sentinels instead (backward-compatible).
    initNs = {} if initNs is None else initNs
    self.debug = DEBUG
    assert isinstance(query, basestring)
    self.setNamespaceBindings(initNs)
    if initBindings:
        if not self.sparql11:
            raise Exception(
                "initBindings not supported for SPARQL 1.0 Endpoints.")
        v = list(initBindings)
        # VALUES was added to SPARQL 1.1 on 2012/07/24
        query += "\nVALUES ( %s )\n{ ( %s ) }\n"\
            % (" ".join("?" + str(x) for x in v),
               " ".join(self.node_to_sparql(initBindings[x]) for x in v))
    self.resetQuery()
    if self._is_contextual(queryGraph):
        self.addParameter("default-graph-uri", queryGraph)
    self.timeout = self._timeout
    self.setQuery(query)
    return Result.parse(SPARQLWrapper.query(self).response)
示例2: query
# 需要导入模块: from rdflib.query import Result [as 别名]
# 或者: from rdflib.query.Result import parse [as 别名]
def query(self, query,
          initNs=None,
          initBindings=None,
          queryGraph=None,
          DEBUG=False):
    """Execute a SPARQL query, optionally scoped to a default graph.

    :param query: the SPARQL query string.
    :param initNs: optional mapping of prefix -> namespace bound before
        execution (default: no extra bindings).
    :param initBindings: optional mapping of variable -> RDF term, injected
        into the query via a SPARQL 1.1 ``VALUES`` clause.
    :param queryGraph: graph identifier added as a default graph when the
        store is context aware (``'__UNION__'`` means all graphs).
    :param DEBUG: enable debug output on the wrapper.
    :returns: the parsed :class:`rdflib.query.Result`.
    :raises Exception: if ``initBindings`` is given but the endpoint only
        supports SPARQL 1.0.
    """
    # BUG FIX: the defaults were mutable dicts ({}), shared across every
    # call of the method; use None sentinels instead (backward-compatible).
    initNs = {} if initNs is None else initNs
    self.debug = DEBUG
    assert isinstance(query, basestring)
    self.setNamespaceBindings(initNs)
    if initBindings:
        if not self.sparql11:
            raise Exception(
                "initBindings not supported for SPARQL 1.0 Endpoints.")
        v = list(initBindings)
        # VALUES was added to SPARQL 1.1 on 2012/07/24
        query += "\nVALUES ( %s )\n{ ( %s ) }\n"\
            % (" ".join("?" + str(x) for x in v),
               " ".join(initBindings[x].n3() for x in v))
    self.resetQuery()
    if self.context_aware and queryGraph and queryGraph != '__UNION__':
        self.addDefaultGraph(queryGraph)
    self.setQuery(query)
    return Result.parse(SPARQLWrapper.query(self).response)
示例3: query
# 需要导入模块: from rdflib.query import Result [as 别名]
# 或者: from rdflib.query.Result import parse [as 别名]
def query(self, query, default_graph=None):
    """Send *query* to the configured query endpoint.

    :param query: the SPARQL query string.
    :param default_graph: optional graph URI sent as ``default-graph-uri``.
    :returns: the parsed :class:`rdflib.query.Result`.
    :raises SPARQLConnectorException: if no query endpoint is configured or
        the HTTP method is unknown.
    """
    if not self.query_endpoint:
        raise SPARQLConnectorException("Query endpoint not set!")

    params = {'query': query}
    if default_graph:
        params["default-graph-uri"] = default_graph

    # Start from the per-connection kwargs and layer request specifics
    # on top, preserving any caller-supplied params/headers.
    args = dict(self.kwargs)
    args['url'] = self.query_endpoint
    args.setdefault('params', {})
    args.setdefault('headers', {})
    args['headers'].update(
        {'Accept': _response_mime_types[self.returnFormat]})

    if self.method == 'GET':
        args['params'].update(params)
    elif self.method == 'POST':
        args['data'] = params
    else:
        raise SPARQLConnectorException("Unknown method %s" % self.method)

    res = self.session.request(self.method, **args)
    res.raise_for_status()
    return Result.parse(BytesIO(res.content),
                        content_type=res.headers['Content-type'])
示例4: contexts
# 需要导入模块: from rdflib.query import Result [as 别名]
# 或者: from rdflib.query.Result import parse [as 别名]
def contexts(self, triple=None):
    """
    Iterates over results to "SELECT ?NAME { GRAPH ?NAME { ?s ?p ?o } }"
    or "SELECT ?NAME { GRAPH ?NAME {} }" if triple is `None`.

    Returns instances of this store with the SPARQL wrapper
    object updated via addNamedGraph(?NAME).
    This causes a named-graph-uri key / value pair to be sent over
    the protocol.

    Please note that some SPARQL endpoints are not able to find empty named
    graphs.
    """
    self.resetQuery()
    if triple:
        to_sparql = self.node_to_sparql
        subj, pred, obj = triple
        # Render each bound term, substituting a variable for any
        # unbound (falsy) position.
        rendered = tuple(
            to_sparql(term if term else Variable(name))
            for term, name in zip((subj, pred, obj), ('s', 'p', 'o')))
        self.setQuery(
            'SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' % rendered)
    else:
        self.setQuery('SELECT ?name WHERE { GRAPH ?name {} }')
    with contextlib.closing(SPARQLWrapper.query(self).response) as response:
        parsed = Result.parse(response, format=self.returnFormat)
    return (row.name for row in parsed)
示例5: __len__
# 需要导入模块: from rdflib.query import Result [as 别名]
# 或者: from rdflib.query.Result import parse [as 别名]
def __len__(self, context=None):
    """Return the number of triples, optionally restricted to *context*.

    Issues ``SELECT (count(*) as ?c)`` against the endpoint; only
    implemented for SPARQL 1.1 endpoints.

    :param context: optional graph whose ``identifier`` is sent as
        ``default-graph-uri``.
    :raises NotImplementedError: on SPARQL 1.0 endpoints.
    """
    if not self.sparql11:
        # BUG FIX: the two adjacent literals previously concatenated to
        # "...this is notsupported..." (missing space between them).
        raise NotImplementedError(
            "For performance reasons, this is not "
            "supported for sparql1.0 endpoints")
    else:
        self.resetQuery()
        q = "SELECT (count(*) as ?c) WHERE {?s ?p ?o .}"
        if self._is_contextual(context):
            self.addParameter("default-graph-uri", context.identifier)
        self.setQuery(q)
        with contextlib.closing(SPARQLWrapper.query(self).response) as res:
            result = Result.parse(res, format=self.returnFormat)
        # The single row's ?c binding holds the count.
        return int(next(iter(result)).c)
示例6: query
# 需要导入模块: from rdflib.query import Result [as 别名]
# 或者: from rdflib.query.Result import parse [as 别名]
def query(self, query,
          initNs=None,
          initBindings=None,
          queryGraph=None,
          DEBUG=False):
    """Execute a SPARQL query, injecting a GRAPH clause when contextual.

    :param query: the SPARQL query string.
    :param initNs: optional mapping of prefix -> namespace bound before
        execution (default: no extra bindings).
    :param initBindings: optional mapping of variable -> RDF term, injected
        into the query via a SPARQL 1.1 ``VALUES`` clause.
    :param queryGraph: graph node whose ``n3()`` form is wrapped around the
        query body as ``GRAPH <g> { ... }`` when the store is context aware
        (``'__UNION__'`` means all graphs).
    :param DEBUG: enable debug output on the wrapper.
    :returns: the parsed :class:`rdflib.query.Result`.
    :raises Exception: if ``initBindings`` is given but the endpoint only
        supports SPARQL 1.0.
    """
    # BUG FIX: the defaults were mutable dicts ({}), shared across every
    # call of the method; use None sentinels instead (backward-compatible).
    initNs = {} if initNs is None else initNs
    self.debug = DEBUG
    assert isinstance(query, basestring)
    self.setNamespaceBindings(initNs)
    if initBindings:
        if not self.sparql11:
            raise Exception(
                "initBindings not supported for SPARQL 1.0 Endpoints.")
        v = list(initBindings)
        # VALUES was added to SPARQL 1.1 on 2012/07/24
        query += "\nVALUES ( %s )\n{ ( %s ) }\n"\
            % (" ".join("?" + str(x) for x in v),
               " ".join(initBindings[x].n3() for x in v))
    self.resetQuery()
    if self.context_aware and queryGraph and queryGraph != '__UNION__':
        # we care about context
        # raw string: '\s' in a non-raw literal triggers invalid-escape
        # warnings on modern Pythons (pattern value is unchanged).
        if not re.search(r'[\s{]GRAPH[{\s]', query, flags=re.I):
            # if a GRAPH clause was already specified, move on...
            # insert GRAPH clause after/before first/last { }
            # not 100% sure how rock-steady this is
            i1 = query.index("{") + 1
            i2 = query.rindex("}")
            query = (query[:i1] + ' GRAPH %s { ' % queryGraph.n3() +
                     query[i1:i2] + ' } ' + query[i2:])
    self.setQuery(query)
    return Result.parse(SPARQLWrapper.query(self).response)
示例7: query_test
# 需要导入模块: from rdflib.query import Result [as 别名]
# 或者: from rdflib.query.Result import parse [as 别名]
def query_test(t):
uri, name, comment, data, graphdata, query, resfile, syntax = t
# the query-eval tests refer to graphs to load by resolvable filenames
rdflib_sparql_module.SPARQL_LOAD_GRAPHS = True
if uri in skiptests:
raise SkipTest()
def skip(reason='(none)'):
print "Skipping %s from now on." % uri
f = open("skiptests.list", "a")
f.write("%s\t%s\n" % (uri, reason))
f.close()
try:
g = Dataset()
if data:
g.default_context.load(data, format=_fmt(data))
if graphdata:
for x in graphdata:
g.load(x, format=_fmt(x))
if not resfile:
# no result - syntax test
if syntax:
translateQuery(parseQuery(
open(query[7:]).read()), base=urljoin(query, '.'))
else:
# negative syntax test
try:
translateQuery(parseQuery(
open(query[7:]).read()), base=urljoin(query, '.'))
assert False, 'Query should not have parsed!'
except:
pass # it's fine - the query should not parse
return
# eval test - carry out query
res2 = g.query(open(query[7:]).read(), base=urljoin(query, '.'))
if resfile.endswith('ttl'):
resg = Graph()
resg.load(resfile, format='turtle', publicID=resfile)
res = RDFResultParser().parse(resg)
elif resfile.endswith('rdf'):
resg = Graph()
resg.load(resfile, publicID=resfile)
res = RDFResultParser().parse(resg)
elif resfile.endswith('srj'):
res = Result.parse(open(resfile[7:]), format='json')
elif resfile.endswith('tsv'):
res = Result.parse(open(resfile[7:]), format='tsv')
elif resfile.endswith('csv'):
res = Result.parse(open(resfile[7:]), format='csv')
# CSV is lossy, round-trip our own resultset to
# lose the same info :)
# write bytes, read strings...
s = BytesIO()
res2.serialize(s, format='csv')
print s.getvalue()
s = StringIO(s.getvalue().decode('utf-8')) # hmm ?
res2 = Result.parse(s, format='csv')
else:
res = Result.parse(open(resfile[7:]), format='xml')
if not DETAILEDASSERT:
eq(res.type, res2.type, 'Types do not match')
if res.type == 'SELECT':
eq(set(res.vars), set(res2.vars), 'Vars do not match')
comp = bindingsCompatible(
set(res),
set(res2)
)
assert comp, 'Bindings do not match'
elif res.type == 'ASK':
eq(res.askAnswer, res2.askAnswer, 'Ask answer does not match')
elif res.type in ('DESCRIBE', 'CONSTRUCT'):
assert isomorphic(
res.graph, res2.graph), 'graphs are not isomorphic!'
else:
raise Exception('Unknown result type: %s' % res.type)
else:
eq(res.type, res2.type,
'Types do not match: %r != %r' % (res.type, res2.type))
if res.type == 'SELECT':
eq(set(res.vars),
set(res2.vars), 'Vars do not match: %r != %r' % (
set(res.vars), set(res2.vars)))
assert bindingsCompatible(
set(res),
set(res2)
), 'Bindings do not match: \n%s\n!=\n%s' % (
#.........这里部分代码省略.........
示例8: dict
# 需要导入模块: from rdflib.query import Result [as 别名]
# 或者: from rdflib.query.Result import parse [as 别名]
line = line.strip()
if line == "":
continue
row = ROW.parseString(line, parseAll=True)
r.bindings.append(
dict(zip(r.vars, (self.convertTerm(x) for x in row))))
return r
except ParseException, err:
print err.line
print " " * (err.column - 1) + "^"
print err
def convertTerm(self, t):
    """Convert a parsed token into an RDF term.

    A ``CompValue`` tagged ``'literal'`` becomes an rdflib Literal
    (preserving language tag and datatype); any other ``CompValue``
    is rejected; everything else is passed through unchanged.
    """
    if not isinstance(t, CompValue):
        return t
    if t.name == 'literal':
        return RDFLiteral(t.string, lang=t.lang, datatype=t.datatype)
    raise Exception("I dont know how to handle this: %s" % (t,))
if __name__ == '__main__':
import sys
r = Result.parse(file(sys.argv[1]), format='tsv')
print r.vars
print r.bindings
#print r.serialize(format='json')
示例9: triples
# 需要导入模块: from rdflib.query import Result [as 别名]
# 或者: from rdflib.query.Result import parse [as 别名]
def triples(self, spo, context=None):
    """
    Generate the triples matching the pattern *spo* via a SPARQL query.

    - tuple **(s, p, o)**
      the triple used as filter for the SPARQL select.
      (None, None, None) means anything.
    - context **context**
      the graph effectively calling this method.

    Yields ``(triple, None)`` pairs, executing essentially a SPARQL like
    SELECT ?subj ?pred ?obj WHERE { ?subj ?pred ?obj }
    (or an ASK query when all three positions are bound).

    **context** may include three parameters
    to refine the underlying query:
    * LIMIT: an integer to limit the number of results
    * OFFSET: an integer to enable paging of results
    * ORDERBY: an instance of Variable('s'), Variable('o') or Variable('p')
      or, by default, the first 'None' from the given triple

    .. warning::
       - Using LIMIT or OFFSET automatically includes ORDERBY, because
         otherwise the results are retrieved in a non-deterministic way
         (depends on the walking path on the graph)
       - Using OFFSET without defining LIMIT will discard the first
         OFFSET - 1 results

    ``
    a_graph.LIMIT = limit
    a_graph.OFFSET = offset
    triple_generator = a_graph.triples(mytriple):
    #do something
    #Removes LIMIT and OFFSET if not required for the next triple() calls
    del a_graph.LIMIT
    del a_graph.OFFSET
    ``
    """
    s, p, o = spo
    # Substitute a variable for every unbound (falsy) position, remembering
    # which positions were free so we know whether to SELECT or ASK.
    vars = []
    if not s:
        s = Variable('s')
        vars.append(s)
    if not p:
        p = Variable('p')
        vars.append(p)
    if not o:
        o = Variable('o')
        vars.append(o)
    if vars:
        v = ' '.join([term.n3() for term in vars])
        verb = 'SELECT %s '%v
    else:
        # Fully bound pattern: just ask whether the triple exists.
        verb = 'ASK'
    nts = self.node_to_sparql
    query = "%s { %s %s %s }" % (verb, nts(s), nts(p), nts(o))
    # The ORDER BY is necessary
    # NOTE: LIMIT/OFFSET/ORDERBY below are string constants (attribute
    # names) presumably defined at module level — not visible in this
    # chunk; hasattr() requires them to be strings.
    if hasattr(context, LIMIT) or hasattr(context, OFFSET) \
            or hasattr(context, ORDERBY):
        # Pick an ordering variable: the first free position, else an
        # explicit Variable set on the context.
        var = None
        if isinstance(s, Variable):
            var = s
        elif isinstance(p, Variable):
            var = p
        elif isinstance(o, Variable):
            var = o
        elif hasattr(context, ORDERBY) \
                and isinstance(getattr(context, ORDERBY), Variable):
            var = getattr(context, ORDERBY)
        query = query + ' %s %s' % (ORDERBY, var.n3())
        # Best-effort: silently skip LIMIT/OFFSET when the attribute is
        # absent or not convertible to int.
        try:
            query = query + ' LIMIT %s' % int(getattr(context, LIMIT))
        except (ValueError, TypeError, AttributeError):
            pass
        try:
            query = query + ' OFFSET %s' % int(getattr(context, OFFSET))
        except (ValueError, TypeError, AttributeError):
            pass
    self.resetQuery()
    if self._is_contextual(context):
        self.addParameter("default-graph-uri", context.identifier)
    self.timeout = self._timeout
    self.setQuery(query)
    with contextlib.closing(SPARQLWrapper.query(self).response) as res:
        result = Result.parse(res, format=self.returnFormat)
    if vars:
        # SELECT: fill each free position from the row, keeping the bound
        # terms as-is (row.get falls back to the term itself).
        for row in result:
            yield (row.get(s, s),
                   row.get(p, p),
                   row.get(o, o)), None  # why is the context here not the passed in graph 'context'?
    else:
        # ASK: yield the (fully bound) triple once if it exists.
        if result.askAnswer:
            yield (s,p,o), None
示例10: _parse_response
# 需要导入模块: from rdflib.query import Result [as 别名]
# 或者: from rdflib.query.Result import parse [as 别名]
def _parse_response(cls, rsp):
    """Parse the response buffer and materialize the result rows as a list."""
    parsed = Result.parse(rsp.buffer)
    return list(parsed)