本文整理汇总了Python中rdflib.plugin.get函数的典型用法代码示例。如果您正苦于以下问题:Python get函数的具体用法?Python get怎么用?Python get使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: query
def query(
    self,
    strOrQuery,
    initBindings=None,
    initNs=None,
    DEBUG=False,
    PARSE_DEBUG=False,
    dataSetBase=None,
    processor="sparql",
    extensionFunctions=None,
):
    """
    Execute a SPARQL query (eventually will support Versa queries with
    the same method) against this Graph.

    - `strOrQuery`: either a string consisting of the SPARQL query or
      an instance of rdflib.sparql.bison.Query.Query
    - `initBindings`: a mapping from a Variable to an RDFLib term (used
      as initial bindings for the SPARQL query); defaults to {}
    - `initNs`: a mapping from a namespace prefix to an instance of
      rdflib.Namespace (used for the SPARQL query); defaults to {}
    - `DEBUG`: boolean flag passed on to the SPARQL parser and
      evaluation engine
    - `PARSE_DEBUG`: boolean flag passed on to the SPARQL parser
    - `dataSetBase`: optional dataset base, forwarded to the processor
    - `processor`: the kind of RDF query (must be 'sparql' until Versa
      is ported)
    - `extensionFunctions`: mapping of extension URIs to callables;
      defaults to {sparql.DESCRIBE: describe}
    """
    # None sentinels instead of mutable default arguments — a `{}` default
    # is created once and shared (and mutable) across every call.
    if initBindings is None:
        initBindings = {}
    if initNs is None:
        initNs = {}
    if extensionFunctions is None:
        extensionFunctions = {sparql.DESCRIBE: describe}
    assert processor == "sparql", "SPARQL is currently the only supported RDF query language"
    p = plugin.get(processor, sparql.Processor)(self)
    return plugin.get("SPARQLQueryResult", query.result.QueryResult)(
        p.query(strOrQuery, initBindings, initNs, DEBUG, PARSE_DEBUG, dataSetBase, extensionFunctions)
    )
示例2: query
def query(self, strOrQuery, initBindings=None, initNs=None, DEBUG=False,
          dataSetBase=None,
          processor="sparql",
          extensionFunctions=None):
    """
    Execute a SPARQL query (eventually will support Versa queries with
    the same method) against this Graph.

    - `strOrQuery`: either a string consisting of the SPARQL query or an
      instance of rdflib.sparql.bison.Query.Query
    - `initBindings`: a mapping from a Variable to an RDFLib term (used
      as initial bindings for the SPARQL query); defaults to {}
    - `initNs`: a mapping from a namespace prefix to an instance of
      rdflib.Namespace (used for the SPARQL query); defaults to {}
    - `DEBUG`: boolean flag passed on to the SPARQL parser and
      evaluation engine
    - `dataSetBase`: optional dataset base, forwarded to the processor
    - `processor`: the kind of RDF query (must be 'sparql' until Versa
      is ported)
    - `extensionFunctions`: mapping of extension URIs to callables;
      defaults to {sparql.DESCRIBE: describe}
    """
    # None sentinels instead of mutable default arguments, which would be
    # shared across calls.
    if initBindings is None:
        initBindings = {}
    if initNs is None:
        initNs = {}
    if extensionFunctions is None:
        extensionFunctions = {sparql.DESCRIBE: describe}
    assert processor == 'sparql', "SPARQL is currently the only supported RDF query language"
    p = plugin.get(processor, sparql.Processor)(self)
    # NOTE: the original snippet contained a second, unreachable
    # implementation pasted after this return statement; it has been removed.
    return plugin.get('SPARQLQueryResult', QueryResult)(
        p.query(strOrQuery,
                initBindings,
                initNs,
                DEBUG,
                dataSetBase,
                extensionFunctions))
示例3: sample_query
def sample_query(self, querystring):
    """
    Run a SPARQL query against ``self.graph`` with all of the graph's
    registered namespace prefixes pre-bound, returning the wrapped result.

    - `querystring`: the SPARQL query text
    """
    # Parenthesized call form works identically on Python 2 and Python 3;
    # the original `print "Query enter"` statement is Python-2-only syntax.
    print("Query enter")
    processor = plugin.get('sparql', rdflib.query.Processor)(self.graph)
    result = plugin.get('sparql', rdflib.query.Result)
    # Bind every prefix the graph already knows about so queries can use them.
    ns = dict(self.graph.namespace_manager.namespaces())
    return result(processor.query(querystring, initNs=ns))
示例4: __query
def __query(self, query_object, processor='sparql', result='sparql',
            initBindings=None):
    """
    Internal query helper: resolve the processor and result classes via
    the plugin registry (unless instances/classes were passed directly)
    and run `query_object`.

    - `query_object`: the query to execute
    - `processor`: plugin name, or an existing query.Processor instance
    - `result`: plugin name, or an existing query.Result class
    - `initBindings`: initial variable bindings; defaults to {}
    """
    # None sentinel instead of a mutable `{}` default shared across calls.
    if initBindings is None:
        initBindings = {}
    if not isinstance(processor, query.Processor):
        processor = plugin.get(processor, query.Processor)(self)
    if not isinstance(result, query.Result):
        result = plugin.get(result, query.Result)
    # NOTE(review): `namespaces` is not defined in this method — presumably
    # a module-level mapping; verify it is not a typo for a missing initNs
    # parameter before relying on this.
    return result(processor.query(query_object, initBindings, namespaces))
示例5: query
def query(self, query_object, processor='sparql', result='sparql'):
    """
    Execute `query_object` against this graph.

    - `processor`: a plugin name to look up, or an already-constructed
      query.Processor instance
    - `result`: a plugin name to look up, or an already-usable
      query.Result class
    """
    # Resolve plugin names to concrete objects; pass-through when the
    # caller already supplied instances/classes.
    if isinstance(processor, query.Processor):
        proc = processor
    else:
        proc = plugin.get(processor, query.Processor)(self)
    if isinstance(result, query.Result):
        wrapper = result
    else:
        wrapper = plugin.get(result, query.Result)
    return wrapper(proc.query(query_object))
示例6: __init__
def __init__(self, configuration, db, create):
    """
    Set up and open the backing rdflib store.

    - `configuration`: configuration string handed to store.open()
    - `db`: optional database argument for the store constructor; when
      falsy the store is constructed with no arguments
    - `create`: whether store.open() should create the store
    """
    self.configuration = configuration
    self.create = create
    self.db = db
    # Look up the store class once, then construct with or without `db`.
    store_cls = plugin.get(self.storeType, store.Store)
    self.store = store_cls(db) if db else store_cls()
    self.store.open(configuration, create)
示例7: query
def query(self, query_object, processor='sparql', result='sparql', initNs=None, initBindings=None, use_store_provided=True, **kwargs):
    """
    Execute `query_object` against this graph.

    Prefers the store's own query() implementation when it exists and
    `use_store_provided` is True; otherwise resolves processor and
    result via the plugin registry.

    - `processor`: plugin name, or an existing query.Processor instance
    - `result`: plugin name, or an existing query.Result class
    - `initNs`: namespace prefix mapping; defaults to {}
    - `initBindings`: initial variable bindings; defaults to {}
    - `use_store_provided`: set False to bypass the store's query()
    """
    # None sentinels instead of mutable `{}` defaults shared across calls.
    if initNs is None:
        initNs = {}
    if initBindings is None:
        initBindings = {}
    if hasattr(self.store, "query") and use_store_provided:
        return self.store.query(self, query_object, initNs, initBindings, **kwargs)
    if not isinstance(result, query.Result):
        result = plugin.get(result, query.Result)
    if not isinstance(processor, query.Processor):
        processor = plugin.get(processor, query.Processor)(self)
    return result(processor.query(query_object, initBindings, initNs, **kwargs))
示例8: main
def main(fd, store_type=None, store_id=None, graph_id=None, gzipped=False):
    """
    Convert MARC21 data stored in the file at path `fd` to an RDFLib graph.

    - `fd`: path of the MARC21 input file
    - `store_type`: optional rdflib store plugin name; when given, both
      `store_id` and `graph_id` are required
    - `store_id` / `graph_id`: identifiers for a disk-based store
    - `gzipped`: accepted for interface compatibility (unused here)

    Returns the populated Graph. Commits every 100 triples and once more
    on exit (even on error).
    """
    from rdflib import plugin
    if store_type:
        msg = "Need a {} identifier for a disk-based store."
        assert store_id, msg.format('store')
        assert graph_id, msg.format('graph')
        store = plugin.get(store_type, Store)(store_id)
    else:
        store = 'default'
    graph = Graph(store=store, identifier=graph_id)
    source = open(fd)
    try:
        records = MARCReader(source)
        for i, triple in enumerate(process_records(records)):
            graph.add(triple)
            if i % 100 == 0:
                graph.commit()
            if i % 10000 == 0:
                # Progress indicator; parenthesized so it runs on both
                # Python 2 and 3 (the original `print i` is py2-only).
                print(i)
    finally:
        graph.commit()
        # The original never closed the file handle passed to MARCReader.
        source.close()
    return graph
示例9: registerplugins
def registerplugins():
    """
    Register the rdflib-sqlalchemy Store plugin.

    If setuptools is used to install rdflib-sqlalchemy, all the provided
    plugins are registered through entry_points; that is strongly
    recommended. However, if only distutils is available, the plugins
    must be registered manually, which is what this function does.
    """
    from rdflib.store import Store
    from rdflib import plugin
    try:
        # If the lookup succeeds the plugin is already registered.
        plugin.get("SQLAlchemy", Store)
        return
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt and SystemExit; fall through and register.
        pass
    # Register the plugins ...
    plugin.register(
        "SQLAlchemy",
        Store,
        "rdflib_sqlalchemy.store",
        "SQLAlchemy",
    )
示例10: testAggregateRaw
def testAggregateRaw():
    """
    Parse three N3 documents into three graphs sharing one IOMemory
    store, then verify triple lookup, length, membership, and
    triples_choices on a ReadOnlyGraphAggregate over all three.
    """
    memStore = plugin.get('IOMemory', Store)()
    graphs = [Graph(memStore) for _ in range(3)]
    for n3_source, target in zip((testGraph1N3, testGraph2N3, testGraph3N3), graphs):
        target.parse(StringIO(n3_source), format='n3')
    G = ReadOnlyGraphAggregate(graphs)
    # Test triples
    assert len(list(G.triples((None, RDF.type, None)))) == 4
    assert len(list(G.triples((URIRef("http://test/bar"), None, None)))) == 2
    assert len(list(G.triples((None, URIRef("http://test/d"), None)))) == 3
    # Test __len__
    assert len(G) == 8
    # Test __contains__
    assert (URIRef("http://test/foo"), RDF.type, RDFS.Resource) in G
    barPredicates = [URIRef("http://test/d"), RDFS.isDefinedBy]
    assert len(list(G.triples_choices((URIRef("http://test/bar"), barPredicates, None)))) == 2
示例11: process_request
def process_request(self, request):
    """
    Middleware hook: attach a freshly-constructed rdflib store to the
    incoming request, configured from settings.STORE ('TYPE', and the
    optional 'ID' and 'CONFIG' keys). Always returns None so request
    processing continues.
    """
    cfg = settings.STORE
    store_id = URIRef(cfg['ID']) if 'ID' in cfg else None
    store_config = Literal(cfg['CONFIG']) if 'CONFIG' in cfg else None
    request.store = plugin.get(cfg['TYPE'], Store)(store_id, store_config)
    return None
示例12: get_rdflib_serializer
def get_rdflib_serializer(name, media_type, plugin_name):
    """
    Build and return a new RDFLibSerializer subclass named `name` whose
    class attributes wire up the given media type and the rdflib
    serializer plugin registered under `plugin_name`.
    """
    class_attrs = {
        'plugin_name': plugin_name,
        'media_type': media_type,
        'rdflib_serializer': plugin.get(plugin_name, Serializer),
    }
    return type(name, (RDFLibSerializer,), class_attrs)
示例13: test_concurrent2
def test_concurrent2():
    """
    Regression test: adding triples to one graph while iterating another
    graph backed by the same IOMemory store must not invalidate the
    iterator, and every original triple must still be visited.
    """
    dns = Namespace(u"http://www.example.com/")
    store = plugin.get("IOMemory", Store)()
    g1 = Graph(store=store)
    g2 = Graph(store=store)
    for text in (u"test", u"test2", u"test3"):
        g1.add((dns.Name, dns.prop, Literal(text)))
    expected = len(g1)
    visited = 0
    for triple in g1.triples((None, None, None)):
        visited += 1
        g2.add(triple)
        # The next add introduces a brand-new subject that must be indexed
        # in the IOMemory store's __subjectIndex dictionary — historically
        # this invalidated the iterator being used to walk g1.
        g2.add((dns.Name1, dns.prop1, Literal(u"test")))
        g2.add((dns.Name1, dns.prop, Literal(u"test")))
        g2.add((dns.Name, dns.prop, Literal(u"test4")))
    assert visited == expected
示例14: main
def main():
    """
    Read bus-stop records from a CSV file, convert each row to RDF,
    serialize the graph as Turtle, and archive the output file in a
    date-stamped zip.
    """
    # root = tk.Tk()
    # root.withdraw()
    # inFile = filedialog.askopenfilename()
    pathf = "/Users/patrick/3cixty/IN/RM/"
    inFile = pathf + "bus-stops-10-06-15.csv"
    outFile = pathf + "bus.ttl"
    # Renamed from `csv` so the local does not shadow the stdlib csv module.
    rows = readCsv(inFile)
    next(rows, None)  # FILE WITH HEADERS — skip the header row
    store = plugin.get('IOMemory', Store)()
    g = Graph(store)
    graph = ConjunctiveGraph(store)
    prefixes = definePrefixes()
    print('Binding Prefixes')
    bindingPrefixes(graph, prefixes)
    print('Creating graph...')
    for row in rows:
        lstData = createRDF(row)
        createGraph(lstData, g)
    # NOTE(review): this call re-processes the last row a second time before
    # serializing; `g.serialize(outFile, ...)` alone is probably what was
    # intended — confirm against createGraph's behavior before changing.
    createGraph(lstData, g).serialize(outFile, format='turtle')
    nzip = pathf + time.strftime("%Y-%m-%d") + '.zip'
    zf = zipfile.ZipFile(nzip, mode='w')
    try:
        print('Creating zip file...')
        zf.write(outFile)
    finally:
        # Ensure the archive is closed even if zf.write fails.
        zf.close()
    print('DONE!')
示例15: registerplugins
def registerplugins():
    """
    Register all rdfextras SPARQL plugins with rdflib.

    If rdfextras is installed with setuptools, all plugins are registered
    through entry_points; that is strongly recommended. If only distutils
    is available, the plugins must be registered manually, which is what
    this function does.
    """
    from rdflib import plugin
    from rdflib.query import Processor
    try:
        # If the lookup succeeds the plugins are already registered.
        plugin.get('sparql', Processor)
        return
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt and SystemExit; fall through and register.
        pass
    from rdflib.query import ResultParser, ResultSerializer, Result
    plugin.register('sparql', Result,
                    'rdfextras.sparql.query', 'SPARQLQueryResult')
    plugin.register('sparql', Processor,
                    'rdfextras.sparql.processor', 'Processor')
    plugin.register('html', ResultSerializer,
                    'rdfextras.sparql.results.htmlresults', 'HTMLResultSerializer')
    plugin.register('xml', ResultSerializer,
                    'rdfextras.sparql.results.xmlresults', 'XMLResultSerializer')
    plugin.register('json', ResultSerializer,
                    'rdfextras.sparql.results.jsonresults', 'JSONResultSerializer')
    plugin.register('xml', ResultParser,
                    'rdfextras.sparql.results.xmlresults', 'XMLResultParser')
    plugin.register('json', ResultParser,
                    'rdfextras.sparql.results.jsonresults', 'JSONResultParser')