This article collects typical usage examples of the Python method askomics.libaskomics.rdfdb.SparqlQueryBuilder.SparqlQueryBuilder.header_sparql_config. If you are wondering what SparqlQueryBuilder.header_sparql_config does, how to use it, or where to find examples of it in use, the curated code samples below should help. You can also explore the enclosing class, askomics.libaskomics.rdfdb.SparqlQueryBuilder.SparqlQueryBuilder, for more context.
Eight code examples of SparqlQueryBuilder.header_sparql_config are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
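Before diving in, here is the calling pattern all eight examples share, distilled into a minimal sketch. The settings and session dicts are stand-ins for the live objects AskOmics passes to every component constructor, and the query text is invented:

from askomics.libaskomics.rdfdb.SparqlQueryBuilder import SparqlQueryBuilder

# Placeholders: in a running AskOmics instance these are the application
# settings and the per-user session, not empty dicts.
settings = {}
session = {}

sqb = SparqlQueryBuilder(settings, session)

# The examples below pass either the query body or an empty string.
# The returned PREFIX header is prepended to a query before execution,
# or handed to QueryLauncher.insert_data together with the triples.
req = "SELECT DISTINCT ?s WHERE { ?s ?p ?o }\nLIMIT 10"
prefixes = sqb.header_sparql_config(req)
query = prefixes + req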
Example 1: insert_metadatas
# Required import: from askomics.libaskomics.rdfdb.SparqlQueryBuilder import SparqlQueryBuilder [as alias]
# or: from askomics.libaskomics.rdfdb.SparqlQueryBuilder.SparqlQueryBuilder import header_sparql_config [as alias]
def insert_metadatas(self, accessL):
    """
    Insert the metadata into the parent graph
    """
    self.log.debug('--- insert_metadatas ---')
    sqb = SparqlQueryBuilder(self.settings, self.session)
    query_launcher = QueryLauncher(self.settings, self.session)
    valAccess = 'public' if accessL else 'private'

    ttl = '<' + self.graph + '> prov:generatedAtTime "' + self.timestamp + '"^^xsd:dateTime .\n'
    ttl += '<' + self.graph + '> dc:creator "' + self.session['username'] + '" .\n'
    ttl += '<' + self.graph + '> :accessLevel "' + valAccess + '" .\n'
    ttl += '<' + self.graph + '> foaf:Group "' + self.session['group'] + '" .\n'
    ttl += '<' + self.graph + '> prov:wasDerivedFrom "' + self.name + '" .\n'
    ttl += '<' + self.graph + '> dc:hasVersion "' + get_distribution('Askomics').version + '" .\n'
    ttl += '<' + self.graph + '> prov:describesService "' + os.uname()[1] + '" .\n'

    if self.is_defined("askomics.endpoint"):
        ttl += '<' + self.graph + '> prov:atLocation "' + self.get_param("askomics.endpoint") + '" .\n'
    else:
        raise ValueError("askomics.endpoint does not exist.")

    sparql_header = sqb.header_sparql_config('')
    query_launcher.insert_data(ttl, self.graph, sparql_header)
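With invented values, the ttl string assembled above renders to turtle lines like the following. This is illustrative only; the prov:, dc: and xsd: prefixes come from the header returned by header_sparql_config(''):

# All values below are made up for illustration.
example_ttl = (
    '<urn:askomics:graph:genes_20160512> prov:generatedAtTime "2016-05-12T10:00:00"^^xsd:dateTime .\n'
    '<urn:askomics:graph:genes_20160512> dc:creator "jdoe" .\n'
    '<urn:askomics:graph:genes_20160512> :accessLevel "private" .\n'
)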
Example 2: get_metadatas
# Required import: from askomics.libaskomics.rdfdb.SparqlQueryBuilder import SparqlQueryBuilder [as alias]
# or: from askomics.libaskomics.rdfdb.SparqlQueryBuilder.SparqlQueryBuilder import header_sparql_config [as alias]
def get_metadatas(self):
    """
    Create the metadata and insert it into the AskOmics main graph.
    """
    self.log.debug("====== INSERT METADATAS ======")
    sqb = SparqlQueryBuilder(self.settings, self.session)
    ql = QueryLauncher(self.settings, self.session)

    ttlMetadatas = "<" + self.metadatas['graphName'] + "> " + "prov:generatedAtTime " + '"' + self.metadatas['loadDate'] + '"^^xsd:dateTime .\n'
    ttlMetadatas += "<" + self.metadatas['graphName'] + "> " + "dc:creator " + '"' + self.metadatas['username'] + '"^^xsd:string .\n'
    ttlMetadatas += "<" + self.metadatas['graphName'] + "> " + "prov:wasDerivedFrom " + '"' + self.metadatas['fileName'] + '"^^xsd:string .\n'
    ttlMetadatas += "<" + self.metadatas['graphName'] + "> " + "dc:hasVersion " + '"' + self.metadatas['version'] + '"^^xsd:string .\n'
    ttlMetadatas += "<" + self.metadatas['graphName'] + "> " + "prov:describesService " + '"' + self.metadatas['server'] + '"^^xsd:string .'

    sparqlHeader = sqb.header_sparql_config("")
    ql.insert_data(ttlMetadatas, self.get_param("askomics.graph"), sparqlHeader)
Example 3: load_data_from_file
# Required import: from askomics.libaskomics.rdfdb.SparqlQueryBuilder import SparqlQueryBuilder [as alias]
# or: from askomics.libaskomics.rdfdb.SparqlQueryBuilder.SparqlQueryBuilder import header_sparql_config [as alias]
def load_data_from_file(self, fp, urlbase):
    """
    Load a locally created ttl file into the triplestore, either over http
    (with load_data(url)) or by file name for Fuseki (with fuseki_load_data(fp.name)).

    :param fp: a file handle for the file to load
    :param urlbase: the base URL of the current askomics instance. It is used to
        let triplestores access some askomics temporary ttl files over http.
    :return: a dictionary with information on the success or failure of the operation
    """
    if not fp.closed:
        fp.flush()  # Required: otherwise the data might not actually be written to the file before it is sent to the triplestore

    sqb = SparqlQueryBuilder(self.settings, self.session)
    ql = QueryLauncher(self.settings, self.session)

    graphName = "askomics:graph:" + self.name + '_' + self.timestamp
    self.metadatas['graphName'] = graphName
    ttlNamedGraph = "<" + graphName + "> " + "rdfg:subGraphOf" + " <" + self.get_param("askomics.graph") + "> ."
    sparqlHeader = sqb.header_sparql_config("")
    ql.insert_data(ttlNamedGraph, self.get_param("askomics.graph"), sparqlHeader)

    url = urlbase + "/ttl/" + os.path.basename(fp.name)
    self.log.debug(url)
    data = {}
    try:
        if self.is_defined("askomics.file_upload_url"):
            queryResults = ql.upload_data(fp.name, graphName)
            self.metadatas['server'] = queryResults.headers['Server']
        else:
            queryResults = ql.load_data(url, graphName)
            self.metadatas['server'] = queryResults.info()['server']
        self.metadatas['loadDate'] = self.timestamp
        data['status'] = 'ok'
    except Exception as e:
        self._format_exception(e, data=data)
    finally:
        if self.settings["askomics.debug"]:
            data['url'] = url
        else:
            os.remove(fp.name)  # Everything ok, remove the temp file

    self.get_metadatas()
    return data
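A hedged sketch of how this method is typically driven, mirroring what persist (Example 7) does: write a turtle chunk to a temp file in the ttl directory, then hand the open-and-closed file handle over. The name source, the chunk variable, and the urlbase value are illustrative assumptions:

import tempfile

# `source` stands for the object defining the methods above; `chunk` is a
# string of already-serialized turtle triples. Both are assumed to exist.
fp = tempfile.NamedTemporaryFile(dir=source.get_ttl_directory(),
                                 prefix="tmp_demo", suffix=".ttl",
                                 mode="w", delete=False)
fp.write(source.get_turtle_template(chunk) + '\n')
fp.write(chunk)
fp.close()

# urlbase must be an address the triplestore itself can reach.
data = source.load_data_from_file(fp, "http://localhost:6543")
if data.get('status') == 'failed':
    raise RuntimeError(data)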
Example 4: importMoSate
# Required import: from askomics.libaskomics.rdfdb.SparqlQueryBuilder import SparqlQueryBuilder [as alias]
# or: from askomics.libaskomics.rdfdb.SparqlQueryBuilder.SparqlQueryBuilder import header_sparql_config [as alias]
def importMoSate(self, mo, state):
    '''
    Import into the TPS all the triples necessary to define an askomics module
    '''
    rdf = ":" + self.escape['entity'](mo['module']) + " rdfs:label " + self.escape['text'](mo['module']) + ";\n"
    rdf += " rdfs:comment " + self.escape['text'](mo['comment']) + ";\n"
    rdf += " :module_version " + self.escape['text'](mo['version']) + ";\n"
    rdf += " :module_state " + self.escape['text'](state)

    if state == 'ok':
        rdf += ";\n :module_graph " + '<' + mo['graph'] + '>.\n'
    else:
        rdf += ".\n"

    sqb = SparqlQueryBuilder(self.settings, self.session)
    ql = QueryLauncher(self.settings, self.session)
    sh = sqb.header_sparql_config('')
    ql.insert_data(rdf, self.graph_modules, sh)
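The self.escape dict used above is a small dispatch table mapping a value kind to an escaping function. The actual AskOmics rules are not shown on this page; the lambdas below are an illustrative reconstruction of the idea only:

# Illustrative only: quote free text for turtle, restrict entity names to
# safe characters. Not the real AskOmics escaping rules.
escape = {
    'text': lambda s: '"' + s.replace('\\', '\\\\').replace('"', '\\"') + '"',
    'entity': lambda s: ''.join(c for c in s if c.isalnum() or c in '_-'),
}

rdf = ":" + escape['entity']('my module') + " rdfs:label " + escape['text']('my module')
print(rdf)  # :mymodule rdfs:label "my module"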
Example 5: build_sparql_query_from_json
# Required import: from askomics.libaskomics.rdfdb.SparqlQueryBuilder import SparqlQueryBuilder [as alias]
# or: from askomics.libaskomics.rdfdb.SparqlQueryBuilder.SparqlQueryBuilder import header_sparql_config [as alias]
def build_sparql_query_from_json(self, variates, constraintesRelations, limit, sendRequestToTPS):
    self.log.debug("variates")
    self.log.debug(variates)
    self.log.debug("constraintesRelations")
    self.log.debug(constraintesRelations)

    sqb = SparqlQueryBuilder(self.settings, self.session)
    ql = QueryLauncher(self.settings, self.session)
    # res is only consumed by the commented-out code below
    res = ql.execute_query(sqb.get_list_named_graphs().query)

    namedGraphs = []
    #for indexResult in range(len(res['results']['bindings'])):
    #    namedGraphs.append(res['results']['bindings'][indexResult]['g']['value'])

    req = ""
    req += "SELECT DISTINCT " + ' '.join(variates) + "\n"
    # TODO OFI: external services do not work and, in any case, graphs have to be selected by the user in the UI
    #for graph in namedGraphs:
    #    req += "FROM " + "<" + graph + ">" + "\n"
    req += "WHERE \n"
    req += self.buildRecursiveBlock('', constraintesRelations)

    if limit is not None and limit > 0:
        req += " LIMIT " + str(limit)

    prefixes = sqb.header_sparql_config(req)
    query = prefixes + req

    results = {}
    if sendRequestToTPS:
        results = ql.process_query(query)
    else:
        # add a comment inside the query to inform the user
        query = "# endpoint = " + self.get_param("askomics.endpoint") + "\n" + query

    return results, query
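Note the design choice at the end: with sendRequestToTPS set to False the method skips execution and returns the fully prefixed query text, which makes it easy to preview a query before running it. A hedged usage sketch, where qm stands for the object defining this method and the constraints structure is left as a placeholder for whatever buildRecursiveBlock expects:

# Hypothetical preview call; `qm` and `constraints` are assumptions.
variates = ['?gene', '?label']
constraints = [...]  # placeholder: the shape expected by buildRecursiveBlock

results, query = qm.build_sparql_query_from_json(variates, constraints,
                                                 limit=30,
                                                 sendRequestToTPS=False)
print(query)          # "# endpoint = ..." comment, PREFIX header, then the SELECT
assert results == {}  # nothing was sent to the triplestore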
Example 6: build_sparql_query_from_json2
# Required import: from askomics.libaskomics.rdfdb.SparqlQueryBuilder import SparqlQueryBuilder [as alias]
# or: from askomics.libaskomics.rdfdb.SparqlQueryBuilder.SparqlQueryBuilder import header_sparql_config [as alias]
def build_sparql_query_from_json2(self, variates, constraintes_relations, limit, send_request_to_TPS):
    """
    Build a sparql query from json
    """
    self.log.debug("variates")
    self.log.debug(variates)
    self.log.debug("constraintes_relations")
    self.log.debug(constraintes_relations)

    sqb = SparqlQueryBuilder(self.settings, self.session)
    ql = QueryLauncher(self.settings, self.session)

    req = ""
    req += "SELECT DISTINCT " + ' '.join(variates) + "\n"
    # TODO OFI: external services do not work and, in any case, graphs have to be selected by the user in the UI
    #for graph in namedGraphs:
    #    req += "FROM " + "<" + graph + ">" + "\n"
    req += "WHERE \n"
    req += self.build_recursive_block('', constraintes_relations)

    if limit is not None and limit > 0:
        req += " LIMIT " + str(limit)

    prefixes = sqb.header_sparql_config(req)
    query = prefixes + req

    results = {}
    if send_request_to_TPS:
        results = ql.process_query(query)
    else:
        # add a comment inside the query to inform the user
        query = "# endpoint = " + self.get_param("askomics.endpoint") + "\n" + query

    return results, query
Example 7: persist
# Required import: from askomics.libaskomics.rdfdb.SparqlQueryBuilder import SparqlQueryBuilder [as alias]
# or: from askomics.libaskomics.rdfdb.SparqlQueryBuilder.SparqlQueryBuilder import header_sparql_config [as alias]
def persist(self, urlbase, method):
    """
    Store the current source file in the triple store

    :param urlbase: the base URL of the current askomics instance. It is used to
        let triplestores access some askomics temporary ttl files over http.
    :param method: 'load' to write temporary ttl files and use the LOAD
        procedure; any other value uses chunked INSERT DATA queries instead.
    :return: a dictionary with information on the success or failure of the operation
    :rtype: Dict
    """
    content_ttl = self.get_turtle()
    ql = QueryLauncher(self.settings, self.session)
    # use INSERT DATA instead of the LOAD sparql procedure when the dataset is small
    total_triple_count = 0
    chunk_count = 1
    chunk = ""
    pathttl = self.get_ttl_directory()

    if method == 'load':
        fp = None
        triple_count = 0
        for triple in content_ttl:
            chunk += triple + '\n'
            triple_count += 1
            if triple_count > int(self.settings['askomics.max_content_size_to_update_database']):
                # The temp file must be accessible over http, so we place it in the askomics/ttl/ dir
                fp = tempfile.NamedTemporaryFile(dir=pathttl, prefix="tmp_" + self.metadatas['fileName'], suffix=".ttl", mode="w", delete=False)
                # We have reached the maximum chunk size: load this chunk, then start a new one
                self.log.debug("Loading ttl chunk %s file %s" % (chunk_count, fp.name))
                header_ttl = self.get_turtle_template(chunk)
                fp.write(header_ttl + '\n')
                fp.write(chunk)
                fp.close()
                data = self.load_data_from_file(fp, urlbase)
                if data['status'] == 'failed':
                    return data
                chunk = ""
                total_triple_count += triple_count
                triple_count = 0
                chunk_count += 1

        # Load the last chunk
        if triple_count > 0:
            self.log.debug("Loading ttl chunk %s (last)" % (chunk_count))
            fp = tempfile.NamedTemporaryFile(dir=pathttl, prefix="tmp_" + self.metadatas['fileName'], suffix=".ttl", mode="w", delete=False)
            header_ttl = self.get_turtle_template(chunk)
            fp.write(header_ttl + '\n')
            fp.write(chunk)
            fp.close()
            data = self.load_data_from_file(fp, urlbase)
            if data['status'] == 'failed':
                return data
            os.remove(fp.name)  # Everything ok, remove the previous temp file
        total_triple_count += triple_count

        # The data is inserted; now insert the abstraction.
        # We get the abstraction only now because the whole file must be parsed first to have category_values
        abstraction_ttl = self.get_abstraction()
        domain_knowledge_ttl = self.get_domain_knowledge()
        header_ttl = self.get_turtle_template(abstraction_ttl + "\n" + domain_knowledge_ttl)

        fp = tempfile.NamedTemporaryFile(dir=pathttl, prefix="tmp_" + self.metadatas['fileName'], suffix=".ttl", mode="w", delete=False)
        fp.write(header_ttl + '\n')
        fp.write(abstraction_ttl + '\n')
        fp.write(domain_knowledge_ttl + '\n')
        self.log.debug("Loading ttl abstraction file %s" % (fp.name))
        fp.close()
        data = self.load_data_from_file(fp, urlbase)
        if data['status'] == 'failed':
            return data
        data['total_triple_count'] = total_triple_count
        os.remove(fp.name)
    else:
        sqb = SparqlQueryBuilder(self.settings, self.session)
        graphName = "askomics:graph:" + self.name + '_' + self.timestamp
        triple_count = 0
        chunk = ""
        for triple in content_ttl:
            chunk += triple + '\n'
            triple_count += 1
            if triple_count > int(self.settings['askomics.max_content_size_to_update_database']) / 10:  # FIXME the limit is much lower than for load
                # We have reached the maximum chunk size: insert this chunk, then start a new one
                self.log.debug("Inserting ttl chunk %s" % (chunk_count))
                try:
                    header_ttl = sqb.header_sparql_config(chunk)
                    queryResults = ql.insert_data(chunk, graphName, header_ttl)
# ... (the rest of this example is omitted) ...
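Both branches of persist use the same chunking pattern: accumulate triples until a threshold, flush, reset, and flush the remainder at the end. Distilled into a standalone sketch, where threshold and flush are placeholders for the settings value and the load/insert call:

def flush_in_chunks(triples, threshold, flush):
    """Call flush(chunk) each time more than `threshold` triples have
    accumulated, then flush whatever remains at the end."""
    chunk, count = "", 0
    for triple in triples:
        chunk += triple + '\n'
        count += 1
        if count > threshold:
            flush(chunk)
            chunk, count = "", 0
    if count > 0:
        flush(chunk)

# usage sketch: flush_in_chunks(["<a> <b> <c> ."] * 5, 2, print)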
Example 8: test_generateAbstractAskomicsRDF
# Required import: from askomics.libaskomics.rdfdb.SparqlQueryBuilder import SparqlQueryBuilder [as alias]
# or: from askomics.libaskomics.rdfdb.SparqlQueryBuilder.SparqlQueryBuilder import header_sparql_config [as alias]
def test_generateAbstractAskomicsRDF(self):
    import os
    from askomics.libaskomics.rdfdb.SparqlQueryBuilder import SparqlQueryBuilder
    from askomics.libaskomics.rdfdb.QueryLauncher import QueryLauncher

    m = ModulesManager(self.settings, self.request.session)
    sqb = SparqlQueryBuilder(self.settings, self.request.session)
    ql = QueryLauncher(self.settings, self.request.session)
    sh = sqb.header_sparql_config('')

    rdf = """
    <http://bidon/relationTest> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#ObjectProperty> ;
        <http://www.w3.org/2000/01/rdf-schema#label> "relationBidon" ;
        <http://www.w3.org/2000/01/rdf-schema#domain> <http://bidon/Type1> ;
        <http://www.w3.org/2000/01/rdf-schema#range> <http://bidon/Type2>.
    <http://bidon/Type1> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
    <http://bidon/Type2> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
    """
    ql.insert_data(rdf, "urn:test:askomics", sh)
    m.generateAbstractAskomicsRDF("urn:test:askomics")

    rdf = """
    <http://bidon/relationTest> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#ObjectProperty> ;
        <http://www.w3.org/2000/01/rdf-schema#label> "relationBidon" ;
        <http://www.w3.org/2000/01/rdf-schema#domain> <http://bidon/Type1> ;
        <http://www.w3.org/2000/01/rdf-schema#range> <http://bidon/Type2>.
    <http://bidon/Type1> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
    <http://bidon/Type2> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
    <http://bidon/Type1> <http://www.w3.org/2000/01/rdf-schema#label> "Type1".
    <http://bidon/Type2> <http://www.w3.org/2000/01/rdf-schema#label> "Type2".
    <http://bidon/Attribute1> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#DatatypeProperty> ;
        <http://www.w3.org/2000/01/rdf-schema#label> "Attribute1";
        <http://www.w3.org/2000/01/rdf-schema#domain> <http://bidon/Type1> ;
        <http://www.w3.org/2000/01/rdf-schema#range> <http://www.w3.org/2001/XMLSchema#int>.
    """
    ql.insert_data(rdf, "urn:test:askomics2", sh)
    m.generateAbstractAskomicsRDF("urn:test:askomics2")

    rdf = """
    <http://bidon/relationTest> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#ObjectProperty> ;
        <http://www.w3.org/2000/01/rdf-schema#label> "relationBidon" ;
        <http://www.w3.org/2000/01/rdf-schema#domain> <http=bidon=Type1> ;
        <http://www.w3.org/2000/01/rdf-schema#range> <http=bidon=Type2>.
    <http=bidon=Type1> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
    <http=bidon=Type2> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
    <http=bidon=Type1> <http://www.w3.org/2000/01/rdf-schema#label> "Type1".
    <http=bidon=Type2> <http://www.w3.org/2000/01/rdf-schema#label> "Type2".
    <http://bidon/Attribute1> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#DatatypeProperty> ;
        <http://www.w3.org/2000/01/rdf-schema#label> "Attribute1";
        <http://www.w3.org/2000/01/rdf-schema#domain> <http=bidon=Type1> ;
        <http://www.w3.org/2000/01/rdf-schema#range> <http://www.w3.org/2001/XMLSchema#int>.
    """
    ql.insert_data(rdf, "urn:test:askomics3", sh)
    m.generateAbstractAskomicsRDF("urn:test:askomics3")