This article collects typical usage examples of the Python method askomics.libaskomics.rdfdb.QueryLauncher.QueryLauncher.update_query_insert_data. If you are wondering what QueryLauncher.update_query_insert_data does, how to use it, or where to find examples of it, the curated example below may help. You can also explore the class the method belongs to, askomics.libaskomics.rdfdb.QueryLauncher.QueryLauncher.
The section below shows 1 code example of the QueryLauncher.update_query_insert_data method; examples are sorted by popularity by default. You can upvote the examples you like or find useful, and your votes help the system recommend better Python code examples.
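Before the full example, here is a minimal sketch of the call pattern the example relies on. The helper name insert_triples and its parameters are illustrative, not part of the library: settings and session stand for whatever the surrounding Pyramid view already holds (self.settings and self.request.session in the example below), and the shape of the triple list is inferred from how the example uses it rather than from a documented API.

from askomics.libaskomics.rdfdb.QueryLauncher import QueryLauncher

def insert_triples(settings, session, triples):
    """Insert a small batch of triples via a SPARQL INSERT DATA update.

    Hypothetical helper: `settings` and `session` are the objects the
    view already holds; `triples` is the list built by the converter.
    """
    ql = QueryLauncher(settings, session)
    if triples:  # the example below guards every call the same way
        ql.update_query_insert_data(triples)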
Example 1: load_data_into_graph
# Required import: from askomics.libaskomics.rdfdb.QueryLauncher import QueryLauncher [as alias]
# Or: from askomics.libaskomics.rdfdb.QueryLauncher.QueryLauncher import update_query_insert_data [as alias]
# The example also uses the standard-library modules re and tempfile, plus the project's SourceFileConvertor class.
def load_data_into_graph(self):
    """Convert tabulated files to Turtle according to the column types set by the user."""
    data = {}
    sfc = SourceFileConvertor(self.settings, self.request.session)

    body = self.request.json_body
    file_name = body["file_name"]
    col_types = body["col_types"]
    limit = body["limit"]

    start_position_list = []
    attribute_list_output = []
    request_output_has_header_domain = {}
    request_output_domain = []
    request_abstraction_output = []

    missing_headers, new_headers, present_headers, attribute_code, relation_code, domain_code = sfc.get_turtle(
        file_name, col_types, limit, start_position_list, attribute_list_output,
        request_output_has_header_domain, request_output_domain, request_abstraction_output)

    ql = QueryLauncher(self.settings, self.request.session)

    if not limit:
        # Small payloads (<= 200 objects) use INSERT DATA; larger ones are
        # written to temporary Turtle files and bulk-loaded instead.
        if len(attribute_list_output) + len(request_abstraction_output) + len(request_output_domain) > 200:
            first = True
            keep_going = True
            data["temp_ttl_file"] = []
            while keep_going:
                with tempfile.NamedTemporaryFile(dir="askomics/ttl/", suffix=".ttl", mode="w", delete=False) as fp:
                    # Temp files are removed by the clean_ttl_directory route.
                    l_empty = []
                    ql.build_query_load(l_empty, fp, header=True)
                    if first:
                        # Headers, abstraction, domain, and start positions are
                        # only written into the first temp file.
                        for header_sparql_request in request_output_has_header_domain.values():
                            if len(header_sparql_request) > 0:
                                self.log.info("header_sparql_request - number of objects: " + str(len(header_sparql_request)))
                                ql.build_query_load(header_sparql_request, fp)
                        if len(request_abstraction_output) > 0:
                            self.log.info("request_abstraction_output - number of objects: " + str(len(request_abstraction_output)))
                            ql.build_query_load(request_abstraction_output, fp)
                        if len(request_output_domain) > 0:
                            self.log.info("request_output_domain - number of objects: " + str(len(request_output_domain)))
                            ql.build_query_load(request_output_domain, fp)
                        if len(start_position_list) > 0:
                            self.log.info("start_position_list - number of objects: " + str(len(start_position_list)))
                            ql.build_query_load(start_position_list, fp)
                        first = False
                    if len(attribute_list_output) > 0:
                        # Consume attributes in chunks of at most 60000 entries per temp file.
                        subattribute_list_output = attribute_list_output[:min(60000, len(attribute_list_output))]
                        del attribute_list_output[:min(60000, len(attribute_list_output))]
                        if len(attribute_list_output) == 0:
                            keep_going = False
                        self.log.info("subattribute_list_output - number of objects: " + str(len(subattribute_list_output)))
                        ql.build_query_load(subattribute_list_output, fp)
                    else:
                        keep_going = False
                urlbase = re.search(r'(http://.*)/.*', self.request.current_route_url())
                ql.update_query_load(fp, urlbase.group(1))
                data["temp_ttl_file"].append(fp.name)
        else:
            self.log.info(" ===> insert data, number of objects: " + str(len(attribute_list_output) + len(request_abstraction_output) + len(request_output_domain)))
            for header_sparql_request in request_output_has_header_domain.values():
                if len(header_sparql_request) > 0:
                    ql.update_query_insert_data(header_sparql_request)
            if len(request_abstraction_output) > 0:
                ql.update_query_insert_data(request_abstraction_output)
            if len(request_output_domain) > 0:
                ql.update_query_insert_data(request_output_domain)
            if len(start_position_list) > 0:
                ql.update_query_insert_data(start_position_list)
            if len(attribute_list_output) > 0:
                ql.update_query_insert_data(attribute_list_output)
            # f = open('askomics/ttl/Insert.ttl', 'r')
            # urlbase = re.search(r'(http://.*)/.*', self.request.current_route_url())
            # ql.update_query_load(f, urlbase.group(1))

    data["missing_headers"] = missing_headers
    data["new_headers"] = new_headers
    data["present_headers"] = present_headers
    data["attribute_code"] = attribute_code
    data["relation_code"] = relation_code
    data["domain_code"] = domain_code
    return data
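A note on the branch structure above: payloads of at most 200 objects are pushed directly with update_query_insert_data, while larger ones are serialized to temporary .ttl files (at most 60000 attribute entries each) and bulk-loaded with update_query_load, presumably to keep individual INSERT DATA queries within endpoint size limits. The chunking itself is a plain front-of-list consumption pattern; here is a standalone sketch of it (the function name, callback, and keyword argument are illustrative; only the 60000 figure comes from the example):

def write_in_chunks(items, write_chunk, chunk_size=60000):
    """Consume `items` front to back, handing at most `chunk_size`
    elements at a time to `write_chunk` (e.g. one temp .ttl file each).
    Hypothetical helper mirroring the while loop in the example above."""
    while items:
        chunk = items[:chunk_size]
        del items[:chunk_size]
        write_chunk(chunk)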