本文整理汇总了Python中rdflib.ConjunctiveGraph.addN方法的典型用法代码示例。如果您正苦于以下问题:Python ConjunctiveGraph.addN方法的具体用法?Python ConjunctiveGraph.addN怎么用?Python ConjunctiveGraph.addN使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类rdflib.ConjunctiveGraph
的用法示例。
在下文中一共展示了ConjunctiveGraph.addN方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __store_graph
# 需要导入模块: from rdflib import ConjunctiveGraph [as 别名]
# 或者: from rdflib.ConjunctiveGraph import addN [as 别名]
def __store_graph(cur_g, rdf_iri_string, d_dir):
    """Serialize the named graph ``cur_g`` as JSON-LD into a file under ``d_dir``.

    The destination path is derived from ``rdf_iri_string`` via ``find_paths``;
    if a file already exists there, it is loaded and the context of ``cur_g``
    is replaced before re-adding its quads. Returns the destination file path,
    or None if an error was recorded in ``reperr``.
    """
    dest_file = None  # defined up front so the except clause never hits a NameError
    try:
        res_dir, dest_file = find_paths(
            rdf_iri_string, args.base + os.sep,
            "https://w3id.org/oc/corpus/", 10000, 1000)
        dest_dir = res_dir.replace(args.base + os.sep, d_dir + os.sep)
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        cur_file = dest_file.replace(res_dir, dest_dir)
        if os.path.exists(cur_file):
            c_graph = __load_graph(cur_file)
        else:
            c_graph = ConjunctiveGraph()
        # Drop any stale copy of this named graph before re-adding its quads.
        c_graph.remove_context(c_graph.get_context(cur_g.identifier))
        c_graph.addN(item + (cur_g.identifier,) for item in cur_g)
        # Reuse cur_file (same value) instead of recomputing the replace().
        with open(cur_file, "w") as f:
            cur_json_ld = json.loads(
                c_graph.serialize(format="json-ld", context=context_json))
            cur_json_ld["@context"] = context_path
            json.dump(cur_json_ld, f, indent=4)
        # repok.add_sentence("File '%s' added." % cur_file)
        return dest_file
    except Exception as e:
        reperr.add_sentence("[5] It was impossible to store the RDF statements in %s. %s" %
                            (dest_file, str(e)))
示例2: test_quad_contexts
# 需要导入模块: from rdflib import ConjunctiveGraph [as 别名]
# 或者: from rdflib.ConjunctiveGraph import addN [as 别名]
def test_quad_contexts():
    """addN with a 4-tuple must store the triple under the given named context."""
    graph = ConjunctiveGraph()
    a, b = URIRef('urn:a'), URIRef('urn:b')
    graph.get_context(a).add((a, a, a))
    graph.addN([(b, b, b, b)])
    expected = {(a, a, a), (b, b, b)}
    assert set(graph) == expected
    # Every quad's fourth component must be a Graph, not a bare identifier.
    for quad in graph.quads():
        assert isinstance(quad[3], Graph)
示例3: get_where
# 需要导入模块: from rdflib import ConjunctiveGraph [as 别名]
# 或者: from rdflib.ConjunctiveGraph import addN [as 别名]
def get_where(graph, args):
    """Collect all quads matching the (s, p, o, c) pattern from *args*.

    Returns a fresh ConjunctiveGraph holding every matching triple together
    with the identifier of the context it was found in. A ``None`` context
    in the pattern matches every context.
    """
    subj, pred, obj, ctx = _spoc(args)
    collected = ConjunctiveGraph()
    for subgraph in graph.store.contexts((subj, pred, obj)):
        if ctx is not None and subgraph.identifier != ctx:
            continue
        gid = subgraph.identifier
        collected.addN(
            (s, p, o, gid)
            for s, p, o in subgraph.triples((None, None, None)))
    return collected
示例4: uploadDocumentContext
# 需要导入模块: from rdflib import ConjunctiveGraph [as 别名]
# 或者: from rdflib.ConjunctiveGraph import addN [as 别名]
def uploadDocumentContext(annfile):
    """Convert one annotation file to RDF and POST it as N-Quads to the repo.

    The document id is the annotation file's basename without its extension;
    its triples are uploaded under the matching named graph. Returns the raw
    HTTP response body.
    """
    basename = annfile.split('/')[-1]
    docid = basename.split('.')[0]
    graph = ConjunctiveGraph(identifier=gid['tempUploadGraph'])
    graph.addN((s, p, o, gid[docid]) for (s, p, o) in ann2rdf(annfile))
    response = requests.post(
        AGVM_VC_REPO + "/statements",
        headers={'Content-Type': 'text/x-nquads'},
        data=graph.serialize(format='nquads'),
        auth=AG_AUTH,
        params={"commit": 1000},
    )
    return response.content
示例5: graphWithoutMetadata
# 需要导入模块: from rdflib import ConjunctiveGraph [as 别名]
# 或者: from rdflib.ConjunctiveGraph import addN [as 别名]
def graphWithoutMetadata(g, ignorePredicates=None):
    """
    Graph filter that removes any statements whose subjects are
    contexts in the graph and also any statements with the given
    predicates.

    :param g: source ConjunctiveGraph.
    :param ignorePredicates: optional iterable of predicates to drop
        (defaults to none; the old ``=[]`` mutable default is avoided).
    :return: a new ConjunctiveGraph with the filtered quads.
    """
    if ignorePredicates is None:
        ignorePredicates = []
    # Sets give O(1) membership tests inside the per-quad loop.
    ctxs = {ctx.identifier for ctx in g.contexts()}
    skip = set(ignorePredicates)
    out = ConjunctiveGraph()
    for stmt in g.quads((None, None, None)):
        if stmt[0] not in ctxs and stmt[1] not in skip:
            out.addN([stmt])
    return out
示例6: _graph
# 需要导入模块: from rdflib import ConjunctiveGraph [as 别名]
# 或者: from rdflib.ConjunctiveGraph import addN [as 别名]
def _graph(self):
    """Lazy loading of the _graph attribute
    This property getter will be called only when the instance attribute self._graph has been deleted.
    In that case, it will load the graph from self.identifier.
    This is used by the `from_iri`:meth: class method,
    to ensure that graphs are only loaded when required...
    """
    # Fast path: the graph was already downloaded and cached.
    if '_graph' in self.__dict__:
        return self.__dict__['_graph']
    # pop() so the one-shot request configuration is consumed exactly once.
    headers = self.__dict__.pop('_headers')
    http = self.__dict__.pop('_http')
    # Fetch the document, not the fragment: strip everything after '#'.
    base_iri = self._identifier.split('#', 1)[0]
    effective_headers = dict(DEFAULT_REQUEST_HEADERS)
    if headers:
        effective_headers.update(headers)
    http = http or DEFAULT_HTTP_CLIENT
    LOG.info('downloading <%s>', base_iri)
    response, content = http.request(base_iri, "GET", headers=effective_headers)
    LOG.debug('got %s %s %s', response.status, response['content-type'], response.fromcache)
    # Anything outside the 2xx range is surfaced as an HTTP error.
    if response.status // 100 != 2:
        raise HttpLib2ErrorWithResponse(response.reason, response, content)
    source = StringInputSource(content)
    # Media type only; parameters such as charset are discarded.
    ctype = response['content-type'].split(';',1)[0]
    g = ConjunctiveGraph(identifier=base_iri)
    g.addN(BACKGROUND_KNOWLEDGE.quads())
    g.parse(source, base_iri, ctype)
    _fix_default_graph(g)
    # if available, load API Documentation in a separate graph
    links = response.get('link')
    if links:
        # httplib2 returns a single Link header as a string, several as a list.
        if type(links) != list:
            links = [links]
        for link in links:
            match = APIDOC_RE.match(link)
            if match:
                self._api_doc = apidoc_iri = URIRef(match.groups()[0])
                # Avoid recursing into ourselves when the doc IRI is this IRI.
                if apidoc_iri != self.identifier:
                    apidoc = ApiDocumentation.from_iri(apidoc_iri, headers, http)
                    g.addN(apidoc.graph.quads())
                # NOTE(review): break reconstructed as inside `if match:` —
                # only the first apidoc link is honored; confirm against upstream.
                break
    self.__dict__['_graph'] = g
    return g
示例7: __init__
# 需要导入模块: from rdflib import ConjunctiveGraph [as 别名]
# 或者: from rdflib.ConjunctiveGraph import addN [as 别名]
def __init__(self, location, repository, inmemory=False):
    """Open (creating if needed) the RDFLib store backing this repository.

    When *inmemory* is true, the on-disk store is copied quad-by-quad into
    an in-memory ConjunctiveGraph and then closed; otherwise the on-disk
    graph is used directly.
    """
    super(RDFLibStore, self).__init__(location, repository)
    self.inmemory = inmemory
    self.closed = False
    graphid = URIRef("file://" + self.repository)
    disk_graph = ConjunctiveGraph(store=self._storeid(), identifier=graphid)
    # Create the backing store only when it does not exist yet.
    disk_graph.open(self.location, create=not os.path.exists(self.location))
    log = logging.getLogger(__name__)
    if inmemory:
        log.debug("Loading store into memory")
        mem_graph = ConjunctiveGraph(identifier=graphid)
        mem_graph.addN(disk_graph.quads())
        disk_graph.close()
        self.graph = mem_graph
    else:
        log.debug("Using on-disk store")
        self.graph = disk_graph
示例8: store
# 需要导入模块: from rdflib import ConjunctiveGraph [as 别名]
# 或者: from rdflib.ConjunctiveGraph import addN [as 别名]
def store(self, cur_g, base_dir, base_iri, context_path, tmp_dir=None,
          override=False, already_processed=None, store_now=True):
    """Persist the statements of ``cur_g`` into the file addressing its subject.

    :param cur_g: graph to store; its single subject determines the file path.
    :param already_processed: optional cache mapping file paths to the merged
        graphs already handled in this run; updated in place and returned.
    :param override: when True, skip merging with previously stored data.
    :param store_now: when True, write the merged graph to disk immediately.
    :return: the updated ``already_processed`` dict, or None on error
        (an empty ``cur_g`` also yields None, as before).
    """
    # NOTE(review): the original signature used a mutable default
    # (already_processed={}), which silently shares the cache across all
    # calls that omit the argument; a fresh dict per call is the safe fix.
    if already_processed is None:
        already_processed = {}
    self.repok.new_article()
    self.reperr.new_article()
    if len(cur_g) > 0:
        cur_subject = set(cur_g.subjects(None, None)).pop()
        cur_dir_path, cur_file_path = find_paths(
            str(cur_subject), base_dir, base_iri, self.dir_split, self.n_file_item)
        try:
            if not os.path.exists(cur_dir_path):
                os.makedirs(cur_dir_path)
            final_g = ConjunctiveGraph()
            final_g.addN(item + (cur_g.identifier,) for item in cur_g)
            # Merging the data
            if not override:
                if cur_file_path in already_processed:
                    stored_g = already_processed[cur_file_path]
                    stored_g.addN(final_g.quads((None, None, None, None)))
                    final_g = stored_g
                elif os.path.exists(cur_file_path):
                    # This is a conjunctive graph that contains all the triples (and graphs)
                    # the file is actually defining - they could be more than those using
                    # 'cur_subject' as subject.
                    final_g = self.load(cur_file_path, cur_g, tmp_dir)
            already_processed[cur_file_path] = final_g
            if store_now:
                self.__store_in_file(final_g, cur_file_path, context_path)
            return already_processed
        except Exception as e:
            self.reperr.add_sentence("[5] It was impossible to store the RDF statements in %s. %s" %
                                     (cur_file_path, str(e)))
    return None
示例9: accident_coverage_triples
# 需要导入模块: from rdflib import ConjunctiveGraph [as 别名]
# 或者: from rdflib.ConjunctiveGraph import addN [as 别名]
txy_list.append((t, x, y))
accident_url_list.append(ident)
yield from accident_coverage_triples(txy_list, accident_url_list)
# Command-line interface: the two input JSON files and the Turtle output file.
parser = ArgumentParser()
for _flag, _mode, _default in (
        ('--tweets', 'r', 'data/tweets.json'),
        ('--streets', 'r', 'data/streets.json'),
        ('--out', 'wb', 'data/accidents.ttl')):
    parser.add_argument(_flag, type=FileType(_mode), default=_default)
if __name__ == '__main__':
    args = parser.parse_args()
    streets = load(args.streets)
    tweets = load(args.tweets)
    print('Loaded {} tweets and {} streets'.format(len(tweets), len(streets)))
    # Push the generated triples to the local Fuseki endpoint via SPARQL Update.
    endpoint = ConjunctiveGraph(store='SPARQLUpdateStore')
    endpoint.open((
        'http://localhost:3030/accidents/query',
        'http://localhost:3030/accidents/update',
    ))
    default = 'urn:x-arq:DefaultGraph'
    add_namespaces(endpoint)
    quads = ((s, p, o, default) for s, p, o in build_graph(tweets))
    endpoint.addN(quads)
    endpoint.close()