本文整理汇总了Python中rdflib.util.from_n3函数的典型用法代码示例。如果您正苦于以下问题:Python from_n3函数的具体用法?Python from_n3怎么用?Python from_n3使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了from_n3函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_util_from_n3_expectliteralwithdatatypefrombool
def test_util_from_n3_expectliteralwithdatatypefrombool(self):
    """The bare tokens 'true'/'false' should parse to boolean Literals."""
    for token, expected in (('true', Literal(True)),
                            ('false', Literal(False))):
        self.assertEqual(util.from_n3(token), expected)
示例2: get_semantic_associations
def get_semantic_associations(fn=None, limit=None):
    """Load (source, target) term pairs for semantic associations.

    If fn is given it must point to a (possibly gzipped) file with one
    space separated pair of n3 encoded IRIs per line, headed by the
    columns 'source target'; otherwise the pairs are derived from the
    verified mappings. limit optionally caps the number of file rows read.
    """
    if fn:
        pairs = []
        opener = gzip.open if fn.endswith('.gz') else open
        with opener(fn) as f:
            reader = csv.DictReader(
                f,
                delimiter=b' ',
                doublequote=False,
                escapechar=None,
                quoting=csv.QUOTE_NONE,
            )
            assert reader.fieldnames == ['source', 'target']
            for i, row in enumerate(reader):
                if limit and i >= limit:
                    break
                source = from_n3(row['source'].decode('UTF-8'))
                target = from_n3(row['target'].decode('UTF-8'))
                pairs.append((source, target))
    else:
        verified_mappings = get_verified_mappings()
        dbpedia_pairs = get_dbpedia_pairs_from_mappings(verified_mappings)
        pairs = [URIRefify(p) for p in dbpedia_pairs]
    return pairs
示例3: test_util_from_n3_expectpartialidempotencewithn3
def test_util_from_n3_expectpartialidempotencewithn3(self):
    """Parsing a term's .n3() form with from_n3 should reproduce the input."""
    cases = (
        '<http://ex.com/foo>',
        '"foo"@de',
        # '"\\""',  # exception as '\\"' --> '"' by orig parser as well
        '"""multi\n"line"\nstring"""@en',
    )
    for n3 in cases:
        self.assertEqual(util.from_n3(n3).n3(), n3,
                         'from_n3(%(n3e)r).n3() != %(n3e)r' % {'n3e': n3})
示例4: test_util_from_n3_expectsameasn3parser
def test_util_from_n3_expectsameasn3parser(self):
    """from_n3 should agree with the full notation3 parser on object terms."""
    def parse_n3(term_n3):
        ''' Disclaimer: Quick and dirty hack using the n3 parser. '''
        prepstr = ("@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n"
                   "<urn:no_use> <urn:no_use> %s.\n" % term_n3)
        g = ConjunctiveGraph()
        g.parse(data=prepstr, format='n3')
        # The object of the single parsed triple is the term under test.
        return [t for t in g.triples((None, None, None))][0][2]

    for n3 in (# "michel", # won't parse in original parser
               # "_:michel", # BNodes won't be the same
               '"michel"',
               '<http://example.org/schema>',
               '"michel"@fr',
               # '"michel"@fr^^xsd:fr', # FIXME: invalid n3, orig parser will prefer datatype
               # '"true"^^xsd:boolean', # FIXME: orig parser will expand xsd prefix
               '42',
               'true',
               'false',
               '"""multi\nline\nstring"""@en',
               '<http://ex.com/foo>',
               '"foo"@de',
               '"\\""@en',
               '"""multi\n"line"\nstring"""@en'):
        res, exp = util.from_n3(n3), parse_n3(n3)
        # assertEquals is a long-deprecated alias; use assertEqual.
        self.assertEqual(res, exp,
                         'from_n3(%(n3e)r): %(res)r != parser.notation3: %(exp)r' % {
                             'res': res, 'exp': exp, 'n3e': n3})
示例5: lookup_blanks
def lookup_blanks(self, g, bn, conn):
    """Recursively find any relevant blank nodes for
    the current lookup
    @param g The graph
    @param bn The blank node ID (starting _:)
    @param conn The database connection
    """
    cursor = conn.cursor()
    # BUG FIX: the original statement contained no '?' placeholder but
    # passed a one-element parameter tuple, so sqlite3 raised
    # ProgrammingError on every call. Restore the subject filter that the
    # bound parameter was evidently meant for. Assumes blank-node subjects
    # are stored without the '_:' prefix (hence bn[2:]) — TODO confirm
    # against the triple loader.
    cursor.execute("""select subject, property, object from triples where
        page="<BLANK>" and subject=?""", (bn[2:],))
    rows = cursor.fetchall()
    if rows:
        for s, p, o in rows:
            g.add((from_n3(s), from_n3(p), from_n3(o)))
            if o.startswith("_:"):
                # Blank objects may themselves have blank-node descriptions.
                self.lookup_blanks(g, o, conn)
    cursor.close()
示例6: ask_NODE
def ask_NODE(self, g, sections, var, prompt):
    """Ask the user for a node value.

    An answer starting with 'c' (when var.classhint names a known section)
    constructs that section; any other non-empty answer is parsed as n3;
    an empty answer yields None.
    """
    answer = self.input(prompt)
    if not answer:
        # Empty input: no node. ("".startswith("c") is False, so hoisting
        # this guard does not change which branch non-empty answers take.)
        return None
    if answer.startswith("c") and var.classhint and var.classhint in sections:
        section = sections[answer[1:].strip()]
        node = section.construct(g, sections, None)
        print("back to {}".format(self.name), file=self.out)
        return node
    return util.from_n3(answer)
示例7: summarize
def summarize(self, id):
    """Summarize an id
    @param id The id
    @return A RDFlib Graph or None if the ID is not found
    """
    graph = ConjunctiveGraph()
    conn = sqlite3.connect(self.db)
    cursor = conn.cursor()
    subject_n3 = "<%s%s>" % (BASE_NAME, unicode_escape(id))
    cursor.execute(
        """select subject, property, object from triples where
        subject=?""", (subject_n3,))
    added = 0
    # Keep at most 20 triples, and only those whose property is a facet.
    for s, p, o in cursor.fetchall():
        for facet in FACETS:
            # str(p)[1:-1] strips the surrounding <...> of the n3 IRI.
            if added < 20 and str(p)[1:-1] == facet["uri"]:
                graph.add((from_n3(s), from_n3(p), from_n3(o)))
                added += 1
    conn.close()
    return graph
示例8: lookup
def lookup(self, id):
    """Resolve a single id
    @param id The id
    @return A RDFlib Graph or None if the ID is not found
    """
    graph = ConjunctiveGraph()
    graph.bind("lemon", "http://lemon-model.net/lemon#")
    graph.bind("owl", str(OWL))
    conn = sqlite3.connect(self.db)
    cursor = conn.cursor()
    cursor.execute(
        """select subject, property, object from triples where
        page=?""", (unicode_escape(id),))
    rows = cursor.fetchall()
    if not rows:
        # NOTE(review): the connection is not closed on this path in the
        # original either — preserved as-is.
        return None
    for s, p, o in rows:
        graph.add((from_n3(s), from_n3(p), from_n3(o)))
        if o.startswith("_:"):
            # Pull in descriptions of any blank-node objects.
            self.lookup_blanks(graph, o, conn)
    conn.close()
    return graph
示例9: list_values
def list_values(self, offset, limit, prop):
    """
    Produce a list of all possible values for a particular property
    @param offset Where to start listing
    @param limit Number of values to list
    @param prop The property to list for
    @return A tuple consisting of a boolean indicating if there are more
    results and list of values that exist (as N3)
    """
    conn = sqlite3.connect(self.db)
    cursor = conn.cursor()
    if not offset:
        offset = 0
    # LIMIT is limit + 1 so the extra row tells us whether more results exist.
    cursor.execute("""SELECT DISTINCT object, obj_label, count(*)
        FROM triples WHERE property=? AND head=0
        GROUP BY oid ORDER BY count(*) DESC
        LIMIT ? OFFSET ?""", (prop, limit + 1, offset))
    row = cursor.fetchone()
    n = 0
    results = []
    while n < limit and row:
        obj, label, count = row
        term = from_n3(obj)
        # isinstance instead of exact type comparison (tolerates subclasses).
        if isinstance(term, Literal):
            results.append({'link': obj, 'label': term.value,
                            'count': count})
        elif isinstance(term, URIRef):
            if label:
                results.append({'link': obj, 'label': label,
                                'count': count})
            else:
                results.append({'link': obj,
                                'label': yuzu.displayer.DISPLAYER.apply(
                                    str(term)),
                                'count': count})
        n += 1
        row = cursor.fetchone()
    conn.close()
    # BUG FIX: the original returned (n == limit), which wrongly reported
    # "more results" when the query had exactly `limit` rows. The unconsumed
    # extra row (fetched thanks to LIMIT limit+1) is the reliable signal.
    return row is not None, results
示例10: srtsx_body2
def srtsx_body2(r, vars):
    """Yield SPARQL/XML <binding> elements for one result row.

    @param r A result row: n3-encoded terms aligned positionally with vars
    @param vars The variable names, in row order
    """
    # zip the row with its variables instead of r[vars.index(v)], which
    # rescanned vars for every column (O(n^2)) and returned the wrong cell
    # for any duplicated variable name.
    for v, cell in zip(vars, r):
        val = from_n3(cell)
        if isinstance(val, URIRef):
            yield (" <binding name=\"%s\"><uri>%s</uri></binding>"
                   % (v, str(val)))
        elif isinstance(val, BNode):
            yield (" <binding name=\"%s\"><bnode>%s</bnode></binding>"
                   % (v, str(val)))
        elif val.language:
            yield (" <binding name=\"%s\"><literal xml:lang=\"%s\">"
                   "%s</literal></binding>" % (v, val.language, str(val)))
        elif val.datatype:
            yield (" <binding name=\"%s\"><literal datatype=\"%s\">"
                   "%s</literal></binding>" % (v, val.datatype, str(val)))
        else:
            yield (" <binding name=\"%s\"><literal>%s</literal></binding>"
                   % (v, str(val)))
示例11: srtsj_body2
def srtsj_body2(r, vars):
    """Yield SPARQL/JSON binding fragments for one result row.

    @param r A result row: n3-encoded terms aligned positionally with vars
    @param vars The variable names, in row order
    """
    # zip instead of r[vars.index(v)]: avoids an O(n^2) rescan and wrong
    # cells for duplicated variable names.
    for v, cell in zip(vars, r):
        val = from_n3(cell)
        if val is None:
            # BUG FIX: the original tested `not val` and then FELL THROUGH,
            # so an unbound value crashed on val.language (AttributeError)
            # and an empty literal was emitted twice ("" plus its binding).
            # Test identity against None and skip the rest of the branches.
            yield ""
            continue
        if isinstance(val, URIRef):
            yield (" \"%s\": { \"type\": \"uri\", \"value\": \"%s\" }"
                   % (v, str(val)))
        elif isinstance(val, BNode):
            yield (" \"%s\": { \"type\": \"bnode\", \"value\": \"%s\" }"
                   % (v, str(val)))
        elif val.language:
            yield (" \"%s\": { \"type\": \"literal\", \"xml:lang\": "
                   "\"%s\", \"value\": \"%s\" }" % (v, val.language, str(val)))
        elif val.datatype:
            yield (" \"%s\": { \"type\": \"literal\", \"datatype\": "
                   "\"%s\", \"value\": \"%s\" }" % (v, val.datatype,
                                                    str(val)))
        else:
            yield (" \"%s\": { \"type\": \"literal\", \"value\": \"%s\" }"
                   % (v, str(val)))
示例12: entry
#.........这里部分代码省略.........
#cursor.execute("select * from senses where wordid=? and casedwordid is NULL", (word_id,))
cursor.execute("select * from senses where wordid=?", (word_id,))
else:
cursor.execute("select casedwordid from casedwords where cased=?",(cased_lemma,))
row = cursor.fetchone()
if row is None:
return None
casedwordid, = row
cursor.execute("select * from senses where casedwordid=?", (casedwordid,))
for _, casedwordid, synsetid, senseid, sensenum, lexid, tagcount, old_sensekey, sensekey in cursor.fetchall():
# NB. This could also be achieved by querying "casedwordid is NULL" however
# this is significantly slower, so we filter in Python checking we return cased
# forms only for cased lemmas
if cased_lemma.islower() == bool(casedwordid):
continue
if sensekey[-1] == pos:
this_pos_found = True
_, sensekey2 = sensekey.split('#')
sense_uri = entry_name(cased_lemma, pos, sensekey2)
graph.add((entry_uri, lemon.sense, sense_uri))
graph.add((sense_uri, RDF.type, lemon.LexicalSense))
graph.add((sense_uri, lemon.reference, synset_name(context, synsetid, pos)))
graph.add((sense_uri, wn_ontology.sense_number, Literal(sensenum)))
graph.add((sense_uri, wn_ontology.tag_count, Literal(tagcount)))
graph.add((sense_uri, wn_ontology.lex_id, Literal(lexid)))
graph.add((sense_uri, wn_ontology.old_sense_key, Literal(old_sensekey)))
# Now adjective positions
cursor.execute("select position from adjpositions where synsetid=? and wordid=?", (synsetid, word_id))
rows = cursor.fetchall()
for position, in rows:
graph.add((sense_uri, wn_ontology.adjposition,
URIRef(wn_ontology.term(quote_plus(context.adjposition_names[position])))))
# Add definition also to sense
cursor.execute("select definition from synsets where synsetid=?", (synsetid,))
for definition, in cursor.fetchall():
graph.add((sense_uri, wn_ontology.gloss, Literal(definition, lang=context.lang)))
# Sense links
cursor.execute("select senseid2, linkid from lexlinks where senseid1=?", (senseid,))
for senseid2, linkid in cursor.fetchall():
cursor.execute("select sensekey from senses where senseid=?", (senseid2,))
sensekey3, = cursor.fetchone()
sense2_lemma, sense2_key = sensekey3.split('#')
pos2 = sensekey3[-1]
sense_uri2 = entry_name(sense2_lemma, pos2, sense2_key)
graph.add((sense_uri, wn_ontology.term(context.linktypes[linkid]), sense_uri2))
# Verb frames (maybe only if pos=='v'?)
cursor.execute("select sentenceid from vframesentencemaps where synsetid=? and wordid=?",
(synsetid, word_id))
for sentenceid, in cursor.fetchall():
graph.add((sense_uri, wn_ontology.verb_frame_sentence,
Literal(context.vframesentences[sentenceid], lang=context.lang)))
# Sense tags
cursor.execute("select position, senseid from sensetags inner join taggedtexts on sensetags.sensetagid=taggedtexts.sensetagid where new_sensekey=?",(sensekey,)) # unindexed
for position, senseid in cursor.fetchall():
cursor.execute("select sensekey from senses where senseid=?",(senseid,))
for sensekey, in cursor.fetchall():
if position:
comp_uri = entry_name(sensekey[0:sensekey.index('#')].replace("_"," "),sensekey[-1],'Component-' + str(position+1))
graph.add((sense_uri, wn_ontology.sense_tag, comp_uri))
# LexVo Link
graph.add((sense_uri, OWL.sameAs, translate_to_lexvo(old_sensekey, pos)))
if not this_pos_found:
return None
if pos == "p":
words = lemma.split(" ")
node = BNode()
comp1 = entry_name(lemma, pos, "Component-1")
graph.add((entry_uri, lemon.decomposition, node))
graph.add((node, RDF.first, comp1))
graph.add((comp1, RDFS.label, Literal(words[0], lang=context.lang)))
graph.add((comp1, RDF.type, lemon.Component))
for idx in range(1,len(words)):
node2 = BNode()
graph.add((node, RDF.rest, node2))
node = node2
comp_uri = entry_name(lemma, pos, "Component-" + str(idx + 1))
graph.add((node, RDF.first, comp_uri))
graph.add((comp_uri, RDFS.label, Literal(words[idx], lang=context.lang)))
graph.add((comp_uri, RDF.type, lemon.Component))
graph.add((node, RDF.rest, RDF.nil))
try:
cursor.execute("select fragment, property, object from entrytriples where lemma=?",(quote_plus(lemma)+"-"+pos,))
for f, p, o in cursor.fetchall():
graph.add((entry_name(lemma,pos,f), from_n3(p), from_n3(o)))
except:
pass
return graph
示例13: test_util_from_n3_expectliteralwithescapedquote
def test_util_from_n3_expectliteralwithescapedquote(self):
    """An escaped quote inside a quoted n3 literal should be unescaped."""
    s = '"\\""'
    res = util.from_n3(s, default=None, backend=None)
    # BUG FIX: self.assert_(a, b) is the deprecated assertTrue alias whose
    # second argument is only a failure MESSAGE — the original never
    # compared res against anything. Assert real equality; the spurious
    # lang='en' is dropped because `s` carries no language tag, and the
    # n3 escape \\" decodes to a single '"'.
    self.assertEqual(res, Literal('"'))
示例14: test_util_from_n3_expectliteralmultiline
def test_util_from_n3_expectliteralmultiline(self):
    """A triple-quoted multi-line n3 literal with @en should parse fully."""
    s = '"""multi\nline\nstring"""@en'
    res = util.from_n3(s, default=None, backend=None)
    # BUG FIX: self.assert_(a, b) only checks the truthiness of `a` (the
    # second argument is a failure message), so the original compared
    # nothing. Assert real equality.
    self.assertEqual(res, Literal('multi\nline\nstring', lang='en'))
示例15: test_util_from_n3_expectliteralwithtrailingbackslash
def test_util_from_n3_expectliteralwithtrailingbackslash(self):
    """A literal ending in an escaped backslash should parse and round-trip."""
    s = '"trailing\\\\"^^<http://www.w3.org/2001/XMLSchema#string>'
    res = util.from_n3(s)
    # BUG FIX: self.assert_(a, b) only checks the truthiness of `a` (the
    # second argument is a failure message), so neither original line
    # compared anything. Assert equality for both the parsed value and
    # the .n3() round-trip.
    self.assertEqual(res, Literal('trailing\\', datatype=XSD['string']))
    self.assertEqual(res.n3(), s)