

Python ConjunctiveGraph.subjects Method Code Examples

This article collects typical usage examples of the Python method rdflib.ConjunctiveGraph.subjects. If you are wondering how ConjunctiveGraph.subjects is used in practice, the curated examples below should help. You can also explore further usage examples for the containing class, rdflib.ConjunctiveGraph.


Fifteen code examples of the ConjunctiveGraph.subjects method are shown below, ordered by popularity.
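
Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what ConjunctiveGraph.subjects does: it returns an iterator of subject nodes, one per matching triple, optionally filtered by predicate and/or object. The example.org URIs are placeholders.

from rdflib import ConjunctiveGraph, Literal, Namespace, URIRef

FOAF = Namespace("http://xmlns.com/foaf/0.1/")
g = ConjunctiveGraph()
g.add((URIRef("http://example.org/alice"), FOAF.name, Literal("Alice")))
g.add((URIRef("http://example.org/alice"), FOAF.knows, URIRef("http://example.org/bob")))
g.add((URIRef("http://example.org/bob"), FOAF.name, Literal("Bob")))

# All subjects: one per matching triple, so a subject can appear more than once
print(list(g.subjects()))

# Subjects filtered by predicate and object
print(list(g.subjects(FOAF.knows, URIRef("http://example.org/bob"))))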

Example 1: get_mediator_vocabs

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
def get_mediator_vocabs(userid):
    vocabs = {}
    if not os.path.isfile(os.path.join(ag.mediatorsdir, '%s.rdf'%userid)):
        "Cannot find file %s"%os.path.join(ag.mediatorsdir, '%s.rdf'%userid)
        return vocabs
    #Get list of vocabularies created by userid
    graph = Graph()
    graph.parse(os.path.join(ag.mediatorsdir, '%s.rdf'%userid))
    for v in graph.subjects(namespaces['dcterms']['mediator'], None):
        k = v.split('/')[-1]
        svn_src = "http://damssupport.ouls.ox.ac.uk/trac/vocab/browser/trunks/internalVocabularies/%s"%k
        vocabs[k] = (v, svn_src)
    return vocabs
Author: anusharanganathan, Project: Vocab-ox, Lines: 15, Source: rdf_helper.py

Example 2: get_rdf_metadata

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
    def get_rdf_metadata(self, uniprot_id):
        """Retrieve RDF metadata for the given UniProt accession.

        XXX Not finished. XML parsing looks to be more straightforward
        """
        from rdflib import ConjunctiveGraph as Graph
        url_base = "%s/uniprot/%s.rdf"
        full_url = url_base % (self._server, uniprot_id)
        graph = Graph()
        with self._get_open_handle(full_url) as in_handle:
            graph.parse(in_handle)
        main_subject = [s for s in list(set(graph.subjects())) if
                s.split('/')[-1] == uniprot_id][0]
        for sub, pred, obj in graph:
            print sub, pred, obj
Author: CosteaPaul, Project: bcbb, Lines: 17, Source: interpro_domain_summary.py
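
The list-comprehension-plus-[0] lookup above raises an IndexError when no subject matches the accession. Below is a slightly more defensive sketch of the same idea; the UniProt core namespace URI is real, but the sample triple and accessions are illustrative stand-ins for a parsed uniprot/<accession>.rdf document.

from rdflib import ConjunctiveGraph, Literal, Namespace, URIRef

UP = Namespace("http://purl.uniprot.org/core/")

def main_subject(graph, uniprot_id):
    """Return the first deduplicated subject whose URI ends in the accession, or None."""
    return next(
        (s for s in set(graph.subjects()) if s.split('/')[-1] == uniprot_id),
        None,
    )

# Tiny stand-in for a parsed UniProt RDF document
graph = ConjunctiveGraph()
graph.add((URIRef("http://purl.uniprot.org/uniprot/P12345"), UP.mnemonic, Literal("AATM_RABIT")))

print(main_subject(graph, "P12345"))   # the protein URI
print(main_subject(graph, "Q0XXXX"))   # not in the toy graph: None instead of an IndexError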

Example 3: get_mediator_details

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
def get_mediator_details(userid):
    #Get mediator_details - firstname, lastname, department, email
    details = {}
    details['userid'] = userid
    details['uri'] = None
    details['name'] = None
    details['fname'] = None
    details['lname'] = None
    details['title'] = None
    details['email'] = None
    details['dept'] = []
    if userid.startswith('uuid'):
        userid = get_mediator_account(userid)
        details['userid'] = userid
        if not userid:
            return details
    if not os.path.isfile(os.path.join(ag.mediatorsdir, '%s.rdf'%userid)):
        return details
    graph = Graph()
    graph.parse(os.path.join(ag.mediatorsdir, '%s.rdf'%userid))
    t = ''
    f = ''
    l = ''
    for title in graph.objects(None, namespaces['foaf']['title']):
        if title.strip():
            t = title
            details['title'] = t
    for fname in graph.objects(None, namespaces['foaf']['firstName']):
        if fname.strip():
            f = fname
            details['fname'] = fname
    for lname in graph.objects(None, namespaces['foaf']['lastName']):
        if lname.strip():
            l = lname
            details['lname'] = lname
    details['name'] = "%s %s %s"%(t, f, l)
    details['name'] = details['name'].strip()
    if not details['name']:
        details['name'] = userid
    for email in graph.objects(None, namespaces['foaf']['mbox']):
        details['email'] = email
    for dept in graph.objects(None, namespaces['dcterms']['isPartOf']):
        details['dept'].append(dept)
    for uri in graph.subjects(namespaces['foaf']['account'], None):
        details['uri'] = uri
    return details
Author: anusharanganathan, Project: Vocab-ox, Lines: 48, Source: rdf_helper.py

Example 4: generate

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
    def generate(cls, n):
        graph = ConjunctiveGraph()
        load_rdf_file(STORE['utensils'], graph)

        all_uris = set(graph.subjects())
        n = min(n, len(all_uris))
        selected_uris = sample(all_uris, n)

        # Fetch the selected utensils from the graph
        selected_triples = chain(*map(graph.triples, ((uri, None, None) for uri in selected_uris)))
        map(rdfSubject.db.add, selected_triples)

        utensils = [Utensil(uri) for uri in selected_uris]

        # Fetch the actions of these utensils
        ActionGenerator.generate(utensils)

        return utensils
Author: poulp, Project: randomfoOd, Lines: 20, Source: generators.py
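
Note that this example is written for Python 2: on Python 3.11+ random.sample no longer accepts a set, and map() is lazy, so the rdfSubject.db.add calls would never run. A hedged Python 3 sketch of just the sampling-and-copy step follows; the Turtle file name is an assumption, and plain rdflib is used in place of rdfalchemy's rdfSubject.

from itertools import chain
from random import sample

from rdflib import ConjunctiveGraph

source = ConjunctiveGraph()
source.parse("utensils.ttl", format="turtle")  # hypothetical data file

all_uris = list(set(source.subjects()))        # a list, because sample() rejects sets on 3.11+
n = min(3, len(all_uris))
selected_uris = sample(all_uris, n)

# Copy every triple about the selected subjects into a fresh graph
target = ConjunctiveGraph()
for triple in chain.from_iterable(source.triples((uri, None, None)) for uri in selected_uris):
    target.add(triple)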

Example 5: test_get_history

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
    def test_get_history(self):
        with open(filepath('test-patch-adds-items.json')) as f:
            patch = f.read()

        with self.client as client:
            res1 = client.patch(
                '/d/',
                data=patch,
                content_type='application/json',
                headers={'Authorization': 'Bearer '
                         + 'NTAwNWViMTgtYmU2Yi00YWMwLWIwODQtMDQ0MzI4OWIzMzc4'})

            patch_url = urlparse(res1.headers['Location']).path

            client.post(
                patch_url + 'messages',
                data='{"message": "Here is my patch"}',
                content_type='application/json',
                headers={'Authorization': 'Bearer '
                         + 'NTAwNWViMTgtYmU2Yi00YWMwLWIwODQtMDQ0MzI4OWIzMzc4'})

            client.post(
                patch_url + 'messages',
                data='{"message": "Looks good to me"}',
                content_type='application/json',
                headers={'Authorization': 'Bearer '
                         + 'ZjdjNjQ1ODQtMDc1MC00Y2I2LThjODEtMjkzMmY1ZGFhYmI4'})

            client.post(
                patch_url + 'merge',
                buffered=True,
                headers={'Authorization': 'Bearer '
                         + 'ZjdjNjQ1ODQtMDc1MC00Y2I2LThjODEtMjkzMmY1ZGFhYmI4'})

            res3 = client.get('/h', headers={'Accept': 'application/ld+json'})
            self.assertEqual(res3.status_code, http.client.SEE_OTHER)
            self.assertEqual(
                urlparse(res3.headers['Location']).path, '/h.jsonld')

            res4 = client.get('/history.jsonld?inline-context')
            self.assertEqual(res4.status_code, http.client.OK)
            self.assertEqual(
                res4.headers['Content-Type'], 'application/ld+json')
            jsonld = res4.get_data(as_text=True)

        g = ConjunctiveGraph()
        g.parse(format='json-ld', data=jsonld)

        # Initial data load
        self.assertIn(  # None means any
            (HOST['h#change-1'], PROV.endedAtTime, None), g)
        self.assertIn(
            (HOST['h#change-1'], PROV.used, HOST['d?version=0']), g)
        self.assertIn(
            (HOST['d?version=0'],
             PROV.specializationOf, HOST['d']), g)
        self.assertIn(
            (HOST['h#change-1'], RDFS.seeAlso, HOST['h#patch-request-1']), g)
        self.assertIn(
            (HOST['h#patch-request-1'], FOAF.page, HOST['patches/1/']), g)
        self.assertNotIn(
            (HOST['h#patch-request-1'],
             AS.replies, HOST['h#patch-request-1-comments']), g)
        self.assertIn(
            (HOST['h#change-1'], PROV.used, HOST['h#patch-1']), g)
        self.assertIn(
            (HOST['h#patch-1'],
             FOAF.page, HOST['patches/1/patch.jsonpatch']), g)
        self.assertIn(
            (HOST['h#change-1'],
             PROV.generated, HOST['d?version=1']), g)
        self.assertIn(
            (HOST['d?version=1'],
             PROV.specializationOf, HOST['d']), g)

        # Change from first submitted patch
        self.assertIn(  # None means any
            (HOST['h#change-2'], PROV.startedAtTime, None), g)
        self.assertIn(  # None means any
            (HOST['h#change-2'], PROV.endedAtTime, None), g)
        start = g.value(
            subject=HOST['h#change-2'],
            predicate=PROV.startedAtTime)
        self.assertEqual(start.datatype, XSD.dateTime)
        self.assertRegex(start.value.isoformat(), W3CDTF)
        end = g.value(
            subject=HOST['h#change-2'],
            predicate=PROV.endedAtTime)
        self.assertEqual(end.datatype, XSD.dateTime)
        self.assertRegex(end.value.isoformat(), W3CDTF)
        self.assertIn(
            (HOST['h#change-2'], PROV.wasAssociatedWith,
             URIRef('https://orcid.org/1234-5678-9101-112X')), g)
        self.assertIn(
            (HOST['h#change-2'], PROV.wasAssociatedWith,
             URIRef('https://orcid.org/1211-1098-7654-321X')), g)
        for association in g.subjects(
                predicate=PROV.agent,
                object=URIRef('https://orcid.org/1234-5678-9101-112X')):
            role = g.value(subject=association, predicate=PROV.hadRole)
#......... remainder of this code omitted .........
Author: periodo, Project: periodo-server, Lines: 103, Source: test_provenance.py
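
The test above (and the nearly identical one in Example 6) relies on the keyword form graph.subjects(predicate=..., object=...) to walk from a known object back to the nodes that point at it. Here is a minimal standalone sketch of that pattern; the PROV namespace is real, but the ORCID and role URIs are placeholders, not the test fixtures.

from rdflib import BNode, ConjunctiveGraph, Namespace, URIRef

PROV = Namespace("http://www.w3.org/ns/prov#")
agent = URIRef("https://orcid.org/0000-0000-0000-0000")

g = ConjunctiveGraph()
association = BNode()
g.add((association, PROV.agent, agent))
g.add((association, PROV.hadRole, URIRef("http://example.org/vocab#submitted")))

# Every qualified-association node that points at this agent
for node in g.subjects(predicate=PROV.agent, object=agent):
    print(g.value(subject=node, predicate=PROV.hadRole))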

Example 6: test_get_history

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
    def test_get_history(self):
        with open(filepath('test-patch-adds-items.json')) as f:
            patch = f.read()

        with self.client as client:
            res1 = client.patch(
                '/d/',
                data=patch,
                content_type='application/json',
                headers={'Authorization': 'Bearer '
                         + 'NTAwNWViMTgtYmU2Yi00YWMwLWIwODQtMDQ0MzI4OWIzMzc4'})
            patch_url = urlparse(res1.headers['Location']).path
            client.post(
                patch_url + 'merge',
                headers={'Authorization': 'Bearer '
                         + 'ZjdjNjQ1ODQtMDc1MC00Y2I2LThjODEtMjkzMmY1ZGFhYmI4'})
            res2 = client.get('/h')
            self.assertEqual(res2.status_code, http.client.OK)
            self.assertEqual(
                res2.headers['Content-Type'], 'application/ld+json')
            jsonld = res2.get_data(as_text=True)

        g = ConjunctiveGraph()
        g.parse(format='json-ld', data=jsonld)

        # Initial data load
        self.assertIn(  # None means any
            (PERIODO['p0h#change-1'], PROV.endedAtTime, None), g)
        self.assertIn(
            (PERIODO['p0h#change-1'], PROV.used, PERIODO['p0d?version=0']), g)
        self.assertIn(
            (PERIODO['p0d?version=0'],
             PROV.specializationOf, PERIODO['p0d']), g)
        self.assertIn(
            (PERIODO['p0h#change-1'], PROV.used, PERIODO['p0h#patch-1']), g)
        self.assertIn(
            (PERIODO['p0h#patch-1'],
             FOAF.page, PERIODO['p0patches/1/patch.jsonpatch']), g)
        self.assertIn(
            (PERIODO['p0h#change-1'],
             PROV.generated, PERIODO['p0d?version=1']), g)
        self.assertIn(
            (PERIODO['p0d?version=1'],
             PROV.specializationOf, PERIODO['p0d']), g)
        self.assertIn(
            (PERIODO['p0h#change-1'],
             PROV.generated, PERIODO['p0trgkv?version=1']), g)
        self.assertIn(
            (PERIODO['p0trgkv?version=1'],
             PROV.specializationOf, PERIODO['p0trgkv']), g)
        self.assertIn(
            (PERIODO['p0h#change-1'],
             PROV.generated, PERIODO['p0trgkvwbjd?version=1']), g)
        self.assertIn(
            (PERIODO['p0trgkvwbjd?version=1'],
             PROV.specializationOf, PERIODO['p0trgkvwbjd']), g)

        # Change from first submitted patch
        self.assertIn(  # None means any
            (PERIODO['p0h#change-2'], PROV.startedAtTime, None), g)
        self.assertIn(  # None means any
            (PERIODO['p0h#change-2'], PROV.endedAtTime, None), g)
        start = g.value(
            subject=PERIODO['p0h#change-2'],
            predicate=PROV.startedAtTime)
        self.assertEqual(start.datatype, XSD.dateTime)
        self.assertRegex(start.value.isoformat(), W3CDTF)
        end = g.value(
            subject=PERIODO['p0h#change-2'],
            predicate=PROV.endedAtTime)
        self.assertEqual(end.datatype, XSD.dateTime)
        self.assertRegex(end.value.isoformat(), W3CDTF)
        self.assertIn(
            (PERIODO['p0h#change-2'], PROV.wasAssociatedWith,
             URIRef('http://orcid.org/1234-5678-9101-112X')), g)
        self.assertIn(
            (PERIODO['p0h#change-2'], PROV.wasAssociatedWith,
             URIRef('http://orcid.org/1211-1098-7654-321X')), g)
        for association in g.subjects(
                predicate=PROV.agent,
                object=URIRef('http://orcid.org/1234-5678-9101-112X')):
            role = g.value(subject=association, predicate=PROV.hadRole)
            self.assertIn(role, (PERIODO['p0v#submitted'],
                                 PERIODO['p0v#updated']))
        merger = g.value(
            predicate=PROV.agent,
            object=URIRef('http://orcid.org/1211-1098-7654-321X'))
        self.assertIn(
            (PERIODO['p0h#change-2'], PROV.qualifiedAssociation, merger), g)
        self.assertIn(
            (merger, PROV.hadRole, PERIODO['p0v#merged']), g)
        self.assertIn(
            (PERIODO['p0h#change-2'], PROV.used, PERIODO['p0d?version=1']), g)
        self.assertIn(
            (PERIODO['p0d?version=1'],
             PROV.specializationOf, PERIODO['p0d']), g)
        self.assertIn(
            (PERIODO['p0h#change-2'], PROV.used, PERIODO['p0h#patch-2']), g)
        self.assertIn(
            (PERIODO['p0h#patch-2'],
#......... remainder of this code omitted .........
Author: weberjavi, Project: periodo-server, Lines: 103, Source: test_provenance.py

Example 7: Literal

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
    primer.add((myNS.pat, myNS.knows, myNS.jo))
    # or:
    primer.add((myNS['pat'], myNS['age'], Literal(24)))

    # Now, with just that, lets see how the system
    # recorded *way* too many details about what
    # you just asserted as fact.
    #

    from pprint import pprint
    pprint(list(primer))

    # just think .whatever((s, p, o))
    # here we report on what we know

    pprint(list(primer.subjects()))
    pprint(list(primer.predicates()))
    pprint(list(primer.objects()))

    # and other things that make sense

    # what do we know about pat?
    pprint(list(primer.predicate_objects(myNS.pat)))

    # who is what age?
    pprint(list(primer.subject_objects(myNS.age)))

    # Okay, so lets now work with a bigger
    # dataset from the example, and start
    # with a fresh new graph.
Author: RDFLib, Project: rdflib, Lines: 32, Source: swap_primer.py
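
This excerpt from rdflib's swap_primer example starts mid-script, so primer and myNS already exist when it begins. A hedged reconstruction of the missing preamble is shown below; the namespace URI is an assumption, chosen only so the excerpt's add() calls can run.

from rdflib import ConjunctiveGraph, Literal, Namespace

# Assumed preamble for the excerpt above; the namespace URI is illustrative only.
myNS = Namespace("http://example.org/ns#")
primer = ConjunctiveGraph()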

Example 8: URIRef

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
    raise


# Test6: ontology is internally consistent with respect to domains, ranges, etc

# step 1: find all the classes.
rdftype = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type")
rdfsdomain = URIRef("http://www.w3.org/2000/01/rdf-schema#domain")
rdfsrange = URIRef("http://www.w3.org/2000/01/rdf-schema#range")
rdfsresource = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#Resource")
rdfssco = URIRef("http://www.w3.org/2000/01/rdf-schema#subClassOf")
asColl = URIRef("http://www.w3.org/ns/activitystreams#OrderedCollection")
skosConcept = URIRef("http://www.w3.org/2004/02/skos/core#Concept")

otherClasses = [asColl, skosConcept]
classes = list(g.subjects(rdftype, URIRef("http://www.w3.org/2000/01/rdf-schema#Class")))
props = list(g.subjects(rdftype, URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#Property")))

for p in props:
    domains = list(g.objects(p, rdfsdomain))
    for d in domains:
        assert(d in classes)

for p in props:
    ranges = list(g.objects(p, rdfsrange))
    for r in ranges:
        if not r in classes and not str(r).startswith("http://www.w3.org/2001/XMLSchema#") and \
            not r == rdfsresource:
            print "Found inconsistent property: %s has unknown range" % p

for c in classes:
Author: AutomatedTester, Project: web-platform-tests, Lines: 33, Source: vocab_tester.py

Example 9: graph_plan

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
def graph_plan(plan, fountain):
    plan_graph = ConjunctiveGraph()
    plan_graph.bind('agora', AGORA)
    prefixes = plan.get('prefixes')
    ef_plan = plan.get('plan')
    tree_lengths = {}
    s_trees = set([])
    patterns = {}

    for (prefix, u) in prefixes.items():
        plan_graph.bind(prefix, u)

    def __get_pattern_node(p):
        if p not in patterns:
            patterns[p] = BNode('tp_{}'.format(len(patterns)))
        return patterns[p]

    def __inc_tree_length(tree, l):
        if tree not in tree_lengths:
            tree_lengths[tree] = 0
        tree_lengths[tree] += l

    def __add_variable(p_node, vid, subject=True):
        sub_node = BNode(str(vid).replace('?', 'var_'))
        if subject:
            plan_graph.add((p_node, AGORA.subject, sub_node))
        else:
            plan_graph.add((p_node, AGORA.object, sub_node))
        plan_graph.set((sub_node, RDF.type, AGORA.Variable))
        plan_graph.set((sub_node, RDFS.label, Literal(str(vid), datatype=XSD.string)))

    def include_path(elm, p_seeds, p_steps):
        elm_uri = __extend_uri(prefixes, elm)
        path_g = plan_graph.get_context(elm_uri)
        b_tree = BNode(elm_uri)
        s_trees.add(b_tree)
        path_g.set((b_tree, RDF.type, AGORA.SearchTree))
        path_g.set((b_tree, AGORA.fromType, elm_uri))

        for seed in p_seeds:
            path_g.add((b_tree, AGORA.hasSeed, URIRef(seed)))

        previous_node = b_tree
        __inc_tree_length(b_tree, len(p_steps))
        for j, step in enumerate(p_steps):
            prop = step.get('property')
            b_node = BNode(previous_node.n3() + prop)
            if j < len(p_steps) - 1 or pattern[1] == RDF.type:
                path_g.add((b_node, AGORA.onProperty, __extend_uri(prefixes, prop)))
            path_g.add((b_node, AGORA.expectedType, __extend_uri(prefixes, step.get('type'))))
            path_g.add((previous_node, AGORA.next, b_node))
            previous_node = b_node

        p_node = __get_pattern_node(pattern)
        path_g.add((previous_node, AGORA.byPattern, p_node))

    for i, tp_plan in enumerate(ef_plan):
        paths = tp_plan.get('paths')
        pattern = tp_plan.get('pattern')
        hints = tp_plan.get('hints')
        context = BNode('space_{}'.format(tp_plan.get('context')))
        for path in paths:
            steps = path.get('steps')
            seeds = path.get('seeds')
            if not len(steps) and len(seeds):
                include_path(pattern[2], seeds, steps)
            elif len(steps):
                ty = steps[0].get('type')
                include_path(ty, seeds, steps)

        for t in s_trees:
            plan_graph.set((t, AGORA.length, Literal(tree_lengths.get(t, 0), datatype=XSD.integer)))

        pattern_node = __get_pattern_node(pattern)
        plan_graph.add((context, AGORA.definedBy, pattern_node))
        plan_graph.set((context, RDF.type, AGORA.SearchSpace))
        plan_graph.add((pattern_node, RDF.type, AGORA.TriplePattern))
        (sub, pred, obj) = pattern

        if isinstance(sub, BNode):
            __add_variable(pattern_node, str(sub))
        elif isinstance(sub, URIRef):
            plan_graph.add((pattern_node, AGORA.subject, sub))

        if isinstance(obj, BNode):
            __add_variable(pattern_node, str(obj), subject=False)
        elif isinstance(obj, Literal):
            node = BNode(str(obj).replace(' ', ''))
            plan_graph.add((pattern_node, AGORA.object, node))
            plan_graph.set((node, RDF.type, AGORA.Literal))
            plan_graph.set((node, AGORA.value, Literal(str(obj), datatype=XSD.string)))
        else:
            plan_graph.add((pattern_node, AGORA.object, obj))

        plan_graph.add((pattern_node, AGORA.predicate, pred))
        if pred == RDF.type:
            if 'check' in hints:
                plan_graph.add((pattern_node, AGORA.checkType, Literal(hints['check'], datatype=XSD.boolean)))

        sub_expected = plan_graph.subjects(predicate=AGORA.expectedType)
#......... remainder of this code omitted .........
Author: SmartDeveloperHub, Project: agora-planner, Lines: 103, Source: graph.py

Example 10: create_graph

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
def create_graph(filelist, output_train, output_test, pos_graphs, cv, predicate, ob):
	global relation_counter
	relation_counter = 1000000
	global entity_counter
	global local_entity_counter
	global local_entity_map
	global id_to_uri
	id_to_uri = dict()

	entity_counter = 0
	entity_map = dict()
	relation_map = dict()
	graph_labels_train = []
	graph_labels_test = []
	filelist = np.array(filelist)
	i_fold = 0
	for train_index, test_index in cross_validation.KFold(len(filelist), n_folds=cv):
		train = True
		test = True
		filelist_train = filelist[train_index]
		filelist_test = filelist[test_index]

		output_train_tmp = output_train + str(i_fold) + ".txt"
		output_test_tmp = output_test + str(i_fold) + ".txt"

		# delete train and test output files
		try:
			os.remove(output_train_tmp)
		except OSError:
			pass
		try:
			os.remove(output_test_tmp)
		except OSError:
			pass
		# First round train then test
		while train or test:
			graph_labels_tmp = []
			filelist_tmp = None
			graph_labels_list_tmp = None
			if train:
				filelist_tmp = filelist_train
				output_tmp = output_train_tmp
				train = False
				graph_labels_list_tmp = graph_labels_train
			else:
				filelist_tmp = filelist_test
				output_tmp = output_test_tmp
				test = False
				graph_labels_list_tmp = graph_labels_test
			for f in filelist_tmp:
				num = int(f.split("_")[1])
				labels = pos_graphs[num]
				graph_labels_tmp.append(labels)
				g = ConjunctiveGraph()
				g.load(open(f, "rb"))
				operations = list(g.subjects(predicate, ob))
				with open(output_tmp, "a") as tf:
					o = operations[0]
					entity_set = set()
					edge_set = []
					local_entity_counter = 0
					local_entity_map = []
					local_entity_counter = 0
					local_entity_map = dict()
					dfs_triples(entity_set, entity_map, edge_set, relation_map, g, o)
					#id = list(g.objects(o, ID))[0]
					tf.write("t")
					tf.write("\n")
					for (local_id, global_id) in sorted(entity_set, key=lambda x: x[0]):
						tf.write("v" + " " + str(local_id) + " " + str(global_id))
						tf.write("\n")
					for (s,p,o) in edge_set:
						tf.write("e" + " " + str(s) + " " + str(o) + " " + str(p))
						tf.write("\n")
			graph_labels_list_tmp.append(graph_labels_tmp)
		i_fold += 1
	return id_to_uri, graph_labels_train, graph_labels_test
Author: NetherNova, Project: grakelasso, Lines: 79, Source: fileio.py
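
This example targets an old scikit-learn API: the cross_validation module was removed in scikit-learn 0.20, and KFold no longer takes the dataset length as a positional argument. A hedged sketch of the equivalent fold loop with the current model_selection API follows; the file names are placeholders, not the project's data.

import numpy as np
from sklearn.model_selection import KFold

filelist = np.array(["graph_0_a.rdf", "graph_1_b.rdf", "graph_2_c.rdf"])  # placeholder names
cv = 3

for i_fold, (train_index, test_index) in enumerate(KFold(n_splits=cv).split(filelist)):
    filelist_train = filelist[train_index]
    filelist_test = filelist[test_index]
    # ... build the per-fold train/test output files as in the example above ...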

Example 11: add_property_axioms

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
    def add_property_axioms(graph, properties):
        ontology_graph = ConjunctiveGraph()
        GH = 'https://raw.githubusercontent.com'
        MI = '/monarch-initiative'
        ontologies = [
            GH + MI + '/SEPIO-ontology/master/src/ontology/sepio.owl',
            GH + MI + '/GENO-ontology/develop/src/ontology/geno.owl',
            GH + '/oborel/obo-relations/master/ro.owl',
            'http://purl.obolibrary.org/obo/iao.owl',
            'http://purl.obolibrary.org/obo/ero.owl',
            GH + '/jamesmalone/OBAN/master/ontology/oban_core.ttl',
            'http://purl.obolibrary.org/obo/pco.owl',
            'http://purl.obolibrary.org/obo/xco.owl'
        ]

        # random timeouts can waste hours. (too many redirects?)
        # there is a timeout param in urllib.request,
        # but it is not exposed by rdflib.parsing
        # so retry once on URLError
        for ontology in ontologies:
            logger.info("parsing: " + ontology)
            try:
                ontology_graph.parse(
                    ontology, format=rdflib_util.guess_format(ontology))
            except SAXParseException as e:
                logger.error(e)
                logger.error('Retrying as turtle: ' + ontology)
                ontology_graph.parse(ontology, format="turtle")
            except OSError as e:  # URLError:
                # simple retry
                logger.error(e)
                logger.error('Retrying: ' + ontology)
                ontology_graph.parse(
                    ontology, format=rdflib_util.guess_format(ontology))

        # Get object properties
        graph = GraphUtils.add_property_to_graph(
            ontology_graph.subjects(RDF['type'], OWL['ObjectProperty']),
            graph, OWL['ObjectProperty'], properties)

        # Get annotation properties
        graph = GraphUtils.add_property_to_graph(
            ontology_graph.subjects(RDF['type'], OWL['AnnotationProperty']),
            graph, OWL['AnnotationProperty'], properties)

        # Get data properties
        graph = GraphUtils.add_property_to_graph(
            ontology_graph.subjects(RDF['type'], OWL['DatatypeProperty']),
            graph, OWL['DatatypeProperty'], properties)

        for row in graph.predicates(DC['source'], OWL['AnnotationProperty']):
            if row == RDF['type']:
                graph.remove(
                    (DC['source'], RDF['type'], OWL['AnnotationProperty']))
        graph.add((DC['source'], RDF['type'], OWL['ObjectProperty']))

        # Hardcoded properties
        graph.add((
            URIRef('https://monarchinitiative.org/MONARCH_cliqueLeader'),
            RDF['type'], OWL['AnnotationProperty']))

        graph.add((URIRef('https://monarchinitiative.org/MONARCH_anonymous'),
                  RDF['type'], OWL['AnnotationProperty']))

        return graph
Author: DoctorBud, Project: dipper, Lines: 67, Source: GraphUtils.py

Example 12: __init__

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
class KB4ITGraph:
    """
    This class creates a RDF graph based on attributes for each doc.
    Also it has convenient function to ask the graph
    """
    def __init__(self, path=None):
        """
        If not path is passed it build a graph in memory. Otherwise, it
        creates a persistent graph in disk.
        """
        if path is not None:
            # Create persistent Graph in disk
            self.path = path
            self.graph = ConjunctiveGraph('Sleepycat', URIRef("kb4it://"))
            graph_path = path + SEP + 'kb4it.graph'
            self.graph.store.open(graph_path)
        else:
            # Create Graph in Memory
            self.graph = ConjunctiveGraph('IOMemory')

        # Assign namespaces to the Namespace Manager of this graph
        namespace_manager = NamespaceManager(ConjunctiveGraph())
        for ns in NSBINDINGS:
            namespace_manager.bind(ns, NSBINDINGS[ns])
        self.graph.namespace_manager = namespace_manager


    def __uniq_sort(self, result):
        alist = list(result)
        aset = set(alist)
        alist = list(aset)
        alist.sort()
        return alist


    def subjects(self, predicate, object):
        """
        Returns a list of sorted and uniques subjects given a predicate
        and an object.
        """
        return self.__uniq_sort(self.graph.subjects(predicate, object))


    def predicates(self, subject=None, object=None):
        """
        Returns a list of sorted and uniques predicates given a subject
        and an object.
        """
        return self.__uniq_sort(self.graph.predicates(subject, object))


    def objects(self, subject, predicate):
        """
        Returns a list of sorted and uniques objects given a subject
        and an predicate.
        """
        return self.__uniq_sort(self.graph.objects(subject, predicate))


    def value(self, subject=None, predicate=None, object=None, default=None, any=True):
        """
        Returns a value given the subject and the predicate.
        """
        return self.graph.value(subject, predicate, object, default, any)


    def add_document(self, doc):
        """
        Add a new document to the graph.
        """
        subject = URIRef(doc)
        predicate = RDF['type']
        object = URIRef(KB4IT['Document'])
        self.graph.add([subject, predicate, object])


    def add_document_attribute(self, doc, attribute, value):
        """
        Add a new attribute to a document
        """
        predicate = 'has%s' % attribute
        subject = URIRef(doc)
        predicate = KB4IT[predicate]
        object = Literal(value)
        self.graph.add([subject, predicate, object])


    def get_attributes(self):
        """
        Get all predicates except RFD.type and Title
        """
        blacklist = set()
        blacklist.add(RDF['type'])
        blacklist.add(KB4IT['hasTitle'])
        alist = list(self.graph.predicates(None, None))
        aset = set(alist) - blacklist
        alist = list(aset)
        alist.sort()
        return alist

#......... remainder of this code omitted .........
Author: t00m, Project: KB4IT, Lines: 103, Source: rdfdb.py
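
KB4ITGraph.subjects simply sorts and de-duplicates whatever ConjunctiveGraph.subjects yields, which matters when a subject matches more than one triple. Below is a minimal sketch of the same pattern outside KB4IT; the kb4it:// namespace, document URIs, and hasTag property are made up for illustration and are not the real module's vocabulary.

from rdflib import RDF, ConjunctiveGraph, Literal, Namespace, URIRef

KB4IT = Namespace("kb4it://")  # illustrative namespace, not imported from the real module
doc_a = URIRef("kb4it://doc_a.adoc")
doc_b = URIRef("kb4it://doc_b.adoc")

graph = ConjunctiveGraph()
graph.add((doc_b, RDF.type, KB4IT['Document']))
graph.add((doc_a, RDF.type, KB4IT['Document']))
graph.add((doc_a, KB4IT['hasTag'], Literal("rdf")))
graph.add((doc_a, KB4IT['hasTag'], Literal("python")))

def uniq_sorted_subjects(predicate, obj):
    """Sorted, de-duplicated subjects, mirroring KB4ITGraph.subjects."""
    return sorted(set(graph.subjects(predicate, obj)))

print(uniq_sorted_subjects(RDF.type, KB4IT['Document']))  # doc_a before doc_b
print(uniq_sorted_subjects(KB4IT['hasTag'], None))        # doc_a appears once, despite two tags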

Example 13: get_vocab_base

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
def get_vocab_base(vocabfile):
    graph = Graph()
    try:
        graph.parse(vocabfile)
    except:
        graph = None
        graph = Graph()
        try:
            graph.parse(vocabfile, format="n3")
        except:
            return (None, None, None)
    identifier = None
    for v in graph.objects(None, namespaces['dc']['identifier']):
        identifier = v
    if not identifier:
        for v in graph.objects(None, namespaces['dcterms']['identifier']):
            identifier = v

    base = None
    if not base:
        for s in graph.subjects(namespaces['rdf']['type'], namespaces['owl']['Ontology']):
            base = s
            break
    if not base:
        for s in graph.subjects(namespaces['dc']['title'], None):
            base = s
            break
    if not base:
        for s in graph.subjects(namespaces['dcterms']['title'], None):
            base = s
            break
    if not base:
        for s in graph.subjects(namespaces['dc']['creator'], None):
            base = s
            break
    if not base:
        for s in graph.subjects(namespaces['dcterms']['creator'], None):
            base = s
            break
    if not base:
        for v in graph.objects(None, namespaces['vann']['preferredNamespaceUri']):
            base = v
            break
    if not base:
        for v in graph.namespaces():
            if v[0] == '':
                base = v[1]
                break

    prefix = None
    vocab_prefixes = graph.objects(None, namespaces['vann']['preferredNamespacePrefix'])
    for vp in vocab_prefixes:
        prefix = vp
    if not prefix and base:
        for v in graph.namespaces():
            if str(v[1]) == str(base):
                prefix = v[0]
                break
    if not prefix and base:
        prefix = base.strip().strip('/').split('/')[-1].strip('#').strip(' ')
    if base:
        base = base.strip()
        if (base[-1]!="/" and base[-1]!="#"):
            base += "#"
    return (identifier, base, prefix)
Author: anusharanganathan, Project: Vocab-ox, Lines: 67, Source: rdf_helper.py

Example 14: create_vocab_statusfile

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
def create_vocab_statusfile(userid, vocabprefix, vocabfile, baseuri, update=False, using_uuid=False, refvocab=False):
    vocab_uri = URIRef("http://vocab.ox.ac.uk/%s"%vocabprefix)
    vocabdir = os.path.join(ag.vocabulariesdir, str(vocabprefix))
    vocabstatusfile = os.path.join(vocabdir, "status.rdf")
    vocab_file_name = os.path.basename(vocabfile)
    vocabfile_uri = URIRef("http://vocab.ox.ac.uk/%s/%s"%(vocabprefix, vocab_file_name))

    #Add vocab in mediator file
    graph = Graph()
    mediatorfile = os.path.join(ag.mediatorsdir, '%s.rdf'%userid)
    graph.parse(mediatorfile)
    user_uri = []
    for uri in graph.subjects(namespaces['foaf']['account'], Literal(userid)):
        if not uri in user_uri:
            user_uri.append(uri)
    user_uri = URIRef(user_uri[0])
    graph.add((vocab_uri, namespaces['dcterms']['mediator'], URIRef(user_uri)))
    rdf_str = None
    rdf_str = graph.serialize()
    f = codecs.open(mediatorfile, 'w', 'utf-8')
    f.write(rdf_str)
    f.close()

    #Add vocab in vocab status file
    graph = Graph()
    if update and os.path.isfile(vocabstatusfile):
        graph.parse(vocabstatusfile)
    for prefix, url in namespaces.iteritems():
        graph.bind(prefix, URIRef(url))
    graph.add((vocab_uri, namespaces['dcterms']['mediator'], URIRef(user_uri)))
    graph.add((user_uri, namespaces['foaf']['account'], Literal(userid)))
    graph.add((vocab_uri, namespaces['dcterms']['hasFormat'], URIRef(vocabfile_uri)))
    graph.add((vocab_uri, namespaces['vann']['preferredNamespaceUri'], URIRef(baseuri)))
    graph.add((vocab_uri, namespaces['vann']['preferredNamespacePrefix'], Literal(vocabprefix)))
    graph.add((vocab_uri, namespaces['skos']['editorialNote'], Literal(vocab_editorial_descriptions[0])))
    if refvocab:
        add_ref_vocab(vocabprefix, refvocab)
        graph.add((vocab_uri, namespaces['dcterms']['isVersionOf'], URIRef(refvocab)))
    # get mimetype of file
    if os.path.isfile(vocabfile):
        graph.add((vocabfile_uri, namespaces['nfo']['fileUrl'], Literal('file://%s'%vocabfile)))
        graph.add((vocabfile_uri, namespaces['nfo']['fileName'], Literal(vocab_file_name)))
        mt = None
        if check_rdf(vocabfile):
            mt = 'application/rdf+xml'
            graph.add((vocabfile_uri, namespaces['dcterms']['conformsTo'], Literal(mt)))
            graph.add((vocabfile_uri, namespaces['skos']['editorialNote'], Literal(vocab_editorial_descriptions[3])))
        elif check_n3(vocabfile):
            mt = 'text/rdf+nt'
            root, ext = os.path.splitext(vocabfile)
            if ext == '.rdf':
                rdffile = "%s_2.rdf"%root
            else:
                rdffile = "%s.rdf"%root
            converttordf = convert_n3_rdf(vocabfile, rdffile)
            if converttordf and os.path.isfile(rdffile):
                rdf_file_name = os.path.basename(rdffile)
                rdffile_uri = URIRef("http://vocab.ox.ac.uk/%s/%s"%(vocabprefix, rdf_file_name))
                graph.add((vocab_uri, namespaces['dcterms']['hasFormat'], URIRef(rdffile_uri)))
                graph.add((rdffile_uri, namespaces['nfo']['fileUrl'], Literal('file://%s'%rdffile)))
                graph.add((rdffile_uri, namespaces['nfo']['fileName'], Literal(rdf_file_name)))
                graph.add((rdffile_uri, namespaces['dcterms']['conformsTo'], Literal('application/rdf+xml')))
                graph.add((rdffile_uri, namespaces['skos']['editorialNote'], Literal(vocab_editorial_descriptions[3])))
                graph.add((rdffile_uri, namespaces['dcterms']['format'], Literal('application/rdf+xml')))
        else:
            mt1 = mimetypes.guess_type(vocabfile)
            mt2 = get_file_mimetype(vocabfile)
            if mt1[0]:
                mt = mt1[0]
            else:
                mt = mt2
            if str(mt) == 'application/rdf+xml':
                graph.add((vocabfile_uri, namespaces['skos']['editorialNote'], Literal(vocab_editorial_descriptions[2])))
            else:
                graph.add((vocab_uri, namespaces['skos']['editorialNote'], Literal(vocab_editorial_descriptions[1])))
        if mt:
            graph.add((vocabfile_uri, namespaces['dcterms']['format'], Literal(mt)))
    rdf_str = None
    rdf_str = graph.serialize()
    f = codecs.open(vocabstatusfile, 'w', 'utf-8')
    f.write(rdf_str)
    f.close()
    return True
Author: anusharanganathan, Project: Vocab-ox, Lines: 85, Source: rdf_helper.py

Example 15: Graph

# Required import: from rdflib import ConjunctiveGraph [as alias]
# Or: from rdflib.ConjunctiveGraph import subjects [as alias]
import urllib
from rdflib import ConjunctiveGraph as Graph
import sparta

url = 'http://www.gopubmed.org/GoMeshPubMed/gomeshpubmed/Search/RDF?q=18463287&type=RdfExportAll'
gopubmed_handle = urllib.urlopen(url)
graph = Graph()
graph.parse(gopubmed_handle)
gopubmed_handle.close()

graph_subjects = list(set(graph.subjects()))
sparta_factory = sparta.ThingFactory(graph)
for subject in graph_subjects:
    sparta_graph = sparta_factory(subject)
    print subject, [unicode(i) for i in sparta_graph.dc_title][0]
Author: 16NWallace, Project: bcbb, Lines: 17, Source: sparta_ex.py


注:本文中的rdflib.ConjunctiveGraph.subjects方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。