Python Graph.parse Method Code Examples

This article collects and summarizes typical usage examples of the rdflib.Graph.parse method in Python. If you have been wondering how exactly Graph.parse is used, how it is called, or what real-world examples look like, the hand-picked code examples below may help. You can also explore further usage examples of rdflib.Graph, the class this method belongs to.


Fifteen code examples of the Graph.parse method are shown below, sorted by popularity by default.
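
Before turning to the collected examples, here is a minimal usage sketch of the most common Graph.parse call patterns. It is illustrative only: the file name, URL, and triple are placeholders and do not come from any of the projects cited below.

from rdflib import Graph

g = Graph()

# Parse a local file; the format can be given explicitly (or guessed from
# the file extension in recent rdflib versions).
g.parse("vocabulary.ttl", format="turtle")             # hypothetical local file

# Parse a remote document fetched over HTTP.
g.parse("http://example.org/data.rdf", format="xml")   # placeholder URL

# Parse RDF passed in as an in-memory string.
g.parse(data="<http://example.org/s> <http://example.org/p> <http://example.org/o> .",
        format="nt")

print(len(g))  # number of triples now in the graph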

Example 1: skosd

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def skosd(url, lang="en"):
    """
    Pass in a URL (can be a file protocol) for a SKOS file and get back
    a dictionary of code => label, or a list of labels when the concepts
    carry no skos:notation codes.
    """
    graph = Graph()
    graph.parse(url)

    skos_dictionary = {}
    skos_list = []
    for concept in graph.subjects(RDF.type, skos.Concept):

        # determine the code
        code = graph.value(concept, skos.notation)

        # get the preferred language label, there could be more than one
        labels = list(graph.objects(concept, skos.prefLabel))
        if len(labels) > 1:
            for label in labels: 
                if label.language == lang:
                    break
        else:
            label = labels[0]

        if code:
            skos_dictionary[code] = label
        else:
            skos_list.append(label)

    if len(skos_dictionary.keys()) > 0:
        return skos_dictionary
    return skos_list
Developer: edsu, Project: skosd, Lines of code: 34, Source file: skosd.py

Example 2: parse

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
    def parse(self):
        if "workflowBundle.ttl" in self.zip.namelist():
            format = "n3" 
            rootfile = "workflowBundle.ttl"
        elif "workflowBundle.rdf" in self.zip.namelist():
            rootfile = "workflowBundle.rdf"
            format = "xml" 
        else:
            raise Scufl2Error("Can't find workflowBundle.ttl or "
                              "workflowBundle.rdf")

        self.uri = "file://" + urllib.pathname2url(os.path.abspath(self.filename)) + "/"
        early_graph = Graph()    
        rdf_file = self.zip.open(rootfile)
        early_graph.parse(rdf_file, self.uri, format=format)
        sameBaseAs = list(early_graph.objects(subject=URIRef(self.uri), predicate=Scufl2NS.sameBaseAs))

        if not sameBaseAs:
            # Fall back to the file:/// URIs   
            self.graph = early_graph
        else:    
            # Use the sameBaseAs as the base
            self.uri = sameBaseAs[0]
            self.graph = Graph()
            # Reparse it
            rdf_file = self.zip.open(rootfile)
            self.graph.parse(rdf_file, self.uri, format=format)

        self.parse_all_graphs(self.uri)
Developer: egarcia1357, Project: scufl2, Lines of code: 31, Source file: processorNames.py

Example 3: find_location

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def find_location(textlocation):
    """
    Returns a 2-tuple containing the RDFLib node for textlocation, as found
    via the GeoNames API search, and the RDF graph with its GeoNames description.
    Raises NotFoundException if textlocation was not found in GeoNames.
    """
    payload = {'q' : textlocation,
            'username' : 'edsa_project',
            'featureClass' : 'P',
            'isNameRequired' : 'true',
            'maxRows' : '1'} 
    #TODO: For extra precision, countries need to be translated to ISO-3166.
    # The problem is that US locations have the state.

    r = requests.get('http://api.geonames.org/searchRDF', params=payload)

    g = Graph()
    g.parse(data=r.text, format="xml")

    spquery= """
        SELECT DISTINCT ?iri WHERE {?iri gn:name ?y}
    """
    qres = g.query(spquery)
    iri = ''
    for row in qres:
        iri = row.iri
    if iri == '':
        raise NotFoundException("Could not found "+textlocation)
    else:
        return (iri,g)
Developer: alanponce, Project: dashboard, Lines of code: 32, Source file: geonames.py

Example 4: skosdict

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def skosdict(url, lang="en"):
    """
    Pass in a URL (can be a file protocol) for a SKOS file and get 
    back a dictionary of code => values.
    """
    graph = Graph()
    graph.parse(url)

    dictionary = {}
    for concept in graph.subjects(RDF.type, skos.Concept):

        # determine the code
        code = graph.value(concept, skos.notation)
        if not code:
            continue

        # get the preferred language label, there could be more than one
        labels = list(graph.objects(concept, skos.prefLabel))
        if len(labels) > 1:
            for label in labels: 
                if label.language == lang:
                    break
        else:
            label = labels[0]

        dictionary[code] = label
    return dictionary
Developer: edsu, Project: skosdict, Lines of code: 29, Source file: skosdict.py

Example 5: main

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def main():

    graph = Graph()
    graph.parse(sys.argv[1], format="n3")

    if len(sys.argv) > 2:
        doc = URIRef(sys.argv[2])
    else:
        docs = []
        for c in (RIF.Document, RIF.BLDDocument, 
                  RIF.PRDDocument, RIF.CoreDocument):
            for x in graph.subjects(RDF.type, c):
                docs.append(x)
        if len(docs) == 1:
            doc = docs[0]
        elif len(docs) > 1:
            print >>sys.stderr, "Input contains multiple Document nodes"
            print >>sys.stderr, indent+",".join([repr(x) for x in docs])
            print >>sys.stderr, "Name one on the command line to select it"
            sys.exit(1)
        elif len(docs) < 1:
            print >>sys.stderr, "Input contains no Document nodes"
            for (s,p,o) in graph:
                print s,p,o
            sys.exit(1)

    out = sys.stdout
    to_rif(out, graph, doc, root=True)
Developer: mpetyx, Project: pyrif, Lines of code: 30, Source file: xtr.py

Example 6: test_history_turtle

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
    def test_history_turtle(self):
        with self.client as client:
            res = client.patch(
                '/d/',
                data=self.patch,
                content_type='application/json',
                headers={'Authorization': 'Bearer '
                         + 'NTAwNWViMTgtYmU2Yi00YWMwLWIwODQtMDQ0MzI4OWIzMzc4'})
            res = client.post(
                urlparse(res.headers['Location']).path + 'merge',
                buffered=True,
                headers={'Authorization': 'Bearer '
                         + 'ZjdjNjQ1ODQtMDc1MC00Y2I2LThjODEtMjkzMmY1ZGFhYmI4'})

        res1 = self.client.get('/history.ttl')
        self.assertEqual(res1.status_code, http.client.OK)
        self.assertEqual(res1.headers['Content-Type'], 'text/turtle')
        self.assertEqual(
            res1.headers['Cache-Control'],
            'public, max-age={}'.format(cache.SHORT_TIME))
        self.assertEqual(
            res1.headers['Content-Disposition'],
            'attachment; filename="periodo-history.ttl"')

        g = Graph()
        g.parse(data=res1.get_data(as_text=True), format='turtle')
        self.assertIn((HOST['h#patch-1'],
                       FOAF.page, HOST['patches/1/patch.jsonpatch']), g)
        self.assertIn((HOST['d'],
                       DCTERMS.provenance, HOST['h#changes']), g)

        res3 = self.client.get('/history.ttl/')
        self.assertEqual(res3.status_code, http.client.NOT_FOUND)
Developer: periodo, Project: periodo-server, Lines of code: 35, Source file: test_representation.py

Example 7: fine_unique_predicates

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def fine_unique_predicates():
    unique_predicates = []

    for file in glob.glob('*'):
        if '.py' not in file:

            if '.ttl' in file:
                g = Graph()
                g.parse(file, format='n3')
            else:
                g = Graph()
                g.parse(file)

            for stmt in g:
                pprint.pprint(stmt[1])
                if str(stmt[1]) not in unique_predicates:
                    unique_predicates.append(str(stmt[1]))

    print(unique_predicates)

    for x in unique_predicates:
        print(x)
Developer: thisismattmiller, Project: triple-builder, Lines of code: 29, Source file: extract_data.py

Example 8: get_all_sells

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def get_all_sells():
    # [0] = url / [1] = [{producte}] / [2] = precio_total
    global compres
    compres = []

    biggest_sell = 0
    counts = []

    graph_compres = Graph()
    graph_compres.parse(open('../data/compres'), format='turtle')

    for compraUrl in graph_compres.subjects(RDF.type, ECSDI.Compra):
        sell_count = 0
        single_sell = [compraUrl]
        products = []
        for productUrl in graph_compres.objects(subject=compraUrl, predicate=ECSDI.Productos):
            sell_count += 1
            products.append(graph_compres.value(subject=productUrl, predicate=ECSDI.Nombre))
        single_sell.append(products)
        for precio_total in graph_compres.objects(subject=compraUrl, predicate=ECSDI.Precio_total):
            single_sell.append(precio_total)
        compres.append(single_sell)
        counts.append(sell_count)
        if sell_count > biggest_sell:
            biggest_sell = sell_count

    return biggest_sell, counts
Developer: casassg, Project: ecsdi-amazon, Lines of code: 29, Source file: UserPersonalAgent.py

Example 9: __load_owl

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def __load_owl(owl):
    """

    :param owl:
    :return:
    """
    owl_g = Graph()
    for f in ['turtle', 'xml']:
        try:
            owl_g.parse(source=StringIO.StringIO(owl), format=f)
            break
        except SyntaxError:
            pass

    if not len(owl_g):
        raise VocabularyException()

    try:
        uri = list(owl_g.subjects(RDF.type, OWL.Ontology)).pop()
        vid = [p for (p, u) in owl_g.namespaces() if uri in u and p != '']
        imports = owl_g.objects(uri, OWL.imports)
        if not len(vid):
            vid = urlparse.urlparse(uri).path.split('/')[-1]
        else:
            vid = vid.pop()

        return vid, uri, owl_g, imports
    except IndexError:
        raise VocabularyNotFound()
Developer: SmartDeveloperHub, Project: agora-fountain, Lines of code: 31, Source file: onto.py
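
A side note on Example 9: when the vocabulary comes from a file path rather than a raw string, the try/except loop over formats can be avoided by letting rdflib guess the format from the file name. A minimal sketch under that assumption (the file name is hypothetical):

from rdflib import Graph
from rdflib.util import guess_format

path = "ontology.owl"                                   # hypothetical file name
owl_g = Graph()
owl_g.parse(path, format=guess_format(path) or "xml")   # fall back to RDF/XML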

Example 10: notify

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def notify(uri):
    g = Graph()
    g.add((URIRef(uri), RDF.type, URIRef('http://www.bbc.co.uk/search/schema/ContentItem')))
    g.add((URIRef(uri), URIRef('http://www.bbc.co.uk/search/schema/url'), Literal(uri)))
    g.parse(uri)

    return g.serialize(format='nt').decode('utf-8')
Developer: avengerpenguin, Project: linked-data-search, Lines of code: 9, Source file: tasks.py

Example 11: getPreviousGraph

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def getPreviousGraph(sources):
    graph = Graph()
    parse_errors = rdflib.Graph()
    for s in sources:
        graph.parse(s, format='rdfa', pgraph=parse_errors)
    
    return graph
Developer: BioSchemas, Project: schemaorg, Lines of code: 9, Source file: compareterms.py

Example 12: ingest

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def ingest(ntriples):

    graph = Graph()
    graph.parse(data=ntriples, format='nt')

    expanded = jsonld.expand(
        json.loads(
            graph.serialize(format='json-ld').decode('utf-8')))

    mandatory_props = [
        'http://www.bbc.co.uk/search/schema/title',
        'http://www.bbc.co.uk/search/schema/url'
    ]

    for json_object in expanded:

        uri = json_object['@id']

        valid = True

        for prop in mandatory_props:
            if prop not in json_object:
                logging.warning(
                    "Not indexing %s due to missing property: %s", uri, prop)
                valid = False

        if valid:
            es.index(index='bbc',
                     body=jsonld.expand(json_object)[0],
                     doc_type='item',
                     id=uri)
Developer: avengerpenguin, Project: linked-data-search, Lines of code: 33, Source file: tasks.py

Example 13: turtle

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def turtle(test):
    g = Graph()

    try:
        base = 'http://www.w3.org/2013/TurtleTests/'+split_uri(test.action)[1]

        g.parse(test.action, publicID=base, format='turtle')
        if not test.syntax:
            raise AssertionError("Input shouldn't have parsed!")

        if test.result: # eval test
            res = Graph()
            res.parse(test.result, format='nt')

            if verbose:
                both, first, second = graph_diff(g,res)
                if not first and not second: return
                print "Diff:"
                #print "%d triples in both"%len(both)
                print "Turtle Only:"
                for t in first:
                    print t

                print "--------------------"
                print "NT Only"
                for t in second:
                    print t
                raise Exception('Graphs do not match!')

            assert isomorphic(g, res), 'graphs must be the same'


    except:
        if test.syntax:
            raise
Developer: Dataliberate, Project: rdflib, Lines of code: 37, Source file: test_turtle_w3c.py
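
Example 13 relies on rdflib.compare for graph comparison. A small, self-contained sketch of those helpers in isolation (the two one-triple graphs are made up for illustration):

from rdflib import Graph
from rdflib.compare import graph_diff, isomorphic

g1 = Graph().parse(data='<http://example.org/a> <http://example.org/p> "1" .', format='nt')
g2 = Graph().parse(data='<http://example.org/a> <http://example.org/p> "2" .', format='nt')

print(isomorphic(g1, g2))                    # False: the literal objects differ
in_both, only_in_g1, only_in_g2 = graph_diff(g1, g2)
for t in only_in_g1:
    print('only in g1:', t)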

Example 14: test__dataset__add_proxy_resource_uris_to_graph

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def test__dataset__add_proxy_resource_uris_to_graph():
    sample_url = "http://purl.org/dc/elements/1.1/subject"
    ds = DataSet.objects.create(**ds_fields)
    assert ds

    proxy_field = ds.generate_proxyfield_uri("Sjoerd, Siebinga", language="nl")
    assert proxy_field.endswith("/resource/dataset/afrikamuseum/nl/Sjoerd,_Siebinga")
    proxy_field = ds.generate_proxyfield_uri("Sjoerd, Siebinga", language=None)
    assert proxy_field.endswith("/resource/dataset/afrikamuseum/Sjoerd,_Siebinga")

    graph = Graph(identifier="http://acc.dcn.delving.org/resource/aggregation/afrikamuseum/100-1")
    graph.namespace_manager = lod.namespace_manager
    graph.parse(data=test_graph, format='xml')
    assert graph
    assert len(list(graph.objects(predicate=URIRef(sample_url)))) == 3
    assert all(isinstance(obj, Literal) for obj in graph.objects(predicate=URIRef(sample_url)))

    new_graph, converted_literals = ds.update_graph_with_proxy_field(graph, sample_url)
    assert new_graph
    assert converted_literals
    assert all(isinstance(obj, URIRef) for obj in new_graph.objects(predicate=URIRef(sample_url)))

    assert len(converted_literals) == 3
    coined_uri, obj = sorted(converted_literals)[0]
    assert coined_uri.endswith('/resource/dataset/afrikamuseum/beelden')
    assert obj.value == 'beelden'
Developer: delving, Project: nave, Lines of code: 28, Source file: test_models.py

Example 15: ext_json

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import parse [as alias]
def ext_json():
    rdfUrl = ''
    tok = Tokenizer()
    if request.method == 'POST':
        rdf = request.form['data']
        status_test = "0"#request.form['status']
        filters = ""#request.form['exculdeurls']
        #rdf = "http://jpp.no-ip.org/MAD_J.rdf"
        try:
            #r = requests.get(rdf)
            gg = Graph()
            #g.load(rdfUrl)
            rdf_content = StringIO.StringIO(rdf.encode('utf-8'))
            #print rdf_content.readline()
            gg.parse(rdf_content,  format="xml")
            ext = Extractor(gg)
            uris = ext.getUris()
            mapping = MapFactory()
            for uri in uris:
                term = tok.tokenized_url(uri)
                uri_status = ""
                if status_test == "1":
                    uri_status = ext.testUri(uri)
                else:
                    uri_status = "N/A"  
                uri_lookup = str(uri)+"\"" 
                lnum = ext.get_lines(rdf_content, uri_lookup)          
                ent = MapEntry(uri, term, "", lnum, uri_status)
                mapping.add(ent)
            jsonized_result = json.dumps(mapping.get())              
            return Response(jsonized_result, mimetype='application/json')
        except requests.exceptions.ConnectionError:
            X2Rwarning = 'X2R Warning: The requested URL raises ConnectionError~!!!'
            return X2Rwarning
Developer: FengPu, Project: x2r-me, Lines of code: 36, Source file: x2r-me.py


Note: The rdflib.Graph.parse examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the License of the corresponding project before distributing or using the code; do not reproduce without permission.