

Python Graph.qname Method Code Examples

This article collects typical usage examples of the Python method rdflib.Graph.qname. If you are unsure what Graph.qname does, how to call it, or what real-world usage looks like, the hand-picked examples below should help. You can also explore further usage examples of rdflib.Graph, the class this method belongs to.


The following presents 5 code examples of Graph.qname, ordered by popularity.
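
Before the full examples, here is a minimal standalone sketch (not taken from any of the projects below) of what Graph.qname does: given a full URI, it returns the prefixed name ("qname") derived from the namespace bindings registered on the graph. The dct prefix and the URI used here are purely illustrative.

# Minimal sketch of Graph.qname usage (illustrative prefix and URI)
from rdflib import Graph, URIRef

g = Graph()
g.bind("dct", "http://purl.org/dc/terms/")      # register prefix -> namespace URI

uri = URIRef("http://purl.org/dc/terms/title")
print(g.qname(uri))                             # prints "dct:title"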

Example 1: add_entry

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import qname [as alias]
    def add_entry(self,rdf_url,pdf_url):
        g = Graph()
        g.bind('dct','http://purl.org/dc/terms/')
        g.bind('rpubl','http://rinfo.lagrummet.se/ns/2008/11/rinfo/publ#')
        g.parse(urlopen(rdf_url))

        # first get type
        for (s,p,o) in g:
            if p == RDF.type:
                if o == RPUBL['Myndighetsforeskrift']:
                    doctype = 'fs_doc_myndighetsforeskrift'
                    table =  'fs_doc_fsdokument'
                    targetdir = 'foreskrift'
                elif o == RPUBL['AllmannaRad']:
                    doctype = 'fs_doc_allmannarad'
                    table =  'fs_doc_fsdokument'
                    targetdir = 'allmanna_rad'
                elif o == RPUBL['KonsolideradGrundforfattning']:
                    doctype = 'fs_doc_konsolideradforeskrift'
                    table =  'fs_doc_konsolideradforeskrift'
                    targetdir = 'konsoliderad_foreskrift'
                else:
                    sys.stderr.write("Can't handle type %s\n" % o)

                docid = len(self.data[table]) + 1
                self.current_document['id'] = str(docid)
                self.current_document['is_published'] = '0'
                if table == 'fs_doc_fsdokument':
                    self.current_subdocument['fsdokument_ptr_id'] = str(docid)

        # then iterate through other properties, dynamically
        # calling appropriate functions to massage data and put it
        # where it belongs.
        for (s,p,o) in g:
            funcname = g.qname(p).replace(":","_")
            #if funcname in globals():
            if hasattr(self,funcname):
                #sys.stderr.write("    Calling self.%s\n" % funcname)
                f = getattr(self,funcname)
                #globals()[funcname](o,doctype)
                f(o,doctype)
            else:
                sys.stderr.write("  Cant handle predicate %s\n" % funcname.replace("_",":"))

        # check for required fields:
        d = self.current_document
        sub_d = self.current_subdocument

        for fld in ('arsutgava','lopnummer','forfattningssamling_id'):
            assert fld in d

        # Create filename base, eg "FFFS-2011-42"
        fs = self.data['fs_doc_forfattningssamling'][int(d['forfattningssamling_id'])-1]
        basefile = "%s-%s-%s" % (fs['kortnamn'],d['arsutgava'], d['lopnummer'])

        if not os.path.exists(targetdir):
            os.makedirs(targetdir)
        outfile = "%s/%s.pdf" % (targetdir,basefile)
        urlretrieve(pdf_url,outfile)
        sub_d['content'] = outfile

        md5 = hashlib.md5()
        with open(outfile,'rb') as f: 
            for chunk in iter(lambda: f.read(8192), ''): 
                md5.update(chunk)
        d['content_md5'] = md5.hexdigest()

        # Make sure all other fields have some sort of data
        if not 'sammanfattning' in d:
            d['sammanfattning'] = ""
        if not 'omtryck' in d:
            d['omtryck'] = '0'
        if not 'beslutsdatum' in d:
            d['beslutsdatum'] = "%s-12-31"%d['arsutgava']
            sys.stderr.write("  WARNING: No beslutsdatum found, setting to %s\n"%d['beslutsdatum'])

        if not 'utkom_fran_tryck' in d:
            d['utkom_fran_tryck'] = "%s-12-31"%d['arsutgava']
            sys.stderr.write("  WARNING: No utkom_fran_tryck found, setting to %s\n" % d['utkom_fran_tryck'])

        if not 'ikrafttradandedatum' in d:
            sys.stderr.write("  WARNING: No ikrafttradandedatum found, setting to beslutsdatum\n")
            d['ikrafttradandedatum'] = d['beslutsdatum']
        if not 'titel' in d:
            d['titel'] = "%s %s:%s" % (fs['kortnamn'], d['arsutgava'], d['lopnummer'])
            sys.stderr.write("  WARNING: No titel found, setting to %s\n" % d['titel'])

        if not 'beslutad_av_id' in sub_d:
            sys.stderr.write("  WARNING: no beslutad_av found, setting to 1\n")
            sub_d['beslutad_av_id'] = '1'
        if not 'utgivare_id' in sub_d:
            sys.stderr.write("  WARNING: no utgivare found, setting to beslutad_av\n")
            sub_d['utgivare_id'] = sub_d['beslutad_av_id']

        # Finally, add clones of the global dicts to the
        # appropriate place in data, and then clear them for
        # recycling
        self.data[table].append(d.copy())
        d.clear()
        if table=='fs_doc_fsdokument':
# ......... (remainder of this code omitted) .........
Developer: kamidev | Project: autobuild_fst | Lines: 103 | Source file: importfeed.py

Example 2: loaddata

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import qname [as alias]
def loaddata(directory):
    data["fs_doc_fsdokument"] = []
    data["fs_doc_myndighetsforeskrift"] = []
    data["fs_doc_konsolideradforeskrift"] = []
    data["fs_doc_allmannarad"] = []
    data["fs_doc_myndighet"] = []
    data["fs_doc_forfattningssamling"] = []
    data["fs_doc_bemyndigandereferens"] = []
    data["fs_doc_myndighetsforeskrift_bemyndiganden"] = []
    data["fs_doc_myndighetsforeskrift_upphavningar"] = []
    data["fs_doc_myndighetsforeskrift_andringar"] = []

    years = [x for x in os.listdir(directory + "/distilled") if x.isdigit()]
    for year in sorted(years):
        rdffiles = [x for x in os.listdir(directory + "/distilled/" + year) if x.endswith(".rdf")]
        for rdffile in sorted(rdffiles, key=lambda x: int(x.split(".")[0])):
            sys.stderr.write("loading %s/%s\n" % (year, rdffile))
            g = Graph()
            g.bind("dct", "http://purl.org/dc/terms/")
            g.bind("rpubl", "http://rinfo.lagrummet.se/ns/2008/11/rinfo/publ#")
            g.parse(directory + "/distilled/" + year + "/" + rdffile)

            # first get type
            for (s, p, o) in g:
                if p == RDF.type:
                    if o == RPUBL["MyndighetsForeskrift"]:
                        doctype = "fs_doc_myndighetsforeskrift"
                        table = "fs_doc_fsdokument"
                        targetdir = "foreskrift"
                    elif o == RPUBL["AllmannaRad"]:
                        doctype = "fs_doc_allmannarad"
                        table = "fs_doc_fsdokument"
                        targetdir = "allmanna_rad"
                    elif o == RPUBL["KonsolideradGrundforfattning"]:
                        doctype = "fs_doc_konsolideradforeskrift"
                        table = "fs_doc_konsolideradforeskrift"
                        targetdir = "konsoliderad_foreskrift"
                    else:
                        sys.stderr.write("Can't handle type %s\n" % o)

                    docid = len(data[table]) + 1
                    current_document["id"] = str(docid)
                    current_document["is_published"] = "0"
                    if table == "fs_doc_fsdokument":
                        current_subdocument["fsdokument_ptr_id"] = str(docid)

            # then iterate through other properties, dynamically
            # calling appropriate functions to massage data and put it
            # where it belongs.
            for (s, p, o) in g:
                funcname = g.qname(p).replace(":", "_")
                if funcname in globals():
                    sys.stderr.write("    Calling %s\n" % funcname)
                    globals()[funcname](o, doctype)
                else:
                    sys.stderr.write("  Cant handle predicate %s\n" % funcname.replace("_", ":"))

            # check for required fields:
            d = current_document
            sub_d = current_subdocument

            for fld in ("arsutgava", "lopnummer", "forfattningssamling_id"):
                assert fld in d

            # Move PDF files to their correct place and complement metadata
            pdffile = directory + "/downloaded/" + year + "/" + rdffile.replace(".rdf", ".pdf")
            # Create filename base, eg "FFFS-2011-42"
            fs = data["fs_doc_forfattningssamling"][int(d["forfattningssamling_id"]) - 1]
            basefile = "%s-%s-%s" % (fs["kortnamn"], d["arsutgava"], d["lopnummer"])

            if not os.path.exists(targetdir):
                os.makedirs(targetdir)
            outfile = "%s/%s.pdf" % (targetdir, basefile)
            shutil.copy2(pdffile, outfile)
            sub_d["content"] = outfile

            md5 = hashlib.md5()
            with open(outfile, "rb") as f:
                for chunk in iter(lambda: f.read(8192), ""):
                    md5.update(chunk)
            d["content_md5"] = md5.hexdigest()

            # Make sure all other fields have some sort of data
            if "sammanfattning" not in d:
                d["sammanfattning"] = ""
            if "omtryck" not in d:
                d["omtryck"] = "0"
            if "beslutsdatum" not in d:
                d["beslutsdatum"] = "%s-12-31" % d["arsutgava"]
                sys.stderr.write("  WARNING: No beslutsdatum found, setting to %s\n" % d["beslutsdatum"])

            if "utkom_fran_tryck" not in d:
                d["utkom_fran_tryck"] = "%s-12-31" % d["arsutgava"]
                sys.stderr.write("  WARNING: No utkom_fran_tryck found, setting to %s\n" % d["utkom_fran_tryck"])

            if "ikrafttradandedatum" not in d:
                sys.stderr.write("  WARNING: No ikrafttradandedatum found, setting to beslutsdatum\n")
                d["ikrafttradandedatum"] = d["beslutsdatum"]
            if "titel" not in d:
                d["titel"] = "%s %s:%s" % (fs["kortnamn"], d["arsutgava"], d["lopnummer"])
# ......... (remainder of this code omitted) .........
Developer: rinfo | Project: fst | Lines: 103 | Source file: import.py

Example 3: str

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import qname [as alias]
        for event in el:
            status = None
            if event[0][0] == 144:
                status = "NoteOnEvent"
            elif event[0][0] == 128:
                status = "NoteOffEvent"
            else:
                print "BIG ERROR, unexpected event type {}".format(event[0][0])
            pitch = event[0][1]
            velocity = event[0][2]
            channel = event[0][3]
            timestamp = event[1]
            #print status, pitch, velocity, channel, timestamp
            # Creating triples!
            track_id = uuid.uuid4()
            event = m['track' + str(track_id) + '/event' + str(uuid.uuid4())]
            g.add((event, RDF.type, mid[status]))
            g.add((event, mid.tick, Literal(timestamp)))
            g.add((event, mid.channel, Literal(channel)))
            g.add((event, mid.pitch, Literal(pitch)))
            g.add((event, mid.velocity, Literal(velocity)))
            for s,p,o in g.triples((None, None, None)):
                print g.qname(s),g.qname(p),o,'.'
            g = Graph()



    # wait 10ms - this is arbitrary, but wait(0) still resulted
    # in 100% cpu utilization
    pygame.time.wait(10)
Developer: albertmeronyo | Project: midi2rdf | Lines: 32 | Source file: stream-midi-rdf.py

Example 4:

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import qname [as alias]
FSL_GAMMAHRF = FSL['FSL_0000007']
FSL_FSLS_GAMMA_HRF = FSL['FSL_0000006']
NIDM_HAS_MRI_PROTOCOL = NIDM['NIDM_0000172']
NIDM_NUMBER_OF_SUBJECTS = NIDM['NIDM_0000171']
NIDM_GROUP_NAME = NIDM['NIDM_0000170']
NIDM_DATA = NIDM['NIDM_0000169']
NIDM_SPM_RESULTS_NIDM = NIDM['NIDM_0000168']
NIDM_NIDMFSL = NIDM['NIDM_0000167']
NIDM_NIDM_RESULTS_EXPORT = NIDM['NIDM_0000166']
NIDM_NIDM_RESULTS_EXPORTER = NIDM['NIDM_0000165']
NIDM_NEUROIMAGING_ANALYSIS_SOFTWARE = NIDM['NIDM_0000164']
NIDM_CONTRAST_EXPLAINED_MEAN_SQUARE_MAP = NIDM['NIDM_0000163']
NIDM_THRESHOLD = NIDM['NIDM_0000162']
NIDM_EQUIVALENT_THRESHOLD = NIDM['NIDM_0000161']
NIDM_P_VALUE_UNCORRECTED = NIDM['NIDM_0000160']
NIDM_P_VALUE_UNCORRECTED_QNAME = q_graph.qname(NIDM_P_VALUE_UNCORRECTED)
NIDM_NOISE_FWHM_IN_VOXELS = NIDM['NIDM_0000159']
NIDM_NOISE_FWHM_IN_VERTICES = NIDM['NIDM_0000158']
NIDM_NOISE_FWHM_IN_UNITS = NIDM['NIDM_0000157']
FSL_FEAT_VERSION = FSL['FSL_0000005']
FSL_DRIFT_CUTOFF_PERIOD = FSL['FSL_0000004']
FSL_TEMPORAL_DERIVATIVE = FSL['FSL_0000003']
FSL_GAUSSIAN_RUNNING_LINE_DRIFT_MODEL = FSL['FSL_0000002']
FSL_FSLS_GAMMA_DIFFERENCE_HRF = FSL['FSL_0000001']
SPM_PARTIAL_CONJUNCTION_DEGREE = SPM['SPM_0000015']
SPM_SMALLEST_SUPRA_THRESHOLD_CLUSTER_SIZE_IN_VOXELS_FWE05 = SPM['SPM_0000014']
SPM_SMALLEST_SUPRA_THRESHOLD_CLUSTER_SIZE_IN_VOXELS_FDR05 = SPM['SPM_0000013']
SPM_SMALLEST_SUPRA_THRESHOLD_CLUSTER_SIZE_IN_VERTICES_FWE05 = SPM['SPM_0000012']
SPM_SMALLEST_SUPRA_THRESHOLD_CLUSTER_SIZE_IN_VERTICES_FDR05 = SPM['SPM_0000011']
SPM_SEARCH_VOLUME_RESELS_GEOMETRY = SPM['SPM_0000010']
SPM_TEMPORAL_DERIVATIVE = SPM['SPM_0000006']
Developer: cmaumet | Project: nidm | Lines: 33 | Source file: Constants.py

Example 5: toc

# Required import: from rdflib import Graph [as alias]
# Or: from rdflib.Graph import qname [as alias]
    def toc(self):
        """Creates a set of pages that together acts as a table of
        contents for all documents in the repository. For smaller
        repositories a single page might be enough, but for
        repositories with a few hundred documents or more, there will
        usually be one page for all documents starting with A,
        starting with B, and so on. There might be different ways of
        browsing/drilling down, i.e. both by title, publication year,
        keyword and so on."""

        # Step 1: Select a table that contains most of the interesting
        # info, eg:
        #
        # URI dct:title dct:issued dct:identifier
        #
        # and convert it to a list of dicts

        # GENERALIZE: Subclasses should be able to change the query by
        # implementing eg self.toc_query()
        sq = """PREFIX dct:<http://purl.org/dc/terms/>
                SELECT ?uri ?title ?id
                WHERE {?uri dct:title ?title .
                       ?uri dct:identifier ?id  }"""
        store = SesameStore(self.config['triplestore'],
                            self.config['repository'],
                            self.context())
        data = store.select(sq,"python")
        
        # Step 2: For each criterion (a criterion is a rdf predicate +
        # selector function like first_letter or year_part + sort
        # function) defined for the class:

        # GENERALIZE: criteria should be initialized from a list in
        # self.toc_categories. The list should be able to be very sparse,
        # like [self.ns['dct']['title'],self.ns['dct']['issued']], and
        # the initialization routine should add the appropriate
        # binding, label, selector and sorter (at least for standard
        # DCT predicates).
        criteria = ({'predicate':self.ns['dct']['title'],
                     'binding':'title', # must match sparql query
                     'label':'Sorted by title', # GENERALIZE: This string must be controllable/localizable
                     'selector':lambda x: x[0].lower(),
                     'sorter':cmp,
                     'pages': []},
                    {'predicate':self.ns['dct']['identifier'],
                     'binding':'id',
                     'label':'Sorted by identifier',
                     'selector':lambda x: x[0].lower(),
                     'sorter':cmp,
                     'pages': []})

        g = Graph()
        for qname in self.ns:
            g.bind(qname, self.ns[qname])
                
        for criterion in criteria:
        # 2.1 Create the list of possible values from the selector
        # function and...
            selector_values = {}
            selector = criterion['selector']
            binding = criterion['binding']
            qname = g.qname(criterion['predicate'])
            for row in data:
                selector_values[selector(row[binding])] = True
            
            # 2.1 cont: For each value:
            for value in sorted(selector_values.keys(),cmp=criterion['sorter']):
                # 2.1.1 Prepare a filename based on the rdf predicate and the selector
                #       func value, eg. toc/dct/title/a.xhtml
                tmpfile = os.path.sep.join((self.base_dir,
                                           self.module_dir,
                                           u'toc',
                                           qname.split(":")[0],
                                           qname.split(":")[1],
                                           value.lower()+u".xhtml"))

                # 2.1.2 Collate all selector func values into a list of dicts:
                # [{'label':'A','outfile':'toc/dct/title/a.xhtml',...},
                #  {'label':'B','outfile':'toc/dct/title/b.xhtml',...}]
                criterion['pages'].append({'label':value,
                                           # GENERALIZE: make localizable
                                           # (toc_page(predicate,value))
                                           'title':'Documents starting with "%s"' % value, 
                                           'tmpfile':tmpfile,
                                           'outfile':tmpfile.replace(".xhtml",".html")})
            selector_values = {}

        # 4: Now that we've created necessary base data for criterion,
        #    iterate through it again

        # GENERALIZE: from this point, criteria is fully loaded and
        # not necessarily structured around RDF predicates. Sources
        # with more specialized toc requirements (such as having each
        # possible dct:creator as a primary criterion, and years in
        # dct:issued as a secondary) can construct the criteria
        # structure themselves. Therefore, all code above should be a
        # call to toc_criteria() or maybe toc_navigation()
        for criterion in criteria:
            selector = criterion['selector']
            binding = criterion['binding']
# ......... (remainder of this code omitted) .........
Developer: staffanm | Project: legacy.lagen.nu | Lines: 103 | Source file: DocumentRepository.py


Note: The rdflib.Graph.qname examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are drawn from open-source projects and copyright remains with their original authors; consult each project's license before distributing or reusing the code. Please do not reproduce this article without permission.