

Python Reporter.add_sentence method code examples

This article collects typical code examples of the Python method reporter.Reporter.add_sentence. If you are wondering what exactly Reporter.add_sentence does, how to call it, or what it looks like in practice, the hand-picked examples below may help. You can also explore further usage examples of reporter.Reporter, the class the method belongs to.


Six code examples of Reporter.add_sentence are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
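Before looking at the individual examples, the sketch below illustrates the call pattern they all share: a Reporter is created with an optional printing flag and a message prefix, new_article() starts a new block of messages, add_sentence() appends one line, and write_file() dumps the collected report. The Reporter class here is a hypothetical, minimal stand-in written only to make the sketch self-contained; the real reporter.Reporter from the opencitations project offers more functionality.

# A hypothetical, minimal stand-in for reporter.Reporter, written only to make
# this sketch self-contained; it mirrors the call pattern used in the examples
# below but is not the real opencitations implementation.
class Reporter(object):
    def __init__(self, print_sentences=False, prefix=""):
        self.print_sentences = print_sentences
        self.prefix = prefix
        self.articles = []  # each "article" is a separate block of sentences

    def new_article(self):
        # Start a new block of report sentences
        self.articles.append([])

    def add_sentence(self, sentence, print_it=True):
        line = self.prefix + sentence
        self.articles[-1].append(line)
        if self.print_sentences and print_it:
            print(line)

    def is_empty(self):
        return all(len(article) == 0 for article in self.articles)

    def write_file(self, file_path):
        with open(file_path, "w") as f:
            for article in self.articles:
                f.write("\n".join(article) + "\n")

# Typical usage, as seen in the examples below
repok = Reporter(True, prefix="[demo: INFO] ")
repok.new_article()
repok.add_sentence("Processing '%s'" % "example.json")
repok.write_file("demo_report.txt")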

Example 1: Checker

# Required import: from reporter import Reporter [as alias]
# Or: from reporter.Reporter import add_sentence [as alias]
class Checker(object):
    def __init__(self, input_dir, output_dir=None, tmp_dir=None):
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.tmp_dir = tmp_dir
        self.storer = Storer()
        self.name = self.__class__.__name__
        self.repok = Reporter(prefix="[%s - INFO] " % self.name)
        self.repok.new_article()
        self.reper = Reporter(prefix="[%s - ERROR] " % self.name)
        self.reper.new_article()

    def process(self):
        for cur_dir, cur_subdir, cur_files in os.walk(self.input_dir):
            for cur_file in cur_files:
                self.repok.new_article()
                self.reper.new_article()
                cur_rdf_path = cur_dir + os.sep + cur_file
                try:
                    self.repok.add_sentence("Processing '%s'" % cur_rdf_path)
                    g = self.storer.load(cur_rdf_path, tmp_dir=self.tmp_dir)
                    if self.output_dir is None:
                        self.repok.add_sentence("The RDF graph has been converted in TRIG as follows:\n%s"
                                                % g.serialize(format="trig"))
                    else:
                        if not os.path.exists(self.output_dir):
                            os.makedirs(self.output_dir)
                        output_file = self.output_dir + os.sep + "converted_" + cur_file + ".ttl"
                        self.repok.add_sentence("The RDF graph has been stored in %s"
                                                % (output_file, g.serialize(output_file, format="trig")))
                except Exception:
                    self.reper.add_sentence("The file '%s' doesn't contain RDF statements" % cur_rdf_path, False)
Developer: essepuntato, Project: opencitations, Lines: 34, Source: checker.py
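A hypothetical way to invoke the Checker class above; the directory names are placeholders, and the reporter and storer modules from the opencitations project must be importable for this to run.

# Placeholder paths; the opencitations reporter/storer modules are assumed to
# be on the Python path.
checker = Checker("rdf_input", output_dir="rdf_output", tmp_dir="tmp")
checker.process()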

Example 2: URIRef

# Required import: from reporter import Reporter [as alias]
# Or: from reporter.Reporter import add_sentence [as alias]
         last_res = res
         prov_entity = URIRef(res)
         
         if args.id:
             prov_g = load(res)
             prov_entity_g = get_entity_graph(res, prov_g)
             spec_entity = prov_entity_g.value(prov_entity, PROV.specializationOf)
             res_g = load(str(spec_entity))
             res_entity_g = get_entity_graph(spec_entity, res_g)
             for id_entity in [o for s, p, o in list(
                     res_entity_g.triples((spec_entity, DATACITE.hasIdentifier, None)))]:
                 rdf_dir, rdf_file_path = find_paths(
                     id_entity, args.base + os.sep, "https://w3id.org/oc/corpus/", 10000, 1000)
                 result.add(rdf_file_path)
         else:
             repok.add_sentence("Processing '%s'" % res)
             
             prov_g = load(res)
             spec_entity_iri = res.split("/prov/")[0]
             prov_entity_g = get_entity_graph(res, prov_g, True)
 
             generation_dates = [o for s, p, o in
                                 list(prov_entity_g.triples(
                                     (None, PROV.generatedAtTime, None)))]
             sources = [o for s, p, o in
                        list(prov_entity_g.triples((None, PROV.hadPrimarySource, None)))]
             
             # Get the creation dates and sources of all identifiers
             spec_entity = URIRef(spec_entity_iri)
             res_g = load(str(spec_entity))
             res_entity_g = get_entity_graph(spec_entity, res_g)
Developer: essepuntato, Project: opencitations, Lines: 33, Source: fix_prov_to_clashing_updates.py

Example 3: open

# Required import: from reporter import Reporter [as alias]
# Or: from reporter.Reporter import add_sentence [as alias]
                            help="The max number of resources a file can contain.")
    arg_parser.add_argument("-t", "--tmp_dir", dest="tmp_dir",
                            help="The directory for easing the RDF loading.")
    arg_parser.add_argument("-c", "--context", dest="context", required=True,
                            help="The JSON-LD context to use.")

    args = arg_parser.parse_args()

    with open(args.context) as f:
        context_json = json.load(f)

    if do_file_exist(args.input):
        res_count = 0
        dir_count = 0
        new_dir = None
        repok.add_sentence("Organize all files in directory each containing at "
                           "most %s resources" % args.dir_split)
        new_dirs = []
        new_files = []
        while True:
            res_count += 1
            if res_count > dir_count:
                dir_count += long(args.dir_split)
                new_dir = args.input + os.sep + "n_" + str(dir_count)
                new_dirs += [new_dir]
            src_dir = args.input + os.sep + str(res_count)
            dst_dir = new_dir + os.sep + str(res_count)
            src_file = src_dir + ".json"
            if os.path.exists(src_file):
                try:
                    if os.path.exists(src_dir):
                        os.renames(src_dir, dst_dir)
Developer: essepuntato, Project: opencitations, Lines: 34, Source: organize_files.py

Example 4: isinstance

# Required import: from reporter import Reporter [as alias]
# Or: from reporter.Reporter import add_sentence [as alias]
                        if "invalidated_by" in cur_graph:
                            cur_invalidated_by = cur_graph["invalidated_by"]
                            if isinstance(cur_invalidated_by, list):
                                se_generated_by += cur_invalidated_by
                            else:
                                se_generated_by += [cur_invalidated_by]
                        
                generated = sorted(list(set(generated)))
                se_generated_by = sorted(list(set(se_generated_by)))
                sen_string = item["iri"] + "se/1" + ",[%s]," % str(len(generated))
                        
                for ca_item in cur_ca["@graph"]:
                    found = False
                    for cur_ca_graph in ca_item["@graph"]:
                        if cur_ca_graph["iri"] in se_generated_by:
                            found = True
                            all_descs = cur_ca_graph["description"]
                            descs = all_descs if isinstance(all_descs, list) else [all_descs]
                            for desc in descs:
                                if "citation data and new identifiers" in desc:
                                    sen_string += "[CIT+ID]"
                                elif "citation data" in desc:
                                    sen_string += "[CIT]"
                                elif "new identifiers" in desc:
                                    sen_string += "[ID]"
                    if found:
                        rep.add_sentence(sen_string)
                        break

    rep.write_file(args.o_file)
Developer: essepuntato, Project: opencitations, Lines: 32, Source: find_prov_issues.py

Example 5: open

# Required import: from reporter import Reporter [as alias]
# Or: from reporter.Reporter import add_sentence [as alias]
    arg_parser.add_argument("-o", "--output", dest="output", required=True,
                            help="The output file.")
    arg_parser.add_argument("-t", "--tmp_dir", dest="tmp_dir",
                            help="The directory for easing the RDF loading.")
    arg_parser.add_argument("-c", "--context", dest="context", required=True,
                            help="The JSON-LD context to use.")

    args = arg_parser.parse_args()

    with open(args.context) as f:
        context_json = json.load(f)

    repok = Reporter(True, prefix="[create_nq.py: INFO] ")
    reperr = Reporter(True, prefix="[create_nq.py: ERROR] ")
    repok.new_article()
    reperr.new_article()

    for cur_dir, cur_subdir, cur_files in os.walk(args.input):
        with open(args.output, 'a') as f:
            for cur_file in cur_files:
                if cur_file.endswith(".json"):
                    cur_g = ConjunctiveGraph()
                    cur_g = load(cur_g, cur_dir + os.sep + cur_file, args.tmp_dir)
                    nt_strings = cur_g.serialize(format="nquads")
                    f.write(nt_strings)

    repok.add_sentence("Done.")
    if not reperr.is_empty():
        reperr.write_file("create_nq.rep.%s.err.txt" % (
            re.sub("_+", "_", re.sub("[\.%s/]" % os.sep, "_", args.input))))
Developer: essepuntato, Project: opencitations, Lines: 32, Source: create_nq.py
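The core step of this example is rdflib's N-Quads serialization of a ConjunctiveGraph. The snippet below is a minimal, self-contained illustration of that step; the example IRIs and the out.nq path are invented for the sketch and are not part of the original script.

from rdflib import ConjunctiveGraph, URIRef, Literal
from rdflib.namespace import RDFS

# Build a dataset with one named graph containing a single triple
cg = ConjunctiveGraph()
named_graph = cg.get_context(URIRef("http://example.org/graph/1"))
named_graph.add((URIRef("http://example.org/resource/1"),
                 RDFS.label,
                 Literal("An example resource")))

# Serialize every quad (subject, predicate, object, graph IRI) to N-Quads,
# which is what the script above does for each loaded JSON-LD file
cg.serialize(destination="out.nq", format="nquads")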

Example 6: Storer

# Required import: from reporter import Reporter [as alias]
# Or: from reporter.Reporter import add_sentence [as alias]
class Storer(object):

    def __init__(self, graph_set=None, repok=None, reperr=None,
                 context_map={}, dir_split=0, n_file_item=1):
        self.dir_split = dir_split
        self.n_file_item = n_file_item
        self.context_map = context_map
        for context_url in context_map:
            context_file_path = context_map[context_url]
            with open(context_file_path) as f:
                context_json = json.load(f)
                self.context_map[context_url] = context_json

        if graph_set is None:
            self.g = []
        else:
            self.g = graph_set.graphs()
        if repok is None:
            self.repok = Reporter(prefix="[Storer: INFO] ")
        else:
            self.repok = repok
        if reperr is None:
            self.reperr = Reporter(prefix="[Storer: ERROR] ")
        else:
            self.reperr = reperr
        self.preface_query = ""

    def store_all(self, base_dir, base_iri, context_path, tmp_dir=None, g_set=[], override=False):
        for g in g_set:
            self.g += [g]

        self.repok.new_article()
        self.reperr.new_article()

        self.repok.add_sentence("Starting the process")

        processed_graphs = {}
        for cur_g in self.g:
            processed_graphs = self.store(cur_g, base_dir, base_iri, context_path, tmp_dir,
                                          override, processed_graphs, False)

        stored_graph_path = []
        for cur_file_path in processed_graphs:
            stored_graph_path += [cur_file_path]
            self.__store_in_file(processed_graphs[cur_file_path], cur_file_path, context_path)

        return stored_graph_path

    def upload_and_store(self, base_dir, triplestore_url, base_iri, context_path,
                         tmp_dir=None, g_set=[], override=False):

        stored_graph_path = self.store_all(base_dir, base_iri, context_path, tmp_dir, g_set, override)

        # If some graphs were not stored properly, none of them will be uploaded to the
        # triplestore, but we highlight those that could have been added in principle by
        # marking them with a ".notuploaded" file
        if None in stored_graph_path:
            for file_path in stored_graph_path:
                # Create a marker for the file not uploaded in the triplestore
                open("%s.notuploaded" % file_path, "w").close()
                self.reperr.add_sentence("[6] "
                                         "The statements of in the JSON-LD file '%s' were not "
                                         "uploaded into the triplestore." % file_path)
        else:  # All the files have been stored
            self.upload_all(self.g, triplestore_url, base_dir)

    def __query(self, query_string, triplestore_url, n_statements=None, base_dir=None):
        if query_string != "":
            try:
                tp = SPARQLWrapper(triplestore_url)
                tp.setMethod('POST')
                tp.setQuery(query_string)
                tp.query()

                if n_statements is None:
                    self.repok.add_sentence(
                        "Triplestore updated by means of a SPARQL Update query.")
                else:
                    self.repok.add_sentence(
                        "Triplestore updated with %s more RDF statements." % n_statements)

                return True

            except Exception as e:
                self.reperr.add_sentence("[1] "
                                         "Graph was not loaded into the "
                                         "triplestore due to communication problems: %s" % str(e))
                if base_dir is not None:
                    tp_err_dir = base_dir + os.sep + "tp_err"
                    if not os.path.exists(tp_err_dir):
                        os.makedirs(tp_err_dir)
                    cur_file_err = tp_err_dir + os.sep + \
                                   datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f_not_uploaded.txt')
                    with io.open(cur_file_err, "w", encoding="utf-8") as f:
                        f.write(query_string)

        return False

    def upload_all(self, all_g, triplestore_url, base_dir):
        result = True
#......... the rest of the code is omitted here .........
Developer: essepuntato, Project: opencitations, Lines: 103, Source: storer.py
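A hypothetical call to Storer.store_all, assuming the complete storer.py from opencitations is available (the fragment above omits the private __store_in_file, store and upload_all methods); the paths and IRIs are placeholders.

from rdflib import Graph, URIRef, Literal
from rdflib.namespace import RDFS

# A small named graph to store; the IRIs are placeholders
g = Graph(identifier=URIRef("https://w3id.org/oc/corpus/br/"))
g.add((URIRef("https://w3id.org/oc/corpus/br/1"),
       RDFS.label,
       Literal("A bibliographic resource")))

# The default Reporter instances handle logging; the full Storer class
# (not just the fragment shown above) is assumed to be importable.
storer = Storer(dir_split=10000, n_file_item=1000)
stored_paths = storer.store_all("corpus", "https://w3id.org/oc/corpus/",
                                "context.json", tmp_dir="tmp", g_set=[g])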


Note: The reporter.Reporter.add_sentence method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.