本文整理汇总了Python中reporter.Reporter.write_file方法的典型用法代码示例。如果您正苦于以下问题：Python Reporter.write_file方法的具体用法？Python Reporter.write_file怎么用？Python Reporter.write_file使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类reporter.Reporter的用法示例。
在下文中一共展示了Reporter.write_file方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_update_query
# 需要导入模块: from reporter import Reporter [as 别名]
# 或者: from reporter.Reporter import write_file [as 别名]
update_query = create_update_query(
res_entity_g, spec_entity,
ids_per_date[generation_date] if generation_date in ids_per_date else [],
True)
else:
reperr.add_sentence("No type for '%s'" % res)
if description is not None:
cur_se = add_creation_info(new_prov, spec_entity, str(idx), generation_date,
str((idx - 1) * 2 + 1), description, "1", "2",
PROV.Create if idx == 1 else PROV.Modify)
if idx > 1:
add_modification_info(new_prov, spec_entity, str(idx),
generation_date, update_query)
se_source = get_source(
sources, sources_per_date, generation_date, str(cur_se))
if se_source is not None:
new_prov.add((cur_se, PROV.hadPrimarySource, se_source))
store(new_prov, str(spec_entity), args.dest_dir)
except Exception as e:
reperr.add_sentence("Last res: %s. %s" % (last_res, e))
if result:
for it in result:
print it
if not reperr.is_empty():
reperr.write_file("fix_prov_to_clashing_updates_%s_.rep.err.txt" %
datetime.now().strftime('%Y_%m_%dT%H_%M_%S'))
示例2: str
# Required import: from reporter import Reporter [as alias]
# Or: from reporter.Reporter import write_file [as alias]
# NOTE(review): excerpt from a longer script; indentation and the loop/try
# enclosing this code were stripped by the scraper -- the orphaned `else:`
# and the `break` below belong to constructs outside this view.
# Each resource is stored as <cur_dir>/<res_count>.json, optionally with a
# sibling directory <cur_dir>/<res_count>/ holding its provenance files.
base_dir = cur_dir + os.sep + str(res_count)
cur_file = base_dir + ".json"
if os.path.exists(cur_file):
# Load resource data
cur_g = load(cur_g, cur_file, args.tmp_dir)
file_done += [cur_file]
if os.path.exists(base_dir):
dir_done += [base_dir]
# Load provenance data
# Provenance lives in numbered JSON files under prov/se, prov/ca, prov/cr
# (snapshot entities, curatorial activities, and "cr" records respectively
# -- presumably; verify against the project's provenance layout).
base_prov_dir = base_dir + os.sep + "prov" + os.sep
for cur_prov_file in \
glob.iglob(base_prov_dir + "se" + os.sep + "[0-9]*.json"):
cur_prov_se = load(cur_prov_se, cur_prov_file, args.tmp_dir)
for cur_prov_file in \
glob.iglob(base_prov_dir + "ca" + os.sep + "[0-9]*.json"):
cur_prov_ca = load(cur_prov_ca, cur_prov_file, args.tmp_dir)
for cur_prov_file in \
glob.iglob(base_prov_dir + "cr" + os.sep + "[0-9]*.json"):
cur_prov_cr = load(cur_prov_cr, cur_prov_file, args.tmp_dir)
else:
# Store the last graph and process another directory
store(cur_g, [cur_prov_se, cur_prov_ca, cur_prov_cr],
new_file, new_prov_files, file_done, dir_done)
break
# repok.write_file("organize_files.rep.ok.txt")
# Error-report file name embeds the input path with separators mapped to "_".
reperr.write_file("organize_files.rep.%s.err.txt" % re.sub("[\.%s/]" % os.sep, "_", args.input))
示例3: load
# Required import: from reporter import Reporter [as alias]
# Or: from reporter.Reporter import write_file [as alias]
# NOTE(review): excerpt from a longer script; original indentation was
# stripped by the scraper, so the nesting shown here is flattened.
# For each provenance graph file, count distinct subjects per local line id
# and write the counts out as a sparse, line-indexed text file.
if not os.path.exists(final_dir):
os.makedirs(final_dir)
for cur_file in [cur_se_path, cur_ca_path, cur_cr_path]:
cur_result = {}
cur_g = load(cur_file, args.tmp_dir)
# Strip the ".json" suffix ([:-5]) and rebase the path onto the output dir.
final_file = cur_file[:-5].replace(args.input, args.output + os.sep) + ".txt"
for res_g in cur_g.contexts():
cur_iri = res_g.identifier.replace("/prov/", "")
res_line = find_local_line_id(cur_iri, 1000)
cur_result[res_line] = len(set(res_g.subjects()))
# Sort by line number so the output file can be written sequentially.
ordered_result = collections.OrderedDict(sorted(cur_result.items()))
all_lines = []
for line_number in ordered_result:
value = ordered_result.get(line_number)
line_len = len(all_lines)
zero_line_number = line_number - 1
# Pad with blank lines up to line_number, then place the count at its
# (1-based) line, i.e. index line_number - 1.
for i in range(line_number):
if i >= line_len:
all_lines += ["\n"]
if i == zero_line_number:
all_lines[i] = str(value) + "\n"
# NOTE(review): writing str lines to a "wb" handle only works on Python 2
# (cf. `print it` elsewhere on this page); Python 3 would need "w" or bytes.
with open(final_file, "wb") as g:
g.writelines(all_lines)
if not reperr.is_empty():
# Persist the accumulated error report with a timestamped file name.
reperr.write_file("create_id_counter%s_.rep.err.txt" %
datetime.now().strftime('%Y_%m_%dT%H_%M_%S'))
示例4: load
# Required import: from reporter import Reporter [as alias]
# Or: from reporter.Reporter import write_file [as alias]
# NOTE(review): excerpt from a longer script; original indentation was
# stripped by the scraper, so the nesting shown here is flattened.
# Derive the resource JSON path from the curatorial-activity directory path:
# everything up to the ca_dir component, with ".json" appended.
res_file_path = re.sub("^(.+)/%s.*$" % ca_dir, "\\1.json", cur_dir)
cur_g = load(res_file_path)
new_update_data = create_update_query(
cur_g, invalidation_time, args.id_dir, int(args.dir_split))
# Rewrite the second activity (2.json) from a Create into a Modify.
# `.next()` is the Python-2 iterator protocol: take the first subject found.
ca2_file_path = cur_dir + os.sep + "2.json"
g_prov_ca_2 = load(ca2_file_path)
cur_ca_2 = g_prov_ca_2.subjects(None, None).next()
g_prov_ca_2.remove((cur_ca_2, RDF.type, PROV.Create))
g_prov_ca_2.add((cur_ca_2, RDF.type, PROV.Modify))
old_description = g_prov_ca_2.objects(cur_ca_2, DCTERMS.description).next()
# Rebuild the description from the update-query flags:
# new_update_data[1] -> citation data, new_update_data[2] -> new identifiers.
new_description = "extended with"
if new_update_data[1]:
new_description += " citation data"
# NOTE(review): `new_update_data[2]` is tested twice; the " and" connective
# probably belongs only when BOTH [1] and [2] are set -- the stripped
# indentation makes the intended nesting ambiguous; confirm against upstream.
if new_update_data[2]:
new_description += " and"
if new_update_data[2]:
new_description += " new identifiers"
# Replace whatever description was there with the rebuilt one.
g_prov_ca_2.remove((cur_ca_2, DCTERMS.description, None))
g_prov_ca_2.add((cur_ca_2, DCTERMS.description, Literal(
old_description
.replace("extended with citation data", new_description)
.replace("created", new_description))))
store(g_prov_ca_2, ca2_file_path)
# repok.write_file("fix_prov.rep.ok.txt")
reperr.write_file("fix_prov.rep.err.txt")
示例5: isinstance
# Required import: from reporter import Reporter [as alias]
# Or: from reporter.Reporter import write_file [as alias]
# NOTE(review): excerpt from a longer script; original indentation was
# stripped by the scraper, so the nesting shown here is flattened.
# Collect the activities that invalidated this snapshot together with those
# that generated it; the JSON-LD value may be a single item or a list.
if "invalidated_by" in cur_graph:
cur_invalidated_by = cur_graph["invalidated_by"]
if isinstance(cur_invalidated_by, list):
se_generated_by += cur_invalidated_by
else:
se_generated_by += [cur_invalidated_by]
# Deduplicate and order for a stable report line.
generated = sorted(list(set(generated)))
se_generated_by = sorted(list(set(se_generated_by)))
sen_string = item["iri"] + "se/1" + ",[%s]," % str(len(generated))
# Scan the curatorial activities; tag the report line with what each related
# activity's description says was modified (citations, identifiers, or both).
for ca_item in cur_ca["@graph"]:
found = False
for cur_ca_graph in ca_item["@graph"]:
if cur_ca_graph["iri"] in se_generated_by:
found = True
all_descs = cur_ca_graph["description"]
descs = all_descs if isinstance(all_descs, list) else [all_descs]
for desc in descs:
# Check the most specific phrase first so "citation data and new
# identifiers" is not misclassified as plain "citation data".
if "citation data and new identifiers" in desc:
sen_string += "[CIT+ID]"
elif "citation data" in desc:
sen_string += "[CIT]"
elif "new identifiers" in desc:
sen_string += "[ID]"
if found:
# One matching activity is enough; emit the line and stop scanning.
rep.add_sentence(sen_string)
break
rep.write_file(args.o_file)
示例6: open
# Required import: from reporter import Reporter [as alias]
# Or: from reporter.Reporter import write_file [as alias]
# NOTE(review): excerpt from create_nq.py; the argparse object is created
# above this excerpt, and the original indentation of the with/for nesting
# was stripped by the scraper.
arg_parser.add_argument("-o", "--output", dest="output", required=True,
help="The output file.")
arg_parser.add_argument("-t", "--tmp_dir", dest="tmp_dir",
help="The directory for easing the RDF loading.")
arg_parser.add_argument("-c", "--context", dest="context", required=True,
help="The JSON-LD context to use.")
args = arg_parser.parse_args()
# The JSON-LD context is loaded up front so every graph parses consistently.
with open(args.context) as f:
context_json = json.load(f)
# Two reporters: one for progress messages, one for errors.
repok = Reporter(True, prefix="[create_nq.py: INFO] ")
reperr = Reporter(True, prefix="[create_nq.py: ERROR] ")
repok.new_article()
reperr.new_article()
# Walk the input tree, serialise every JSON(-LD) graph file as N-Quads, and
# append everything to the single output file.
for cur_dir, cur_subdir, cur_files in os.walk(args.input):
with open(args.output, 'a') as f:
for cur_file in cur_files:
if cur_file.endswith(".json"):
cur_g = ConjunctiveGraph()
cur_g = load(cur_g, cur_dir + os.sep + cur_file, args.tmp_dir)
nt_strings = cur_g.serialize(format="nquads")
f.write(nt_strings)
repok.add_sentence("Done.")
if not reperr.is_empty():
# Error-report file name embeds the input path, separators mapped to "_".
reperr.write_file("create_nq.rep.%s.err.txt" % (
re.sub("_+", "_", re.sub("[\.%s/]" % os.sep, "_", args.input))))