本文整理汇总了Python中cms.db.FileCacher.FileCacher.put_file方法的典型用法代码示例。如果您正苦于以下问题:Python FileCacher.put_file方法的具体用法?Python FileCacher.put_file怎么用?Python FileCacher.put_file使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cms.db.FileCacher.FileCacher的用法示例。
在下文中一共展示了FileCacher.put_file方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import put_file [as 别名]
#.........这里部分代码省略.........
# NOTE(review): tail fragment of a contest-dump import routine; the
# enclosing method's header was elided by this listing and the original
# indentation was stripped during extraction, so only comments are added
# here -- the code is left byte-identical.
logger.info("Importing files.")
files_dir = os.path.join(self.import_dir, "files")
descr_dir = os.path.join(self.import_dir, "descriptions")
# Digests of stored blobs vs. digests that have a description file.
files = set(os.listdir(files_dir))
descr = set(os.listdir(descr_dir))
# Cross-check the dump's files/ and descriptions/ directories.
if not descr <= files:
logger.warning("Some files do not have an associated "
"description.")
if not files <= descr:
logger.warning("Some descriptions do not have an "
"associated file.")
if not (contest_files is None or files <= contest_files):
# FIXME Check if it's because this is a light import
# or because we're skipping submissions or user_tests
logger.warning("The dump contains some files that are "
"not needed by the contest.")
if not (contest_files is None or contest_files <= files):
# The reason for this could be that it was a light
# export that's not being reimported as such.
logger.warning("The contest needs some files that are "
"not contained in the dump.")
# Limit import to files we actually need.
if contest_files is not None:
files &= contest_files
# Push every needed file (with its description) into storage via
# safe_put_file; abort on the first failure.
for digest in files:
file_ = os.path.join(files_dir, digest)
desc = os.path.join(descr_dir, digest)
if not self.safe_put_file(file_, desc):
logger.critical("Unable to put file `%s' in the database. "
"Aborting. Please remove the contest "
"from the database." % file_)
# TODO: remove contest from the database.
return False
# NOTE(review): contest_id is joined with ", ", so here it is
# presumably an iterable of ids -- confirm against the elided header.
if contest_id is not None:
logger.info("Import finished (contest id: %s)." %
", ".join(str(id_) for id_ in contest_id))
else:
logger.info("Import finished.")
logger.operation = ""
# If we extracted an archive, we remove it.
if self.import_dir != self.import_source:
rmtree(self.import_dir)
return True
def import_object(self, data):
"""Import objects from the given data (without relationships).
The given data is assumed to be a dict in the format produced by
ContestExporter. This method reads the "_class" item and tries
to find the corresponding class. Then it loads all column
properties of that class (those that are present in the data)
and uses them as keyword arguments in a call to the class
constructor (if a required property is missing this call will
raise an error).
示例2: test_testcases
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import put_file [as 别名]
def test_testcases(base_dir, soluzione, assume=None):
global task, file_cacher
# Use a FileCacher with a NullBackend in order to avoid to fill
# the database with junk
if file_cacher is None:
file_cacher = FileCacher(null=True)
# Load the task
# TODO - This implies copying a lot of data to the FileCacher,
# which is annoying if you have to do it continuously; it would be
# better to use a persistent cache (although local, possibly
# filesystem-based instead of database-based) and somehow detect
# when the task has already been loaded
if task is None:
loader = YamlLoader(
os.path.realpath(os.path.join(base_dir, "..")),
file_cacher)
# Normally we should import the contest before, but YamlLoader
# accepts get_task() even without previous get_contest() calls
task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])
# Prepare the EvaluationJob
dataset = task.active_dataset
digest = file_cacher.put_file(
path=os.path.join(base_dir, soluzione),
description="Solution %s for task %s" % (soluzione, task.name))
executables = {task.name: Executable(filename=task.name, digest=digest)}
job = EvaluationJob(
task_type=dataset.task_type,
task_type_parameters=json.loads(dataset.task_type_parameters),
managers=dict(dataset.managers),
executables=executables,
testcases=dict((t.num, Testcase(t.input, t.output))
for t in dataset.testcases),
time_limit=dataset.time_limit,
memory_limit=dataset.memory_limit)
tasktype = get_task_type(job, file_cacher)
ask_again = True
last_status = "ok"
status = "ok"
stop = False
info = []
points = []
comments = []
for i in job.testcases.keys():
print i,
sys.stdout.flush()
# Skip the testcase if we decide to consider everything to
# timeout
if stop:
info.append("Time limit exceeded")
points.append(0.0)
comments.append("Timeout.")
continue
# Evaluate testcase
last_status = status
tasktype.evaluate_testcase(i)
# print job.evaluations[i]
status = job.evaluations[i]["plus"]["exit_status"]
info.append("Time: %5.3f Wall: %5.3f Memory: %s" %
(job.evaluations[i]["plus"]["execution_time"],
job.evaluations[i]["plus"]["execution_wall_clock_time"],
mem_human(job.evaluations[i]["plus"]["memory_used"])))
points.append(float(job.evaluations[i]["outcome"]))
comments.append(job.evaluations[i]["text"])
# If we saw two consecutive timeouts, ask wether we want to
# consider everything to timeout
if ask_again and status == "timeout" and last_status == "timeout":
print
print "Want to stop and consider everything to timeout? [y/N]",
if assume is not None:
print assume
tmp = assume
else:
tmp = raw_input().lower()
if tmp in ['y', 'yes']:
stop = True
else:
ask_again = False
# Result pretty printing
print
clen = max(len(c) for c in comments)
ilen = max(len(i) for i in info)
for i, (p, c, b) in enumerate(zip(points, comments, info)):
print "%3d) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen), b.center(ilen))
return zip(points, comments, info)
示例3: TestFileCacher
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import put_file [as 别名]
class TestFileCacher(TestService):
"""Service that performs automatically some tests for the
FileCacher service.
"""
def __init__(self, shard):
    """Set up the test service and the FileCacher under test.

    shard (int): shard number of this service instance.

    """
    logger.initialize(ServiceCoord("TestFileCacher", shard))
    TestService.__init__(self, shard, custom_logger=logger)

    # Assume we store the cache in "./cache/fs-cache-TestFileCacher-0/"
    self.cache_base_path = os.path.join(config.cache_dir,
                                        "fs-cache-TestFileCacher-0")
    # State shared between the numbered test_* steps.
    self.cache_path = None
    self.content = None
    self.fake_content = None
    self.digest = None
    self.file_obj = None
    self.file_cacher = FileCacher(self)
    #self.file_cacher = FileCacher(self, path="fs-storage")
def prepare(self):
    """Initialization for the test code - make sure that the cache
    is empty before testing.

    """
    # We only ask the operator to clean up: the directory is not
    # removed programmatically here.
    logger.info("Please delete directory %s before." %
                self.cache_base_path)
### TEST 000 ###
def test_000(self):
    """Send a ~100B random binary file to the storage through
    FileCacher as a file-like object. FC should cache the content
    locally.

    """
    self.size = 100
    self.content = "".join(chr(random.randint(0, 255))
                           for unused_i in xrange(self.size))

    logger.info(" I am sending the ~100B binary file to FileCacher")
    try:
        data = self.file_cacher.put_file(file_obj=StringIO(self.content),
                                         description="Test #000")
    except Exception as error:
        # test_end presumably aborts this step -- TODO confirm.
        self.test_end(False, "Error received: %r." % error)

    cached = os.path.join(self.cache_base_path, "objects", data)
    if not os.path.exists(cached):
        self.test_end(False, "File not stored in local cache.")
    else:
        # Read through a context manager so the handle is closed
        # (the original left an open file object dangling).
        with open(cached, "rb") as fobj:
            cached_content = fobj.read()
        if cached_content != self.content:
            self.test_end(False, "Local cache's content differ "
                          "from original file.")
        else:
            self.cache_path = cached
            self.digest = data
            self.test_end(True, "Data sent and cached without error.")
### TEST 001 ###
def test_001(self):
    """Retrieve the file.

    The local cached copy is first overwritten with fake content;
    receiving the fake content back proves the cache was used
    instead of the backend.

    """
    logger.info(" I am retrieving the ~100B binary file from FileCacher")
    self.fake_content = "Fake content.\n"
    with open(self.cache_path, "wb") as cached_file:
        cached_file.write(self.fake_content)

    try:
        data = self.file_cacher.get_file(digest=self.digest,
                                         temp_file_obj=True)
    except Exception as error:
        self.test_end(False, "Error received: %r." % error)

    received = data.read()
    data.close()
    if received != self.fake_content:
        if received == self.content:
            self.test_end(False,
                          "Did not use the cache even if it could.")
        else:
            self.test_end(False, "Content differ.")
    else:
        self.test_end(True, "Data object received correctly.")
### TEST 002 ###
def test_002(self):
"""Check the size of the file.
"""
logger.info(" I am checking the size of the ~100B binary file")
try:
size = self.file_cacher.get_size(self.digest)
except Exception as error:
self.test_end(False, "Error received: %r." % error)
#.........这里部分代码省略.........
示例4: __init__
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import put_file [as 别名]
#.........这里部分代码省略.........
# NOTE(review): tail fragment of another contest importer's import
# routine; the method header was elided by this listing and indentation
# was stripped.  Comments only; code untouched.
session.add(obj)
for _id in self.datas:
self.add_relationships(self.datas[_id], self.objs[_id])
# Mmh... kind of fragile interface
contest = self.objs["0"]
# Check that no files were missing (only if files were
# imported).
# NOTE(review): `if False and ...` permanently disables this check --
# looks like a debug leftover; confirm before re-enabling.  `files`
# comes from the elided part of the method.
if False and not self.no_files:
contest_files = contest.enumerate_files()
missing_files = contest_files.difference(files)
if len(missing_files) > 0:
logger.warning("Some files needed to the contest "
"are missing in the import directory.")
session.flush()
contest_id = contest.id
contest_files = contest.enumerate_files()
session.commit()
if not self.no_files:
logger.info("Importing files.")
files_dir = os.path.join(self.import_dir, "files")
descr_dir = os.path.join(self.import_dir, "descriptions")
for digest in contest_files:
file_ = os.path.join(files_dir, digest)
desc = os.path.join(descr_dir, digest)
# Missing files are logged but the import continues anyway.
if not os.path.exists(file_) or not os.path.exists(desc):
logger.error("Some files needed to the contest "
"are missing in the import directory. "
"The import will continue. Be aware.")
if not self.safe_put_file(file_, desc):
logger.critical("Unable to put file `%s' in the database. "
"Aborting. Please remove the contest "
"from the database." % file_)
# TODO: remove contest from the database.
return False
logger.info("Import finished (contest id: %s)." % contest_id)
logger.operation = ""
# If we extracted an archive, we remove it.
if self.import_dir != self.import_source:
shutil.rmtree(self.import_dir)
return True
# NOTE(review): body of import_object after the docstring was elided by
# the listing.
def import_object(self, data):
"""Import objects from the given data (without relationships)
The given data is assumed to be a dict in the format produced by
ContestExporter. This method reads the "_class" item and tries
to find the corresponding class. Then it loads all column
properties of that class (those that are present in the data)
and uses them as keyword arguments in a call to the class
constructor (if a required property is missing this call will
raise an error).
Relationships are not handled by this method, since we may not
have all referenced objects available yet. Thus we prefer to add
relationships in a later moment, using the add_relationships
method.
"""
示例5: __init__
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import put_file [as 别名]
#.........这里部分代码省略.........
# NOTE(review): tail fragment of an importer's do_import; header elided
# by this listing and indentation stripped.  Comments only.
self.import_dir = tempfile.mkdtemp()
archive.extractall(self.import_dir)
self.import_dir = os.path.join(self.import_dir, root)
if self.drop:
logger.info("Dropping and recreating the database.")
try:
metadata.drop_all()
except sqlalchemy.exc.OperationalError as error:
logger.critical("Unable to access DB.\n%r" % error)
return False
try:
metadata.create_all()
except sqlalchemy.exc.OperationalError as error:
logger.critical("Unable to access DB.\n%r" % error)
return False
logger.info("Reading JSON file...")
with open(os.path.join(self.import_dir, "contest.json")) as fin:
contest_json = json.load(fin)
# Optionally strip submissions/user tests from the JSON before import.
if self.no_submissions:
for user in contest_json["users"]:
user["submissions"] = []
user["user_tests"] = []
# NOTE(review): when self.only_files is true this whole block is
# skipped, so contest_id / contest_files below would be unbound --
# presumably they are set in the elided part; confirm.
if not self.only_files:
with SessionGen(commit=False) as session:
# Import the contest in JSON format.
logger.info("Importing the contest from JSON file.")
contest = Contest.import_from_dict(contest_json)
session.add(contest)
session.flush()
contest_id = contest.id
contest_files = contest.enumerate_files()
session.commit()
if not self.no_files:
logger.info("Importing files.")
files_dir = os.path.join(self.import_dir, "files")
descr_dir = os.path.join(self.import_dir, "descriptions")
for digest in contest_files:
file_ = os.path.join(files_dir, digest)
desc = os.path.join(descr_dir, digest)
# NOTE(review): debug leftover -- prints every description and
# leaks the open file handle; consider removing.
print open(desc).read()
if not os.path.exists(file_) or not os.path.exists(desc):
logger.error("Some files needed to the contest "
"are missing in the import directory. "
"The import will continue. Be aware.")
if not self.safe_put_file(file_, desc):
logger.critical("Unable to put file `%s' in the database. "
"Aborting. Please remove the contest "
"from the database." % file_)
# TODO: remove contest from the database.
return False
logger.info("Import finished (contest id: %s)." % contest_id)
logger.operation = ""
# If we extracted an archive, we remove it.
if self.import_dir != self.import_source:
shutil.rmtree(self.import_dir)
return True
def safe_put_file(self, path, descr_path):
    """Put a file to FileCacher signaling every error (including
    digest mismatch).

    path (string): the path from which to load the file.
    descr_path (string): same for description.

    return (bool): True if all ok, False if something wrong.

    """
    # First read the description.  A missing description file is not
    # fatal: fall back to an empty description.
    try:
        with open(descr_path) as fin:
            description = fin.read()
    except IOError:
        description = ''

    # Put the file.
    try:
        digest = self.file_cacher.put_file(path=path,
                                           description=description)
    except Exception as error:
        logger.critical("File %s could not be put to file server (%r), "
                        "aborting." % (path, error))
        return False

    # Then check the digest: recompute the SHA-1 locally and compare
    # with what the server returned.
    calc_digest = sha1sum(path)
    if digest != calc_digest:
        logger.critical("File %s has hash %s, but the server returned %s, "
                        "aborting." % (path, calc_digest, digest))
        return False

    return True
示例6: run
# 需要导入模块: from cms.db.FileCacher import FileCacher [as 别名]
# 或者: from cms.db.FileCacher.FileCacher import put_file [as 别名]
#.........这里部分代码省略.........
# NOTE(review): tail fragment of a statement-import script's run();
# header elided by this listing and indentation stripped.  Comments only.
lang, team = re.findall("^([A-Za-z0-9_]+) \(([A-Za-z0-9_]+)\)\.pdf$", f)[0]
data_by_lang.add((task, lang, team))
for f in os.listdir(os.path.join(data_dir, task, "by_team")):
# f == "team (lang).pdf"
team, lang = re.findall("^([A-Za-z0-9_]+) \(([A-Za-z0-9_]+)\)\.pdf$", f)[0]
data_by_team.add((task, lang, team))
# Consistency checks: by_lang and by_team must mirror each other and
# match the JSON metadata.
if data_by_lang != data_by_team:
print "ERROR: PDF files in 'data' are not complete"
print repr(data_by_lang - data_by_team)
print repr(data_by_team - data_by_lang)
return
if task_by_team != data_by_lang:
print "ERROR: PDF files in 'data' do not match JSON data"
print repr(task_by_team - data_by_lang)
print repr(data_by_lang - task_by_team)
return
print "Hooray! Data is consistent!"
# Pick one at random: they're all equal.
translations = task_by_team
# Determine language codes used in CMS.
codes = dict()
# Read JSON files in 'tasks' again as it provides data already
# grouped as we need it, and not simply as a list of tuples.
for t in os.listdir(task_dir):
if t.endswith('.json'):
task = t[:-5]
task_path = os.path.join(task_dir, t)
with open(task_path) as task_file:
data = json.load(task_file)
if "langs" in data:
for lang, v in data["langs"].iteritems():
if len(v) == 0:
pass
elif len(v) == 1 and v[0] != official_team:
for team in v:
codes[(task, lang, team)] = "%s" % lang
else:
for team in v:
codes[(task, lang, team)] = "%s_%s" % (lang, ioi_to_iso2[team])
# Store the files as Statement objects.
file_cacher = FileCacher()
for task, lang, team in translations:
if team == official_team:
# NOTE(review): assert is stripped under `python -O`; an explicit
# check would be more robust.
assert lang == "en"
digest = file_cacher.put_file(
path=os.path.join(data_dir, task, "by_lang", "%s (%s).pdf" % (lang, team)),
description="Statement for task %s" % task)
else:
digest = file_cacher.put_file(
path=os.path.join(data_dir, task, "by_lang", "%s (%s).pdf" % (lang, team)),
description="Statement for task %s, translated into %s (%s) by %s (%s)" %
(task, langs[lang], lang, teams[team], team))
s = Statement(codes[(task, lang, team)], digest, task=contest.get_task(task))
session.add(s)
session.commit()
primary = dict()
# Retrieve the statements selected by each team.
for t in os.listdir(team_dir):
if t.endswith('.json'):
team = t[:-5]
team_path = os.path.join(team_dir, t)
with open(team_path) as team_file:
data = json.load(team_file)
for team2, lang, task in data.get("selected", []):
# A team could have selected a statement that later got removed.
if (task, lang, team2) in codes:
primary.setdefault(team, {}).setdefault(task, []).append(codes[(task, lang, team2)])
# Add the ones they uploaded themselves.
for task, lang, team in translations:
# Don't worry about duplicates, CWS filters them out.
primary.setdefault(team, {}).setdefault(task, []).append(codes[(task, lang, team)])
# Set the primary statements for tasks (i.e. the ones of the official team)
for task, primary2 in primary.get(official_team, {}).iteritems():
contest.get_task(task).primary_statements = json.dumps(primary2)
# Set the primary statements for teams
# NOTE(review): SQL built by string interpolation -- SQL-injection
# risk and breaks if json.dumps output contains a quote; should use
# bound parameters (session.execute(text(...), {...})).
for team, primary2 in primary.iteritems():
session.execute("UPDATE users SET primary_statements = '%s' WHERE username LIKE '%s%%';" % (json.dumps(primary2), team))
session.commit()
print "Statements stored in the DB!"