This page collects typical usage examples of the jinja2.exceptions module in Python. If you are wondering what jinja2.exceptions is for and how to use it in practice, the curated code samples below may help. You can also explore further usage examples from the jinja2 package, where this module lives.
The following 13 code examples of jinja2.exceptions are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: _render_global_autoindex
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def _render_global_autoindex(self, files):
    if self.global_template is None:
        return None
    if self.template_autoindex is None:
        return None
    try:
        template = self.global_template.get_template(self.template_autoindex.get("filename"))
    except jinja2.exceptions.TemplateNotFound:
        logger.warning("Template file not found. See stacktrace for additional information", exc_info=True)
        return None
    return template.render(
        connection=self,
        files=files,
        values=self.template_values
    )
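The handler above assumes that self.global_template is a configured Jinja2 environment. Below is a minimal, self-contained sketch of that setup and of the same "missing template is not fatal" pattern; the templates directory, function name and context variables are hypothetical and not taken from the example above.

import jinja2

# Hypothetical setup: load templates from a local "templates/" directory.
env = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"))

def render_or_none(name, **context):
    # Mirrors the pattern above: a missing template is tolerated, not fatal.
    try:
        template = env.get_template(name)
    except jinja2.exceptions.TemplateNotFound:
        return None
    return template.render(**context)

print(render_or_none("autoindex.html", files=["a.txt", "b.txt"]))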
Example 2: __init__
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def __init__(self, track_path):
    if not os.path.exists(track_path):
        raise exceptions.SystemSetupError("Track path %s does not exist" % track_path)
    if os.path.isdir(track_path):
        self.track_name = io.basename(track_path)
        self._track_dir = track_path
        self._track_file = os.path.join(track_path, "track.json")
        if not os.path.exists(self._track_file):
            raise exceptions.SystemSetupError("Could not find track.json in %s" % track_path)
    elif os.path.isfile(track_path):
        if io.has_extension(track_path, ".json"):
            self._track_dir = io.dirname(track_path)
            self._track_file = track_path
            self.track_name = io.splitext(io.basename(track_path))[0]
        else:
            raise exceptions.SystemSetupError("%s has to be a JSON file" % track_path)
    else:
        raise exceptions.SystemSetupError("%s is neither a file nor a directory" % track_path)
Example 3: decompress
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def decompress(self, archive_path, documents_path, uncompressed_size):
    if uncompressed_size:
        console.info("Decompressing track data from [%s] to [%s] (resulting size: %.2f GB) ... " %
                     (archive_path, documents_path, convert.bytes_to_gb(uncompressed_size)),
                     end='', flush=True, logger=self.logger)
    else:
        console.info("Decompressing track data from [%s] to [%s] ... " % (archive_path, documents_path), end='',
                     flush=True, logger=self.logger)
    io.decompress(archive_path, io.dirname(archive_path))
    console.println("[OK]")
    if not os.path.isfile(documents_path):
        raise exceptions.DataError("Decompressing [%s] did not create [%s]. Please check with the track author if the compressed "
                                   "archive has been created correctly." % (archive_path, documents_path))
    extracted_bytes = os.path.getsize(documents_path)
    if uncompressed_size is not None and extracted_bytes != uncompressed_size:
        raise exceptions.DataError("[%s] is corrupt. Extracted [%d] bytes but [%d] bytes are expected." %
                                   (documents_path, extracted_bytes, uncompressed_size))
Example 4: filters_from_filtered_tasks
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def filters_from_filtered_tasks(filtered_tasks):
    filters = []
    if filtered_tasks:
        for t in filtered_tasks:
            spec = t.split(":")
            if len(spec) == 1:
                filters.append(track.TaskNameFilter(spec[0]))
            elif len(spec) == 2:
                if spec[0] == "type":
                    filters.append(track.TaskOpTypeFilter(spec[1]))
                else:
                    raise exceptions.SystemSetupError(
                        "Invalid format for filtered tasks: [%s]. Expected [type] but got [%s]." % (t, spec[0]))
            else:
                raise exceptions.SystemSetupError("Invalid format for filtered tasks: [%s]" % t)
    return filters
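The accepted filter syntax is easiest to see with a small, dependency-free sketch that mirrors the parsing rules above; the function name and the tuple representation are illustrative stand-ins for Rally's track.TaskNameFilter and track.TaskOpTypeFilter.

def parse_task_filters(filtered_tasks):
    # "name"      -> ("name", <task name>)
    # "type:<op>" -> ("type", <operation type>)
    filters = []
    for t in filtered_tasks or []:
        spec = t.split(":")
        if len(spec) == 1:
            filters.append(("name", spec[0]))
        elif len(spec) == 2 and spec[0] == "type":
            filters.append(("type", spec[1]))
        else:
            raise ValueError("Invalid format for filtered tasks: [%s]" % t)
    return filters

print(parse_task_filters(["index-append", "type:search"]))
# [('name', 'index-append'), ('type', 'search')]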
Example 5: _render_file_template
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def _render_file_template(self, filename):
    filename = filename[len(self.root):] + self.template_file_extension
    filename = filename.lstrip("/")
    if self.file_template is None:
        return None
    try:
        template = self.file_template.get_template(filename)
    except jinja2.exceptions.TemplateNotFound:
        # ToDo: Do we need this?
        # logger.warning("Template file not found. See stacktrace for additional information", exc_info=True)
        return None
    return template.render(
        values=self.template_values
    )
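Instead of returning None, a caller could also fall back to an in-memory template when the per-file template is missing. A minimal sketch of that variation, assuming a hypothetical "templates/" directory; the fallback template string is illustrative.

import jinja2

env = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"))
# Hypothetical in-memory fallback used when the per-file template is missing.
DEFAULT_TEMPLATE = jinja2.Template("{{ values }}")

def render_file_template(name, values):
    try:
        template = env.get_template(name)
    except jinja2.exceptions.TemplateNotFound:
        template = DEFAULT_TEMPLATE
    return template.render(values=values)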
Example 6: _render_global_template
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def _render_global_template(self, code, message):
    if self.global_template is None:
        return None
    if self.template_error_pages is None:
        return None
    for tpl in self.template_error_pages:
        tpl_codes = tpl.get("codes")
        if tpl_codes and code not in tpl_codes:
            continue
        tpl_filename = tpl.get("filename")
        if not tpl_filename:
            logger.warning("Template filename not set")
            continue
        try:
            template = self.global_template.get_template(
                name=tpl_filename.format(
                    code=code
                )
            )
        except jinja2.exceptions.TemplateNotFound:
            logger.warning("Template file not found. See stacktrace for additional information", exc_info=True)
            return None
        if template:
            return template.render(
                code=code,
                message=message,
                values=self.template_values
            )
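As a variation on the lookup loop above, Jinja2's Environment.select_template() can try several candidate names at once and raises jinja2.exceptions.TemplatesNotFound when none of them exists. A minimal sketch, assuming a hypothetical error_pages/ directory containing files such as 404.html and a generic error.html:

import jinja2

env = jinja2.Environment(loader=jinja2.FileSystemLoader("error_pages"))

def render_error_page(code, message):
    # Try a code-specific page first (e.g. "404.html"), then a generic fallback.
    try:
        template = env.select_template(["%d.html" % code, "error.html"])
    except jinja2.exceptions.TemplatesNotFound:
        return None
    return template.render(code=code, message=message)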
Example 7: load_track
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def load_track(cfg):
    """
    Loads a track

    :param cfg: The config object. It contains the name of the track to load.
    :return: The loaded track.
    """
    track_name = None
    try:
        repo = track_repo(cfg)
        track_name = repo.track_name
        track_dir = repo.track_dir(track_name)
        reader = TrackFileReader(cfg)
        filtered_tasks = []
        exclude = False
        if cfg.opts("track", "include.tasks"):
            filtered_tasks = cfg.opts("track", "include.tasks")
        else:
            filtered_tasks = cfg.opts("track", "exclude.tasks")
            exclude = True
        current_track = reader.read(track_name, repo.track_file(track_name), track_dir)
        current_track = filter_tasks(current_track, filters_from_filtered_tasks(filtered_tasks), exclude)
        plugin_reader = TrackPluginReader(track_dir)
        current_track.has_plugins = plugin_reader.can_load()
        if cfg.opts("track", "test.mode.enabled"):
            return post_process_for_test_mode(current_track)
        else:
            return current_track
    except FileNotFoundError:
        logging.getLogger(__name__).exception("Cannot load track [%s]", track_name)
        raise exceptions.SystemSetupError("Cannot load track %s. List the available tracks with %s list tracks." %
                                          (track_name, PROGRAM_NAME))
    except BaseException:
        logging.getLogger(__name__).exception("Cannot load track [%s]", track_name)
        raise
Example 8: create_file_offset_table
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def create_file_offset_table(self, document_file_path, expected_number_of_lines):
    # just rebuild the file every time for the time being. Later on, we might check the data file fingerprint to avoid it
    lines_read = io.prepare_file_offset_table(document_file_path)
    if lines_read and lines_read != expected_number_of_lines:
        io.remove_file_offset_table(document_file_path)
        raise exceptions.DataError("Data in [%s] for track [%s] are invalid. Expected [%d] lines but got [%d]."
                                   % (document_file_path, track, expected_number_of_lines, lines_read))
Example 9: prepare_bundled_document_set
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def prepare_bundled_document_set(self, document_set, data_root):
    """
    Prepares a document set that comes "bundled" with the track, i.e. the data files are in the same directory as the track.

    This is a "lightweight" version of #prepare_document_set() which assumes that at least one file is already present in the
    current directory. It will attempt to find the appropriate files, decompress if necessary and create a file offset table.

    Precondition: The document set contains either a compressed or an uncompressed document file reference.
    Postcondition: If this method returns ``True``, the following files will be present locally:

        * The compressed document file (if specified originally in the corpus)
        * The uncompressed document file
        * A file offset table based on the document file

    If this method returns ``False``, either the document size is wrong or the files have not been found.

    :param document_set: A document set.
    :param data_root: The data root directory for this document set (should be the same as the track file).
    :return: See postcondition.
    """
    doc_path = os.path.join(data_root, document_set.document_file)
    archive_path = os.path.join(data_root, document_set.document_archive) if document_set.has_compressed_corpus() else None
    while True:
        if self.is_locally_available(doc_path):
            if self.has_expected_size(doc_path, document_set.uncompressed_size_in_bytes):
                self.create_file_offset_table(doc_path, document_set.number_of_lines)
                return True
            else:
                raise exceptions.DataError("%s is present but does not have the expected size of %s bytes." %
                                           (doc_path, str(document_set.uncompressed_size_in_bytes)))
        if document_set.has_compressed_corpus() and self.is_locally_available(archive_path):
            if self.has_expected_size(archive_path, document_set.compressed_size_in_bytes):
                self.decompress(archive_path, doc_path, document_set.uncompressed_size_in_bytes)
            else:
                # treat this as an error because if the file is present but the size does not match, something is really fishy.
                # It is likely that the user is currently creating a new track and did not specify the file size correctly.
                raise exceptions.DataError("%s is present but does not have the expected size of %s bytes." %
                                           (archive_path, str(document_set.compressed_size_in_bytes)))
        else:
            return False
Example 10: load
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def load(self):
    root_module = self.loader.load()
    try:
        # every module needs to have a register() method
        root_module.register(self)
    except BaseException:
        msg = "Could not register track plugin at [%s]" % self.loader.root_path
        logging.getLogger(__name__).exception(msg)
        raise exceptions.SystemSetupError(msg)
Example 11: parse_operation
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def parse_operation(self, op_spec, error_ctx="operations"):
    # just a name, let's assume it is a simple operation like force-merge and create a full operation
    if isinstance(op_spec, str):
        op_name = op_spec
        meta_data = None
        op_type_name = op_spec
        param_source = None
        # Cannot have parameters here
        params = {}
    else:
        meta_data = self._r(op_spec, "meta", error_ctx=error_ctx, mandatory=False)
        # Rally's core operations will still use enums then but we'll allow users to define arbitrary operations
        op_type_name = self._r(op_spec, "operation-type", error_ctx=error_ctx)
        # fallback to use the operation type as the operation name
        op_name = self._r(op_spec, "name", error_ctx=error_ctx, mandatory=False, default_value=op_type_name)
        param_source = self._r(op_spec, "param-source", error_ctx=error_ctx, mandatory=False)
        # just pass-through all parameters by default
        params = op_spec

    try:
        op = track.OperationType.from_hyphenated_string(op_type_name)
        if "include-in-reporting" not in params:
            params["include-in-reporting"] = not op.admin_op
        op_type = op.name
        self.logger.debug("Using built-in operation type [%s] for operation [%s].", op_type, op_name)
    except KeyError:
        self.logger.info("Using user-provided operation type [%s] for operation [%s].", op_type_name, op_name)
        op_type = op_type_name

    try:
        return track.Operation(name=op_name, meta_data=meta_data, operation_type=op_type, params=params, param_source=param_source)
    except exceptions.InvalidSyntax as e:
        raise TrackSyntaxError("Invalid operation [%s]: %s" % (op_name, str(e)))
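For context, the two shapes of op_spec this parser accepts look roughly like the following; the concrete keys and values are illustrative and not taken from a real track.

# 1. A bare string: name, operation type and params all default from it.
op_spec_simple = "force-merge"

# 2. A dict: "operation-type" is mandatory, everything else is optional
#    and passed through as params.
op_spec_full = {
    "name": "index-documents",
    "operation-type": "bulk",
    "bulk-size": 5000,
    "param-source": "my-custom-param-source",  # hypothetical custom parameter source
}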
Example 12: download
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def download(self, base_url, target_path, size_in_bytes, detail_on_missing_root_url):
    file_name = os.path.basename(target_path)

    if not base_url:
        raise exceptions.DataError("%s and it cannot be downloaded because no base URL is provided."
                                   % detail_on_missing_root_url)
    if self.offline:
        raise exceptions.SystemSetupError("Cannot find %s. Please disable offline mode and retry again." % target_path)

    data_url = "%s/%s" % (base_url, file_name)
    try:
        io.ensure_dir(os.path.dirname(target_path))
        if size_in_bytes:
            size_in_mb = round(convert.bytes_to_mb(size_in_bytes))
            self.logger.info("Downloading data from [%s] (%s MB) to [%s].", data_url, size_in_mb, target_path)
        else:
            self.logger.info("Downloading data from [%s] to [%s].", data_url, target_path)

        # we want to have a bit more accurate download progress as these files are typically very large
        progress = net.Progress("[INFO] Downloading data for track %s" % self.track_name, accuracy=1)
        net.download(data_url, target_path, size_in_bytes, progress_indicator=progress)
        progress.finish()
        self.logger.info("Downloaded data from [%s] to [%s].", data_url, target_path)
    except urllib.error.HTTPError as e:
        if e.code == 404 and self.test_mode:
            raise exceptions.DataError("Track [%s] does not support test mode. Please ask the track author to add it or "
                                       "disable test mode and retry." % self.track_name)
        else:
            msg = "Could not download [%s] to [%s]" % (data_url, target_path)
            if e.reason:
                msg += " (HTTP status: %s, reason: %s)" % (str(e.code), e.reason)
            else:
                msg += " (HTTP status: %s)" % str(e.code)
            raise exceptions.DataError(msg)
    except urllib.error.URLError:
        self.logger.exception("Could not download [%s] to [%s].", data_url, target_path)
        raise exceptions.DataError("Could not download [%s] to [%s]." % (data_url, target_path))

    if not os.path.isfile(target_path):
        raise exceptions.SystemSetupError(
            "Cannot download from %s to %s. Please verify that data are available at %s and "
            "check your Internet connection." % (data_url, target_path, data_url))

    actual_size = os.path.getsize(target_path)
    if size_in_bytes is not None and actual_size != size_in_bytes:
        raise exceptions.DataError("[%s] is corrupt. Downloaded [%d] bytes but [%d] bytes are expected." %
                                   (target_path, actual_size, size_in_bytes))
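net.download() and net.Progress() are Rally helpers. As a rough standard-library stand-in, the same HTTPError/URLError split and size check can be sketched with urllib.request; the function name and the use of RuntimeError are illustrative choices, not part of the example above.

import os
import urllib.error
import urllib.request

def download_file(data_url, target_path, expected_size=None):
    # Simplified stand-in for the download-and-verify flow above.
    os.makedirs(os.path.dirname(target_path) or ".", exist_ok=True)
    try:
        urllib.request.urlretrieve(data_url, target_path)
    except urllib.error.HTTPError as e:
        raise RuntimeError("Could not download [%s]: HTTP %s" % (data_url, e.code))
    except urllib.error.URLError as e:
        raise RuntimeError("Could not download [%s]: %s" % (data_url, e.reason))
    actual_size = os.path.getsize(target_path)
    if expected_size is not None and actual_size != expected_size:
        raise RuntimeError("[%s] is corrupt: got %d bytes, expected %d." %
                           (target_path, actual_size, expected_size))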
Example 13: post_process_for_test_mode
# Required import: import jinja2 [as alias]
# Or: from jinja2 import exceptions [as alias]
def post_process_for_test_mode(t):
    logger = logging.getLogger(__name__)
    logger.info("Preparing track [%s] for test mode.", str(t))
    for corpus in t.corpora:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Reducing corpus size to 1000 documents for [%s]", corpus.name)
        for document_set in corpus.documents:
            # TODO #341: Should we allow this for snapshots too?
            if document_set.is_bulk:
                document_set.number_of_documents = 1000

                if document_set.has_compressed_corpus():
                    path, ext = io.splitext(document_set.document_archive)
                    path_2, ext_2 = io.splitext(path)
                    document_set.document_archive = "%s-1k%s%s" % (path_2, ext_2, ext)
                    document_set.document_file = "%s-1k%s" % (path_2, ext_2)
                elif document_set.has_uncompressed_corpus():
                    path, ext = io.splitext(document_set.document_file)
                    document_set.document_file = "%s-1k%s" % (path, ext)
                else:
                    raise exceptions.RallyAssertionError("Document corpus [%s] has neither compressed nor uncompressed corpus." %
                                                         corpus.name)

                # we don't want to check sizes
                document_set.compressed_size_in_bytes = None
                document_set.uncompressed_size_in_bytes = None

    for challenge in t.challenges:
        for task in challenge.schedule:
            # we need to iterate over leaf tasks and avoid iterating over possible intermediate 'parallel' elements
            for leaf_task in task:
                # iteration-based schedules are divided among all clients and we should provide at least one iteration for each client.
                if leaf_task.warmup_iterations is not None and leaf_task.warmup_iterations > leaf_task.clients:
                    count = leaf_task.clients
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.debug("Resetting warmup iterations to %d for [%s]", count, str(leaf_task))
                    leaf_task.warmup_iterations = count
                if leaf_task.iterations is not None and leaf_task.iterations > leaf_task.clients:
                    count = leaf_task.clients
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.debug("Resetting measurement iterations to %d for [%s]", count, str(leaf_task))
                    leaf_task.iterations = count
                if leaf_task.warmup_time_period is not None and leaf_task.warmup_time_period > 0:
                    leaf_task.warmup_time_period = 0
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.debug("Resetting warmup time period for [%s] to [%d] seconds.", str(leaf_task), leaf_task.warmup_time_period)
                if leaf_task.time_period is not None and leaf_task.time_period > 10:
                    leaf_task.time_period = 10
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.debug("Resetting measurement time period for [%s] to [%d] seconds.", str(leaf_task), leaf_task.time_period)
                leaf_task.params.pop("target-throughput", None)
                leaf_task.params.pop("target-interval", None)
    return t
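The compressed-corpus branch above strips two extensions before appending "-1k". A standalone sketch of that renaming, using os.path.splitext as a stand-in for Rally's io.splitext helper; the function name is hypothetical.

import os.path

def to_1k_names(document_archive):
    # Mirrors the renaming above for a compressed corpus, e.g.
    # "documents.json.bz2" -> ("documents-1k.json.bz2", "documents-1k.json")
    path, ext = os.path.splitext(document_archive)   # ("documents.json", ".bz2")
    path_2, ext_2 = os.path.splitext(path)           # ("documents", ".json")
    archive_1k = "%s-1k%s%s" % (path_2, ext_2, ext)
    file_1k = "%s-1k%s" % (path_2, ext_2)
    return archive_1k, file_1k

print(to_1k_names("documents.json.bz2"))
# ('documents-1k.json.bz2', 'documents-1k.json')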