本文整理汇总了Python中quex.engine.misc.file_in.error_msg函数的典型用法代码示例。如果您正苦于以下问题:Python error_msg函数的具体用法?Python error_msg怎么用?Python error_msg使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了error_msg函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: parse
def parse(fh):
    """Parse one mode description and register it in
    'blackboard.mode_description_db'. Once every mode has been parsed,
    the descriptions can be translated into 'real' modes which then live
    in 'blackboard.mode_db'.
    """
    # NOTE: EOF is caught by the caller: parse_section(...)
    skip_whitespace(fh)
    name = read_identifier(fh, OnMissingStr="Missing identifier at beginning of mode definition.")

    # The ModeDescription constructor registers the mode in the mode_db.
    mode = ModeDescription(name, SourceRef.from_FileHandle(fh))

    # (*) base modes / option list
    skip_whitespace(fh)
    delimiter = fh.read(1)
    if delimiter not in (":", "{"):
        error_msg("missing ':' or '{' after mode '%s'" % name, fh)
    if delimiter == ":":
        __parse_option_list(mode, fh)

    # (*) pattern-action pairs and event handlers
    while __parse_element(mode, fh):
        pass
示例2: utf8_to_unicode
def utf8_to_unicode(ByteSequence):
    """Decode a single UTF8-encoded character, given as a sequence of
    byte values, into its unicode code point.

    Decoding is done by hand because a python narrow build may deliver a
    character >= 0x10000 as a 2 byte string which 'ord' in python 2.x
    cannot handle; thus
        utf8d = codecs.getdecoder("utf-8")
        return ord(utf8d("".join(map(chr, ByteSequence)))[0])
    would be unsafe.
    """
    # A valid sequence of length N starts with N one-bits plus a zero in
    # the header byte, leaving 8 - N payload bits there. Every follow
    # byte starts with '10' and carries 6 payload bits.
    payload_bit_n = 8 - len(ByteSequence)
    result        = ByteSequence[0] & ((1 << payload_bit_n) - 1)
    for follow_byte in ByteSequence[1:]:
        # blend off the highest two bits; append 6 payload bits
        result = (result << 6) | (follow_byte & 0x3F)
        # The highest two bits of any follow byte in utf8 MUST be '10'.
        if (follow_byte & 0xC0) != 0x80:
            error_msg("Error in UTF8 encoded file. Inadmissible byte sequence detected. Found byte '%02X'" % follow_byte)
    return result
示例3: parse
def parse(fh, CodeFragmentName,
          ErrorOnFailureF=True, AllowBriefTokenSenderF=True, ContinueF=True):
    """RETURNS: An object of class UserCodeFragment carrying the line
                number, the file name, and the code fragment.
                None in case of failure (only if ErrorOnFailureF is False).
    """
    assert Setup.__class__ == QuexSetup
    assert type(ErrorOnFailureF) == bool
    assert type(AllowBriefTokenSenderF) == bool

    skip_whitespace(fh)
    head = fh.read(2)

    if head[0:1] == "{":
        # Only the '{' belongs to the fragment; unput the second character.
        fh.seek(-1, 1)
        return __parse_normal(fh, CodeFragmentName)

    if AllowBriefTokenSenderF and head == "=>":
        return __parse_brief_token_sender(fh, ContinueF)

    if not ErrorOnFailureF:
        # Restore the stream position; caller may try something else.
        fh.seek(-2, 1)
        return None

    error_msg("Missing code fragment after %s definition." % CodeFragmentName, fh)
示例4: __parse_brief_token_sender
def __parse_brief_token_sender(fh, ContinueF):
    """Parse the brief token sender notation following '=>'.

    Shorthand for { self.send(TKN_SOMETHING); QUEX_SETTING_AFTER_SEND_CONTINUE_OR_RETURN(); }

    RETURNS: UserCodeFragment with the generated sender code, or
             None if no code was produced. On premature end-of-stream
             the stream is rewound and an error is reported.
    """
    LanguageDB = Setup.language_db
    position = fh.tell()
    # 1-based line number for the code fragment's error reports.
    line_n = get_current_line_info_number(fh) + 1
    try:
        skip_whitespace(fh)
        position = fh.tell()
        # First, try a numeric token id specification (returns -1 on mismatch).
        code = __parse_token_id_specification_by_character_code(fh)
        if code != -1:
            code = __create_token_sender_by_character_code(fh, code)
        else:
            skip_whitespace(fh)
            identifier = __read_token_identifier(fh)
            skip_whitespace(fh)
            if identifier in ["GOTO", "GOSUB", "GOUP"]:
                # Mode transition shorthand, possibly combined with token sending.
                code = __create_mode_transition_and_token_sender(fh, identifier)
            else:
                # Plain token sender by token name; must end with ';'.
                code = __create_token_sender_by_token_name(fh, identifier)
                check_or_die(fh, ";")
        if code != "":
            # NOTE(review): presumably returns control to the analyzer's
            # main loop after sending -- confirm against generated code.
            if ContinueF: code += "QUEX_SETTING_AFTER_SEND_CONTINUE_OR_RETURN();\n"
            return UserCodeFragment(code, fh.name, line_n, LanguageDB)
        else:
            return None
    except EndOfStreamException:
        # Rewind so the error message points at the shortcut's start.
        fh.seek(position)
        error_msg("End of file reached while parsing token shortcut.", fh)
示例5: parse_section
def parse_section(fh, descriptor, already_defined_list):
    """Parse one token_type section; on a premature end-of-stream the
    stream is rewound and a proper error is reported.
    """
    entry_position = fh.tell()
    try:
        return __parse_section(fh, descriptor, already_defined_list)
    except EndOfStreamException:
        fh.seek(entry_position)
        error_msg("End of file reached while parsing token_type section.", fh)
示例6: __start_mode
def __start_mode(applicable_mode_name_list, mode_name_list):
    """Determine and verify the initial mode. If more than one mode is
    defined, an explicit definition 'start = mode' is required.
    """
    assert len(applicable_mode_name_list) != 0

    start_mode = blackboard.initial_mode.get_pure_code()
    if start_mode != "":
        # An explicit start mode was given: it must be defined, and it
        # must not be an 'inheritable only' mode.
        file_name = blackboard.initial_mode.filename
        line_n    = blackboard.initial_mode.line_n
        verify_word_in_list(start_mode, mode_name_list,
                            "Start mode '%s' is not defined." % start_mode,
                            file_name, line_n)
        verify_word_in_list(start_mode, applicable_mode_name_list,
                            "Start mode '%s' is inheritable only and cannot be instantiated." % start_mode,
                            file_name, line_n)
        return

    # No explicit start mode: choose the first applicable mode, but
    # complain if that choice was ambiguous.
    start_mode = applicable_mode_name_list[0]
    blackboard.initial_mode = CodeFragment(start_mode)
    if len(applicable_mode_name_list) > 1:
        error_msg("No initial mode defined via 'start' while more than one applicable mode exists.\n"
                  "Use for example 'start = %s;' in the quex source file to define an initial mode."
                  % start_mode)
示例7: __delete_forbidden_ranges
def __delete_forbidden_ranges(sm, fh):
    """Unicode does define all code points >= 0. Thus there can be no code points
    below zero as it might result from some number set operations.

    Transitions are clipped in place to the unicode range, checked against
    the buffer element value limit, and (for utf16 codecs) stripped of the
    surrogate range. Transitions whose trigger set becomes empty are removed.

    NOTE: This operation might result in orphaned states that have to
          be deleted.
    """
    global Setup
    # Supremum of the representable character range for the configured
    # buffer element size.
    character_value_limit = Setup.get_character_value_limit()
    for state in sm.states.values():
        for target_state_index, trigger_set in state.transitions().get_map().items():
            # Make sure, all transitions lie inside the unicode code range
            if trigger_set.minimum() < UnicodeInterval.begin or trigger_set.supremum() >= UnicodeInterval.end:
                trigger_set.intersect_with(UnicodeInterval)

            if trigger_set.supremum() > character_value_limit:
                error_msg("Pattern contains character beyond the scope of the buffer element size (%s)\n" \
                          % Setup.get_character_value_limit_str() + \
                          "Please, cut the character range of the regular expression,\n"
                          "adapt \"--buffer-element-size\" or \"--buffer-element-type\",\n" + \
                          "or specify '--buffer-element-size-irrelevant' to ignore the issue.", fh)

            if Setup.buffer_codec in ["utf16-le", "utf16-be"]:
                # Delete the forbidden interval: D800-DFFF (UTF16 surrogates)
                if trigger_set.has_intersection(ForbiddenRange):
                    # Warning only (DontExitF): the interval is cut and
                    # processing continues.
                    error_msg("Pattern contains characters in unicode range 0xD800-0xDFFF.\n"
                              "This range is not covered by UTF16. Cutting Interval.", fh, DontExitF=True)
                    trigger_set.cut_interval(ForbiddenRange)

            # If the operation resulted in cutting the path to the target state, then delete it.
            if trigger_set.is_empty():
                state.transitions().delete_transitions_to_target(target_state_index)
示例8: parse_standard_members
def parse_standard_members(fh, section_name, descriptor, already_defined_list):
    """Parse the 'standard' members of a token_type section and store
    the found type definitions in 'descriptor'. Each parsed member is
    appended to 'already_defined_list'.
    """
    if not check(fh, "{"):
        error_msg("Missing opening '{' at begin of token_type section '%s'." % section_name, fh)

    position = fh.tell()
    while True:
        try:
            result = parse_variable_definition(fh)
        except EndOfStreamException:
            fh.seek(position)
            error_eof("standard", fh)

        if result is None:
            return

        type_code_fragment, name = result[0], result[1]
        __validate_definition(type_code_fragment, name,
                              already_defined_list, StandardMembersF=True)

        if name == "id":
            descriptor.token_id_type = type_code_fragment
        elif name == "column_number":
            descriptor.column_number_type = type_code_fragment
        elif name == "line_number":
            descriptor.line_number_type = type_code_fragment
        else:
            # Unknown member names are rejected by the variable parser.
            assert False

        already_defined_list.append([name, type_code_fragment])
示例9: __handle_property_match
def __handle_property_match(cl):
    """Handle the '--property-match' query: print every value of the
    given property that matches the wild card expression.
    """
    property_follower = cl.follow("", "--property-match")
    sys.stderr.write("(please, wait for database parsing to complete)\n")
    if property_follower == "":
        return

    fields = [field.strip() for field in property_follower.split("=")]
    if len(fields) != 2:
        error_msg("Wrong property setting '%s'." % property_follower)

    # -- name on the left of '=', wild card expression on the right
    name                 = fields[0]
    wild_card_expression = fields[1]

    # -- get the property from the database
    property = __get_property(name)
    if property is None:
        return True

    # -- binary properties carry no values to match against
    if property.type == "Binary":
        error_msg("Binary property '%s' is not subject to value wild card matching.\n" % property.name)

    for value in property.get_wildcard_value_matches(wild_card_expression):
        print(value)
示例10: __general_validate
def __general_validate(fh, Mode, Name, pos):
    """Validate an event handler definition named 'Name' for 'Mode'.

    Rejects the outdated 'on_indentation' handler and reports a conflict
    when both 'on_dedent' and 'on_n_dedent' carry code. 'pos' is the
    stream position of the handler, used to rewind for error reporting.
    """
    if Name == "on_indentation":
        fh.seek(pos)
        error_msg("Definition of 'on_indentation' is no longer supported since version 0.51.1.\n"
                  "Please, use 'on_indent' for the event of an opening indentation, 'on_dedent'\n"
                  "for closing indentation, and 'on_nodent' for no change in indentation.", fh)

    def error_dedent_and_ndedent(code, A, B):
        # Report that handler 'A' conflicts with the already defined 'B',
        # pointing at the location where 'B' was defined (if known).
        filename = "(unknown)"
        line_n = "0"
        if hasattr(code, "filename"): filename = code.filename
        if hasattr(code, "line_n"): line_n = code.line_n
        # First message is a warning-style note (DontExitF) at the new
        # definition; the second points at the earlier definition.
        error_msg("Indentation event handler '%s' cannot be defined, because\n" % A,
                  fh, DontExitF=True, WarningF=False)
        error_msg("the alternative '%s' has already been defined." % B,
                  filename, line_n)

    # 'on_dedent' and 'on_n_dedent' are mutually exclusive alternatives.
    if Name == "on_dedent" and Mode.events.has_key("on_n_dedent"):
        fh.seek(pos)
        code = Mode.events["on_n_dedent"]
        if code.get_code() != "":
            error_dedent_and_ndedent(code, "on_dedent", "on_n_dedent")

    if Name == "on_n_dedent" and Mode.events.has_key("on_dedent"):
        fh.seek(pos)
        code = Mode.events["on_dedent"]
        if code.get_code() != "":
            error_dedent_and_ndedent(code, "on_n_dedent", "on_dedent")
示例11: get_character_value_limit
def get_character_value_limit(self):
    """A buffer element is a chunk of memory of the size of the granularity
    of which the input pointer increases. For fixed size codecs, such as
    ASCII or UCS32, the BUFFER ELEMENT VALUE LIMIT is exactly the same as
    the CHARACTER VALUE LIMIT.

    However, for dynamic sized codecs, such as UTF8 or UTF16, they are
    different. In UTF8, the input pointer increments by one byte on each
    state transition. However, a character may consist out of multiple
    bytes. The buffer element value limit is 256, but the character value
    limit is the whole range.

    RETURNS: Integer = supremum of possible character range, i.e.
                       one character behind the last possible.
             sys.maxint, if no such limit exists.
    """
    buffer_element_size = self.buffer_element_size
    # -1 means 'size irrelevant' => no limit applies.
    if buffer_element_size == -1: return sys.maxint
    try:
        result = 256 ** buffer_element_size
    # FIX: was a bare 'except:', which would also swallow SystemExit and
    # KeyboardInterrupt. Catch only genuine errors (e.g. MemoryError for
    # absurdly large sizes); error_msg is expected to terminate.
    except Exception:
        file_in.error_msg("Error while trying to compute 256 to the 'buffer-element-size' (%i bytes)\n" \
                          % buffer_element_size + \
                          "Adapt \"--buffer-element-size\" or \"--buffer-element-type\",\n" + \
                          "or specify '--buffer-element-size-irrelevant' to ignore the issue.")
    # Clip to the platform's integer maximum.
    if result > sys.maxint: return sys.maxint
    else:                   return result
示例12: detect_path_of_nothing_is_necessary
def detect_path_of_nothing_is_necessary(sm, Name, PostContextPresentF, fh):
    """Report an error if the state machine 'sm' accepts without reading
    a single character, i.e. its initial state is an acceptance state.
    Does nothing if 'sm' is None or its init state is not accepting.
    """
    assert Name in ["", "pre-context", "post-context"]
    if sm is None:
        return
    if not sm.get_init_state().is_acceptance():
        return

    name_str = "core pattern" if len(Name) == 0 else Name

    explanation_db = {
        "":
            "The analyzer would then stall.",
        "pre-context":
            "E.g., pattern 'x*/y/' means that zero or more 'x' are a pre-\n"
            "condition for 'y'. If zero appearances of 'x' are enough, then obviously\n"
            "there is no pre-context for 'y'! Most likely the author intended 'x+/y/'.",
        "post-context":
            "A post context where nothing is necessary is superfluous.",
    }

    msg = "The %s contains in a 'nothing is necessary' path in the state machine.\n" % name_str \
          + "This means, that without reading a character the analyzer drops into\n" \
          + "an acceptance state. " \
          + explanation_db[Name]

    if Name != "post-context" and PostContextPresentF:
        msg += "\n" \
               "Note: A post context does not change anything to that fact."

    error_msg(msg, fh)
示例13: buffer_codec_prepare
def buffer_codec_prepare(self, BufferCodecName, BufferCodecFileName=None, Module=None):
    """Determines: Setup.buffer_codec_name
                   Setup.buffer_codec

    Selects the codec info object according to the given codec name or
    codec file and stores it in 'self.buffer_codec'.
    """
    if BufferCodecName in ("utf8", "utf16"):
        # Dynamic-size codecs require the codec's python module.
        assert Module is not None
        result = codec_db.CodecDynamicInfo(BufferCodecName, Module)
    elif BufferCodecFileName:
        # FIX: the original called os.path.splitext(os.path.basename(...))
        # once *outside* the try (result discarded, errors uncaught) and
        # once inside; the redundant unprotected call is removed.
        try:
            os.path.splitext(os.path.basename(BufferCodecFileName))
        # FIX: was a bare 'except:'; catch only genuine errors so that
        # SystemExit/KeyboardInterrupt pass through.
        except Exception:
            file_in.error_msg("cannot interpret string following '--codec-file'")
        result = codec_db.CodecTransformationInfo(FileName=BufferCodecFileName)
    elif BufferCodecName == "unicode":
        # (Still, 'icu' or 'iconv' may provide converted content, but ...)
        # If the internal buffer is 'unicode', then the pattern's state
        # machines are not converted. The requirement for the pattern's
        # range is the same as for the 'buffer element chunks'.
        result = codec_db.CodecInfo("unicode",
                                    NumberSet.from_range(0, self.get_character_value_limit()),
                                    NumberSet.from_range(0, self.get_character_value_limit()))
    elif BufferCodecName == "unit-test":
        result = codec_db.CodecInfo("unicode",
                                    NumberSet.from_range(-sys.maxint, sys.maxint),
                                    NumberSet.from_range(-sys.maxint, sys.maxint))
    else:
        result = codec_db.CodecTransformationInfo(BufferCodecName)
    self.buffer_codec = result
示例14: check_grid_values_integer_multiples
def check_grid_values_integer_multiples(self):
    """If there are no spaces and the grid is on a homogeneous scale,
    => then the grid can be transformed into 'easy-to-compute' spaces.
    In that case a (non-exiting) note is issued.
    """
    grid_value_list = []
    min_info        = None
    for character_set, info in self.__map:
        if info.cc_type != E_CharacterCountType.GRID:
            # Any COLUMN (space) count present => transformation pointless.
            if info.cc_type == E_CharacterCountType.COLUMN:
                return
            continue
        if type(info.value) in (str, unicode):
            # One single 'variable' grid value prevents any assumption.
            return
        grid_value_list.append(info.value)
        if min_info is None or info.value < min_info.value:
            min_info = info

    if min_info is None:
        return

    # Only report when every grid value is a multiple of the minimum.
    if any(value % min_info.value != 0 for value in grid_value_list):
        return

    error_msg("Setup does not contain spaces, only grids (tabulators). All grid\n" \
              "widths are multiples of %i. The grid setup %s\n" \
              % (min_info.value, repr(sorted(grid_value_list))[1:-1]) + \
              "is equivalent to a setup with space counts %s.\n" \
              % repr(map(lambda x: x / min_info.value, sorted(grid_value_list)))[1:-1] + \
              "Space counts are faster to compute.",
              min_info.sr, DontExitF=True)
示例15: do
def do(ARGV):
"""Performs a query based on the given command line arguments.
RETURNS: True if a query was performed.
False if not query was requested.
"""
cl = GetPot(ARGV, SectionsEnabledF=False)
success_f = False
# Regular Expressions extract the BufferLimitCode and the PathTerminatorCode
# from the sets. So let us define them outside the normal range.
backup_buffer_limit_code = Setup.buffer_limit_code
backup_path_limit_code = Setup.path_limit_code
Setup.buffer_limit_code = -1
Setup.path_limit_code = -1
try:
success_f = True
if search_and_validate(cl, "--codec-info"): __handle_codec(cl)
elif search_and_validate(cl, "--codec-file-info"): __handle_codec_file(cl)
elif search_and_validate(cl, "--codec-for-language"): __handle_codec_for_language(cl)
elif search_and_validate(cl, "--property"): __handle_property(cl)
elif search_and_validate(cl, "--set-by-property"): __handle_set_by_property(cl)
elif search_and_validate(cl, "--set-by-expression"): __handle_set_by_expression(cl)
elif search_and_validate(cl, "--property-match"): __handle_property_match(cl)
else: success_f = False
except RegularExpressionException, x:
error_msg(x.message)