本文整理汇总了Python中quex.engine.misc.error.log函数的典型用法代码示例。如果您正苦于以下问题:Python log函数的具体用法?Python log怎么用?Python log使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了log函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: utf8_to_unicode
def utf8_to_unicode(ByteSequence):
    """Decode a single UTF-8 encoded character from a sequence of byte values.

    Doing the decoding by hand avoids libPython's utf-8 decoder: on a
    narrow Python 2 build a character >= 0x10000 comes back as a two-byte
    string which 'ord' cannot handle, so
        utf8d = codecs.getdecoder("utf-8")
        return ord(utf8d("".join(map(chr, ByteSequence)))[0])
    would be unsafe.

    RETURNS: Integer code point encoded by 'ByteSequence'.
    """
    # A valid UTF-8 sequence of length N announces itself with N-1 leading
    # one-bits plus a zero bit in its first byte; the remaining 8 - N bits
    # of that byte are payload. Every follow byte contributes 6 payload bits.
    payload_bit_n = 8 - len(ByteSequence)
    result        = ByteSequence[0] & ((1 << payload_bit_n) - 1)
    for follow_byte in ByteSequence[1:]:
        # Blend off the marker bits; keep the low six payload bits.
        result = (result << 6) | (follow_byte & 0x3F)
        # Any follow byte in UTF-8 MUST carry the '10xxxxxx' marker.
        if (follow_byte & 0xC0) != 0x80:
            error.log("Error in UTF8 encoded file. Inadmissible byte sequence detected. Found byte '%02X'" % follow_byte)
    return result
示例2: __roman_number
def __roman_number(Text, fh):
    """Convert the roman numeral in 'Text' to its integer value.

    Source: http://code.activestate.com -- Code Recipes
            Recipe 81611 by Paul Winkler.

    RETURNS: Integer value of the roman numeral.
    ERROR:   Via 'error.log' if 'Text' is not a valid roman numeral.
    """
    normalized = Text.upper()
    # Tuples of (numeral, value, maxcount); 'maxcount' limits how often a
    # numeral may repeat ('None' = unlimited, as for 'M').
    numeral_table = (('M', 1000, None), ('CM', 900, 1),
                     ('D',  500,    1), ('CD', 400, 1),
                     ('C',  100,    3), ('XC',  90, 1),
                     ('L',   50,    1), ('XL',  40, 1),
                     ('X',   10,    3), ('IX',   9, 1),
                     ('V',    5,    1), ('IV',   4, 1),
                     ('I',    1,    3))
    total = 0
    i     = 0
    for numeral, value, maxcount in numeral_table:
        repetition_n = 0
        while normalized[i: i + len(numeral)] == numeral:
            repetition_n += 1  # how many of this numeral we have
            if maxcount is not None and repetition_n > maxcount:
                error.log("input 0r%s is not a valid roman numeral." % Text, fh)
            total += value
            i     += len(numeral)
    # Characters left unaccounted for mean the input was not purely roman.
    if i < len(normalized):
        error.log("input 0r%s is not a valid roman numeral." % Text, fh)
    return total
示例3: argv_is_query_option
def argv_is_query_option(Cl, Option, Name, PrevQueryF):
    """Determines whether the setup parameter is a parameter related to
    queries (or to code generation). If a mixed usage is detected an
    error is issued.

    RETURN: query flag

    The query flag is the same as QueryF, except for one case: when QueryF
    was None (unset) and the option appeared on the command line. Then, the
    return value tells whether the option was a query flag or not.

    ERROR: If there are mixed options, i.e. query flags and code generation
    flags appear at the same time.
    """
    # Options whose setup name starts with 'query_' belong to query mode.
    query_f = Name.startswith("query_")

    if   PrevQueryF is None:       return query_f
    elif PrevQueryF == query_f:    return query_f

    # If debug exception is enabled, do not trigger error
    if Cl.search(SETUP_INFO["_debug_exception_f"][0]): return query_f

    # NOTE(review): 'error.log' presumably terminates; otherwise this
    # function falls through returning None -- confirm against error module.
    # (Fixed message typo: 'preceeding' -> 'preceding'.)
    error.log("Mixed options: query and code generation mode.\n"
              "The option(s) '%s' cannot be combined with preceding options." \
              % str(SETUP_INFO[Name][0])[1:-1].replace("'",""))
示例4: buffer_codec_prepare
def buffer_codec_prepare(self, BufferCodecName, BufferCodecFileName=None, Module=None):
    """Determines: Setup.buffer_codec_name
                   Setup.buffer_codec

    Selects the codec/transformation info object matching 'BufferCodecName'
    (or a codec file) and stores it in 'self.buffer_codec'.
    """
    assert BufferCodecName == "unit-test" \
           or self.__buffer_element_specification_done_f == True

    if BufferCodecName in ("utf8", "utf16"):
        assert Module is not None
        result = codec_db.CodecDynamicInfo(BufferCodecName, Module)
    elif BufferCodecFileName:
        # BUG FIX: the file-name sanity check used to be duplicated once
        # OUTSIDE the 'try' block, so a malformed name raised an uncaught
        # exception before the guarded call was ever reached.
        try:
            os.path.splitext(os.path.basename(BufferCodecFileName))
        except Exception:
            error.log("cannot interpret string following '--codec-file'")
        result = codec_db.CodecTransformationInfo(FileName=BufferCodecFileName)
    elif BufferCodecName == "unicode":
        # (Still, 'icu' or 'iconv' may provide converted content, but ...)
        # If the internal buffer is 'unicode', then the pattern's state
        # machines are not converted. The requirement for the pattern's
        # range is the same as for the 'buffer element chunks'.
        result = codec_db.CodecInfo("unicode",
                                    NumberSet.from_range(0, self.get_character_value_limit()),
                                    NumberSet.from_range(0, self.get_character_value_limit()))
    elif BufferCodecName == "unit-test":
        result = codec_db.CodecInfo("unicode",
                                    NumberSet.from_range(-sys.maxint, sys.maxint),
                                    NumberSet.from_range(-sys.maxint, sys.maxint))
    else:
        result = codec_db.CodecTransformationInfo(BufferCodecName)

    self.buffer_codec = result
示例5: read_integer
def read_integer(fh):
    """Read an integer literal of any supported base from stream 'fh'.

    RETURNS: Integer value read, or
             None, if no number could be found (stream position restored).
    """
    start_pos = fh.tell()
    base, digit_list = get_number_base(fh)
    if base is None: return None

    # Accumulate characters as long as they belong to the base's digit set.
    txt = ""
    while True:
        tmp = fh.read(1)
        if tmp == "":
            break
        elif tmp not in digit_list:
            fh.seek(-1, 1)
            break
        txt += tmp

    # If we drop out on a digit, then let us assume that the user just missed a point
    if tmp.isdigit() or (tmp in list("ABCDEFabcdef")):
        error.log("Digit '%s' cannot be part of an expression of base %s." % (tmp, base), fh)

    txt = txt.replace(".", "")
    if len(txt) == 0:
        if base in [2, 8, 16, "roman", "Napier"]:
            error.log("Missing digits after for number of base %s, found '%s'." % (str(base), tmp), fh)
        fh.seek(start_pos)
        return None

    # Interpret the collected digits according to the detected base.
    if   base in [2, 8, 10, 16]: return int(txt, base)
    elif base == "roman":        return __roman_number(txt, fh)
    elif base == "Napier":       return __napier_number(txt, fh)
    else:                        return __binary_number(txt, fh)
示例6: get_character_value_limit
def get_character_value_limit(self):
    """A buffer element is a chunk of memory of the size of the granularity
    of which the input pointer increases. For fixed size codecs, such as
    ASCII or UCS32, the BUFFER ELEMENT VALUE LIMIT is exactly the same as
    the CHARACTER VALUE LIMIT.

    However, for dynamic sized codecs, such as UTF8 or UTF16, they are
    different. In UTF8, the input pointer increments by one byte on each
    state transition. However, a character may consist out of multiple
    bytes. The buffer element value limit is 256, but the character value
    limit is the whole range.

    RETURNS: Integer = supremum of possible character range, i.e.
                       one character behind the last possible.
             sys.maxint, if no such limit exists.
    """
    element_size = self.buffer_element_size
    # A size of '-1' means 'undetermined' => no limit can be stated.
    if element_size == -1: return sys.maxint
    try:
        limit = 256 ** element_size
    except:
        error.log("Error while trying to compute 256 to the 'buffer-element-size' (%i bytes)\n" \
                  % element_size + \
                  "Adapt \"--buffer-element-size\" or \"--buffer-element-type\",\n" + \
                  "or specify '--buffer-element-size-irrelevant' to ignore the issue.")
    # Cap the result at the platform's maximum integer.
    if limit > sys.maxint: return sys.maxint
    else:                  return limit
示例7: buffer_element_specification_prepare
def buffer_element_specification_prepare(self):
    """Complete the buffer element (type, size) specification: whichever of
    the two the user left open is derived from the other one.
    """
    global global_character_type_db

    # 'wchar_t' may only be given as a type, never as a size.
    if self.buffer_element_size == "wchar_t":
        error.log("Since Quex version 0.53.5, 'wchar_t' can no longer be specified\n"
                  "with option '--buffer-element-size' or '-bes'. Please, specify\n"
                  "'--buffer-element-type wchar_t' or '--bet'.")
    if self.buffer_element_type == "wchar_t":
        self.converter_ucs_coding_name = "WCHAR_T"

    # (*) Determine buffer element type and size (in bytes)
    if self.buffer_element_size == -1:
        if self.buffer_element_type in global_character_type_db:
            self.buffer_element_size = global_character_type_db[self.buffer_element_type][3]
        elif self.buffer_element_type == "":
            self.buffer_element_size = 1
        else:
            # Buffer element type is not identified in 'global_character_type_db'.
            # => here Quex cannot know its size on its own.
            self.buffer_element_size = -1

    if self.buffer_element_type == "":
        if self.buffer_element_size in (1, 2, 4):
            self.buffer_element_type = {
                1: "uint8_t", 2: "uint16_t", 4: "uint32_t",
            }[self.buffer_element_size]
        elif self.buffer_element_size != -1:
            error.log("Buffer element type cannot be determined for size '%i' which\n" \
                      % self.buffer_element_size +
                      "has been specified by '-b' or '--buffer-element-size'.")

    self.__buffer_element_specification_done_f = True
示例8: run
def run(cl, Argv):
    """Dispatch a query command line to the matching '__handle_*' function,
    based on the flags found on the global 'Setup' object.
    """
    if Setup.query_version_f:
        print_version()
        return
    elif Setup.query_help_f:
        print_help()
        return

    # Regular Expressions extract the BufferLimitCode and the PathTerminatorCode
    # from the sets. So let us define them outside the normal range.
    # NOTE(review): the backups are taken but never restored -- confirm
    # that 'run' is the end of the process for query mode.
    backup_buffer_limit_code = Setup.buffer_limit_code
    backup_path_limit_code   = Setup.path_limit_code
    Setup.buffer_limit_code  = -1
    Setup.path_limit_code    = -1

    try:
        if   Setup.query_codec:                __handle_codec(cl)
        elif Setup.query_codec_list:           __handle_codec_list(cl)
        elif Setup.query_codec_file:           __handle_codec_file(cl)
        elif Setup.query_codec_language:       __handle_codec_for_language(cl)
        elif Setup.query_property is not None: __handle_property(cl)
        elif Setup.query_set_by_property:      __handle_set_by_property(cl)
        elif Setup.query_set_by_expression:    __handle_set_by_expression(cl)
        elif Setup.query_property_match:       __handle_property_match(cl)
        else:
            assert False  # No query option(s) !
    # FIX: replaced legacy 'except E, x' (removed in Python 3) with
    # 'except E as x', which is valid from Python 2.6 onwards.
    except RegularExpressionException as x:
        error.log(x.message)
示例9: do
def do(BufferCodecName, BufferCodecFileName=""):
from quex.engine.state_machine.transformation.base import EncodingTrafoUnicode
from quex.engine.state_machine.transformation.table import EncodingTrafoByTable
from quex.engine.state_machine.transformation.utf8_state_split import EncodingTrafoUTF8
from quex.engine.state_machine.transformation.utf16_state_split import EncodingTrafoUTF16
if BufferCodecName == "utf8":
return EncodingTrafoUTF8()
elif BufferCodecName == "utf16":
return EncodingTrafoUTF16()
elif BufferCodecFileName:
os.path.splitext(os.path.basename(BufferCodecFileName))
try:
os.path.splitext(os.path.basename(BufferCodecFileName))
except:
error.log("cannot interpret string following '--codec-file'")
return EncodingTrafoByTable(FileName=BufferCodecFileName)
elif BufferCodecName == "unicode":
# (Still, 'icu' or 'iconv' may provide converted content, but ...)
# If the internal buffer is 'unicode', then the pattern's state
# machines are not converted. The requirement for the pattern's
# range is the same as for the 'buffer element chunks'.
return EncodingTrafoUnicode(NumberSet(Interval(0, 0x110000)),
NumberSet(Interval(0, 0x110000)))
elif BufferCodecName == "unit-test":
return EncodingTrafoUnicode(NumberSet_All(), NumberSet_All())
else:
return EncodingTrafoByTable(BufferCodecName)
示例10: __general_validate
def __general_validate(fh, Mode, Name, pos):
    """Validate event handler 'Name' against outdated or mutually exclusive
    handler definitions in 'Mode'. Reports via 'error.log' on conflict.
    """
    if Name == "on_indentation":
        fh.seek(pos)
        error.log("Definition of 'on_indentation' is no longer supported since version 0.51.1.\n"
                  "Please, use 'on_indent' for the event of an opening indentation, 'on_dedent'\n"
                  "for closing indentation, and 'on_nodent' for no change in indentation.", fh)

    def error_dedent_and_ndedent(code, A, B):
        # Two-part report: the conflict at the current position, then the
        # location of the earlier, alternative definition.
        error.log("Indentation event handler '%s' cannot be defined, because\n" % A,
                  fh, DontExitF=True)
        error.log("the alternative '%s' has already been defined." % B,
                  code.sr)

    # 'on_dedent' and 'on_n_dedent' exclude each other -- unless the
    # previously defined handler contains only whitespace.
    if Name == "on_dedent" and Mode.incidence_db.has_key("on_n_dedent"):
        fh.seek(pos)
        other = Mode.incidence_db["on_n_dedent"]
        if not other.is_whitespace():
            error_dedent_and_ndedent(other, "on_dedent", "on_n_dedent")

    if Name == "on_n_dedent" and Mode.incidence_db.has_key("on_dedent"):
        fh.seek(pos)
        other = Mode.incidence_db["on_dedent"]
        if not other.is_whitespace():
            error_dedent_and_ndedent(other, "on_n_dedent", "on_dedent")
示例11: __prepare_buffer_element_specification
def __prepare_buffer_element_specification(setup):
    """Derive the lexatom size (in byte) and lexatom type from the global
    'Setup' object, completing whichever of the two was left unspecified.

    NOTE(review): the 'setup' parameter is never used; the function operates
    on the global 'Setup' -- confirm this is intentional.

    RETURNS: lexatom size in byte; -1 if it cannot be determined.
    """
    global global_character_type_db

    # 'wchar_t' may only be given as a type, never as a size.
    if Setup.buffer_lexatom_size_in_byte == "wchar_t":
        error.log("Since Quex version 0.53.5, 'wchar_t' can no longer be specified\n"
                  "with option '--buffer-element-size' or '-bes'. Please, specify\n"
                  "'--buffer-element-type wchar_t' or '--bet'.")
    if Setup.buffer_lexatom_type == "wchar_t":
        Setup.converter_ucs_coding_name = "WCHAR_T"

    # (*) Determine buffer element type and size (in bytes)
    lexatom_size_in_byte = Setup.buffer_lexatom_size_in_byte
    if lexatom_size_in_byte == -1:
        if global_character_type_db.has_key(Setup.buffer_lexatom_type):
            lexatom_size_in_byte = global_character_type_db[Setup.buffer_lexatom_type][3]
        elif Setup.buffer_lexatom_type == "":
            lexatom_size_in_byte = 1
        else:
            # Buffer element type is not identified in 'global_character_type_db'.
            # => here Quex cannot know its size on its own.
            lexatom_size_in_byte = -1

    if Setup.buffer_lexatom_type == "":
        if lexatom_size_in_byte in (1, 2, 4):
            Setup.buffer_lexatom_type = {
                1: "uint8_t", 2: "uint16_t", 4: "uint32_t",
            }[lexatom_size_in_byte]
        elif lexatom_size_in_byte != -1:
            error.log("Buffer element type cannot be determined for size '%i' which\n" \
                      % lexatom_size_in_byte +
                      "has been specified by '-b' or '--buffer-element-size'.")

    return lexatom_size_in_byte
示例12: __parse_event
def __parse_event(new_mode, fh, word):
    """Try to interpret 'word' as an event handler name for 'new_mode' and,
    if so, parse the handler's code fragment from 'fh'.

    RETURNS: True,  if 'word' was an event handler (now registered).
             False, if 'word' is an ordinary pattern.
    """
    pos = fh.tell()

    # Allow '<<EOF>>' and '<<FAIL>>' out of respect for classical tools like 'lex'
    if word == "<<EOF>>":
        word = "on_end_of_stream"
    elif word == "<<FAIL>>":
        word = "on_failure"
    elif word in blackboard.all_section_title_list:
        error.log("Pattern '%s' is a quex section title. Has the closing '}' of mode %s \n" % (word, new_mode.name) \
                  + "been forgotten? Else use quotes, i.e. \"%s\"." % word, fh)
    elif not word.startswith("on_"):
        return False

    comment = "Unknown event handler '%s'. \n" % word + \
              "Note, that any pattern starting with 'on_' is considered an event handler.\n" + \
              "use double quotes to bracket patterns that start with 'on_'."

    __general_validate(fh, new_mode, word, pos)
    error.verify_word_in_list(word, standard_incidence_db.keys(), comment,
                              fh)
    __validate_required_token_policy_queue(word, fh, pos)

    # -- When a termination token is sent, no other token shall follow.
    #    => Enforce return from the analyzer! Do not allow CONTINUE!
    # -- When an 'on_failure' is received allow immediate action of the
    #    receiver => Do not allow CONTINUE!
    continue_f = word not in ("on_end_of_stream", "on_failure")

    new_mode.incidence_db[word] = \
        code_fragment.parse(fh, "%s::%s event handler" % (new_mode.name, word),
                            ContinueF=continue_f)
    return True
示例13: __parse_base_mode_list
def __parse_base_mode_list(fh, new_mode):
    """Read the comma separated list of base mode names that precedes the
    mode body, storing them in 'new_mode.derived_from_list'.
    """
    new_mode.derived_from_list = []
    comma_seen_f = False
    while True:
        # '{' (mode body) or '<' (option list) ends the base mode list;
        # push the delimiter back into the stream.
        if check(fh, "{"):
            fh.seek(-1, 1)
            break
        elif check(fh, "<"):
            fh.seek(-1, 1)
            break

        skip_whitespace(fh)
        identifier = read_identifier(fh)
        if identifier == "":
            break

        new_mode.derived_from_list.append(identifier)
        comma_seen_f = False
        if not check(fh, ","):
            break
        comma_seen_f = True

    if comma_seen_f:
        error.warning("Trailing ',' after base mode '%s'." % new_mode.derived_from_list[-1], fh)
    elif len(new_mode.derived_from_list) != 0:
        # This check is a 'service' -- for those who follow the old convention
        pos = fh.tell()
        skip_whitespace(fh)
        dummy_identifier = read_identifier(fh)
        if dummy_identifier != "":
            error.log("Missing separating ',' between base modes '%s' and '%s'.\n" \
                      % (new_mode.derived_from_list[-1], dummy_identifier) + \
                      "(The comma separator is mandatory since quex 0.53.1)", fh)
        fh.seek(pos)
示例14: parse
def parse(fh):
    """This function parses a mode description and enters it into the
    'blackboard.mode_description_db'. Once all modes are parsed
    they can be translated into 'real' modes and are located in
    'blackboard.mode_db'.
    """
    # NOTE: Catching of EOF happens in caller: parse_section(...)
    skip_whitespace(fh)
    mode_name = read_identifier(fh, OnMissingStr="Missing identifier at beginning of mode definition.")

    # NOTE: constructor does register this mode in the mode_db
    new_mode = ModeDescription(mode_name, SourceRef.from_FileHandle(fh))

    # (*) inherited modes / option_db -- ':' introduces them, '{' opens
    #     the mode body directly.
    skip_whitespace(fh)
    delimiter = fh.read(1)
    if delimiter not in (":", "{"):
        error.log("missing ':' or '{' after mode '%s'" % mode_name, fh)
    if delimiter == ":":
        __parse_option_list(new_mode, fh)

    # (*) read in pattern-action pairs and events
    while __parse_element(new_mode, fh):
        pass
示例15: __parse
def __parse(fh, result, IndentationSetupF=False):
    """Parses pattern definitions of the form:

          [ \t]                                       => grid 4;
          [:intersection([:alpha:], [\X064-\X066]):]  => space 1;

    In other words the right hand side *must* be a character set.
    """
    # NOTE: Catching of EOF happens in caller: parse_section(...)
    #
    while True:
        skip_whitespace(fh)
        if check(fh, ">"):
            break

        # A regular expression state machine on the left hand side.
        pattern, identifier, sr = __parse_definition_head(fh, result)
        if pattern is None and IndentationSetupF:
            error.log("Keyword '\\else' cannot be used in indentation setup.", fh)

        # '__parse_definition_head()' ensures that only identifiers mentioned
        # in 'result' are accepted.
        if IndentationSetupF:
            result.specify(identifier, pattern, sr)
        else:
            value = read_value_specifier(fh, identifier, 1)
            result.specify(identifier, pattern, value, sr)

        if not check(fh, ";"):
            error.log("Missing ';' after '%s' specification." % identifier, fh)

    return result