This page collects typical usage examples of the Errors.log method in Python: what Errors.log does, how it is called, and what it looks like in real code. The curated examples below should help with all three questions, and you can also explore further usage examples from the Errors module that provides the method.
A total of 12 code examples of Errors.log are shown below, sorted by popularity.
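All of the snippets below appear to come from the Ophis 6502 assembler, where the Errors module is imported under the alias Err and every diagnostic goes through Err.log. The module itself is not reproduced on this page, so here is a minimal sketch of the interface the examples rely on, written in the same Python 2 dialect as the examples; the currentpoint and count fields are assumptions based on how the calls below use the module, not code copied from it.

# Minimal sketch of the Errors module assumed by the examples below.
import sys

count = 0                       # number of errors logged so far
currentpoint = "<Top Level>"    # "file:line" of the construct being processed

def log(err):
    """Report an error at the current program point and bump the global
    error count; processing continues so later errors are still found."""
    global count
    count = count + 1
    print>>sys.stderr, str(currentpoint) + ": " + err

def report():
    "Print a summary of how many errors were seen."
    if count == 0:
        print>>sys.stderr, "No errors"
    else:
        print>>sys.stderr, "%d errors" % count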
Example 1: pragmaIncbin
# Required import: import Errors [as alias]
# Or: from Errors import log [as alias]
def pragmaIncbin(ppt, line, result):
    "Includes a binary file"
    filename = line.expect("STRING").value
    offset = IR.ConstantExpr(0)
    size = None
    if str(line.lookahead(0)) == ",":
        line.pop()
        offset = FE.parse_expr(line)
        if str(line.lookahead(0)) == ",":
            line.pop()
            size = FE.parse_expr(line)
    line.expect("EOL")
    if type(filename) == str:
        try:
            f = file(os.path.join(FE.context_directory, filename), "rb")
            if offset.hardcoded and (size is None or size.hardcoded):
                # We know how big it will be, we can just use the values.
                # First check to make sure they're sane
                if offset.value() < 0:
                    Err.log("Offset may not be negative")
                    f.close()
                    return
                f.seek(0, 2)  # Seek to end of file
                if offset.value() > f.tell():
                    Err.log("Offset runs past end of file")
                    f.close()
                    return
                if size is not None:
                    if size.value() < 0:
                        Err.log("Length may not be negative")
                        f.close()
                        return
                    if offset.value() + size.value() > f.tell():
                        Err.log(".incbin length too long")
                        f.close()
                        return
                if size is None:
                    size = IR.ConstantExpr(-1)
                f.seek(offset.value())
                bytes = f.read(size.value())
                bytes = [IR.ConstantExpr(ord(x)) for x in bytes]
                result.append(IR.Node(ppt, "Byte", *bytes))
            else:
                # offset or length could change based on label placement.
                # This seems like an unbelievably bad idea, but since we
                # don't have constant prop it will happen for any symbolic
                # alias. Don't use symbolic aliases when extracting tiny
                # pieces out of humongous files, I guess.
                bytes = f.read()
                bytes = [IR.ConstantExpr(ord(x)) for x in bytes]
                if size is None:
                    size = IR.SequenceExpr([IR.ConstantExpr(len(bytes)),
                                            "-",
                                            offset])
                result.append(IR.Node(ppt, "ByteRange", offset, size, *bytes))
            f.close()
        except IOError:
            Err.log("Could not read " + filename)
            return
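The sanity checks in the hardcoded branch are easy to lose in the nesting. The helper below is not part of Ophis; it is a hypothetical restatement of exactly when this pragma falls back to Err.log, given a file of filesize bytes and literal offset/size arguments.

# Hypothetical restatement of the validation in the hardcoded branch above.
def incbin_args_ok(filesize, offset, size=None):
    if offset < 0:
        return False                    # "Offset may not be negative"
    if offset > filesize:
        return False                    # "Offset runs past end of file"
    if size is not None and size < 0:
        return False                    # "Length may not be negative"
    if size is not None and offset + size > filesize:
        return False                    # ".incbin length too long"
    return True

assert incbin_args_ok(1024, 0)          # include the whole file
assert incbin_args_ok(1024, 256, 64)    # 64 bytes starting at offset 256
assert not incbin_args_ok(1024, 1000, 64)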
Example 2: endMacro
# Required import: import Errors [as alias]
# Or: from Errors import log [as alias]
def endMacro():
    global currentname
    global currentbody
    global macros
    if currentname is None:
        Err.log("Internal error! Ended a non-existent macro!")
    else:
        macros[currentname] = currentbody
        currentname = None
        currentbody = None
Example 3: add_token
# Required import: import Errors [as alias]
# Or: from Errors import log [as alias]
def add_token(token):
    "Converts a substring into a single lexeme"
    if token == "":
        return
    if token == "0":
        result.append(Lexeme("NUM", 0))
        return
    firstchar = token[0]
    rest = token[1:]
    if firstchar == '"':
        result.append(Lexeme("STRING", rest))
        return
    elif firstchar in bases:
        try:
            result.append(Lexeme("NUM", long(rest, bases[firstchar][1])))
            return
        except ValueError:
            Err.log('Invalid ' + bases[firstchar][0] + ' constant: ' +
                    rest)
            result.append(Lexeme("NUM", 0))
            return
    elif firstchar.isdigit():
        try:
            result.append(Lexeme("NUM", long(token)))
        except ValueError:
            Err.log('Identifiers may not begin with a number')
            result.append(Lexeme("LABEL", "ERROR"))
        return
    elif firstchar == "'":
        if len(rest) == 1:
            result.append(Lexeme("NUM", ord(rest)))
        else:
            Err.log("Invalid character constant '" + rest + "'")
            result.append(Lexeme("NUM", 0))
        return
    elif firstchar in punctuation:
        if rest != "":
            Err.log("Internal lexer error! '" + token + "' can't happen!")
        result.append(Lexeme(firstchar))
        return
    else:  # Label, opcode, or index register
        id = token.lower()
        if is_opcode(id):
            result.append(Lexeme("OPCODE", id))
        elif id == "x":
            result.append(Lexeme("X"))
        elif id == "y":
            result.append(Lexeme("Y"))
        else:
            result.append(Lexeme("LABEL", id))
        return
    # should never reach here
    Err.log("Internal lexer error: add_token fall-through")
Example 4: newMacro
# Required import: import Errors [as alias]
# Or: from Errors import log [as alias]
def newMacro(name):
    "Start creating a new macro with the specified name."
    global currentname
    global currentbody
    global macros
    if currentname is not None:
        Err.log("Internal error! Nested macro attempt!")
    else:
        if name in macros:
            Err.log("Duplicate macro definition '%s'" % name)
        currentname = name
        currentbody = []
Example 5: expect
# Required import: import Errors [as alias]
# Or: from Errors import log [as alias]
def expect(self, *tokens):
    """Reads a token from the ParseLine line and returns it if it's of a
    type in the sequence tokens. Otherwise, it logs an error."""
    token = self.pop()
    if token.type in tokens:
        return token
    if 'LABEL' in tokens:
        if token.type in ['X', 'Y']:
            token.value = token.type.lower()
            token.type = 'LABEL'
            return token
        elif token.type == 'OPCODE':
            token.type = 'LABEL'
            return token
    Err.log('Expected: "' + '", "'.join(tokens) + '"')
    return token
Example 6: expandMacro
# Required import: import Errors [as alias]
# Or: from Errors import log [as alias]
def expandMacro(ppt, name, arglist):
    global macros
    if name not in macros:
        Err.log("Undefined macro '%s'" % name)
        return IR.NullNode
    argexprs = [IR.Node(ppt, "Label", "_*%d" % i, arg)
                for (i, arg) in zip(xrange(1, sys.maxint), arglist)]
    bindexprs = [IR.Node(ppt, "Label", "_%d" % i, IR.LabelExpr("_*%d" % i))
                 for i in range(1, len(arglist) + 1)]
    body = [IR.Node("%s->%s" % (ppt, node.ppt), node.nodetype, *node.data)
            for node in macros[name]]
    invocation = (
        [IR.Node(ppt, "ScopeBegin")]
        + argexprs
        + [IR.Node(ppt, "ScopeBegin")]
        + bindexprs
        + body
        + [IR.Node(ppt, "ScopeEnd"), IR.Node(ppt, "ScopeEnd")]
    )
    return IR.SequenceNode(ppt, invocation)
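Examples 2, 4 and 6 only make sense together: newMacro opens a definition, each parsed line of the body is appended to currentbody, and endMacro files the body away under macros[name] so that expandMacro can splice it back in later. The sketch below shows that flow; registerNode is a hypothetical stand-in for however the real module records lines, and ppt and body_node are placeholders.

# Hypothetical driver showing how the three macro helpers fit together.
def registerNode(node):
    "Append one parsed line to the macro currently being defined."
    currentbody.append(IR.Node(node.ppt, node.nodetype, *node.data))

newMacro("print_char")        # duplicate names are reported via Err.log
registerNode(body_node)       # ...repeated once per line of the macro body...
endMacro()                    # the body now lives in macros["print_char"]

# A later invocation expands into a doubly scoped IR sequence:
node = expandMacro(ppt, "print_char", [IR.ConstantExpr(0x41)])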
Example 7: pragmaCharmapbin
# Required import: import Errors [as alias]
# Or: from Errors import log [as alias]
def pragmaCharmapbin(ppt, line, result):
    "Load a new character map from a file"
    global currentcharmap
    filename = line.expect("STRING").value
    line.expect("EOL")
    if type(filename) == str:
        try:
            f = file(os.path.join(FE.context_directory, filename), "rb")
            bytes = f.read()
            f.close()
        except IOError:
            Err.log("Could not read " + filename)
            return
        if len(bytes) == 256:
            currentcharmap = bytes
        else:
            Err.log("Character map " + filename + " not 256 bytes long")
Example 8: pragmaCharmap
# Required import: import Errors [as alias]
# Or: from Errors import log [as alias]
def pragmaCharmap(ppt, line, result):
    "Modify the character map."
    global currentcharmap, basecharmap
    if str(line.lookahead(0)) == "EOL":
        currentcharmap = basecharmap
    else:
        bytes = readData(line)
        try:
            base = bytes[0].data
            newsubstr = "".join([chr(x.data) for x in bytes[1:]])
            currentcharmap = currentcharmap[:base] + newsubstr + \
                currentcharmap[base + len(newsubstr):]
            if len(currentcharmap) != 256 or base < 0 or base > 255:
                Err.log("Charmap replacement out of range")
                currentcharmap = currentcharmap[:256]
        except ValueError:
            Err.log("Illegal character in .charmap directive")
Example 9: parse_file
# Required import: import Errors [as alias]
# Or: from Errors import log [as alias]
def parse_file(ppt, filename, load_once=False):
    "Loads an Ophis source file, and returns an IR list."
    global context_directory, loadedfiles
    Err.currentpoint = ppt
    old_context = context_directory
    if filename != '-':
        if context_directory is not None:
            filename = os.path.abspath(os.path.join(context_directory,
                                                    filename))
        if load_once and filename in loadedfiles:
            if Cmd.print_loaded_files:
                print>>sys.stderr, "Skipping " + filename
            return IR.NullNode
        loadedfiles[filename] = True
    if Cmd.print_loaded_files:
        if filename != '-':
            print>>sys.stderr, "Loading " + filename
        else:
            print>>sys.stderr, "Loading from standard input"
    try:
        if filename != '-':
            if context_directory is not None:
                filename = os.path.join(context_directory, filename)
            f = file(filename)
            linelist = f.readlines()
            f.close()
            context_directory = os.path.abspath(os.path.dirname(filename))
        else:
            context_directory = os.getcwd()
            linelist = sys.stdin.readlines()
        pptlist = ["%s:%d" % (filename, i + 1) for i in range(len(linelist))]
        lexlist = map(lex, pptlist, linelist)
        IRlist = map(parse_line, pptlist, lexlist)
        IRlist = [node for node in IRlist if node is not IR.NullNode]
        context_directory = old_context
        return IR.SequenceNode(ppt, IRlist)
    except IOError:
        Err.log("Could not read " + filename)
        context_directory = old_context
        return IR.NullNode
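A plausible top-level call, roughly as the assembler driver would make it; the "<Top Level>" program point, the filename, and the post-parse check against Err.count (see the Errors sketch near the top of this page) are assumptions rather than code shown here.

# Hypothetical driver fragment: parse the root file, then stop if any of
# the Err.log calls above fired during parsing.
root_ir = parse_file("<Top Level>", "game.oph")
if Err.count > 0:
    Err.report()
    sys.exit(1)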
Example 10: atom
# Required import: import Errors [as alias]
# Or: from Errors import log [as alias]
def atom():
    "Parses lowest-priority expression components."
    global templabelcount
    next = line.lookahead(0).type
    if next == "NUM":
        return IR.ConstantExpr(line.expect("NUM").value)
    elif next in ["LABEL", "X", "Y", "OPCODE"]:
        return IR.LabelExpr(line.expect("LABEL").value)
    elif next == "^":
        line.expect("^")
        return IR.PCExpr()
    elif next == "[":
        line.expect("[")
        result = parse_expr(line)
        line.expect("]")
        return result
    elif next == "+":
        offset = 0
        while next == "+":
            offset += 1
            line.expect("+")
            next = line.lookahead(0).type
        return IR.LabelExpr("*" + str(templabelcount + offset))
    elif next == "-":
        offset = 1
        while next == "-":
            offset -= 1
            line.expect("-")
            next = line.lookahead(0).type
        return IR.LabelExpr("*" + str(templabelcount + offset))
    elif next == ">":
        line.expect(">")
        return IR.HighByteExpr(atom())
    elif next == "<":
        line.expect("<")
        return IR.LowByteExpr(atom())
    else:
        Err.log('Expected: expression')
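The "+" and "-" branches resolve Ophis's anonymous labels relative to templabelcount, the counter that example 12 below increments each time a bare "*" label is defined. Reading the arithmetic directly off the code above:

# With templabelcount == 5 (five "*" labels seen so far):
#   "+"   ->  IR.LabelExpr("*6")   # the next anonymous label
#   "++"  ->  IR.LabelExpr("*7")   # the one after that
#   "-"   ->  IR.LabelExpr("*5")   # the most recently defined label
#   "--"  ->  IR.LabelExpr("*4")   # the one before it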
Example 11: lex
# Required import: import Errors [as alias]
# Or: from Errors import log [as alias]
def lex(point, line):
    """Turns a line of source into a sequence of lexemes."""
    Err.currentpoint = point
    result = []

    def is_opcode(op):
        "Tests whether a string is an opcode or an identifier"
        return op in Ops.opcodes

    def add_token(token):
        "Converts a substring into a single lexeme"
        if token == "":
            return
        if token == "0":
            result.append(Lexeme("NUM", 0))
            return
        firstchar = token[0]
        rest = token[1:]
        if firstchar == '"':
            result.append(Lexeme("STRING", rest))
            return
        elif firstchar in bases:
            try:
                result.append(Lexeme("NUM", long(rest, bases[firstchar][1])))
                return
            except ValueError:
                Err.log('Invalid ' + bases[firstchar][0] + ' constant: ' +
                        rest)
                result.append(Lexeme("NUM", 0))
                return
        elif firstchar.isdigit():
            try:
                result.append(Lexeme("NUM", long(token)))
            except ValueError:
                Err.log('Identifiers may not begin with a number')
                result.append(Lexeme("LABEL", "ERROR"))
            return
        elif firstchar == "'":
            if len(rest) == 1:
                result.append(Lexeme("NUM", ord(rest)))
            else:
                Err.log("Invalid character constant '" + rest + "'")
                result.append(Lexeme("NUM", 0))
            return
        elif firstchar in punctuation:
            if rest != "":
                Err.log("Internal lexer error! '" + token + "' can't happen!")
            result.append(Lexeme(firstchar))
            return
        else:  # Label, opcode, or index register
            id = token.lower()
            if is_opcode(id):
                result.append(Lexeme("OPCODE", id))
            elif id == "x":
                result.append(Lexeme("X"))
            elif id == "y":
                result.append(Lexeme("Y"))
            else:
                result.append(Lexeme("LABEL", id))
            return
        # should never reach here
        Err.log("Internal lexer error: add_token fall-through")

    def add_EOL():
        "Adds an end-of-line lexeme"
        result.append(Lexeme("EOL"))

    # Actual routine begins here
    value = ""
    quotemode = False
    backslashmode = False
    for c in line.strip():
        if backslashmode:
            backslashmode = False
            value = value + c
        elif c == "\\":
            backslashmode = True
        elif quotemode:
            if c == '"':
                quotemode = False
            else:
                value = value + c
        elif c == ';':
            add_token(value)
            value = ""
            break
        elif c == '.' and value != "":
            value = value + c
        elif c.isspace():
            add_token(value)
            value = ""
        elif c in punctuation:
            add_token(value)
            add_token(c)
            value = ""
        elif c == '"':
            add_token(value)
            value = '"'
            quotemode = True
        else:
            #.........the rest of this function is omitted on the source page.........
Example 12: aux
# Required import: import Errors [as alias]
# Or: from Errors import log [as alias]
def aux():
    "Accumulates all IR nodes defined by this line."
    if line.lookahead(0).type == "EOL":
        pass
    elif line.lookahead(1).type == ":":
        newlabel = line.expect("LABEL").value
        line.expect(":")
        result.append(IR.Node(ppt, "Label", newlabel, IR.PCExpr()))
        aux()
    elif line.lookahead(0).type == "*":
        global templabelcount
        templabelcount = templabelcount + 1
        result.append(IR.Node(ppt, "Label", "*" + str(templabelcount),
                              IR.PCExpr()))
        line.expect("*")
        aux()
    elif line.lookahead(0).type == "." or line.lookahead(0).type == "`":
        which = line.expect(".", "`").type
        if (which == "."):
            pragma = line.expect("LABEL").value
        else:
            pragma = "invoke"
        pragmaFunction = "pragma" + pragma.title()
        for mod in pragma_modules:
            if hasattr(mod, pragmaFunction):
                getattr(mod, pragmaFunction)(ppt, line, result)
                break
        else:
            Err.log("Unknown pragma " + pragma)
    else:  # Instruction
        opcode = line.expect("OPCODE").value
        arg2 = None
        if line.lookahead(0).type == "#":
            mode = "Immediate"
            line.expect("#")
            arg = parse_expr(line)
            line.expect("EOL")
        elif line.lookahead(0).type == "(":
            line.expect("(")
            arg = parse_expr(line)
            if line.lookahead(0).type == ",":
                mode = "PointerX"
                line.expect(",")
                line.expect("X")
                line.expect(")")
                line.expect("EOL")
            else:
                line.expect(")")
                tok = line.expect(",", "EOL").type
                if tok == "EOL":
                    mode = "Pointer"
                else:
                    mode = "PointerY"
                    line.expect("Y")
                    line.expect("EOL")
        elif line.lookahead(0).type == "EOL":
            mode = "Implied"
            arg = None
        else:
            arg = parse_expr(line)
            tok = line.expect("EOL", ",").type
            if tok == ",":
                # Parser has to special-case the BBXn instructions,
                # which uniquely take two addresses
                if opcode[:3] in ["bbs", "bbr"]:
                    arg2 = parse_expr(line)
                    mode = "Memory2"
                else:
                    tok = line.expect("X", "Y").type
                    if tok == "X":
                        mode = "MemoryX"
                    else:
                        mode = "MemoryY"
                line.expect("EOL")
            else:
                mode = "Memory"
        result.append(IR.Node(ppt, mode, opcode, arg, arg2))
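The mode strings this routine attaches to each IR node correspond one-to-one to 6502 operand syntax; summarizing the branches above:

# Operand syntax                  -> mode string passed to IR.Node
#   (no operand)                  -> "Implied"
#   #expr                         -> "Immediate"
#   (expr, x)                     -> "PointerX"
#   (expr)                        -> "Pointer"
#   (expr), y                     -> "PointerY"
#   expr                          -> "Memory"
#   expr, x                       -> "MemoryX"
#   expr, y                       -> "MemoryY"
#   expr, expr (bbs*/bbr* only)   -> "Memory2"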