本文整理汇总了Python中util.peek函数的典型用法代码示例。如果您正苦于以下问题:Python peek函数的具体用法?Python peek怎么用?Python peek使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了peek函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: ml_parse_value
def ml_parse_value(gmls, default=None):
    """ Parse a value in a macro-language string.

    gmls    -- stream holding the macro-language string
    default -- value to return if no explicit value is present, or None
    Returns the parsed value; raises error.RunError(error.IFC) on a
    syntax error or when neither a value nor a default is available.
    """
    c = util.skip(gmls, ml_whitepace)
    # optional sign prefix; only '-' negates the final value
    sgn = -1 if c == '-' else 1
    if c in ('+', '-'):
        gmls.read(1)
        c = util.peek(gmls)
        # don't allow default if sign is given
        default = None
    if c == '=':
        # '=' introduces a variable reference or a VARPTR$ representation
        gmls.read(1)
        c = util.peek(gmls)
        if len(c) == 0:
            # stream ended right after '='
            raise error.RunError(error.IFC)
        elif ord(c) > 8:
            # printable character: a (possibly indexed) variable name follows
            name = util.get_var_name(gmls)
            indices = ml_parse_indices(gmls)
            step = var.get_var_or_array(name, indices)
            util.require_read(gmls, (';',), err=error.IFC)
        else:
            # varptr$: low control code starts a 3-byte pointer representation
            step = get_value_for_varptrstr(gmls.read(3))
    elif c and c in string.digits:
        # numeric constant
        step = ml_parse_const(gmls)
    elif default is not None:
        step = default
    else:
        raise error.RunError(error.IFC)
    if sgn == -1:
        step = vartypes.number_neg(step)
    return step
示例2: detokenise_line
def detokenise_line(ins, bytepos=None):
    """ Convert a tokenised program line to ascii text.

    ins     -- stream positioned at the start of a tokenised line
    bytepos -- optional byte offset whose text position is to be reported
    Returns (line_number, output, textpos); line_number is -1 when the
    stream has ended.
    """
    litstring, comment = False, False
    textpos = 0
    current_line = util.parse_line_number(ins)
    if current_line < 0:
        # parse_line_number has returned -1 and left us at: .. 00 | _00_ 00 1A
        # stream ends or end of file sequence \x00\x00\x1A
        return -1, '', 0
    elif current_line == 0 and util.peek(ins) == ' ':
        # ignore up to one space after line number 0
        ins.read(1)
    output = bytearray(str(current_line))
    # write one extra whitespace character after line number
    # unless first char is TAB
    if util.peek(ins) != '\t':
        output += bytearray(' ')
    # detokenise tokens until end of line
    while True:
        s = ins.read(1)
        # record the output position that corresponds to bytepos, once reached
        if not textpos and ins.tell() >= bytepos:
            textpos = len(output)
        if s in tk.end_line:
            # \x00 ends lines and comments when listed,
            # if not inside a number constant
            # stream ended or end of line
            break
        elif s == '"':
            # start of literal string, passed verbatim
            # until a closing quote or EOL comes by
            # however number codes are *printed* as the corresponding numbers,
            # even inside comments & literals
            output += s
            litstring = not litstring
        elif s in tk.number:
            # step back and let the number detokeniser consume the token
            ins.seek(-1, 1)
            representation.detokenise_number(ins, output)
        elif s in tk.linenum:
            # 0D: line pointer (unsigned int) - this token should not be here;
            # interpret as line number and carry on
            # 0E: line number (unsigned int)
            output += representation.uint_to_str(bytearray(ins.read(2)))
        elif comment or litstring or ('\x20' <= s <= '\x7E'):
            # honest ASCII
            output += s
        elif s == '\x0A':
            # LF becomes LF CR
            output += '\x0A\x0D'
        elif s <= '\x09':
            # controls that do not double as tokens
            output += s
        else:
            # token byte: step back and expand the keyword
            ins.seek(-1, 1)
            comment = detokenise_keyword(ins, output)
    return current_line, output, textpos
示例3: tokenise_word
def tokenise_word(ins, outs):
    """ Convert a keyword to tokenised form.

    ins  -- stream of ascii program text
    outs -- output stream receiving tokenised bytes
    Returns the (possibly adjusted) word that was consumed.
    """
    word = ''
    while True:
        c = ins.read(1).upper()
        word += c
        # special cases 'GO TO' -> 'GOTO', 'GO SUB' -> 'GOSUB'
        if word == 'GO':
            pos = ins.tell()
            # GO SUB allows 1 space
            if util.peek(ins, 4) == ' SUB':
                word = 'GOSUB'
                ins.read(4)
            else:
                # GOTO allows any number of spaces
                nxt = util.skip(ins, whitespace)
                if ins.read(2) == 'TO':
                    word = 'GOTO'
                else:
                    # neither SUB nor TO: rewind to just after GO
                    ins.seek(pos)
            if word in ('GOTO', 'GOSUB'):
                nxt = util.peek(ins).upper()
                if nxt in name_chars:
                    # a name character follows: part of a longer identifier,
                    # so undo and treat as plain GO after all
                    ins.seek(pos)
                    word = 'GO'
                else:
                    pass
        if word in keyword_to_token:
            # ignore if part of a longer name, except FN, SPC(, TAB(, USR
            if word not in ('FN', 'SPC(', 'TAB(', 'USR'):
                nxt = util.peek(ins).upper()
                if nxt in name_chars:
                    continue
            token = keyword_to_token[word]
            # handle special case ELSE -> :ELSE
            if word == 'ELSE':
                outs.write(':' + token)
            # handle special case WHILE -> WHILE+
            elif word == 'WHILE':
                outs.write(token + tk.O_PLUS)
            else:
                outs.write(token)
            break
        # allowed names: letter + (letters, numbers, .)
        elif not(c in name_chars):
            # not a keyword and not a name character: push the last char
            # back onto the stream and write out the word read so far
            if c!='':
                word = word[:-1]
                ins.seek(-1, 1)
            outs.write(word)
            break
    return word
示例4: tokenise_oct
def tokenise_oct(ins, outs):
    """ Convert octal expression in Python string to number token. """
    # the O in &O is optional: &777 is accepted as well as &O777
    if util.peek(ins).upper() == 'O':
        ins.read(1)
    # accumulate consecutive octal digits
    digits = ''
    c = util.peek(ins)
    while c and c in string.octdigits:
        digits += ins.read(1)
        c = util.peek(ins)
    # an empty digit string stands for zero
    value = int(digits, 8) if digits else 0
    outs.write(tk.T_OCT + str(vartypes.value_to_uint(value)))
示例5: auto_step
def auto_step(self):
    """ Generate an AUTO line number and wait for input.

    Writes the next automatic line number to the console (marked with
    '*' if the line already exists), reads a screen line, then stores
    it as a program line or schedules it for execution as a command.
    """
    numstr = str(state.basic_state.auto_linenum)
    console.write(numstr)
    if state.basic_state.auto_linenum in state.basic_state.line_numbers:
        # mark numbers of lines that already exist
        console.write('*')
    line = bytearray(console.wait_screenline(from_start=True))
    if line[:len(numstr)+1] == numstr+'*':
        # line still starts with number + '*': blank out the marker
        line[len(numstr)] = ' '
    else:
        # echo a space and read the screen line again
        console.write(' ')
        line = bytearray(console.wait_screenline(from_start=True))
    # run or store it; don't clear lines or raise undefined line number
    state.basic_state.direct_line = tokenise.tokenise_line(line)
    c = util.peek(state.basic_state.direct_line)
    if c == '\0':
        # check for lines starting with numbers (6553 6) and empty lines
        empty, scanline = program.check_number_start(state.basic_state.direct_line)
        if not empty:
            program.store_line(state.basic_state.direct_line)
            reset.clear()
        # advance the AUTO line number from the line actually scanned
        state.basic_state.auto_linenum = scanline + state.basic_state.auto_increment
    elif c != '':
        # it is a command, go and execute
        state.basic_state.parse_mode = True
示例6: detokenise_line
def detokenise_line(ins, bytepos=None):
    """ Convert a tokenised program line to ascii text. """
    current_line = util.parse_line_number(ins)
    if current_line < 0:
        # parse_line_number returned -1: the stream has ended or the
        # end-of-file sequence \x00\x00\x1A follows
        return -1, '', 0
    if current_line == 0 and util.peek(ins) == ' ':
        # ignore up to one space after line number 0
        ins.read(1)
    prefix = bytearray(str(current_line))
    # one extra space after the line number, unless a TAB comes next
    if util.peek(ins) != '\t':
        prefix += bytearray(' ')
    body, textpos = detokenise_compound_statement(ins, bytepos)
    return current_line, prefix + body, textpos + len(prefix) + 1
示例7: tokenise_oct
def tokenise_oct(ins, outs):
    """ Convert octal expression in Python string to number token. """
    # the O in &O is optional: &777 is accepted as well as &O777
    if util.peek(ins).upper() == 'O':
        ins.read(1)
    digits = ''
    while True:
        c = util.peek(ins)
        if c and c in number_whitespace:
            # oct literals may be interrupted by whitespace
            ins.read(1)
            continue
        if not c or c not in string.octdigits:
            break
        digits += ins.read(1)
    # an empty digit string stands for zero
    value = int(digits, 8) if digits else 0
    outs.write(tk.T_OCT + str(vartypes.integer_to_bytes(vartypes.int_to_integer_unsigned(value))))
示例8: tokenise_jump_number
def tokenise_jump_number(ins, outs):
    """ Convert an ascii line number pointer to tokenised form. """
    uint_repr = tokenise_uint(ins)
    if uint_repr != '':
        # a line number was read: emit the unsigned-int token
        outs.write(tk.T_UINT + uint_repr)
    elif util.peek(ins) == '.':
        # a single '.' stands for the current line; pass it through
        ins.read(1)
        outs.write('.')
示例9: tokenise_data
def tokenise_data(ins, outs):
    """ Pass DATA as is, till end of statement, except for literals. """
    # copy verbatim up to a quote or the end of the statement/line
    terminators = ('', '\r', '\0', ':', '"')
    while True:
        outs.write(ascii_read_to(ins, terminators))
        if util.peek(ins) != '"':
            break
        # string literal in DATA
        tokenise_literal(ins, outs)
示例10: tokenise_hex
def tokenise_hex(ins, outs):
    """ Convert hex expression in Python string to number token. """
    # consume the H of &H
    ins.read(1)
    digits = ''
    c = util.peek(ins)
    # accumulate consecutive hex digits
    while c and c in string.hexdigits:
        digits += ins.read(1)
        c = util.peek(ins)
    # an empty digit string stands for zero
    value = int(digits, 16) if digits else 0
    outs.write(tk.T_HEX + str(vartypes.value_to_uint(value)))
示例11: read_entry
def read_entry():
    """ READ a unit of DATA.

    Scans the program bytecode from the stored DATA position and
    returns the next comma- or statement-separated DATA value as a
    string; raises error.RunError(4) when out of DATA.
    """
    # remember the interpreter's position; restored before returning
    current = state.basic_state.bytecode.tell()
    state.basic_state.bytecode.seek(state.basic_state.data_pos)
    if util.peek(state.basic_state.bytecode) in util.end_statement:
        # initialise - find first DATA
        util.skip_to(state.basic_state.bytecode, ('\x84',)) # DATA
    if state.basic_state.bytecode.read(1) not in ('\x84', ','):
        # out of DATA
        raise error.RunError(4)
    vals, word, literal = '', '', False
    while True:
        # read next char; omit leading whitespace
        if not literal and vals == '':
            c = util.skip_white(state.basic_state.bytecode)
        else:
            c = util.peek(state.basic_state.bytecode)
        # parse char
        if c == '' or (not literal and c == ',') or (c in util.end_line or (not literal and c in util.end_statement)):
            break
        elif c == '"':
            # a quote toggles literal mode; the quote itself is consumed
            state.basic_state.bytecode.read(1)
            literal = not literal
            if not literal:
                # after a closing quote, only , or end of statement may follow
                util.require(state.basic_state.bytecode, util.end_statement+(',',))
        else:
            state.basic_state.bytecode.read(1)
            if literal:
                vals += c
            else:
                word += c
            # omit trailing whitespace
            if c not in util.whitespace:
                vals += word
                word = ''
    # remember where DATA reading stopped; restore the program position
    state.basic_state.data_pos = state.basic_state.bytecode.tell()
    state.basic_state.bytecode.seek(current)
    return vals
示例12: tokenise_hex
def tokenise_hex(ins, outs):
    """ Convert hex expression in Python string to number token. """
    # pass the H in &H
    ins.read(1)
    digits = ''
    c = util.peek(ins)
    # hex literals must not be interrupted by whitespace
    while c and c in string.hexdigits:
        digits += ins.read(1)
        c = util.peek(ins)
    # an empty digit string stands for zero
    value = int(digits, 16) if digits else 0
    outs.write(tk.T_HEX + str(vartypes.integer_to_bytes(vartypes.int_to_integer_unsigned(value))))
示例13: tokenise_number
def tokenise_number(ins, outs):
    """ Convert Python-string number representation to number token. """
    first = util.peek(ins)
    if not first:
        # nothing to read
        return
    if first == '&':
        # radix prefix: &H is hexadecimal, anything else is octal (&O or &)
        ins.read(1)
        if util.peek(ins).upper() == 'H':
            tokenise_hex(ins, outs)
        else:
            tokenise_oct(ins, outs)
    elif first in string.digits + '.+-':
        # decimal constant; note GW passes signs separately as a token
        # and only stores positive numbers in the program
        tokenise_dec(ins, outs)
    else:
        # NOTE(review): stepping back here looks wrong (nothing was read
        # from ins in this branch) but has not caused problems so far
        ins.seek(-1, 1)
示例14: store_line
def store_line(self, line):
    """ Store a program line or schedule a command line for execution.

    line -- ascii input line
    Returns True if interactive mode should continue, i.e. when no
    command was scheduled for execution.
    """
    if not line:
        # empty input: nothing to store or run
        return True
    state.basic_state.direct_line = tokenise.tokenise_line(line)
    c = util.peek(state.basic_state.direct_line)
    if c == '\0':
        # check for lines starting with numbers (6553 6) and empty lines
        program.check_number_start(state.basic_state.direct_line)
        program.store_line(state.basic_state.direct_line)
        reset.clear()
    elif c != '':
        # it is a command, go and execute
        state.basic_state.parse_mode = True
    return not state.basic_state.parse_mode
示例15: get_number_tokens
def get_number_tokens(fors):
    """ Get consecutive number-related formatting tokens.

    fors -- stream over a PRINT USING format string
    Returns (word, digits_before, decimals): the characters consumed,
    the number of digit positions before the decimal point, and the
    number of decimals. Returns ('', 0, 0) and rewinds the stream when
    no valid number field is found.
    """
    word, digits_before, decimals = '', 0, 0
    # + comes first
    leading_plus = (util.peek(fors) == '+')
    if leading_plus:
        word += fors.read(1)
    # $ and * combinations
    c = util.peek(fors)
    if c in ('$', '*'):
        word += fors.read(2)
        if word[-1] != c:
            # not doubled ($$ or **): rewind, this is not a number field
            fors.seek(-len(word), 1)
            return '', 0, 0
        if c == '*':
            # ** counts as two digit positions
            digits_before += 2
            if util.peek(fors) == '$':
                # **$ combination
                word += fors.read(1)
        else:
            # $$ counts as one digit position
            digits_before += 1
    # number field
    c = util.peek(fors)
    dot = (c == '.')
    if dot:
        word += fors.read(1)
    if c in ('.', '#'):
        while True:
            c = util.peek(fors)
            if not dot and c == '.':
                # first decimal point: switch to counting decimals
                word += fors.read(1)
                dot = True
            elif c == '#' or (not dot and c == ','):
                # '#' is a digit position; ',' allowed before the point only
                word += fors.read(1)
                if dot:
                    decimals += 1
                else:
                    digits_before += 1
            else:
                break
    if digits_before + decimals == 0:
        # no digit positions at all: rewind, this is not a number field
        fors.seek(-len(word), 1)
        return '', 0, 0
    # post characters
    if util.peek(fors, 4) == '^^^^':
        # exponent field
        word += fors.read(4)
    if not leading_plus and util.peek(fors) in ('-', '+'):
        # trailing sign, only allowed when there was no leading +
        word += fors.read(1)
    return word, digits_before, decimals