This article collects typical usage examples of token.tok_name in Python. If you have been wondering what token.tok_name is for, how to use it, or what it looks like in real code, the curated examples below may help. Note that tok_name is not a method but a dictionary in the standard-library token module, mapping numeric token types to their descriptive names. You may also want to explore further usage examples of the token module itself.
The following shows 15 code examples of token.tok_name, sorted by popularity by default.
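Before the examples, here is a minimal self-contained sketch (not taken from any of the projects below) of what tok_name provides: a dict that maps the integer token types produced by the tokenizer back to readable names.

import io
import token
import tokenize

source = "x = 1 + 2\n"  # made-up input, purely for illustration
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    # tok.type is an int such as token.NAME; tok_name turns it back into 'NAME'
    print(token.tok_name[tok.type], repr(tok.string))
# prints NAME 'x', OP '=', NUMBER '1', OP '+', NUMBER '2', NEWLINE '\n', ENDMARKER ''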
Example 1: translate_symbols
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def translate_symbols(ast_tuple):
    """ Translate numeric grammar symbols in an ast_tuple to descriptive names.

    This simply traverses the tree, converting any integer value to the
    corresponding name found in symbol.sym_name or token.tok_name.
    """
    new_list = []
    for item in ast_tuple:
        if isinstance(item, int):
            new_list.append(int_to_symbol(item))
        elif issequence(item):
            new_list.append(translate_symbols(item))
        else:
            new_list.append(item)
    if isinstance(ast_tuple, tuple):
        return tuple(new_list)
    else:
        return new_list
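A hedged usage sketch for Example 1: it assumes the int_to_symbol helper from Example 6, the deprecated parser and symbol modules (removed in Python 3.10), and an issequence helper from the same codebase, for which a minimal stand-in is given here.

import parser  # deprecated since Python 3.9, removed in 3.10
import symbol
import token

def issequence(obj):
    # minimal stand-in for the codebase's own helper
    return isinstance(obj, (list, tuple))

tup = parser.suite("x = 1").totuple()
print(translate_symbols(tup)[0])  # 'file_input' instead of the raw integer 257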
Example 2: extract_line_count
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    lines = open(example_file).readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring + 1 + start_row, erow + 1 + start_row
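A hedged usage sketch (paths made up): the function returns two 1-based line numbers, the line just past the module docstring and the total line count, both adjusted when a shebang line was skipped.

end_of_docstring, total_lines = extract_line_count('plot_demo.py', 'examples')
print(end_of_docstring, total_lines)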
Example 3: extract_line_count
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    if six.PY2:
        lines = open(example_file).readlines()
    else:
        lines = open(example_file, encoding='utf-8').readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring + 1 + start_row, erow + 1 + start_row
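Example 3 differs from Example 2 only in the six.PY2 branch around open(). For what it's worth, io.open gives the same encoding-aware behaviour on both major versions without branching, roughly:

import io

with io.open(example_file, encoding='utf-8') as f:  # works on Python 2 and 3 alike
    lines = f.readlines()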
Example 4: __init__
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def __init__(self, code, value, start=(0, 0), stop=(0, 0), line=''):
    """
    Args:
        code (string|int): Token code. Ints are translated using token.tok_name.
        value (string): Token value.
        start (tuple): Pair of values describing token start line, start position.
        stop (tuple): Pair of values describing token stop line, stop position.
        line (string): String containing the line the token was parsed from.
    """
    try:
        # Translate an int code such as token.NAME into its name 'NAME'
        self.code = token.tok_name[code]
    except (KeyError, TypeError):
        # Already a name (or an unknown code); keep it as given
        self.code = code
    self.value = value
    self.start = start
    self.stop = stop
    self.line = line
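A hedged usage sketch, assuming this __init__ belongs to a token-wrapper class named Token (the class name is not visible in the excerpt):

import token

t = Token(token.NAME, 'foo', start=(1, 0), stop=(1, 3), line='foo = 1\n')
print(t.code)                     # 'NAME' -- the int was translated via tok_name
print(Token('NAME', 'foo').code)  # string codes fall through unchanged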
Example 5: extract_line_count
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    # (Python 2 only: file() and .next do not exist on Python 3;
    #  see Examples 2 and 3 for portable variants)
    example_file = os.path.join(target_dir, filename)
    lines = file(example_file).readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    tokens = tokenize.generate_tokens(lines.__iter__().next)
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring + 1 + start_row, erow + 1 + start_row
Example 6: int_to_symbol
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def int_to_symbol(i):
    """ Convert a numeric symbol or token to a descriptive name.
    """
    try:
        return symbol.sym_name[i]
    except KeyError:
        return token.tok_name[i]
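A quick check of the lookup order (grammar symbols first, then tokens); note the symbol module is deprecated since Python 3.9 and removed in 3.10:

import symbol  # removed in Python 3.10
import token

print(int_to_symbol(257))         # 'file_input', a grammar symbol
print(int_to_symbol(token.NAME))  # 'NAME', found via the tok_name fallback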
Example 7: __call__
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def __call__(self, ttype, tstring, stup, etup, line):
    # dispatch to the current state handler
    ## import token
    ## print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
    ##     'tstring:', tstring
    self.__state(ttype, tstring, stup[0])
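This signature matches the 5-tuples yielded by tokenize.generate_tokens, so an instance can be fed a token stream directly. A sketch, with eater standing in for an instance of the (unnamed) class and a made-up input file:

import tokenize

with open('module.py') as f:  # hypothetical input file
    for tok in tokenize.generate_tokens(f.readline):
        eater(*tok)  # unpacks into (ttype, tstring, stup, etup, line)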
Example 8: extract_docstring
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any
    """
    lines = open(filename).readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery",
                                         "Please check your example's layout",
                                         " and make sure it's correct")
                else:
                    first_par = paragraphs[0]
            break
    return docstring, first_par, erow + 1 + start_row
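One remark on the eval(tok_content) call above: it executes the raw STRING token's text to obtain its value, which is how the original reads. For untrusted input, ast.literal_eval is the safer drop-in; a tiny illustration with a made-up token text:

import ast

tok_content = '"""An example docstring."""'  # shape of a STRING token's text
docstring = ast.literal_eval(tok_content)    # evaluates literals only, never code
print(docstring)  # An example docstring.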
Example 9: type_name
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def type_name(self):
    # Human-readable name of the wrapped token's numeric type
    return token.tok_name[self._type]
Example 10: type_name
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def type_name(self):
    # Same idea as Example 9, reading a public attribute instead
    return token.tok_name[self.type]
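Examples 9 and 10 read like property getters on token-wrapper classes. For comparison, the standard tokenize.TokenInfo already carries the numeric type, and tok_name recovers the same name:

import io
import token
import tokenize

tok = next(tokenize.generate_tokens(io.StringIO('pass\n').readline))
print(token.tok_name[tok.type])  # 'NAME'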
Example 11: extract_docstring
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def extract_docstring(self):
    """ Extract a module-level docstring
    """
    lines = open(self.filename).readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    tokens = tokenize.generate_tokens(lines.__iter__().next)  # Python 2 (.next)
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs,
            # extract the first one:
            paragraphs = '\n'.join(line.rstrip()
                                   for line in docstring.split('\n')
                                   ).split('\n\n')
            if len(paragraphs) > 0:
                first_par = paragraphs[0]
            break
    thumbloc = None
    for i, line in enumerate(docstring.split("\n")):
        m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
        if m:
            thumbloc = float(m.group(1)), float(m.group(2))
            break
    if thumbloc is not None:
        self.thumbloc = thumbloc
        docstring = "\n".join([l for l in docstring.split("\n")
                               if not l.startswith("_thumb")])
    self.docstring = docstring
    self.short_desc = first_par
    self.end_line = erow + 1 + start_row
Example 12: check_tokenize
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def check_tokenize(self, s, expected):
    # Format the tokens in s in a table format.
    # The ENDMARKER is omitted.
    # (Assumes: from io import BytesIO; from tokenize import tokenize;
    #  from token import tok_name, ENDMARKER)
    result = []
    f = BytesIO(s.encode('utf-8'))
    for type, token, start, end, line in tokenize(f.readline):
        if type == ENDMARKER:
            break
        type = tok_name[type]
        result.append("    %(type)-10.10s %(token)-13.13r %(start)s %(end)s" %
                      locals())
    self.assertEqual(result,
                     ["    ENCODING   'utf-8'       (0, 0) (0, 0)"] +
                     expected.rstrip().splitlines())
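A hedged usage sketch with positions computed for the input shown; on interpreters before the implicit-NEWLINE change (around 3.6.7/3.7) the final NEWLINE row would be absent:

self.check_tokenize("1 + 2", """\
    NUMBER     '1'           (1, 0) (1, 1)
    OP         '+'           (1, 2) (1, 3)
    NUMBER     '2'           (1, 4) (1, 5)
    NEWLINE    ''            (1, 5) (1, 6)
    """)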
Example 13: assertExactTypeEqual
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def assertExactTypeEqual(self, opstr, *optypes):
    # (Assumes: from io import BytesIO; from tokenize import tokenize, ENCODING)
    tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline))
    num_optypes = len(optypes)
    self.assertEqual(len(tokens), 2 + num_optypes)
    self.assertEqual(token.tok_name[tokens[0].exact_type],
                     token.tok_name[ENCODING])
    for i in range(num_optypes):
        self.assertEqual(token.tok_name[tokens[i + 1].exact_type],
                         token.tok_name[optypes[i]])
    self.assertEqual(token.tok_name[tokens[1 + num_optypes].exact_type],
                     token.tok_name[token.ENDMARKER])
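Why exact_type rather than type: tokenize labels every operator with the generic OP token type, and TokenInfo.exact_type recovers the specific constant. A small illustration:

import token
from io import BytesIO
from tokenize import tokenize

tok = list(tokenize(BytesIO(b'+').readline))[1]  # index 0 is the ENCODING token
print(token.tok_name[tok.type])        # 'OP'
print(token.tok_name[tok.exact_type])  # 'PLUS'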
Example 14: cvrtr
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def cvrtr(tuple):
    """debug method returning an ast string in a readable fashion"""
    # TupleType comes from Python 2's types module (use tuple directly on Python 3)
    if type(tuple) is TupleType:
        try:
            try:
                txt = 'token.' + token.tok_name[tuple[0]]
            except KeyError:
                txt = 'symbol.' + symbol.sym_name[tuple[0]]
        except KeyError:
            txt = 'Unknown token/symbol'
        return [txt] + list(map(cvrtr, tuple[1:]))
    else:
        return tuple
Example 15: token_repr
# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def token_repr(tok_type, string):
    """Returns a human-friendly representation of a token with the given type and string."""
    # repr() prefixes unicode with 'u' on Python 2 but not Python 3; strip it out for consistency.
    return '%s:%s' % (token.tok_name[tok_type], repr(string).lstrip('u'))
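A quick usage check:

import token

print(token_repr(token.NAME, 'foo'))   # NAME:'foo'
print(token_repr(token.NUMBER, '42'))  # NUMBER:'42'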