This article collects typical usage examples of the token.NEWLINE attribute in Python. If you are unsure what token.NEWLINE does, how to use it, or what real code that relies on it looks like, the curated examples below should help. You can also explore further usage examples from the token module, where this attribute is defined.
The following 14 code examples of token.NEWLINE are shown, sorted by popularity by default.
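Before the examples, here is a minimal stdlib-only sketch (not part of the examples below) showing where token.NEWLINE appears in a token stream. tokenize emits NEWLINE at the logical end of each statement and NL for non-logical line breaks such as blank lines; several examples below hinge on that distinction.

import io
import tokenize

# The blank line between the two statements produces NL; each statement's
# logical end produces NEWLINE.
src = "x = 1\n\ny = 2\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))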
Example 1: decorator
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def decorator(self, nodelist):
    # '@' dotted_name [ '(' [arglist] ')' ]
    assert len(nodelist) in (3, 5, 6)
    assert nodelist[0][0] == token.AT
    assert nodelist[-1][0] == token.NEWLINE
    assert nodelist[1][0] == symbol.dotted_name
    funcname = self.decorator_name(nodelist[1][1:])
    if len(nodelist) > 3:
        assert nodelist[2][0] == token.LPAR
        expr = self.com_call_function(funcname, nodelist[3])
    else:
        expr = funcname
    return expr
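The asserts mirror the decorator grammar quoted in the comment: the line starts with AT and ends with NEWLINE. The compiler/symbol CST API used here is from the Python 2 era; as a rough modern analogue (a sketch of mine, not part of the example), the same AT ... NEWLINE shape is visible at the token level, using exact_type to distinguish '@' from the generic OP type:

import io
import tokenize

# The decorator line starts with an AT token and ends with NEWLINE,
# matching the grammar comment and asserts above.
src = "@decorate(arg)\ndef f():\n    pass\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.exact_type], repr(tok.string))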
Example 2: __openseen
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def __openseen(self, ttype, tstring, lineno):
    if ttype == tokenize.OP and tstring == ')':
        # We've seen the last of the translatable strings.  Record the
        # line number of the first line of the strings and update the list
        # of messages seen.  Reset state for the next batch.  If there
        # were no strings inside _(), then just ignore this entry.
        if self.__data:
            self.__addentry(EMPTYSTRING.join(self.__data))
        self.__state = self.__waiting
    elif ttype == tokenize.STRING:
        self.__data.append(safe_eval(tstring))
    elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                       token.NEWLINE, tokenize.NL]:
        # warn if we see anything other than STRING or whitespace
        print >> sys.stderr, _(
            '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
            ) % {
            'token': tstring,
            'file': self.__curfile,
            'lineno': self.__lineno
            }
        self.__state = self.__waiting
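For context, __openseen is one state in pygettext's extraction state machine (this example is Python 2 code): after _( has been seen, it collects STRING tokens until the closing parenthesis, treating NEWLINE, NL, INDENT, DEDENT, and COMMENT as ignorable noise. A stripped-down, self-contained Python 3 version of that loop (my sketch, without pygettext's file and line bookkeeping) might look like this:

import ast
import io
import token
import tokenize

def extract_messages(source):
    """Collect string literals inside _(...) calls, one message per call."""
    messages = []
    in_call = False
    parts = []
    toks = list(tokenize.generate_tokens(io.StringIO(source).readline))
    for i, tok in enumerate(toks):
        if not in_call:
            if (tok.type == token.NAME and tok.string == '_'
                    and i + 1 < len(toks) and toks[i + 1].string == '('):
                in_call = True
                parts = []
        elif tok.type == token.OP and tok.string == ')':
            if parts:
                messages.append(''.join(parts))
            in_call = False
        elif tok.type == token.STRING:
            # like pygettext's safe_eval: turn the literal into its value
            parts.append(ast.literal_eval(tok.string))
        # everything else (NEWLINE, NL, COMMENT, ...) is skipped here;
        # the real pygettext warns about unexpected tokens instead
    return messages

print(extract_messages("x = _('hello ' 'world')\ny = _('bye')\n"))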
Example 3: test_tokenizing
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def test_tokenizing(self):
    # Test that we produce meaningful tokens on initialization.
    # (Two spaces before the comment, so that 'foo' starts at offset 22.)
    source = "import re  # comment\n\nfoo = 'bar'\n"
    atok = asttokens.ASTTokens(source)
    self.assertEqual(atok.text, source)
    self.assertEqual([str(t) for t in atok.tokens], [
        "NAME:'import'",
        "NAME:'re'",
        "COMMENT:'# comment'",
        "NEWLINE:'\\n'",
        "NL:'\\n'",
        "NAME:'foo'",
        "OP:'='",
        'STRING:"\'bar\'"',
        "NEWLINE:'\\n'",
        "ENDMARKER:''"
    ])
    self.assertEqual(atok.tokens[5].type, token.NAME)
    self.assertEqual(atok.tokens[5].string, 'foo')
    self.assertEqual(atok.tokens[5].index, 5)
    self.assertEqual(atok.tokens[5].startpos, 22)
    self.assertEqual(atok.tokens[5].endpos, 25)
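The test shows that asttokens keeps NEWLINE, NL, and COMMENT tokens that a plain AST discards. The usual reason to reach for the library is mapping AST nodes back to their exact source text; a hedged sketch using the documented ASTTokens(..., parse=True) and get_text API:

import asttokens

source = "import re  # comment\n\nfoo = 'bar'\n"
atok = asttokens.ASTTokens(source, parse=True)
assign = atok.tree.body[1]       # the  foo = 'bar'  assignment node
print(atok.get_text(assign))     # -> foo = 'bar'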
Example 4: process_tokens
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def process_tokens(self, tokens):
    # Module docstring can be a native string.
    # Also use as a flag to notice when __future__ statements are no longer
    # valid to avoid wasting time checking every NAME token
    # (which is < STRING).
    module_start = True
    line_num = 1
    for type_, val, start, end, line in tokens:
        if type_ in (token.NEWLINE, tokenize.NL):
            line_num += 1
        # Anything else means we are past the first string in the module,
        # any comments (e.g. shebang), and no more __future__ statements
        # are possible.
        if type_ > token.NEWLINE and type_ < token.N_TOKENS:
            module_start = False
        elif type_ == token.STRING:
            line_num += val.count('\n')
            if not module_start and not val.startswith(('u', 'b')):
                self.add_message('native-string', line=line_num)
        elif module_start and type_ == token.NAME:
            if len(line) >= 39:  # Fast-fail check
                if u'__future__' in line and u'unicode_literals' in line:
                    return
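process_tokens is a pylint token-checker hook: pylint hands it the module's full token stream. To inspect the kind of stream such a checker receives (a stdlib-only sketch, not pylint's plumbing), tokenize the module source yourself and note how NEWLINE/NL drive the line counter above:

import io
import tokenize

src = '"""module docstring"""\nfrom __future__ import unicode_literals\n'
for type_, val, start, end, line in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[type_], repr(val))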
Example 5: __openseen
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def __openseen(self, ttype, tstring, lineno):
    if ttype == tokenize.OP and tstring == ')':
        # We've seen the last of the translatable strings.  Record the
        # line number of the first line of the strings and update the list
        # of messages seen.  Reset state for the next batch.  If there
        # were no strings inside _(), then just ignore this entry.
        if self.__data:
            self.__addentry(EMPTYSTRING.join(self.__data))
        self.__state = self.__waiting
    elif ttype == tokenize.STRING and is_literal_string(tstring):
        self.__data.append(safe_eval(tstring))
    elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                       token.NEWLINE, tokenize.NL]:
        # warn if we see anything other than STRING or whitespace
        print(_(
            '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
        ) % {
            'token': tstring,
            'file': self.__curfile,
            'lineno': self.__lineno
        }, file=sys.stderr)
        self.__state = self.__waiting
Example 6: check_for_wrong_tuple
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def check_for_wrong_tuple(tree, code, noqa):
    errors = []
    candidates = []
    for assign in ast.walk(tree):
        if not isinstance(assign, (ast.Assign, ast.Return)):
            continue
        elif assign.lineno in noqa:
            continue
        elif isinstance(assign.value, ast.Call):
            continue
        for tuple_el in ast.walk(assign):
            if isinstance(tuple_el, ast.Tuple) and len(tuple_el.elts) == 1:
                candidates.append((assign.lineno, assign.col_offset))
                break
    if not candidates:
        return []
    for candidate in candidates:
        number_nl = 0  # account for non-logical (NL) newlines within statements
        tokens = tokenize.generate_tokens(
            lambda L=iter(code): next(L)
        )
        previous_token = None
        for t in tokens:
            if previous_token is not None and previous_token.type == tokenize.NEWLINE:
                number_nl = 0
            x = TokenInfo(*t)
            if x.start[0] - number_nl != candidate[0]:
                previous_token = x
                continue
            if x.type == tokenize.NL:
                number_nl += 1
            if x.type == token.NEWLINE and ending_of_bad_tuple(previous_token):
                errors.append(x.start)
            if x.type == token.OP and x.string == '=' and previous_token.type != token.NAME:
                x = TokenInfo(*next(tokens))
                if x.type != token.OP and x.string != '(':
                    x_next = TokenInfo(*next(tokens))
                    if ending_of_bad_tuple(x_next):
                        errors.append(x.start)
            previous_token = x
    return errors
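The number_nl counter exists because tokenize reports NL, not NEWLINE, for line breaks inside open brackets, so a statement's NEWLINE can land several physical lines below the lineno that ast reports for it. A quick stdlib-only demonstration of that offset:

import io
import tokenize

# The break inside the parentheses is NL; the statement's logical end,
# two physical lines below the assignment's ast lineno, is NEWLINE.
src = "x = (\n    1,\n)\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    if tok.type in (tokenize.NL, tokenize.NEWLINE):
        print(tokenize.tok_name[tok.type], tok.start)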
Example 7: __init__
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def __init__(self):
    self._dispatch = {}
    for value, name in symbol.sym_name.items():
        if hasattr(self, name):
            self._dispatch[value] = getattr(self, name)
    self._dispatch[token.NEWLINE] = self.com_NEWLINE
    self._atom_dispatch = {token.LPAR: self.atom_lpar,
                           token.LSQB: self.atom_lsqb,
                           token.LBRACE: self.atom_lbrace,
                           token.BACKQUOTE: self.atom_backquote,
                           token.NUMBER: self.atom_number,
                           token.STRING: self.atom_string,
                           token.NAME: self.atom_name,
                           }
    self.encoding = None
Example 8: single_input
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def single_input(self, node):
    ### do we want to do anything about being "interactive" ?
    # NEWLINE | simple_stmt | compound_stmt NEWLINE
    n = node[0][0]
    if n != token.NEWLINE:
        return self.com_stmt(node[0])
    return Pass()
Example 9: file_input
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def file_input(self, nodelist):
    doc = self.get_docstring(nodelist, symbol.file_input)
    if doc is not None:
        i = 1
    else:
        i = 0
    stmts = []
    for node in nodelist[i:]:
        if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
            self.com_append_stmt(stmts, node)
    return Module(doc, Stmt(stmts))
Example 10: decorators
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def decorators(self, nodelist):
    # decorators: decorator ([NEWLINE] decorator)* NEWLINE
    items = []
    for dec_nodelist in nodelist:
        assert dec_nodelist[0] == symbol.decorator
        items.append(self.decorator(dec_nodelist[1:]))
    return Decorators(items)
Example 11: suite
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def suite(self, nodelist):
    # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
    if len(nodelist) == 1:
        return self.com_stmt(nodelist[0])
    stmts = []
    for node in nodelist:
        if node[0] == symbol.stmt:
            self.com_append_stmt(stmts, node)
    return Stmt(stmts)

# --------------------------------------------------------------
#
# EXPRESSION NODES  (invoked by com_node())
#
Example 12: com_NEWLINE
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def com_NEWLINE(self, *args):
    # A ';' at the end of a line can make a NEWLINE token appear
    # here.  Render it harmless.  (genc discards ('discard',
    # ('const', xxxx)) Nodes)
    return Discard(Const(None))
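At the token level, a trailing ';' is an ordinary OP that is still followed by the statement's NEWLINE; in the old compiler CST that could surface as a NEWLINE-only statement node, which this handler turns into a harmless no-op. A quick look at the token stream (a sketch of mine):

import io
import tokenize

src = "x = 1;\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
# the stream ends with OP ';' followed by NEWLINE '\n' (then ENDMARKER)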
Example 13: tokeneater
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def tokeneater(self, toktype, toktext, start, end, line):
    (srow, scol) = start
    (erow, ecol) = end
    if toktype == token.ERRORTOKEN:
        raise RuntimeError("ErrorToken occurred")
    if toktype in [token.NEWLINE, tokenize.NL]:
        self.output.write('\n')
        self.col = 0
    else:
        # map token type to a color group
        if token.LPAR <= toktype and toktype <= token.OP:
            toktype = token.OP
        elif toktype == token.NAME and keyword.iskeyword(toktext):
            toktype = _KEYWORD
        # restore whitespace
        assert scol >= self.col
        self.output.write(" " * (scol - self.col))
        try:
            tokclass = tokclasses[toktype]
        except KeyError:
            tokclass = None
        if self.tokclass is not None and tokclass != self.tokclass:
            self.output.write('</span>')
        if tokclass is not None and tokclass != self.tokclass:
            self.output.write('<span class="%s">' % tokclass)
        self.output.write(cgi.escape(toktext))
        self.tokclass = tokclass
        # calculate new column position
        self.col = scol + len(toktext)
        newline = toktext.rfind("\n")
        if newline != -1:
            self.col = len(toktext) - newline - 1
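This tokeneater follows the old Python 2 tokenize.tokenize(readline, tokeneater) callback protocol, which Python 3 removed (note also that cgi.escape was removed in Python 3.8 in favor of html.escape). A driver today would walk the generator and invoke the method by hand; a sketch assuming a colorizer object exposing the tokeneater above:

import io
import tokenize

def run_colorizer(colorizer, source):
    # Python 3 stand-in for the removed tokenize.tokenize(readline, tokeneater):
    # feed each token's fields to the tokeneater ourselves.
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        colorizer.tokeneater(tok.type, tok.string, tok.start, tok.end, tok.line)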
Example 14: __suitedocstring
# Required imports: import token [as alias]
# Or: from token import NEWLINE [as alias]
def __suitedocstring(self, ttype, tstring, lineno):
    # ignore any intervening noise
    if ttype == tokenize.STRING:
        self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
        self.__state = self.__waiting
    elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
                       tokenize.COMMENT):
        # there was no class docstring
        self.__state = self.__waiting
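__suitedocstring runs right after a class or def header line: between that header's NEWLINE and the docstring's STRING token there is only INDENT (plus perhaps COMMENT) noise, which is exactly what the elif tolerates before giving up. The window is easy to observe (a stdlib-only sketch):

import io
import tokenize

src = 'def f():\n    "doc"\n'
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
# ... OP ':', NEWLINE, INDENT, STRING '"doc"', NEWLINE, DEDENT, ENDMARKER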