This article collects typical usage examples of the tokenize.NEWLINE attribute in Python. If you are wondering what tokenize.NEWLINE is, how to use it, or want to see it in real code, the curated examples below may help. You can also explore further usage examples of the tokenize module that this attribute belongs to.
The following shows 15 code examples of the tokenize.NEWLINE attribute, sorted by popularity by default.
Example 1: get_type
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def get_type(tokens, start_index):
    """return the line type : docstring, comment, code, empty"""
    i = start_index
    tok_type = tokens[i][0]
    start = tokens[i][2]
    pos = start
    line_type = None
    while i < len(tokens) and tokens[i][2][0] == start[0]:
        tok_type = tokens[i][0]
        pos = tokens[i][3]
        if line_type is None:
            if tok_type == tokenize.STRING:
                line_type = 'docstring_lines'
            elif tok_type == tokenize.COMMENT:
                line_type = 'comment_lines'
            elif tok_type in JUNK:
                pass
            else:
                line_type = 'code_lines'
        i += 1
    if line_type is None:
        line_type = 'empty_lines'
    elif i < len(tokens) and tokens[i][0] == tokenize.NEWLINE:
        i += 1
    return i, pos[0] - start[0] + 1, line_type
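A minimal usage sketch for get_type. The JUNK tuple is not shown in the example; the definition below (token types that should not decide the line type) and the sample source are illustrative assumptions:

import io
import tokenize

# Assumed definition of JUNK: token types that do not decide the line type.
JUNK = (tokenize.NL, tokenize.INDENT)

source = "# a comment\nx = 1\n"
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
i = 0
while i < len(tokens) and tokens[i][0] != tokenize.ENDMARKER:
    i, line_count, line_type = get_type(tokens, i)
    print(line_count, line_type)
# Expected output: "1 comment_lines" then "1 code_lines".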
Example 2: _find_logical
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end)
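Note that _find_logical calls a module-level generate_tokens helper that accepts a whole source string rather than a readline callable. A hedged usage sketch with such a stand-in helper (the helper and sample lines below are assumptions):

import io
import tokenize

# Stand-in for the module's generate_tokens helper, which (unlike
# tokenize.generate_tokens) takes a complete source string.
def generate_tokens(source):
    return list(tokenize.generate_tokens(io.StringIO(source).readline))

source_lines = ["total = (1 +\n",
                "         2)\n",
                "print(total)\n"]
starts, ends = _find_logical(source_lines)
print(starts)  # two logical lines: one starting on row 0, one on row 2
print(ends)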
Example 3: get_type
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def get_type(tokens, start_index):
    """return the line type : docstring, comment, code, empty"""
    i = start_index
    tok_type = tokens[i][0]
    start = tokens[i][2]
    pos = start
    line_type = None
    while i < len(tokens) and tokens[i][2][0] == start[0]:
        tok_type = tokens[i][0]
        pos = tokens[i][3]
        if line_type is None:
            if tok_type == tokenize.STRING:
                line_type = "docstring_lines"
            elif tok_type == tokenize.COMMENT:
                line_type = "comment_lines"
            elif tok_type in JUNK:
                pass
            else:
                line_type = "code_lines"
        i += 1
    if line_type is None:
        line_type = "empty_lines"
    elif i < len(tokens) and tokens[i][0] == tokenize.NEWLINE:
        i += 1
    return i, pos[0] - start[0] + 1, line_type
Example 4: process_tokens
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def process_tokens(self, tokens):
    encoding = "ascii"
    for i, (tok_type, token, start, _, line) in enumerate(tokens):
        if tok_type == tokenize.ENCODING:
            # this is always the first token processed
            encoding = token
        elif tok_type == tokenize.STRING:
            # 'token' is the whole un-parsed token; we can look at the start
            # of it to see whether it's a raw or unicode string etc.
            self.process_string_token(token, start[0])
            # We figure the next token, ignoring comments & newlines:
            j = i + 1
            while j < len(tokens) and tokens[j].type in (
                tokenize.NEWLINE,
                tokenize.NL,
                tokenize.COMMENT,
            ):
                j += 1
            next_token = tokens[j] if j < len(tokens) else None
            if encoding != "ascii":
                # We convert `tokenize` character count into a byte count,
                # to match with astroid `.col_offset`
                start = (start[0], len(line[: start[1]].encode(encoding)))
            self.string_tokens[start] = (str_eval(token), next_token)
Example 5: scanvars
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used."""
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            parent, prefix = None, ''
        lasttoken = token
    return vars
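This function relies on a __UNDEF__ sentinel and a lookup() helper defined elsewhere (the standard library's cgitb module, which contains an identical function, provides its own). A minimal sketch under those assumptions, with simplified stand-ins:

import keyword
import sys
import tokenize

# Hypothetical stand-ins for the helpers scanvars expects.
__UNDEF__ = []  # unique sentinel meaning "name not found"

def lookup(name, frame, locals):
    if name in locals:
        return 'local', locals[name]
    if name in frame.f_globals:
        return 'global', frame.f_globals[name]
    return None, __UNDEF__

def demo():
    data = {'answer': 42}
    frame = sys._getframe()
    # A readline-style callable that feeds exactly one logical line.
    reader = iter(["data['answer'] + missing\n"]).__next__
    for name, where, value in scanvars(reader, frame, frame.f_locals):
        print(name, where, value)

demo()
# Prints "data local {'answer': 42}" and "missing None []" (the sentinel).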
Example 6: __openseen
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
Note: this example uses Python 2 syntax (the print >> sys.stderr statement).
def __openseen(self, ttype, tstring, lineno):
    if ttype == tokenize.OP and tstring == ')':
        # We've seen the last of the translatable strings.  Record the
        # line number of the first line of the strings and update the list
        # of messages seen.  Reset state for the next batch.  If there
        # were no strings inside _(), then just ignore this entry.
        if self.__data:
            self.__addentry(EMPTYSTRING.join(self.__data))
        self.__state = self.__waiting
    elif ttype == tokenize.STRING:
        self.__data.append(safe_eval(tstring))
    elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                       token.NEWLINE, tokenize.NL]:
        # warn if we see anything else than STRING or whitespace
        print >> sys.stderr, _(
            '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
            ) % {
            'token': tstring,
            'file': self.__curfile,
            'lineno': self.__lineno
            }
        self.__state = self.__waiting
Example 7: enumerate_keyword_args
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def enumerate_keyword_args(tokens):
    """
    Iterates over *tokens* and returns a dictionary with function names as the
    keys and lists of keyword arguments as the values.
    """
    keyword_args = {}
    inside_function = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            inside_function = False
        if token_type == tokenize.NAME:
            if token_string == "def":
                function_name = tokens[index + 1][1]
                inside_function = function_name
                keyword_args.update({function_name: []})
            elif inside_function:
                if tokens[index + 1][1] == '=':  # keyword argument
                    keyword_args[function_name].append(token_string)
    return keyword_args
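A small usage sketch (the sample source below is illustrative):

import io
import tokenize

source = "def greet(name, punctuation='!'):\n    return name + punctuation\n"
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
print(enumerate_keyword_args(tokens))
# {'greet': ['punctuation']}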
Example 8: remove_docstrings
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def remove_docstrings(tokens):
    """
    Removes docstrings from *tokens* which is expected to be a list equivalent
    of `tokenize.generate_tokens()` (so we can update in-place).
    """
    prev_tok_type = None
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        if token_type == tokenize.STRING:
            if prev_tok_type == tokenize.INDENT:
                # Definitely a docstring
                tokens[index][1] = ''  # Remove it
                # Remove the leftover indentation and newline:
                tokens[index - 1][1] = ''
                tokens[index - 2][1] = ''
            elif prev_tok_type == tokenize.NL:
                # This captures whole-module docstrings:
                if tokens[index + 1][0] == tokenize.NEWLINE:
                    tokens[index][1] = ''
                    # Remove the trailing newline:
                    tokens[index + 1][1] = ''
        prev_tok_type = token_type
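The function mutates the token list in place, so each immutable TokenInfo has to be converted to a mutable list first. A hedged usage sketch with an illustrative source string:

import io
import tokenize

source = (
    "# -*- coding: utf-8 -*-\n"
    '"""Module docstring."""\n'
    "def f():\n"
    '    """Function docstring."""\n'
    "    return 1\n"
)
# Convert each TokenInfo into a list so token strings can be blanked in place.
tokens = [list(tok) for tok in tokenize.generate_tokens(io.StringIO(source).readline)]
remove_docstrings(tokens)
print([tok[1] for tok in tokens if tok[0] == tokenize.STRING])
# Both docstring tokens are now '' (blanked in place).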
Example 9: __get_tokens
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def __get_tokens(it):
    tokens: List[tokenize.TokenInfo] = []

    try:
        for t in it:
            if t.type in tokenizer.SKIP_TOKENS:
                continue
            if t.type == tokenize.NEWLINE and t.string == '':
                continue
            if t.type == tokenize.DEDENT:
                continue
            if t.type == tokenize.ERRORTOKEN:
                continue
            tokens.append(t)
    except tokenize.TokenError as e:
        if not e.args[0].startswith('EOF in'):
            print(e)
    except IndentationError as e:
        print(e)

    return tokens
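This helper filters a project-specific tokenizer.SKIP_TOKENS set that is not shown here; the sketch below supplies an assumed stand-in just to exercise the function:

import io
import tokenize
from types import SimpleNamespace

# Hypothetical stand-in for the project's `tokenizer` module; the real
# SKIP_TOKENS set is an assumption.
tokenizer = SimpleNamespace(
    SKIP_TOKENS={tokenize.COMMENT, tokenize.NL, tokenize.INDENT, tokenize.ENDMARKER},
)

it = tokenize.generate_tokens(io.StringIO("x = 1  # note\nif x:\n    x += 1\n").readline)
for tok in __get_tokens(it):
    print(tokenize.tok_name[tok.type], repr(tok.string))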
Example 10: fix_newlines
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def fix_newlines(tokens):
    first = True
    curline = 1
    for token in tokens:
        if first:
            first = False
            curline = token.end[0] + 1
        else:
            # Fill NEWLINE token in between
            while curline < token.start[0]:
                yield TokenInfo(type=tokenize.NEWLINE,
                                string='\n',
                                start=(curline, 0),
                                end=(curline, 0),
                                line='\n', )
                curline += 1
            curline = token.end[0] + 1
        yield token
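A usage sketch, assuming TokenInfo has been imported from tokenize. Dropping the NL tokens produced by two blank lines lets fix_newlines synthesize plain NEWLINE tokens for the missing rows:

import io
import tokenize
from tokenize import TokenInfo

source = "a = 1\n\n\nb = 2\n"
filtered = [t for t in tokenize.generate_tokens(io.StringIO(source).readline)
            if t.type != tokenize.NL]
for tok in fix_newlines(filtered):
    print(tok.start, tokenize.tok_name[tok.type], repr(tok.string))
# Synthetic NEWLINE tokens are emitted for rows 2 and 3.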
Example 11: tokeneater
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def tokeneater(self, type, token, srowcol, erowcol, line):
    if not self.started:
        # look for the first "def", "class" or "lambda"
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True    # skip to the end of the line
    elif type == tokenize.NEWLINE:
        self.passline = False   # stop skipping when a NEWLINE is seen
        self.last = srowcol[0]
        if self.islambda:       # lambdas always end at the first NEWLINE
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent = self.indent + 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent = self.indent - 1
        # the end of matching indent/dedent pairs end a block
        # (note that this only works for "def"/"class" blocks,
        #  not e.g. for "if: else:" or "try: finally:" blocks)
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # any other token on the same indentation level end the previous
        # block as well, except the pseudo-tokens COMMENT and NL.
        raise EndOfBlock
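This token-eater needs a host object carrying started/passline/indent/islambda/last state and an EndOfBlock exception (it mirrors the standard library's inspect.BlockFinder). A self-contained sketch, with the host class and exception supplied as assumptions:

import io
import tokenize

class EndOfBlock(Exception):
    """Raised by tokeneater when the first block has ended."""

class BlockFinder:
    """Minimal host providing the attributes tokeneater relies on."""
    tokeneater = tokeneater  # reuse the function above as a method

    def __init__(self):
        self.indent = 0
        self.islambda = False
        self.started = False
        self.passline = False
        self.last = 1

source = "def add(a, b):\n    return a + b\n\nprint('after')\n"
finder = BlockFinder()
try:
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        finder.tokeneater(*tok)
except EndOfBlock:
    pass
print(''.join(source.splitlines(keepends=True)[:finder.last]))
# Prints only the two lines of the `def add` block.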
Example 12: is_eol_token
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def is_eol_token(token):
    """Check if the token is an end-of-line token."""
    return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n'
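Here NEWLINE is not the token constant itself but a module-level set of both newline token types (pycodestyle defines such a constant); a sketch with that assumption spelled out:

import io
import tokenize

# Assumed module-level constant covering both kinds of newline tokens.
NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])

tokens = list(tokenize.generate_tokens(io.StringIO("x = 1\n").readline))
print([tokenize.tok_name[tok.type] for tok in tokens if is_eol_token(tok)])
# ['NEWLINE']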
Example 13: handle_newline
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def handle_newline(self, token_type):
    """Handle the logic when encountering a newline token."""
    if token_type == tokenize.NEWLINE:
        self.run_logical_checks()
        self.processor.reset_blank_before()
    elif len(self.processor.tokens) == 1:
        # The physical line contains only this token.
        self.processor.visited_new_blank_line()
        self.processor.delete_first_token()
    else:
        self.run_logical_checks()
Example 14: is_trailing_comma
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def is_trailing_comma(tokens, index):
    """Check if the given token is a trailing comma

    :param tokens: Sequence of modules tokens
    :type tokens: list[tokenize.TokenInfo]
    :param int index: Index of token under check in tokens
    :returns: True if the token is a comma which trails an expression
    :rtype: bool
    """
    token = tokens[index]
    if token.exact_type != tokenize.COMMA:
        return False
    # Must have remaining tokens on the same line such as NEWLINE
    left_tokens = itertools.islice(tokens, index + 1, None)
    same_line_remaining_tokens = list(itertools.takewhile(
        lambda other_token, _token=token: other_token.start[0] == _token.start[0],
        left_tokens
    ))
    # Note: If the newline is tokenize.NEWLINE and not tokenize.NL
    # then the newline denotes the end of expression
    is_last_element = all(
        other_token.type in (tokenize.NEWLINE, tokenize.COMMENT)
        for other_token in same_line_remaining_tokens
    )
    if not same_line_remaining_tokens or not is_last_element:
        return False

    def get_curline_index_start():
        """Get the index denoting the start of the current line"""
        for subindex, token in enumerate(reversed(tokens[:index])):
            # See Lib/tokenize.py and Lib/token.py in cpython for more info
            if token.type in (tokenize.NEWLINE, tokenize.NL):
                return index - subindex
        return 0

    curline_start = get_curline_index_start()
    for prevtoken in tokens[curline_start:index]:
        if '=' in prevtoken.string:
            return True
    return False
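A usage sketch (the sample source is illustrative): for `x = 1, 2,` the first comma is not trailing, while the last one is.

import io
import itertools
import tokenize

tokens = list(tokenize.generate_tokens(io.StringIO("x = 1, 2,\n").readline))
commas = [i for i, tok in enumerate(tokens) if tok.exact_type == tokenize.COMMA]
print([is_trailing_comma(tokens, i) for i in commas])
# [False, True]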
Example 15: next_logical_line
# Required module: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def next_logical_line(self):
    """Prepares the tracker for a new logical line (NEWLINE).

    A new logical line only starts with block indentation.
    """
    self.next_physical_line()
    self.retained_warnings = []
    self._cont_stack = []