This article collects typical usage examples of the tokenize.NL attribute in Python. If you have been wondering what tokenize.NL is for and how to use it, the selected code examples below may help. You can also explore further usage examples from the tokenize module, where this attribute is defined.
The following presents 15 code examples of the tokenize.NL attribute, sorted by popularity by default.
Example 1: _find_logical
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end)
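For context, here is a small self-contained sketch of the same filtering idea, written directly against the standard tokenize module; the sample source text and variable names are invented for illustration and are not part of the example above. Skipping COMMENT, NL, INDENT, DEDENT and ENDMARKER leaves only the tokens that delimit logical lines:

import io
import tokenize

source = "x = 1  # comment\n\nif x:\n    y = (1,\n         2)\n"
skip = {tokenize.COMMENT, tokenize.NL, tokenize.INDENT,
        tokenize.DEDENT, tokenize.ENDMARKER}
logical_ends = []
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type in skip:
        continue
    if tok.type == tokenize.NEWLINE:
        logical_ends.append(tok.end[0])  # 1-based line number of a logical line end

print(logical_ends)  # -> [1, 3, 5]: three logical lines despite the blank line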
Example 2: _has_valid_type_annotation
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def _has_valid_type_annotation(self, tokens, i):
    """Extended check of PEP-484 type hint presence"""
    if not self._inside_brackets("("):
        return False
    # token_info
    # type string start end line
    #   0    1      2    3    4
    bracket_level = 0
    for token in tokens[i - 1 :: -1]:
        if token[1] == ":":
            return True
        if token[1] == "(":
            return False
        if token[1] == "]":
            bracket_level += 1
        elif token[1] == "[":
            bracket_level -= 1
        elif token[1] == ",":
            if not bracket_level:
                return False
        elif token[1] in (".", "..."):
            continue
        elif token[0] not in (tokenize.NAME, tokenize.STRING, tokenize.NL):
            return False
    return False
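As a quick illustration of why the backward scan above has to tolerate tokenize.NL: a line break inside an open parameter list is reported as NL rather than NEWLINE. The snippet below is a standalone sketch with a made-up function header, not code from the example's project:

import io
import tokenize

source = "def f(x: int,\n      y: str = 'a'):\n    pass\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == tokenize.NL:
        # The break after "x: int," is inside the parentheses, so it is NL.
        print("NL at line", tok.start[0])  # -> NL at line 1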
Example 3: process_tokens
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def process_tokens(self, tokens):
    encoding = "ascii"
    for i, (tok_type, token, start, _, line) in enumerate(tokens):
        if tok_type == tokenize.ENCODING:
            # this is always the first token processed
            encoding = token
        elif tok_type == tokenize.STRING:
            # 'token' is the whole un-parsed token; we can look at the start
            # of it to see whether it's a raw or unicode string etc.
            self.process_string_token(token, start[0])
            # We figure the next token, ignoring comments & newlines:
            j = i + 1
            while j < len(tokens) and tokens[j].type in (
                tokenize.NEWLINE,
                tokenize.NL,
                tokenize.COMMENT,
            ):
                j += 1
            next_token = tokens[j] if j < len(tokens) else None
            if encoding != "ascii":
                # We convert `tokenize` character count into a byte count,
                # to match with astroid `.col_offset`
                start = (start[0], len(line[: start[1]].encode(encoding)))
            self.string_tokens[start] = (str_eval(token), next_token)
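The "next meaningful token" scan can be tried out on its own; the sketch below uses a made-up source string and mirrors the skip set (NEWLINE, NL, COMMENT) from the loop above:

import io
import tokenize

source = "a = 'x'  # note\n'y'\n"
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
for i, tok in enumerate(tokens):
    if tok.type == tokenize.STRING:
        j = i + 1
        while j < len(tokens) and tokens[j].type in (
            tokenize.NEWLINE, tokenize.NL, tokenize.COMMENT,
        ):
            j += 1
        nxt = tokens[j] if j < len(tokens) else None
        print(tok.string, "->", tokenize.tok_name[nxt.type] if nxt else None)
# -> 'x' -> STRING   (the comment and the logical-line NEWLINE were skipped)
# -> 'y' -> ENDMARKER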
Example 4: __waiting
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def __waiting(self, ttype, tstring, lineno):
    opts = self.__options
    # Do docstring extractions, if enabled
    if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
        # module docstring?
        if self.__freshmodule:
            if ttype == tokenize.STRING:
                self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
                self.__freshmodule = 0
            elif ttype not in (tokenize.COMMENT, tokenize.NL):
                self.__freshmodule = 0
            return
        # class docstring?
        if ttype == tokenize.NAME and tstring in ('class', 'def'):
            self.__state = self.__suiteseen
            return
    if ttype == tokenize.NAME and tstring in opts.keywords:
        self.__state = self.__keywordseen
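A standalone sketch of the module-docstring case: leading comments and blank lines only produce COMMENT and NL tokens, so the docstring is still the first token of interest. The sample module text is invented for illustration:

import io
import tokenize

source = '# a leading comment\n\n"""Module docstring."""\nx = 1\n'
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type not in (tokenize.COMMENT, tokenize.NL):
        print(tokenize.tok_name[tok.type], tok.string)
        break
# -> STRING """Module docstring."""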
Example 5: __openseen
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def __openseen(self, ttype, tstring, lineno):
    if ttype == tokenize.OP and tstring == ')':
        # We've seen the last of the translatable strings. Record the
        # line number of the first line of the strings and update the list
        # of messages seen. Reset state for the next batch. If there
        # were no strings inside _(), then just ignore this entry.
        if self.__data:
            self.__addentry(EMPTYSTRING.join(self.__data))
        self.__state = self.__waiting
    elif ttype == tokenize.STRING:
        self.__data.append(safe_eval(tstring))
    elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                       token.NEWLINE, tokenize.NL]:
        # warn if we see anything else than STRING or whitespace
        print(_(
            '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
        ) % {
            'token': tstring,
            'file': self.__curfile,
            'lineno': self.__lineno
        }, file=sys.stderr)
        self.__state = self.__waiting
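The behaviour relied on here, that line breaks inside an open _() call show up as NL tokens, can be demonstrated in isolation. The sketch below collects the string pieces between the parentheses while letting NL and COMMENT tokens fall through; the _() call text is made up:

import ast
import io
import tokenize

source = "_('hello '\n  'world')\n"
collecting, parts = False, []
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == tokenize.OP and tok.string == '(':
        collecting = True
    elif tok.type == tokenize.OP and tok.string == ')':
        break
    elif collecting and tok.type == tokenize.STRING:
        parts.append(ast.literal_eval(tok.string))
    # NL and COMMENT tokens simply fall through here

print(''.join(parts))  # -> hello world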
Example 6: remove_docstrings
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def remove_docstrings(tokens):
    """
    Removes docstrings from *tokens* which is expected to be a list equivalent
    of `tokenize.generate_tokens()` (so we can update in-place).
    """
    prev_tok_type = None
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        if token_type == tokenize.STRING:
            if prev_tok_type == tokenize.INDENT:
                # Definitely a docstring
                tokens[index][1] = ''  # Remove it
                # Remove the leftover indentation and newline:
                tokens[index - 1][1] = ''
                tokens[index - 2][1] = ''
            elif prev_tok_type == tokenize.NL:
                # This captures whole-module docstrings:
                if tokens[index + 1][0] == tokenize.NEWLINE:
                    tokens[index][1] = ''
                    # Remove the trailing newline:
                    tokens[index + 1][1] = ''
        prev_tok_type = token_type
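A quick way to see the token pattern this function keys on for whole-module docstrings (an NL before the STRING and a NEWLINE after it); the sample source is invented:

import io
import tokenize

source = "\n'''module docstring'''\nx = 1\n"
toks = list(tokenize.generate_tokens(io.StringIO(source).readline))
print([tokenize.tok_name[t.type] for t in toks[:4]])
# -> ['NL', 'STRING', 'NEWLINE', 'NAME']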
Example 7: test_tokenizing
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def test_tokenizing(self):
    # Test that we produce meaningful tokens on initialization.
    source = "import re  # comment\n\nfoo = 'bar'\n"
    atok = asttokens.ASTTokens(source)
    self.assertEqual(atok.text, source)
    self.assertEqual([str(t) for t in atok.tokens], [
        "NAME:'import'",
        "NAME:'re'",
        "COMMENT:'# comment'",
        "NEWLINE:'\\n'",
        "NL:'\\n'",
        "NAME:'foo'",
        "OP:'='",
        'STRING:"\'bar\'"',
        "NEWLINE:'\\n'",
        "ENDMARKER:''"
    ])
    self.assertEqual(atok.tokens[5].type, token.NAME)
    self.assertEqual(atok.tokens[5].string, 'foo')
    self.assertEqual(atok.tokens[5].index, 5)
    self.assertEqual(atok.tokens[5].startpos, 22)
    self.assertEqual(atok.tokens[5].endpos, 25)
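The NEWLINE/NL distinction that the expected token dump shows can also be reproduced with the standard tokenize module alone, without asttokens; NEWLINE closes a logical line, while NL marks a blank or otherwise non-logical line:

import io
import tokenize

source = "import re  # comment\n\nfoo = 'bar'\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type in (tokenize.NEWLINE, tokenize.NL):
        print(tokenize.tok_name[tok.type], "on line", tok.start[0])
# -> NEWLINE on line 1, NL on line 2, NEWLINE on line 3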
Example 8: tokeneater
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def tokeneater(self, type, token, srowcol, erowcol, line):
    if not self.started:
        # look for the first "def", "class" or "lambda"
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True    # skip to the end of the line
    elif type == tokenize.NEWLINE:
        self.passline = False   # stop skipping when a NEWLINE is seen
        self.last = srowcol[0]
        if self.islambda:       # lambdas always end at the first NEWLINE
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent = self.indent + 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent = self.indent - 1
        # the end of matching indent/dedent pairs end a block
        # (note that this only works for "def"/"class" blocks,
        #  not e.g. for "if: else:" or "try: finally:" blocks)
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # any other token on the same indentation level end the previous
        # block as well, except the pseudo-tokens COMMENT and NL.
        raise EndOfBlock
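This tokeneater is the heart of inspect's block finder. The sketch below drives it indirectly through inspect.getblock, an undocumented but long-standing helper, so treat that call as an assumption about internals: the trailing blank line and comment after f() arrive as NL and COMMENT tokens and do not end the block early, while the DEDENT before the next def does.

import inspect

lines = [
    "def f():\n",
    "    return 1\n",
    "\n",
    "# trailing comment\n",
    "def g():\n",
    "    return 2\n",
]
print(inspect.getblock(lines))  # -> ['def f():\n', '    return 1\n']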
Example 9: is_single_token
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def is_single_token(token_number, tokens):
    '''Is this a single token matching token_number followed by ENDMARKER or NL
    tokens.
    '''
    return (TOKEN_NUMBER(tokens[0]) == token_number and
            all(TOKEN_NUMBER(t) in (tokenize.ENDMARKER, tokenize.NL)
                for t in tokens[1:]))
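For reference, the tail of a token stream for a tiny, invented input: a trailing blank line shows up as NL just before ENDMARKER, which is why the helper above treats both as harmless trailing tokens.

import io
import tokenize

toks = list(tokenize.generate_tokens(io.StringIO("42\n\n").readline))
print([tokenize.tok_name[t[0]] for t in toks])
# -> ['NUMBER', 'NEWLINE', 'NL', 'ENDMARKER']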
Example 10: is_trailing_comma
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def is_trailing_comma(tokens, index):
    """Check if the given token is a trailing comma

    :param tokens: Sequence of modules tokens
    :type tokens: list[tokenize.TokenInfo]
    :param int index: Index of token under check in tokens
    :returns: True if the token is a comma which trails an expression
    :rtype: bool
    """
    token = tokens[index]
    if token.exact_type != tokenize.COMMA:
        return False

    # Must have remaining tokens on the same line such as NEWLINE
    left_tokens = itertools.islice(tokens, index + 1, None)
    same_line_remaining_tokens = list(itertools.takewhile(
        lambda other_token, _token=token: other_token.start[0] == _token.start[0],
        left_tokens
    ))
    # Note: If the newline is tokenize.NEWLINE and not tokenize.NL
    # then the newline denotes the end of expression
    is_last_element = all(
        other_token.type in (tokenize.NEWLINE, tokenize.COMMENT)
        for other_token in same_line_remaining_tokens
    )
    if not same_line_remaining_tokens or not is_last_element:
        return False

    def get_curline_index_start():
        """Get the index denoting the start of the current line"""
        for subindex, token in enumerate(reversed(tokens[:index])):
            # See Lib/tokenize.py and Lib/token.py in cpython for more info
            if token.type in (tokenize.NEWLINE, tokenize.NL):
                return index - subindex
        return 0

    curline_start = get_curline_index_start()
    for prevtoken in tokens[curline_start:index]:
        if '=' in prevtoken.string:
            return True
    return False
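The NEWLINE-versus-NL note in the middle of this function is easy to confirm: inside brackets, a comma at the end of a physical line is followed by NL (the expression continues) rather than NEWLINE. A standalone sketch with an invented snippet:

import io
import tokenize

source = "x = (1,\n     2,\n     )\n"
toks = list(tokenize.generate_tokens(io.StringIO(source).readline))
for i, tok in enumerate(toks):
    if tok.exact_type == tokenize.COMMA:
        print("comma on line", tok.start[0],
              "followed by", tokenize.tok_name[toks[i + 1].type])
# -> both commas are followed by NL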
Example 11: _token_followed_by_eol
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def _token_followed_by_eol(tokens, position):
    return (tokens.type(position+1) == tokenize.NL or
            tokens.type(position+1) == tokenize.COMMENT and
            tokens.type(position+2) == tokenize.NL)
Example 12: next_physical_line
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def next_physical_line(self):
    """Prepares the tracker for a new physical line (NL)."""
    self._line_start = -1
    self._is_block_opener = False
Example 13: parse_docstring
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def parse_docstring(self):
    """Parse a single docstring and return its value."""
    self.log.debug("parsing docstring, token is %r (%s)",
                   self.current.kind, self.current.value)
    while self.current.kind in (tk.COMMENT, tk.NEWLINE, tk.NL):
        self.stream.move()
        self.log.debug("parsing docstring, token is %r (%s)",
                       self.current.kind, self.current.value)
    if self.current.kind == tk.STRING:
        docstring = self.current.value
        self.stream.move()
        return docstring
    return None
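A standalone sketch of the same skipping loop, run directly over tokenize output for an invented function body; INDENT is added to the skip set here only because this sketch does not have the surrounding parser state:

import io
import tokenize

source = "def f():\n    # note\n\n    'doc'\n"
toks = iter(tokenize.generate_tokens(io.StringIO(source).readline))
for tok in toks:                      # advance past the 'def f():' header
    if tok.type == tokenize.OP and tok.string == ':':
        break
for tok in toks:                      # skip COMMENT/NEWLINE/NL (and INDENT)
    if tok.type not in (tokenize.COMMENT, tokenize.NEWLINE,
                        tokenize.NL, tokenize.INDENT):
        break
print(tokenize.tok_name[tok.type], tok.string)  # -> STRING 'doc'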
Example 14: parse_all
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def parse_all(self):
    """Parse the __all__ definition in a module."""
    assert self.current.value == '__all__'
    self.consume(tk.NAME)
    if self.current.value != '=':
        raise AllError('Could not evaluate contents of __all__. ')
    self.consume(tk.OP)
    if self.current.value not in '([':
        raise AllError('Could not evaluate contents of __all__. ')
    self.consume(tk.OP)

    self.all = []
    all_content = "("
    while self.current.kind != tk.OP or self.current.value not in ")]":
        if self.current.kind in (tk.NL, tk.COMMENT):
            pass
        elif (self.current.kind == tk.STRING or
              self.current.value == ','):
            all_content += self.current.value
        else:
            raise AllError('Unexpected token kind in __all__: {!r}. '
                           .format(self.current.kind))
        self.stream.move()
    self.consume(tk.OP)
    all_content += ")"

    try:
        self.all = eval(all_content, {})
    except BaseException as e:
        raise AllError('Could not evaluate contents of __all__.'
                       '\bThe value was {}. The exception was:\n{}'
                       .format(all_content, e))
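The same collect-and-eval idea can be reproduced directly on tokenize output; inside the brackets of the (invented) __all__ literal below, the line breaks and the comment arrive as NL and COMMENT tokens and are simply skipped:

import io
import tokenize

source = "__all__ = [\n    'spam',\n    'eggs',  # comment\n]\n"
toks = iter(tokenize.generate_tokens(io.StringIO(source).readline))
for tok in toks:                      # scan forward to the opening bracket
    if tok.type == tokenize.OP and tok.string in '([':
        break
collected = "("
for tok in toks:
    if tok.type == tokenize.OP and tok.string in ')]':
        break
    if tok.type in (tokenize.NL, tokenize.COMMENT):
        continue                      # ignore layout inside the brackets
    collected += tok.string
collected += ")"
print(eval(collected, {}))  # -> ('spam', 'eggs')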
Example 15: whitespace_before_comment
# Required import: import tokenize [as alias]
# Or: from tokenize import NL [as alias]
def whitespace_before_comment(logical_line, tokens):
    r"""Separate inline comments by at least two spaces.

    An inline comment is a comment on the same line as a statement.  Inline
    comments should be separated by at least two spaces from the statement.
    They should start with a # and a single space.

    Each line of a block comment starts with a # and a single space
    (unless it is indented text inside the comment).

    Okay: x = x + 1  # Increment x
    Okay: x = x + 1    # Increment x
    Okay: # Block comment
    E261: x = x + 1 # Increment x
    E262: x = x + 1  #Increment x
    E262: x = x + 1  #  Increment x
    E265: #Block comment
    E266: ### Block comment
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            inline_comment = line[:start[1]].strip()
            if inline_comment:
                if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
                    yield (prev_end,
                           "E261 at least two spaces before inline comment")
            symbol, sp, comment = text.partition(' ')
            bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#')
            if inline_comment:
                if bad_prefix or comment[:1] in WHITESPACE:
                    yield start, "E262 inline comment should start with '# '"
            elif bad_prefix and (bad_prefix != '!' or start[0] > 1):
                if bad_prefix != '#':
                    yield start, "E265 block comment should start with '# '"
                elif comment:
                    yield start, "E266 too many leading '#' for block comment"
        elif token_type != tokenize.NL:
            prev_end = end
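As a closing illustration, here is a trimmed, self-contained sketch of just the E261 part of this check. It depends only on the standard tokenize module (no WHITESPACE constant), uses an invented two-line source, and keeps the detail that NL tokens never update prev_end:

import io
import tokenize

source = "x = 1 # too close\ny = 2  # fine\n"
prev_end = (0, 0)
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == tokenize.COMMENT:
        if prev_end[0] == tok.start[0] and tok.start[1] < prev_end[1] + 2:
            print("E261 at line", tok.start[0])  # flagged: only one space before '#'
    elif tok.type != tokenize.NL:
        prev_end = tok.end
# -> E261 at line 1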