

Python tokenize.NEWLINE Attribute Code Examples

This article collects typical usage examples of the tokenize.NEWLINE attribute in Python. If you are wondering what tokenize.NEWLINE does, how to use it, or what real-world code using it looks like, the curated attribute examples below may help. You can also explore further usage examples from the tokenize module, where this attribute is defined.


The following presents 15 code examples of the tokenize.NEWLINE attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
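Before diving into the examples, here is a minimal standalone sketch (not taken from any of the projects below) that illustrates what tokenize.NEWLINE marks: the end of a logical line of code, as opposed to tokenize.NL, which marks non-logical line breaks such as blank lines, comment-only lines, and continuations inside unclosed brackets. The sample source string is purely illustrative.

import io
import tokenize

# Illustrative source: line 2 continues an expression, line 3 is blank,
# line 4 is a comment-only line -- only complete statements end in NEWLINE.
source = "x = (1 +\n     2)\n\n# a comment\ny = 3\n"

for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == tokenize.NEWLINE:
        print("NEWLINE (logical line ends) at line", tok.start[0])
    elif tok.type == tokenize.NL:
        print("NL (non-logical break) at line", tok.start[0])

Many of the examples below rely on exactly this distinction, for instance to detect the end of a statement or to skip blank and comment-only lines.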

Example 1: get_type

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def get_type(tokens, start_index):
    """return the line type : docstring, comment, code, empty"""
    i = start_index
    tok_type = tokens[i][0]
    start = tokens[i][2]
    pos = start
    line_type = None
    while i < len(tokens) and tokens[i][2][0] == start[0]:
        tok_type = tokens[i][0]
        pos = tokens[i][3]
        if line_type is None:
            if tok_type == tokenize.STRING:
                line_type = 'docstring_lines'
            elif tok_type == tokenize.COMMENT:
                line_type = 'comment_lines'
            elif tok_type in JUNK:
                pass
            else:
                line_type = 'code_lines'
        i += 1
    if line_type is None:
        line_type = 'empty_lines'
    elif i < len(tokens) and tokens[i][0] == tokenize.NEWLINE:
        i += 1
    return i, pos[0] - start[0] + 1, line_type 
Developer: AtomLinter, Project: linter-pylama, Lines: 27, Source: raw_metrics.py

Example 2: _find_logical

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end) 
Developer: sofia-netsurv, Project: python-netsurv, Lines: 26, Source: autopep8.py

Example 3: get_type

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def get_type(tokens, start_index):
    """return the line type : docstring, comment, code, empty"""
    i = start_index
    tok_type = tokens[i][0]
    start = tokens[i][2]
    pos = start
    line_type = None
    while i < len(tokens) and tokens[i][2][0] == start[0]:
        tok_type = tokens[i][0]
        pos = tokens[i][3]
        if line_type is None:
            if tok_type == tokenize.STRING:
                line_type = "docstring_lines"
            elif tok_type == tokenize.COMMENT:
                line_type = "comment_lines"
            elif tok_type in JUNK:
                pass
            else:
                line_type = "code_lines"
        i += 1
    if line_type is None:
        line_type = "empty_lines"
    elif i < len(tokens) and tokens[i][0] == tokenize.NEWLINE:
        i += 1
    return i, pos[0] - start[0] + 1, line_type 
Developer: sofia-netsurv, Project: python-netsurv, Lines: 27, Source: raw_metrics.py

Example 4: process_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def process_tokens(self, tokens):
        encoding = "ascii"
        for i, (tok_type, token, start, _, line) in enumerate(tokens):
            if tok_type == tokenize.ENCODING:
                # this is always the first token processed
                encoding = token
            elif tok_type == tokenize.STRING:
                # 'token' is the whole un-parsed token; we can look at the start
                # of it to see whether it's a raw or unicode string etc.
                self.process_string_token(token, start[0])
                # We figure the next token, ignoring comments & newlines:
                j = i + 1
                while j < len(tokens) and tokens[j].type in (
                    tokenize.NEWLINE,
                    tokenize.NL,
                    tokenize.COMMENT,
                ):
                    j += 1
                next_token = tokens[j] if j < len(tokens) else None
                if encoding != "ascii":
                    # We convert `tokenize` character count into a byte count,
                    # to match with astroid `.col_offset`
                    start = (start[0], len(line[: start[1]].encode(encoding)))
                self.string_tokens[start] = (str_eval(token), next_token) 
Developer: sofia-netsurv, Project: python-netsurv, Lines: 26, Source: strings.py

Example 5: scanvars

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used."""
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            parent, prefix = None, ''
        lasttoken = token
    return vars 
Developer: glmcdona, Project: meddle, Lines: 22, Source: cgitb.py

Example 6: __openseen

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def __openseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == ')':
            # We've seen the last of the translatable strings.  Record the
            # line number of the first line of the strings and update the list
            # of messages seen.  Reset state for the next batch.  If there
            # were no strings inside _(), then just ignore this entry.
            if self.__data:
                self.__addentry(EMPTYSTRING.join(self.__data))
            self.__state = self.__waiting
        elif ttype == tokenize.STRING:
            self.__data.append(safe_eval(tstring))
        elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                           token.NEWLINE, tokenize.NL]:
            # warn if we see anything else than STRING or whitespace
            print >> sys.stderr, _(
                '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
                ) % {
                'token': tstring,
                'file': self.__curfile,
                'lineno': self.__lineno
                }
            self.__state = self.__waiting 
Developer: aliyun, Project: oss-ftp, Lines: 24, Source: pygettext.py

Example 7: enumerate_keyword_args

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def enumerate_keyword_args(tokens):
    """
    Iterates over *tokens* and returns a dictionary with function names as the
    keys and lists of keyword arguments as the values.
    """
    keyword_args = {}
    inside_function = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            inside_function = False
        if token_type == tokenize.NAME:
            if token_string == "def":
                function_name = tokens[index + 1][1]
                inside_function = function_name
                keyword_args.update({function_name: []})
            elif inside_function:
                if tokens[index + 1][1] == '=':  # keyword argument
                    keyword_args[function_name].append(token_string)
    return keyword_args 
Developer: riusksk, Project: shellsploit-library, Lines: 23, Source: analyze.py

Example 8: remove_docstrings

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def remove_docstrings(tokens):
    """
    Removes docstrings from *tokens* which is expected to be a list equivalent
    of `tokenize.generate_tokens()` (so we can update in-place).
    """
    prev_tok_type = None
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        if token_type == tokenize.STRING:
            if prev_tok_type == tokenize.INDENT:
                # Definitely a docstring
                tokens[index][1] = ''  # Remove it
                # Remove the leftover indentation and newline:
                tokens[index - 1][1] = ''
                tokens[index - 2][1] = ''
            elif prev_tok_type == tokenize.NL:
                # This captures whole-module docstrings:
                if tokens[index + 1][0] == tokenize.NEWLINE:
                    tokens[index][1] = ''
                    # Remove the trailing newline:
                    tokens[index + 1][1] = ''
        prev_tok_type = token_type 
Developer: riusksk, Project: shellsploit-library, Lines: 24, Source: minification.py

Example 9: __get_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def __get_tokens(it):
        tokens: List[tokenize.TokenInfo] = []

        try:
            for t in it:
                if t.type in tokenizer.SKIP_TOKENS:
                    continue
                if t.type == tokenize.NEWLINE and t.string == '':
                    continue
                if t.type == tokenize.DEDENT:
                    continue
                if t.type == tokenize.ERRORTOKEN:
                    continue
                tokens.append(t)
        except tokenize.TokenError as e:
            if not e.args[0].startswith('EOF in'):
                print(e)
        except IndentationError as e:
            print(e)

        return tokens 
Developer: vpj, Project: python_autocomplete, Lines: 23, Source: evaluate.py

Example 10: fix_newlines

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def fix_newlines(tokens):
    first = True
    curline = 1
    for token in tokens:
        if first:
            first = False
            curline = token.end[0] + 1
        else:
            # Fill NEWLINE token in between
            while curline < token.start[0]:
                yield TokenInfo(type=tokenize.NEWLINE,
                                string='\n',
                                start=(curline, 0),
                                end=(curline, 0),
                                line='\n', )
                curline += 1

            curline = token.end[0] + 1
        yield token 
Developer: sklam, Project: py2nb, Lines: 21, Source: reader.py

Example 11: tokeneater

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def tokeneater(self, type, token, srowcol, erowcol, line):
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock 
Developer: war-and-code, Project: jawfish, Lines: 31, Source: inspect.py

Example 12: is_eol_token

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def is_eol_token(token):
    """Check if the token is an end-of-line token."""
    return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n' 
Developer: AtomLinter, Project: linter-pylama, Lines: 5, Source: processor.py

Example 13: handle_newline

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def handle_newline(self, token_type):
        """Handle the logic when encountering a newline token."""
        if token_type == tokenize.NEWLINE:
            self.run_logical_checks()
            self.processor.reset_blank_before()
        elif len(self.processor.tokens) == 1:
            # The physical line contains only this token.
            self.processor.visited_new_blank_line()
            self.processor.delete_first_token()
        else:
            self.run_logical_checks() 
Developer: AtomLinter, Project: linter-pylama, Lines: 13, Source: checker.py

Example 14: is_trailing_comma

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def is_trailing_comma(tokens, index):
    """Check if the given token is a trailing comma

    :param tokens: Sequence of modules tokens
    :type tokens: list[tokenize.TokenInfo]
    :param int index: Index of token under check in tokens
    :returns: True if the token is a comma which trails an expression
    :rtype: bool
    """
    token = tokens[index]
    if token.exact_type != tokenize.COMMA:
        return False
    # Must have remaining tokens on the same line such as NEWLINE
    left_tokens = itertools.islice(tokens, index + 1, None)
    same_line_remaining_tokens = list(itertools.takewhile(
        lambda other_token, _token=token: other_token.start[0] == _token.start[0],
        left_tokens
    ))
    # Note: If the newline is tokenize.NEWLINE and not tokenize.NL
    # then the newline denotes the end of expression
    is_last_element = all(
        other_token.type in (tokenize.NEWLINE, tokenize.COMMENT)
        for other_token in same_line_remaining_tokens
    )
    if not same_line_remaining_tokens or not is_last_element:
        return False

    def get_curline_index_start():
        """Get the index denoting the start of the current line"""
        for subindex, token in enumerate(reversed(tokens[:index])):
            # See Lib/tokenize.py and Lib/token.py in cpython for more info
            if token.type in (tokenize.NEWLINE, tokenize.NL):
                return index - subindex
        return 0
    curline_start = get_curline_index_start()
    for prevtoken in tokens[curline_start:index]:
        if '=' in prevtoken.string:
            return True
    return False 
Developer: AtomLinter, Project: linter-pylama, Lines: 40, Source: refactoring.py

Example 15: next_logical_line

# Required import: import tokenize [as alias]
# Or: from tokenize import NEWLINE [as alias]
def next_logical_line(self):
        """Prepares the tracker for a new logical line (NEWLINE).

        A new logical line only starts with block indentation.
        """
        self.next_physical_line()
        self.retained_warnings = []
        self._cont_stack = [] 
Developer: AtomLinter, Project: linter-pylama, Lines: 10, Source: format.py


Note: The tokenize.NEWLINE attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.