

Python tokenize.untokenize Method Code Examples

This article collects typical usage examples of the tokenize.untokenize method in Python. If you are wondering what tokenize.untokenize does, how to call it, or where to find real-world usage, the curated code examples below may help. You can also explore further usage examples from the tokenize module in which this method is defined.


The following presents 14 code examples of the tokenize.untokenize method, sorted by popularity by default.
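Before diving into the examples, a minimal round trip illustrates the basic contract (this sketch is not taken from any of the projects below): tokenize.generate_tokens produces full five-element tokens from a source string, and tokenize.untokenize rebuilds source text from them.

import io
import tokenize

source = "x = 1 + 2\n"
# generate_tokens yields full TokenInfo tuples (type, string, start, end, line)
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
# with an unmodified stream of full tuples, untokenize reproduces the text
rebuilt = tokenize.untokenize(tokens)
print(rebuilt == source)  # True

The examples below mostly follow this pattern: generate tokens, transform some of them, and call untokenize to get source text back.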

Example 1: read

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def read(filename):
    """
    Read a regular Python file with special formatting and perform
    preprocessing on it.  The result is a string that conforms to the IPython
    notebook version 3 python script format.
    """
    with open(filename, 'rb') as fin:
        token_gen = _generate_tokens(fin.readline)
        cvt_docstr_gen = convert_toplevel_docstring(token_gen)
        nl_gen = fix_newlines(cvt_docstr_gen)
        out = list(nl_gen)

    formatted = tokenize.untokenize(out).decode('utf-8')
    return fix_empty_lines(formatted)


# =============================================================================
#                                   Helpers
# ============================================================================= 
Developer ID: sklam, Project: py2nb, Lines of code: 21, Source file: reader.py

Example 2: prg2py_after_preproc

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def prg2py_after_preproc(data, parser_start, input_filename):
    input_stream = antlr4.InputStream(data)
    lexer = VisualFoxpro9Lexer(input_stream)
    stream = antlr4.CommonTokenStream(lexer)
    parser = VisualFoxpro9Parser(stream)
    tree = run_parser(stream, parser, parser_start)
    TreeCleanVisitor().visit(tree)
    output_tree = PythonConvertVisitor(input_filename).visit(tree)
    if not isinstance(output_tree, list):
        return output_tree
    output = add_indents(output_tree, 0)
    options = autopep8.parse_args(['--max-line-length', '100000', '-'])
    output = autopep8.fix_code(output, options)
    tokens = list(tokenize.generate_tokens(io.StringIO(output).readline))
    for i, token in enumerate(tokens):
        token = list(token)
        if token[0] == tokenize.STRING and token[1].startswith('u'):
            token[1] = token[1][1:]
        tokens[i] = tuple(token)
    return tokenize.untokenize(tokens) 
Developer ID: mwisslead, Project: vfp2py, Lines of code: 22, Source file: vfp2py.py
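The untokenize-relevant part of Example 2 is the final loop, which strips the u prefix from string literals in the autopep8-formatted output. Below is a standalone sketch of just that pass; the name strip_u_prefix is illustrative and not part of vfp2py.

import io
import tokenize

def strip_u_prefix(source):
    # drop the u'' prefix from string literal tokens, then rebuild the source
    tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
    for i, tok in enumerate(tokens):
        tok = list(tok)
        if tok[0] == tokenize.STRING and tok[1].startswith('u'):
            tok[1] = tok[1][1:]
        tokens[i] = tuple(tok)
    return tokenize.untokenize(tokens)

print(strip_u_prefix("x = u'hello'\n"))  # x = 'hello'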

Example 3: minimize_source

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def minimize_source(source):
    """
    Remove comments and docstrings from Python `source`, preserving line
    numbers and syntax of empty blocks.

    :param str source:
        The source to minimize.

    :returns str:
        The minimized source.
    """
    source = mitogen.core.to_text(source)
    tokens = tokenize.generate_tokens(StringIO(source).readline)
    tokens = strip_comments(tokens)
    tokens = strip_docstrings(tokens)
    tokens = reindent(tokens)
    return tokenize.untokenize(tokens) 
Developer ID: dw, Project: mitogen, Lines of code: 19, Source file: minify.py
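A hedged usage sketch: assuming the mitogen package is installed, minimize_source (together with the strip_comments, strip_docstrings, and reindent helpers it relies on) should be importable from mitogen.minify.

from mitogen.minify import minimize_source

src = (
    "def f():\n"
    '    """A docstring."""\n'
    "    return 1  # a comment\n"
)
# the docstring and comment are stripped while the line layout is preserved
print(minimize_source(src))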

Example 4: make_exp

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def make_exp(exp):
    check_type, out_name, concrete = None, None, None

    if len(exp) == 1 and exp[0][1] == "_":
        return (None, None, None)
    if len(exp) == 2 and exp[0][1] == "*" and exp[1][0] == NAME:
        exp = [(OP, "..."), (OP, ":"), exp[1]]

    if exp[0][1] in ("...", "int", "str", "list", "tuple"):
        check_type = exp[0][1]
        exp.pop(0)

    if len(exp) == 2 and exp[0][1] == ":":
        out_name = exp[1][1]

    elif len(exp) > 0:
        concrete = tokenize.untokenize(exp)  # .decode()

    return (check_type, out_name, concrete) 
Developer ID: eveem-org, Project: panoramix, Lines of code: 21, Source file: tilde.py

Example 5: _dedent

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def _dedent(s):
    """Dedent python code string."""

    result = [t[:2] for t in generate_tokens(StringIO(s).readline)]
    # set initial indent to 0 if any
    if result[0][0] == INDENT:
        result[0] = (INDENT, '')
    return untokenize(result) 
Developer ID: myhdl, Project: myhdl, Lines of code: 10, Source file: _util.py
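A usage sketch of _dedent; the function is repeated here together with the imports that myhdl's _util module provides, so the snippet runs standalone.

from io import StringIO
from tokenize import INDENT, generate_tokens, untokenize

def _dedent(s):
    """Dedent python code string."""
    result = [t[:2] for t in generate_tokens(StringIO(s).readline)]
    # set initial indent to 0 if any
    if result[0][0] == INDENT:
        result[0] = (INDENT, '')
    return untokenize(result)

# both lines end up at column 0; two-tuple mode normalizes spacing, e.g. "x =1"
print(_dedent("    x = 1\n    y = 2\n"))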

Example 6: _filter_header

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def _filter_header(s):
    """Clean up 'L' in npz header ints.

    Cleans up the 'L' in strings representing integers. Needed to allow npz
    headers produced in Python2 to be read in Python3.

    Parameters
    ----------
    s : byte string
        Npy file header.

    Returns
    -------
    header : str
        Cleaned up header.

    """
    import tokenize
    if sys.version_info[0] >= 3:
        from io import StringIO
    else:
        from StringIO import StringIO

    tokens = []
    last_token_was_number = False
    # adding newline as python 2.7.5 workaround
    string = asstr(s) + "\n"
    for token in tokenize.generate_tokens(StringIO(string).readline):
        token_type = token[0]
        token_string = token[1]
        if (last_token_was_number and
                token_type == tokenize.NAME and
                token_string == "L"):
            continue
        else:
            tokens.append(token)
        last_token_was_number = (token_type == tokenize.NUMBER)
    # removing newline (see above) as python 2.7.5 workaround
    return tokenize.untokenize(tokens)[:-1] 
Developer ID: Frank-qlu, Project: recruit, Lines of code: 41, Source file: format.py
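A simplified standalone sketch of the same idea (not NumPy's code: asstr and the Python 2 branch are omitted, the input is a plain str, and the name drop_long_suffix is illustrative). It shows why the token pass works: Python 3's tokenizer splits 3L into a NUMBER token followed by a NAME token "L", which is then dropped.

import tokenize
from io import StringIO

def drop_long_suffix(header):
    tokens = []
    last_was_number = False
    # append a newline for the same tokenizer workaround used above
    for tok in tokenize.generate_tokens(StringIO(header + "\n").readline):
        if last_was_number and tok.type == tokenize.NAME and tok.string == "L":
            continue
        tokens.append(tok)
        last_was_number = tok.type == tokenize.NUMBER
    return tokenize.untokenize(tokens)[:-1]

print(drop_long_suffix("{'shape': (3L, 4L), 'fortran_order': False}"))
# {'shape': (3 , 4 ), 'fortran_order': False}  -- the L suffixes are gone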

Example 7: _preparse

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def _preparse(source, f=_compose(_replace_locals, _replace_booleans,
                                 _rewrite_assign)):
    """Compose a collection of tokenization functions

    Parameters
    ----------
    source : str
        A Python source code string
    f : callable
        This takes a tuple of (toknum, tokval) as its argument and returns a
        tuple with the same structure but possibly different elements. Defaults
        to the composition of ``_rewrite_assign``, ``_replace_booleans``, and
        ``_replace_locals``.

    Returns
    -------
    s : str
        Valid Python source code

    Notes
    -----
    The `f` parameter can be any callable that takes *and* returns input of the
    form ``(toknum, tokval)``, where ``toknum`` is one of the constants from
    the ``tokenize`` module and ``tokval`` is a string.
    """
    assert callable(f), 'f must be callable'
    return tokenize.untokenize(lmap(f, tokenize_string(source))) 
Developer ID: Frank-qlu, Project: recruit, Lines of code: 29, Source file: expr.py

Example 8: _filter_header

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def _filter_header(s):
    """Clean up 'L' in npz header ints.

    Cleans up the 'L' in strings representing integers. Needed to allow npz
    headers produced in Python2 to be read in Python3.

    Parameters
    ----------
    s : byte string
        Npy file header.

    Returns
    -------
    header : str
        Cleaned up header.

    """
    import tokenize
    if sys.version_info[0] >= 3:
        from io import StringIO
    else:
        from StringIO import StringIO

    tokens = []
    last_token_was_number = False
    for token in tokenize.generate_tokens(StringIO(asstr(s)).read):
        token_type = token[0]
        token_string = token[1]
        if (last_token_was_number and
                token_type == tokenize.NAME and
                token_string == "L"):
            continue
        else:
            tokens.append(token)
        last_token_was_number = (token_type == tokenize.NUMBER)
    return tokenize.untokenize(tokens) 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 38, Source file: format.py

Example 9: _preparse

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def _preparse(source, f=compose(_replace_locals, _replace_booleans,
                                _rewrite_assign)):
    """Compose a collection of tokenization functions

    Parameters
    ----------
    source : str
        A Python source code string
    f : callable
        This takes a tuple of (toknum, tokval) as its argument and returns a
        tuple with the same structure but possibly different elements. Defaults
        to the composition of ``_rewrite_assign``, ``_replace_booleans``, and
        ``_replace_locals``.

    Returns
    -------
    s : str
        Valid Python source code

    Notes
    -----
    The `f` parameter can be any callable that takes *and* returns input of the
    form ``(toknum, tokval)``, where ``toknum`` is one of the constants from
    the ``tokenize`` module and ``tokval`` is a string.
    """
    assert callable(f), 'f must be callable'
    return tokenize.untokenize(lmap(f, tokenize_string(source))) 
Developer ID: birforce, Project: vnpy_crypto, Lines of code: 29, Source file: expr.py

Example 10: _rewrite_assign

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def _rewrite_assign(source):
    """Rewrite the assignment operator for PyTables expression that want to use
    ``=`` as a substitute for ``==``.
    """
    res = []
    g = tokenize.generate_tokens(StringIO(source).readline)
    for toknum, tokval, _, _, _ in g:
        res.append((toknum, '==' if tokval == '=' else tokval))
    return tokenize.untokenize(res) 
Developer ID: ktraunmueller, Project: Computable, Lines of code: 11, Source file: expr.py
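A usage sketch of _rewrite_assign; the function is repeated with explicit imports (the surrounding expr module supplies tokenize and StringIO) so it runs standalone.

import tokenize
from io import StringIO

def _rewrite_assign(source):
    # rewrite '=' to '==' (copied from the example above for a runnable demo)
    res = []
    g = tokenize.generate_tokens(StringIO(source).readline)
    for toknum, tokval, _, _, _ in g:
        res.append((toknum, '==' if tokval == '=' else tokval))
    return tokenize.untokenize(res)

print(_rewrite_assign("a = b\n"))  # "a ==b " -- two-tuple mode normalizes spacing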

Example 11: untokenize

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def untokenize(tokens):
    """
    Converts the output of tokenize.generate_tokens back into a human-readable
    string (that doesn't contain oddly-placed whitespace everywhere).

    .. note::

        Unlike :meth:`tokenize.untokenize`, this function requires the 3rd and
        4th items in each token tuple (though we can use lists *or* tuples).
    """
    out = ""
    last_lineno = -1
    last_col = 0
    for tok in tokens:
        token_string = tok[1]
        start_line, start_col = tok[2]
        end_line, end_col = tok[3]
        # The following two conditionals preserve indentation:
        if start_line > last_lineno:
            last_col = 0
        if start_col > last_col and token_string != '\n':
            out += (" " * (start_col - last_col))
        out += token_string
        last_col = end_col
        last_lineno = end_line
    return out 
Developer ID: riusksk, Project: shellsploit-library, Lines of code: 28, Source file: token_utils.py
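A quick check of the whitespace-preserving rebuild, assuming the untokenize function defined in this example is in scope; the standard tokenize module is only used here to produce the input tokens.

import io
import tokenize as std_tokenize

source = "def add(a, b):\n    return a + b\n"
tokens = list(std_tokenize.generate_tokens(io.StringIO(source).readline))
# the position-aware rebuild keeps the original indentation and spacing
print(untokenize(tokens) == source)  # True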

Example 12: Untokenize

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def Untokenize(offset_tokens):
  """Return the string representation of an iterable of OffsetTokens."""
  # Make a copy. Don't modify the original.
  offset_tokens = collections.deque(offset_tokens)

  # Strip leading NL tokens.
  while offset_tokens[0].type == tokenize.NL:
    offset_tokens.popleft()

  # Strip leading vertical whitespace.
  first_token = offset_tokens.popleft()
  # Take care not to modify the existing token. Create a new one in its place.
  first_token = OffsetToken(first_token.type, first_token.string,
                            (0, first_token.offset[1]))
  offset_tokens.appendleft(first_token)

  # Convert OffsetTokens to tokenize tokens.
  tokenize_tokens = []
  row = 1
  col = 0
  for t in offset_tokens:
    offset_row, offset_col = t.offset
    if offset_row == 0:
      col += offset_col
    else:
      row += offset_row
      col = offset_col
    tokenize_tokens.append((t.type, t.string, (row, col), (row, col), None))

  # tokenize can't handle whitespace before line continuations.
  # So add a space.
  return tokenize.untokenize(tokenize_tokens).replace('\\\n', ' \\\n') 
Developer ID: FSecureLABS, Project: Jandroid, Lines of code: 34, Source file: offset_token.py

Example 13: apply_job_security

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def apply_job_security(code):
    """Treat input `code` like Python 2 (implicit strings are byte literals).

    The implementation is horribly inefficient but the goal is to be compatible
    with what Mercurial does at runtime.
    """
    buf = io.BytesIO(code.encode("utf8"))
    tokens = tokenize.tokenize(buf.readline)
    # NOTE: by setting the fullname to `mercurial.pycompat` below, we're
    # ensuring that hg-specific pycompat imports aren't inserted to the code.
    data = tokenize.untokenize(replacetokens(list(tokens), "mercurial.pycompat"))
    return cast(str, data.decode("utf8")) 
Developer ID: ambv, Project: retype, Lines of code: 14, Source file: retype_hgext.py

Example 14: fixLazyJson

# Required module: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def fixLazyJson (in_text):
    tokengen = tokenize.generate_tokens(StringIO(in_text.decode('ascii')).readline)

    result = []
    for tokid, tokval, _, _, _ in tokengen:
        # fix unquoted strings
        if (tokid == token.NAME):
            if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']:
                tokid = token.STRING
                tokval = u'"%s"' % tokval

        # fix single-quoted strings
        elif (tokid == token.STRING):
            if tokval.startswith ("'"):
                tokval = u'"%s"' % tokval[1:-1].replace ('"', '\\"')

        # remove invalid commas
        elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
            if (len(result) > 0) and (result[-1][1] == ','):
                result.pop()

        result.append((tokid, tokval))

    return tokenize.untokenize(result) 
Developer ID: philipodonnell, Project: paperbroker, Lines of code: 31, Source file: GoogleFinanceQuoteAdapter.py
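A hedged usage sketch of fixLazyJson: it expects ASCII-decodable bytes, and this assumes the function above plus the adapter's imports (tokenize, token, StringIO) are in scope.

import json

lazy = b"{foo: 'bar', values: [1, 2,],}"
fixed = fixLazyJson(lazy)
print(json.loads(fixed))  # {'foo': 'bar', 'values': [1, 2]}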


Note: The tokenize.untokenize examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code, and do not republish without permission.