

Python tokenize.untokenize Method Code Examples

This article collects typical usage examples of Python's tokenize.untokenize method. If you are unsure what tokenize.untokenize does or how to use it, the curated code examples below should help. You can also browse further usage examples from the tokenize module it belongs to.


The following presents 14 code examples of the tokenize.untokenize method, sorted by popularity by default.
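
Before diving into the examples, here is a minimal round-trip sketch of the API itself (illustrative only, not taken from any of the projects below): tokenize.generate_tokens reads source via a readline callable, and tokenize.untokenize turns a sequence of tokens back into source text, accepting either full 5-tuples or bare (type, string) pairs.

import io
import tokenize

source = "total = price * (1 + tax_rate)\n"

# Full 5-tuples keep their positions, so for unmodified tokens of a simple
# input like this the round-trip is exact.
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
assert tokenize.untokenize(tokens) == source

# Bare (type, string) pairs trigger untokenize's compatibility mode: the
# result is equivalent code, but whitespace may be normalized differently.
pairs = [(tok.type, tok.string) for tok in tokens]
print(tokenize.untokenize(pairs))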

Example 1: read

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def read(filename):
    """
    Read a regular Python file with special formatting and perform
    preprocessing on it.  The result is a string that conforms to the IPython
    notebook version 3 python script format.
    """
    with open(filename, 'rb') as fin:
        token_gen = _generate_tokens(fin.readline)
        cvt_docstr_gen = convert_toplevel_docstring(token_gen)
        nl_gen = fix_newlines(cvt_docstr_gen)
        out = list(nl_gen)

    formatted = tokenize.untokenize(out).decode('utf-8')
    return fix_empty_lines(formatted)


# =============================================================================
#                                   Helpers
# ============================================================================= 
Author: sklam, Project: py2nb, Lines: 21, Source: reader.py

Example 2: prg2py_after_preproc

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def prg2py_after_preproc(data, parser_start, input_filename):
    input_stream = antlr4.InputStream(data)
    lexer = VisualFoxpro9Lexer(input_stream)
    stream = antlr4.CommonTokenStream(lexer)
    parser = VisualFoxpro9Parser(stream)
    tree = run_parser(stream, parser, parser_start)
    TreeCleanVisitor().visit(tree)
    output_tree = PythonConvertVisitor(input_filename).visit(tree)
    if not isinstance(output_tree, list):
        return output_tree
    output = add_indents(output_tree, 0)
    options = autopep8.parse_args(['--max-line-length', '100000', '-'])
    output = autopep8.fix_code(output, options)
    tokens = list(tokenize.generate_tokens(io.StringIO(output).readline))
    for i, token in enumerate(tokens):
        token = list(token)
        if token[0] == tokenize.STRING and token[1].startswith('u'):
            token[1] = token[1][1:]
        tokens[i] = tuple(token)
    return tokenize.untokenize(tokens) 
Author: mwisslead, Project: vfp2py, Lines: 22, Source: vfp2py.py
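
The ANTLR front end above is specific to vfp2py, but the final loop is a reusable pattern: rewrite individual token strings, then feed the list back through tokenize.untokenize. A standalone sketch of just that pass (the helper name strip_u_prefixes is made up for illustration):

import io
import tokenize

def strip_u_prefixes(source):
    # Drop the u prefix from string literals; all other tokens pass through
    # untouched and the stream is reassembled with untokenize.
    tokens = []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.STRING and tok.string.startswith('u'):
            tok = tok._replace(string=tok.string[1:])
        tokens.append(tok)
    return tokenize.untokenize(tokens)

print(strip_u_prefixes("name = u'Alice'\n"))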

Example 3: minimize_source

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def minimize_source(source):
    """
    Remove comments and docstrings from Python `source`, preserving line
    numbers and syntax of empty blocks.

    :param str source:
        The source to minimize.

    :returns str:
        The minimized source.
    """
    source = mitogen.core.to_text(source)
    tokens = tokenize.generate_tokens(StringIO(source).readline)
    tokens = strip_comments(tokens)
    tokens = strip_docstrings(tokens)
    tokens = reindent(tokens)
    return tokenize.untokenize(tokens) 
Author: dw, Project: mitogen, Lines: 19, Source: minify.py
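
strip_comments, strip_docstrings and reindent are mitogen-internal token filters, but the overall shape (token stream in, a chain of filters, untokenize out) is easy to reproduce. A toy comment-stripping filter in the same spirit (illustrative only, not mitogen's implementation):

import io
import tokenize

def strip_comments_only(source):
    # Drop COMMENT tokens and rebuild the source from what remains; line
    # structure survives because the NL/NEWLINE tokens stay in place, though
    # trailing whitespace may be left where a comment used to be.
    tokens = [tok for tok in tokenize.generate_tokens(io.StringIO(source).readline)
              if tok.type != tokenize.COMMENT]
    return tokenize.untokenize(tokens)

print(strip_comments_only("x = 1  # the answer\n# a whole-line comment\ny = 2\n"))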

Example 4: make_exp

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def make_exp(exp):
    check_type, out_name, concrete = None, None, None

    if len(exp) == 1 and exp[0][1] == "_":
        return (None, None, None)
    if len(exp) == 2 and exp[0][1] == "*" and exp[1][0] == NAME:
        exp = [(OP, "..."), (OP, ":"), exp[1]]

    if exp[0][1] in ("...", "int", "str", "list", "tuple"):
        check_type = exp[0][1]
        exp.pop(0)

    if len(exp) == 2 and exp[0][1] == ":":
        out_name = exp[1][1]

    elif len(exp) > 0:
        concrete = tokenize.untokenize(exp)  # .decode()

    return (check_type, out_name, concrete) 
Author: eveem-org, Project: panoramix, Lines: 21, Source: tilde.py
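
make_exp works on plain (type, string) pairs, which is all tokenize.untokenize needs: given 2-tuples it falls back to a compatibility mode that ignores positions. A small illustration of building source from scratch this way (not part of panoramix):

import tokenize
from tokenize import NAME, NUMBER, OP

# Glue an expression together from bare (type, string) pairs; compat-mode
# untokenize inserts its own (rough) spacing.
pairs = [(NAME, 'balance'), (OP, '-'), (NUMBER, '42')]
print(tokenize.untokenize(pairs))   # prints something like "balance -42"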

Example 5: _dedent

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def _dedent(s):
    """Dedent python code string."""

    result = [t[:2] for t in generate_tokens(StringIO(s).readline)]
    # set initial indent to 0 if any
    if result[0][0] == INDENT:
        result[0] = (INDENT, '')
    return untokenize(result) 
Author: myhdl, Project: myhdl, Lines: 10, Source: _util.py
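
A quick usage sketch for _dedent (assuming the imports the snippet relies on: StringIO from io plus generate_tokens, untokenize and INDENT from tokenize):

indented = "    x = 1\n    y = 2\n"
print(_dedent(indented))
# The common leading indent is removed; compat-mode untokenize may tighten
# spacing inside the lines (e.g. "x =1" rather than "x = 1").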

Example 6: _filter_header

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def _filter_header(s):
    """Clean up 'L' in npz header ints.

    Cleans up the 'L' in strings representing integers. Needed to allow npz
    headers produced in Python2 to be read in Python3.

    Parameters
    ----------
    s : byte string
        Npy file header.

    Returns
    -------
    header : str
        Cleaned up header.

    """
    import tokenize
    if sys.version_info[0] >= 3:
        from io import StringIO
    else:
        from StringIO import StringIO

    tokens = []
    last_token_was_number = False
    # adding newline as python 2.7.5 workaround
    string = asstr(s) + "\n"
    for token in tokenize.generate_tokens(StringIO(string).readline):
        token_type = token[0]
        token_string = token[1]
        if (last_token_was_number and
                token_type == tokenize.NAME and
                token_string == "L"):
            continue
        else:
            tokens.append(token)
        last_token_was_number = (token_type == tokenize.NUMBER)
    # removing newline (see above) as python 2.7.5 workaround
    return tokenize.untokenize(tokens)[:-1] 
Author: Frank-qlu, Project: recruit, Lines: 41, Source: format.py
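
A usage sketch for _filter_header (assuming the numpy module context it lives in, where sys and asstr from numpy.compat are already available):

import ast
header = b"{'descr': '<f8', 'fortran_order': False, 'shape': (10L, 3L), }"
cleaned = _filter_header(header)
print(cleaned)                    # the L suffixes are gone; spacing may shift
print(ast.literal_eval(cleaned))  # now parses as a normal dict under Python 3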

Example 7: _preparse

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def _preparse(source, f=_compose(_replace_locals, _replace_booleans,
                                 _rewrite_assign)):
    """Compose a collection of tokenization functions

    Parameters
    ----------
    source : str
        A Python source code string
    f : callable
        This takes a tuple of (toknum, tokval) as its argument and returns a
        tuple with the same structure but possibly different elements. Defaults
        to the composition of ``_rewrite_assign``, ``_replace_booleans``, and
        ``_replace_locals``.

    Returns
    -------
    s : str
        Valid Python source code

    Notes
    -----
    The `f` parameter can be any callable that takes *and* returns input of the
    form ``(toknum, tokval)``, where ``toknum`` is one of the constants from
    the ``tokenize`` module and ``tokval`` is a string.
    """
    assert callable(f), 'f must be callable'
    return tokenize.untokenize(lmap(f, tokenize_string(source))) 
Author: Frank-qlu, Project: recruit, Lines: 29, Source: expr.py

Example 8: _filter_header

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def _filter_header(s):
    """Clean up 'L' in npz header ints.

    Cleans up the 'L' in strings representing integers. Needed to allow npz
    headers produced in Python2 to be read in Python3.

    Parameters
    ----------
    s : byte string
        Npy file header.

    Returns
    -------
    header : str
        Cleaned up header.

    """
    import tokenize
    if sys.version_info[0] >= 3:
        from io import StringIO
    else:
        from StringIO import StringIO

    tokens = []
    last_token_was_number = False
    for token in tokenize.generate_tokens(StringIO(asstr(s)).read):
        token_type = token[0]
        token_string = token[1]
        if (last_token_was_number and
                token_type == tokenize.NAME and
                token_string == "L"):
            continue
        else:
            tokens.append(token)
        last_token_was_number = (token_type == tokenize.NUMBER)
    return tokenize.untokenize(tokens) 
Author: ryfeus, Project: lambda-packs, Lines: 38, Source: format.py

Example 9: _preparse

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def _preparse(source, f=compose(_replace_locals, _replace_booleans,
                                _rewrite_assign)):
    """Compose a collection of tokenization functions

    Parameters
    ----------
    source : str
        A Python source code string
    f : callable
        This takes a tuple of (toknum, tokval) as its argument and returns a
        tuple with the same structure but possibly different elements. Defaults
        to the composition of ``_rewrite_assign``, ``_replace_booleans``, and
        ``_replace_locals``.

    Returns
    -------
    s : str
        Valid Python source code

    Notes
    -----
    The `f` parameter can be any callable that takes *and* returns input of the
    form ``(toknum, tokval)``, where ``toknum`` is one of the constants from
    the ``tokenize`` module and ``tokval`` is a string.
    """
    assert callable(f), 'f must be callable'
    return tokenize.untokenize(lmap(f, tokenize_string(source))) 
Author: birforce, Project: vnpy_crypto, Lines: 29, Source: expr.py

Example 10: _rewrite_assign

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def _rewrite_assign(source):
    """Rewrite the assignment operator for PyTables expression that want to use
    ``=`` as a substitute for ``==``.
    """
    res = []
    g = tokenize.generate_tokens(StringIO(source).readline)
    for toknum, tokval, _, _, _ in g:
        res.append((toknum, '==' if tokval == '=' else tokval))
    return tokenize.untokenize(res) 
Author: ktraunmueller, Project: Computable, Lines: 11, Source: expr.py
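
A usage sketch for _rewrite_assign (assuming StringIO is imported as in the surrounding pandas module):

print(_rewrite_assign("flag = True"))
# prints something like "flag ==True": compat-mode untokenize tightens the
# spacing, but the lone '=' has been rewritten to '=='.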

Example 11: untokenize

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def untokenize(tokens):
    """
    Converts the output of tokenize.generate_tokens back into a human-readable
    string (that doesn't contain oddly-placed whitespace everywhere).

    .. note::

        Unlike :meth:`tokenize.untokenize`, this function requires the 3rd and
        4th items in each token tuple (though we can use lists *or* tuples).
    """
    out = ""
    last_lineno = -1
    last_col = 0
    for tok in tokens:
        token_string = tok[1]
        start_line, start_col = tok[2]
        end_line, end_col = tok[3]
        # The following two conditionals preserve indentation:
        if start_line > last_lineno:
            last_col = 0
        if start_col > last_col and token_string != '\n':
            out += (" " * (start_col - last_col))
        out += token_string
        last_col = end_col
        last_lineno = end_line
    return out 
Author: riusksk, Project: shellsploit-library, Lines: 28, Source: token_utils.py
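
A usage sketch for this position-aware untokenize (illustrative; std_tokenize is just an alias to avoid shadowing the function above):

import io
import tokenize as std_tokenize

src = "def add(a, b):\n    return a + b\n"
toks = list(std_tokenize.generate_tokens(io.StringIO(src).readline))
print(untokenize(toks))
# With an unmodified token stream the positional bookkeeping reproduces the
# original layout, without the extra spaces stdlib compat mode would add.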

Example 12: Untokenize

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def Untokenize(offset_tokens):
  """Return the string representation of an iterable of OffsetTokens."""
  # Make a copy. Don't modify the original.
  offset_tokens = collections.deque(offset_tokens)

  # Strip leading NL tokens.
  while offset_tokens[0].type == tokenize.NL:
    offset_tokens.popleft()

  # Strip leading vertical whitespace.
  first_token = offset_tokens.popleft()
  # Take care not to modify the existing token. Create a new one in its place.
  first_token = OffsetToken(first_token.type, first_token.string,
                            (0, first_token.offset[1]))
  offset_tokens.appendleft(first_token)

  # Convert OffsetTokens to tokenize tokens.
  tokenize_tokens = []
  row = 1
  col = 0
  for t in offset_tokens:
    offset_row, offset_col = t.offset
    if offset_row == 0:
      col += offset_col
    else:
      row += offset_row
      col = offset_col
    tokenize_tokens.append((t.type, t.string, (row, col), (row, col), None))

  # tokenize can't handle whitespace before line continuations.
  # So add a space.
  return tokenize.untokenize(tokenize_tokens).replace('\\\n', ' \\\n') 
Author: FSecureLABS, Project: Jandroid, Lines: 34, Source: offset_token.py

Example 13: apply_job_security

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def apply_job_security(code):
    """Treat input `code` like Python 2 (implicit strings are byte literals).

    The implementation is horribly inefficient but the goal is to be compatible
    with what Mercurial does at runtime.
    """
    buf = io.BytesIO(code.encode("utf8"))
    tokens = tokenize.tokenize(buf.readline)
    # NOTE: by setting the fullname to `mercurial.pycompat` below, we're
    # ensuring that hg-specific pycompat imports aren't inserted to the code.
    data = tokenize.untokenize(replacetokens(list(tokens), "mercurial.pycompat"))
    return cast(str, data.decode("utf8")) 
Author: ambv, Project: retype, Lines: 14, Source: retype_hgext.py

Example 14: fixLazyJson

# Required import: import tokenize [as alias]
# Or: from tokenize import untokenize [as alias]
def fixLazyJson (in_text):
    tokengen = tokenize.generate_tokens(StringIO(in_text.decode('ascii')).readline)

    result = []
    for tokid, tokval, _, _, _ in tokengen:
        # fix unquoted strings
        if (tokid == token.NAME):
            if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']:
                tokid = token.STRING
                tokval = u'"%s"' % tokval

        # fix single-quoted strings
        elif (tokid == token.STRING):
            if tokval.startswith ("'"):
                tokval = u'"%s"' % tokval[1:-1].replace ('"', '\\"')

        # remove invalid commas
        elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
            if (len(result) > 0) and (result[-1][1] == ','):
                result.pop()

        result.append((tokid, tokval))

    return tokenize.untokenize(result) 
Author: philipodonnell, Project: paperbroker, Lines: 31, Source: GoogleFinanceQuoteAdapter.py
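
A usage sketch for fixLazyJson (assuming the module-level imports the snippet relies on: tokenize, token, and StringIO):

import json
lazy = b"{ticker: 'GOOG', price: 123.45, tags: ['tech', 'search',],}"
fixed = fixLazyJson(lazy)
print(fixed)              # keys and strings are double-quoted, stray commas removed
print(json.loads(fixed))  # the cleaned-up text now parses as real JSON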


Note: The tokenize.untokenize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license; do not republish without permission.