

Python tokenize.untokenize Function Code Examples

This article compiles typical usage examples of Python's tokenize.untokenize function. If you are unsure exactly how untokenize is used, how it is typically called, or what it looks like in real code, the hand-picked code samples below may help.


The following presents 15 code examples of the untokenize function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
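
For orientation, here is a minimal, self-contained sketch (my own illustration, not taken from any of the projects below) of the round-trip that most of these examples rely on: tokenize a source string, keep only the (type, string) pairs, and rebuild source text with tokenize.untokenize.

import io
import tokenize

source = "x = 1 + 2  # a comment\n"

# Tokenize the string, keep only (type, string) pairs, then rebuild the source.
tokens = tokenize.generate_tokens(io.StringIO(source).readline)
pairs = [(tok.type, tok.string) for tok in tokens]
rebuilt = tokenize.untokenize(pairs)

# Spacing may differ from the original, but the result tokenizes the same way.
print(rebuilt)

Many of the examples below follow exactly this pattern, filtering or rewriting the token list before calling untokenize.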

Example 1: check_roundtrip

    def check_roundtrip(self, f):
        """
        Test roundtrip for `untokenize`. `f` is an open file or a string.
        The source code in f is tokenized to both 5- and 2-tuples.
        Both sequences are converted back to source code via
        tokenize.untokenize(), and the latter tokenized again to 2-tuples.
        The test fails if the 3 pair tokenizations do not match.

        When untokenize bugs are fixed, untokenize with 5-tuples should
        reproduce code that does not contain a backslash continuation
        following spaces.  A proper test should test this.
        """
        # Get source code and original tokenizations
        if isinstance(f, str):
            code = f.encode('utf-8')
        else:
            code = f.read()
            f.close()
        readline = iter(code.splitlines(keepends=True)).__next__
        tokens5 = list(tokenize(readline))
        tokens2 = [tok[:2] for tok in tokens5]
        # Reproduce tokens2 from pairs
        bytes_from2 = untokenize(tokens2)
        readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__
        tokens2_from2 = [tok[:2] for tok in tokenize(readline2)]
        self.assertEqual(tokens2_from2, tokens2)
        # Reproduce tokens2 from 5-tuples
        bytes_from5 = untokenize(tokens5)
        readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__
        tokens2_from5 = [tok[:2] for tok in tokenize(readline5)]
        self.assertEqual(tokens2_from5, tokens2)
Developer ID: chidea, Project: GoPythonDLLWrapper, Lines of code: 31, Source file: test_tokenize.py
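
As the docstring above points out, untokenize accepts either full 5-tuples (with start/end positions) or bare 2-tuples, and behaves differently in each case. A short illustrative sketch of the difference (my own, not part of the project above):

import io
import tokenize

source = "spam ( 1 )\n"
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))

# Full tokens carry positions, so untokenize can reproduce the original spacing.
print(repr(tokenize.untokenize(tokens)))

# Bare 2-tuples drop the positions; untokenize falls back to compatibility
# mode and regenerates whitespace on its own, so spacing may change.
print(repr(tokenize.untokenize(t[:2] for t in tokens)))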

Example 2: __init__

    def __init__(self, tokens, filename='<unknown>', line_offset=0):
        """Create an executor for a token stream

        Arguments:
            tokens (List[TokenInfo]): The tokens to execute.
            filename (Optional[str]): The filename where the tokens originated
                                      (default: ``'<unknown>'``).
                                      Used in error handling, but never opened.
            line_offset (Optional[int]): The line offset of the tokens within
                                         the input file (default: zero).

        Raises:
            RuleExecutionError: Raised if the token stream is invalid or if
                                it could not be compiled.
        """

        self.input_tokens = tokens
        self.input_lines = tokenize.untokenize(self.input_tokens).split('\n')
        self.filename = filename
        self.line_offset = line_offset

        self._validate_paren_levels(tokens)
        self.eval_tokens = self._gen_eval_tokens(tokens)
        self.eval_str = tokenize.untokenize(self.eval_tokens)
        self.codeobj = self._compile(self.eval_str)
Developer ID: Grk0, Project: docker-cleanup, Lines of code: 25, Source file: tokenrunner.py

Example 3: dealwith

 def dealwith(self, readline, **kwargs):
     """
         Replace the contents of spec file with the translated version
         readline should be a callable object
         , which provides the same interface as the readline() method of built-in file objects
     """
     data = []
     try:
         # We pass in the data variable as an argument so that we
         # get partial output even in the case of an exception.
         self.tokeniser.translate(readline, data, **kwargs)
     except Exception as e:
         # Comment out partial output so that it doesn't result in
         # a syntax error when received by the interpreter.
         lines = []
         for line in untokenize(data).split('\n'):
             lines.append("# %s" % line)
         
         # Create exception to put into code to announce error
         exception = 'raise Exception("""--- internal spec codec error --- %s""")' % e
         
         # Make sure the exception doesn't add a new line and throw off the line numbers
         if len(lines) == 1:
             data = "%s%s" % (exception, lines[0])
         else:
             lines.append(exception)
             first_line = lines.pop()
             lines[0] = "%s %s" % (first_line, lines[0])
             data = '\n'.join(lines)
     else:
         # At this point, data is a list of tokens
         data = untokenize(data)
     
     return data
Developer ID: benauthor, Project: nose-of-yeti, Lines of code: 34, Source file: spec_codec.py

Example 4: get_context

def get_context(source, position):
    lines, lineno = get_block(source, position)

    tokens = TokenGenerator(lines)
    ctype, ctx, match, fctx = 'expr', '', '', ''
    while True:
        tid, value = tokens.next()
        if not tid: break

        if tid == NAME and value == 'import':
            ctype, fctx = 'import', None
            ctx, match = parse_import(tokens)

        elif tid == NAME and value == 'from':
            fctx = None
            ctype, ctx, match = parse_from(tokens)

        elif tid == NAME or value in BRACKETS.keys():
            ctype = 'expr'
            tokens.hold(tid, value)
            ctx, match, fctx = parse_expr(tokens)
            ctx = untokenize(prep_tokens(ctx)).strip().rstrip('.')
            fctx = untokenize(prep_tokens(fctx)).strip().rstrip('.')

        else:
            ctype, ctx, match, fctx = 'expr', '', '', ''

    return ctype, lineno, ctx, match, fctx
Developer ID: corranwebster, Project: supplement, Lines of code: 28, Source file: assistant.py

Example 5: main

def main():
    py_input = """exec admin 'show info'
print 'hello'
exec sql 'select * from namespace1'\n"""
    print py_input
    py_stream = cStringIO.StringIO(py_input)
    print tokenize.untokenize(tarantool_translate(py_stream.readline))
Developer ID: spbirhade, Project: tarantool, Lines of code: 7, Source file: tarantool_preprocessor.py

Example 6: main

def main():
    """Executed when script is run as-is."""
    # magic_files = {}
    for filename in locate_files(ROOT_DIR):
        print("Processing %s" % filename)
        with open(filename, "rt") as f:
            tokens = list(tokenize.generate_tokens(f.readline))
            text1 = tokenize.untokenize(tokens)
            ntokens = normalize_tokens(tokens)
            text2 = tokenize.untokenize(ntokens)
            assert text1 == text2
Developer ID: StevenLOL, Project: h2o-3, Lines of code: 11, Source file: pymagic.py

Example 7: nocomment

def nocomment(s):
    result = []
    g = tokenize.generate_tokens(io.BytesIO(s).readline)  
    for toknum, tokval, _, _, _  in g:
        if toknum != tokenize.COMMENT:
            result.append((toknum, tokval))
    return tokenize.untokenize(result)
Developer ID: gokycat, Project: LexisNexis, Lines of code: 7, Source file: reader.py
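
Note that this snippet targets Python 2, where io.BytesIO(s).readline yields byte strings that generate_tokens accepts. Under Python 3 the same idea would look roughly like this (an illustrative sketch, not part of the original project):

import io
import tokenize

def nocomment_py3(s):
    # Strip COMMENT tokens and rebuild the source from (type, string) pairs.
    result = []
    for toknum, tokval, _, _, _ in tokenize.generate_tokens(io.StringIO(s).readline):
        if toknum != tokenize.COMMENT:
            result.append((toknum, tokval))
    return tokenize.untokenize(result)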

Example 8: convert

def convert(readline):
    result = []
    in_repeat = False

    for ttype, tval, _, _, _ in tokenize.generate_tokens(readline):
        if ttype == token.NAME and tval == "repeat":
            result.extend([
                (token.NAME, "for"),
                (token.NAME, "_"),
                (token.NAME, "in"),
                (token.NAME, "range"),
                (token.OP, "(")
            ])
            in_repeat = True

        elif in_repeat and ttype == token.OP and tval == ":":
            # Close the inserted "range(" call before emitting the loop colon.
            result.extend([
                (token.OP, ")"),
                (token.OP, ":")
            ])
            in_repeat = False

        else:
            result.append((ttype, tval))

    return tokenize.untokenize(result)
Developer ID: bravegnu, Project: python-new-stmt, Lines of code: 25, Source file: pyturtle.py

Example 9: commandline

def commandline():
    """zhpy3, the python language in Traditional Chinese

    usage: twpy file.twpy
    """
    if len(sys.argv) != 2:
        print(commandline.__doc__)
        sys.exit(1)

    file_path = sys.argv[1]

    if not os.path.exists(file_path):
        print("twpy: file '%s' does not exists" % file_path)
        sys.exit(1)

    #sys.meta_path = [ImportHook()]

    sys.path[0] = os.path.dirname(os.path.join(os.getcwd(), file_path))

    source = tokenize.untokenize(
            list(translate_code(open(file_path).readline, translations)))

    #translate_module(__builtins__)

    code = compile(source, file_path, "exec")

    runpy._run_module_code(code, mod_name="__main__")
Developer ID: Abelisme, Project: zhpy, Lines of code: 27, Source file: twpy.py

Example 10: feedInput

def feedInput(code, test) :
	# Initial variable declaration
	temp = 0
	i = 0
	limit = len(test)
	# Tokenize the code
	g = tokenize.generate_tokens(io.BytesIO("\n".join(code)).readline)
	result = []
	# Traverse for each token
	for toknum, tokval, _, _, _ in g:
		# True if no input statement was found within the previous 3 tokens
		if(temp==0) :
			# True if there are test cases left to feed and the token found happens to be input
			if(i<limit and tokval=="input") :
				# replace token with value
				result.append((toknum, test[i]))
				i += 1
				temp = 3
			else :
				result.append((toknum, tokval))
		else :
			# Input was found
			temp -= 1
	# Return the untokenized form of the code as a list of lines
	return tokenize.untokenize(result).split("\n")
Developer ID: nishu94, Project: programming, Lines of code: 25, Source file: StatementCoverage.py

Example 11: decistmt

def decistmt(s):
    """Substitute Decimals for floats in a string of statements.

    >>> from decimal import Decimal
    >>> s = 'print +21.3e-5*-.1234/81.7'
    >>> decistmt(s)
    "print +Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')"

    >>> exec(s)
    -3.21716034272e-007
    >>> exec(decistmt(s))
    -3.217160342717258261933904529E-7
    """
    result = []
    # tokenize the string
    g = tokenize.generate_tokens(StringIO(s).readline)
    for toknum, tokval, _, _, _ in g:
        # replace NUMBER tokens
        if toknum == tokenize.NUMBER and '.' in tokval:
            result.extend([
                (tokenize.NAME, 'Decimal'),
                (tokenize.OP, '('),
                (tokenize.STRING, repr(tokval)),
                (tokenize.OP, ')')
            ])
        else:
            result.append((toknum, tokval))
    return tokenize.untokenize(result)
Developer ID: ferjavrec, Project: product_price_list, Lines of code: 28, Source file: price_list.py
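
The doctest above is written for Python 2 (print statement, old float repr). The same substitution idea carries over to Python 3; a rough, illustrative adaptation (not from the project above) might look like this:

import io
import tokenize
from decimal import Decimal

s = "print(+21.3e-5 * -.1234 / 81.7)"
result = []
for toknum, tokval, *_ in tokenize.generate_tokens(io.StringIO(s).readline):
    # Wrap every NUMBER token containing a '.' in a Decimal(...) call.
    if toknum == tokenize.NUMBER and '.' in tokval:
        result.extend([
            (tokenize.NAME, 'Decimal'),
            (tokenize.OP, '('),
            (tokenize.STRING, repr(tokval)),
            (tokenize.OP, ')'),
        ])
    else:
        result.append((toknum, tokval))

exec(tokenize.untokenize(result))  # prints a Decimal result instead of a float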

Example 12: test_DeleteStatement_valid

def test_DeleteStatement_valid(input, expected_type, expected_expr):
    smt = parser.ExpressionStatement.try_parse(tok(input))

    str_expr = tokenize.untokenize(smt.expr).strip()

    assert smt.type == expected_type
    assert str_expr == expected_expr
Developer ID: Grk0, Project: docker-cleanup, Lines of code: 7, Source file: test_parser.py

Example 13: tostring

def tostring(tokens):
    '''Converts a list of tokens to a string'''

    last_pos = tokens[0].start

    while tokens[-1].type == DEDENT:
        tokens.pop()

    if tokens[-1].type != ENDMARKER:
        start = end = tokens[-1].end
        tokens.append(tknew(ENDMARKER, '', start, end, line=''))

    # tkprint(tokens)

    tokens = [tk.to_token_info() for tk in tokens]
    try:
        return tokenize.untokenize(tokens)
    except ValueError:
        for idx, tk in enumerate(tokens):
            a, b = tk.start
            c, d = last_pos
            if (a < c) or (a == c and d > b):
                fmt = idx, tokens[idx - 1], tk
                print(tokens)
                raise ValueError(
                    'tokens overlap starting at #%s:\n\t%s\n\t%s' % fmt)
            last_pos = tk.end
        else:
            raise
Developer ID: EuPaulo, Project: pytuga, Lines of code: 29, Source file: plylexer.py

Example 14: main

def main():
    import tempfile
    if sys.argv[1] == '-p':
        file = sys.argv[2]
        print_script = True
        tree = maketree(Tokens(file), preamble=True)
    else:
        file = sys.argv[1]
        print_script = False
        tree = maketree(Tokens(file))

    try:
        code = tokenize.untokenize(flatten(tree)).decode()
    except:
        pprint(tree, indent=4)
        raise
    if print_script:
        print(code)
        sys.exit()

    del sys.argv[0]

    tf = tempfile.NamedTemporaryFile('w')
    tf.write(code)
    tf.flush()
    ns = {'__name__': '__main__'}
    exec(PREAMBLE, ns)
    try:
        exec(compile(code, tf.name, 'exec'), ns)
    except Exception as e:
        # pprint(tree, indent=4)
        print(code)
        raise
Developer ID: ninjaaron, Project: eggshell, Lines of code: 33, Source file: preprocessor.py

Example 15: __init__

 def __init__(self, *args, **kwargs):
     codecs.StreamReader.__init__(self, *args, **kwargs)
     data = tokenize.untokenize(translate(self.stream.readline))
     logging.debug('START RESULT')
     logging.debug(data)
     logging.debug('END RESULT')
     self.stream = StringIO.StringIO(data)
Developer ID: pombredanne, Project: pypatt_python_pattern_matching, Lines of code: 7, Source file: codec.py


Note: The tokenize.untokenize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's License before distributing or reusing the code; do not reproduce without permission.