This article collects typical usage examples of the astor.to_source function in Python. If you have been wondering what exactly to_source does, how to call it, or what real-world usage looks like, the hand-picked examples below should help.
The following section presents 15 code examples of to_source drawn from open-source projects, ordered by popularity.
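Before the collected examples, here is a minimal sketch of the basic round trip with astor.to_source: parse a string into an ast.Module, then regenerate equivalent source from it. The sample function greet is invented purely for illustration and does not come from any of the projects below.

import ast
import astor

# Parse a small snippet into an AST.
source = "def greet(name):\n    return 'Hello, ' + name\n"
tree = ast.parse(source)

# astor.to_source() walks the AST and returns equivalent Python source as a string.
regenerated = astor.to_source(tree)
print(regenerated)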
Example 1: output
def output(cls, node1, node2, operator_name):
    """
    Compare the original source code and mutant.
    :param node1:
    :param node2:
    :return:
    """
    if not os.path.exists(os.path.curdir + '/output'):
        os.mkdir(os.path.curdir + '/output')
    dest_dir = os.path.curdir + '/output/'

    if not os.path.isfile(dest_dir + 'original.py'):
        # write the original code to a file
        # original_code = codegen.to_source(node1)
        original_code = astor.to_source(node1, add_line_information=True)
        filename = "original.py"
        path = os.path.join(dest_dir, filename)
        cls.write_to_file(path, original_code)

    # write the mutated code to a file
    # mutated_code = codegen.to_source(node2)
    mutated_code = astor.to_source(node2, add_line_information=True)
    filename = None
    while True:
        timestamp = str(int(time.time()))
        filename = operator_name + "_mutant_" + timestamp + ".py"
        if not os.path.isfile(dest_dir + filename):
            break
    path = os.path.join(dest_dir, filename)
    cls.write_to_file(path, mutated_code)
Example 2: compare
def compare(input_src, expected_output_src, transformer_class):
    """
    Testing utility. Takes the input source and transforms it with
    `transformer_class`. It then compares the output with the given
    reference and throws an exception if they don't match.
    This method also deals with name-mangling.
    """
    uid = naming.UniqueIdentifierFactory()

    actual_root = ast.parse(unindent(input_src))
    EncodeNames().visit(actual_root)
    actual_root = transformer_class(uid).checked_visit(actual_root)
    actual_root = ast.fix_missing_locations(actual_root)
    compile(actual_root, "<string>", 'exec')
    actual_src = astor.to_source(actual_root)

    expected_root = ast.parse(unindent(expected_output_src))
    EncodeNames().visit(expected_root)
    expected_src = astor.to_source(expected_root)

    cmps = itertools.izip_longest(expected_src.splitlines(), actual_src.splitlines())
    for linenr, c in enumerate(cmps, 1):
        expected_line = c[0]
        actual_line = c[1]

        if expected_line != actual_line:
            sys.stderr.write(actual_src)
            sys.stderr.write("\n")

        if expected_line != actual_line:
            raise AssertionError("Line %s differs. Expected %s but got %s." %
                                 (linenr, repr(expected_line), repr(actual_line)))
Example 3: run
def run():
    asdl_text = open('asdl/lang/py/py_asdl.txt').read()
    grammar = ASDLGrammar.from_text(asdl_text)

    annot_file = 'data/django/all.anno'
    code_file = 'data/django/all.code'

    transition_system = PythonTransitionSystem(grammar)

    for idx, (src_query, tgt_code) in enumerate(zip(open(annot_file), open(code_file))):
        src_query = src_query.strip()
        tgt_code = tgt_code.strip()

        query_tokens, tgt_canonical_code, str_map = Django.canonicalize_example(src_query, tgt_code)
        python_ast = ast.parse(tgt_canonical_code).body[0]
        gold_source = astor.to_source(python_ast)
        tgt_ast = python_ast_to_asdl_ast(python_ast, grammar)
        tgt_actions = transition_system.get_actions(tgt_ast)

        # sanity check
        hyp = Hypothesis()
        hyp2 = Hypothesis()
        for action in tgt_actions:
            assert action.__class__ in transition_system.get_valid_continuation_types(hyp)
            if isinstance(action, ApplyRuleAction):
                assert action.production in transition_system.get_valid_continuating_productions(hyp)
            hyp = hyp.clone_and_apply_action(action)
            hyp2.apply_action(action)

        src_from_hyp = astor.to_source(asdl_ast_to_python_ast(hyp.tree, grammar))

        assert src_from_hyp == gold_source
        assert hyp.tree == hyp2.tree and hyp.tree is not hyp2.tree

        print(idx)
Example 4: extract_grammar
def extract_grammar(code_file, prefix='py'):
    line_num = 0
    parse_trees = []
    for line in open(code_file):
        code = line.strip()
        parse_tree = parse(code)

        # leaves = parse_tree.get_leaves()
        # for leaf in leaves:
        #     if not is_terminal_type(leaf.type):
        #         print parse_tree

        # parse_tree = add_root(parse_tree)

        parse_trees.append(parse_tree)

        # sanity check
        ast_tree = parse_tree_to_python_ast(parse_tree)
        ref_ast_tree = ast.parse(canonicalize_code(code)).body[0]
        source1 = astor.to_source(ast_tree)
        source2 = astor.to_source(ref_ast_tree)

        assert source1 == source2

        # check rules
        # rule_list = parse_tree.get_rule_list(include_leaf=True)
        # for rule in rule_list:
        #     if rule.parent.type == int and rule.children[0].type == int:
        #         # rule.parent.type == str and rule.children[0].type == str:
        #         pass

        # ast_tree = tree_to_ast(parse_tree)
        # print astor.to_source(ast_tree)
        # print parse_tree

        # except Exception as e:
        #     error_num += 1
        #     # pass
        #     # print e

        line_num += 1

    print 'total line of code: %d' % line_num

    grammar = get_grammar(parse_trees)

    with open(prefix + '.grammar.txt', 'w') as f:
        for rule in grammar:
            str = rule.__repr__()
            f.write(str + '\n')

    with open(prefix + '.parse_trees.txt', 'w') as f:
        for tree in parse_trees:
            f.write(tree.__repr__() + '\n')

    return grammar, parse_trees
Example 5: process_query
def process_query(query, code):
    from parse import code_to_ast, ast_to_tree, tree_to_ast, parse
    import astor
    str_count = 0
    str_map = dict()

    match_count = 1
    match = QUOTED_STRING_RE.search(query)
    while match:
        str_repr = '_STR:%d_' % str_count
        str_literal = match.group(0)
        str_string = match.group(2)

        match_count += 1
        # if match_count > 50:
        #     return
        #

        query = QUOTED_STRING_RE.sub(str_repr, query, 1)
        str_map[str_literal] = str_repr

        str_count += 1
        match = QUOTED_STRING_RE.search(query)

        code = code.replace(str_literal, '\'' + str_repr + '\'')

    # clean the annotation
    # query = query.replace('.', ' . ')

    for k, v in str_map.iteritems():
        if k == '\'%s\'' or k == '\"%s\"':
            query = query.replace(v, k)
            code = code.replace('\'' + v + '\'', k)

    # tokenize
    query_tokens = nltk.word_tokenize(query)

    new_query_tokens = []
    # break up function calls
    for token in query_tokens:
        new_query_tokens.append(token)
        i = token.find('.')
        if 0 < i < len(token) - 1:
            new_tokens = ['['] + token.replace('.', ' . ').split(' ') + [']']
            new_query_tokens.extend(new_tokens)

    # check if the code compiles
    tree = parse(code)
    ast_tree = tree_to_ast(tree)
    astor.to_source(ast_tree)

    return new_query_tokens, code, str_map
Example 6: _log_failure
def _log_failure(arg_num, msg=None):
    """ Retrace stack and log the failed expression information """

    # stack() returns a list of frame records
    #   0 is the _log_failure() function
    #   1 is the expect() function
    #   2 is the function that called expect(), that's what we want
    #
    # a frame record is a tuple like this:
    #   (frame, filename, line, funcname, contextlist, index)
    # we're only interested in the first 4.
    frame, filename, file_lineno, funcname = inspect.stack()[2][:4]

    # Note that a frame object should be deleted once used to be safe and stop possible
    # memory leak from circular referencing
    try:
        frame_source_lines, frame_start_lineno = (inspect.getsourcelines(frame))
    finally:
        del frame

    filename = os.path.basename(filename)

    # Build abstract syntax tree from source of frame
    source_ast = ast.parse(''.join(frame_source_lines))

    # Locate the executed expect function
    func_body = source_ast.body[0].body
    map_lineno_to_node = {}
    for idx, node in enumerate(func_body):
        map_lineno_to_node[node.lineno] = node

    last_lineno = file_lineno - frame_start_lineno + 1

    element_idx = [x for x in map_lineno_to_node.keys() if x <= last_lineno]
    element_idx = max(element_idx)
    expect_function_ast = map_lineno_to_node[element_idx]

    # Return the source code of the numbered argument
    arg = expect_function_ast.value.args[arg_num]
    line = arg.lineno
    if isinstance(arg, (ast.Tuple, ast.List)):
        expr = astor.to_source(arg.elts[0])
    else:
        expr = astor.to_source(arg)

    filename = os.path.basename(filename)
    failure_info = {'file': filename, 'line': line, 'funcname': funcname, 'msg': msg, 'expression': expr}
    _failed_expectations.append(failure_info)
Example 7: compare
def compare(self, src, expected_src):
    actual_root = ast.parse(utils.unindent(src))
    scoping.ScopeAssigner().visit(actual_root)
    EncodeScopeInIdentifier().visit(actual_root)
    actual_src = astor.to_source(actual_root)

    expected_root = ast.parse(utils.unindent(expected_src))
    expected_src = astor.to_source(expected_root)

    cmps = itertools.izip_longest(expected_src.splitlines(), actual_src.splitlines())
    for linenr, c in enumerate(cmps, 1):
        expected_line = c[0]
        actual_line = c[1]
        self.assertEqual(expected_line, actual_line,
                         "Line %s differs. Expected %s but got %s." %
                         (linenr, repr(expected_line), repr(actual_line)))
Example 8: test_more_captures
def test_more_captures():
    name_types = (ast.Name, ast.arg) if six.PY3 else ast.Name

    @compile_template
    def map_lambda(var=name_types, body=ast.expr, seq=ast.expr):
        map(lambda var: body, seq)

    @get_body_ast
    def tree():
        squares = map(lambda x: x ** 2, range(10))

    m = match(map_lambda, tree)[0]
    assert astor.to_source(m.captures['body']).strip() == '(x ** 2)'
    assert astor.to_source(m.captures['seq']).strip() == 'range(10)'
Example 9: log_mutant
def log_mutant(self, active_file, logger):
    """ Prints a one-line summary to highlight the difference between the original code and the mutant.
        split('\n')[0] is used to truncate if/elif mutation instances (entire if sections were printed before)
    """
    logger.info("{0} - Line {1}".format(active_file, self.line_no))
    logger.info("Original: {0}".format(self.original_source.split('\n')[0]))
    logger.info("Mutant : {0}".format(astor.to_source(self.base_node)).split('\n')[0])
Example 10: parse_bbscript
def parse_bbscript(f, basename, dirname):
    global commandDB, astRoot, j, MODE
    BASE = f.tell()
    astRoot = Module(body=[])
    j = OrderedDict()
    j["Functions"] = []
    j["FunctionsPy"] = []
    f.seek(0x30)
    filesize = struct.unpack(MODE + "I", f.read(4))[0]
    f.seek(0x38)
    FUNCTION_COUNT, = struct.unpack(MODE + "I", f.read(4))
    f.seek(0x24 * (FUNCTION_COUNT), 1)
    parse_bbscript_routine(f, filesize + 0x38)
    '''
    for i in range(0, FUNCTION_COUNT):
        f.seek(BASE + 4 + 0x24 * i)
        FUNCTION_NAME = f.read(0x20).split("\x00")[0]
        if log: log.write("\n#---------------{0} {1}/{2}\n".format(FUNCTION_NAME, i, FUNCTION_COUNT))
        FUNCTION_OFFSET, = struct.unpack(MODE + "I", f.read(4))
        f.seek(BASE + 4 + 0x24 * FUNCTION_COUNT + FUNCTION_OFFSET)
        parse_bbscript_routine(f)
    '''
    py = open(os.path.join(dirname, basename) + ".py", "w")
    py.write(astor.to_source(astRoot))
    py.close()
    return j
Example 11: canonicalize_hs_example
def canonicalize_hs_example(query, code):
    query = re.sub(r'<.*?>', '', query)
    query_tokens = nltk.word_tokenize(query)

    code = code.replace('§', '\n').strip()

    # sanity check
    parse_tree = parse_raw(code)
    gold_ast_tree = ast.parse(code).body[0]
    gold_source = astor.to_source(gold_ast_tree)
    ast_tree = parse_tree_to_python_ast(parse_tree)
    pred_source = astor.to_source(ast_tree)

    assert gold_source == pred_source, 'sanity check fails: gold=[%s], actual=[%s]' % (gold_source, pred_source)

    return query_tokens, code, parse_tree
Example 12: _fail
def _fail(node, msg='Visit error'):
    try:
        raise NotImplementedError('%s (in ast.%s). Source:\n\t\t\t%s' % (
            msg, node.__class__.__name__, astor.to_source(node)))
    except AttributeError:  # Astor was unable to convert the source.
        raise NotImplementedError('%s (in ast.%s).' % (
            msg, node.__class__.__name__))
Example 13: startJob
def startJob(self, body):
    project = json.loads(body)
    os.mkdir(self.job_dir)

    # sprit_id = project['sprite_idx']
    block_id = project['block_idx']

    # Write uploaded program
    xml = os.path.join(self.job_dir, 'job.xml')
    with open(xml, 'w') as file:
        file.write(project['project'].encode('utf-8'))

    # Parse and write python program
    p = parser.parses(project['project'].encode('utf-8'))
    ctx = p.create_context()
    file_ast = p.to_ast(ctx, 'main_%s' % block_id)
    code = astor.to_source(file_ast)
    program = os.path.join(self.job_dir, 'job.py')
    with open(program, 'w') as file:
        file.write(code)

    self.job_process = JobProcess(self, self.id)
    reactor.spawnProcess(
        self.job_process, sys.executable,
        [sys.executable, program], env=os.environ)
Example 14: parse_bbscript
def parse_bbscript(f, basename, filename, filesize):
    global commandDB, astRoot, charName, j, MODE
    BASE = f.tell()
    astRoot = Module(body=[])
    j = OrderedDict()
    j["Functions"] = []
    j["FunctionsPy"] = []
    charName = filename[-6:-4]
    FUNCTION_COUNT, = struct.unpack(MODE + "I", f.read(4))
    # f.seek(BASE+4+0x20)
    # initEnd, = struct.unpack(MODE+"I",f.read(4))
    # initEnd = BASE + initEnd+4+0x24*FUNCTION_COUNT
    # initEnd = BASE+filesize
    f.seek(BASE + 4 + 0x24 * (FUNCTION_COUNT))
    parse_bbscript_routine(f, os.path.getsize(f.name))
    '''
    for i in range(0, FUNCTION_COUNT):
        f.seek(BASE + 4 + 0x24 * i)
        FUNCTION_NAME = f.read(0x20).split("\x00")[0]
        if log: log.write("\n#---------------{0} {1}/{2}\n".format(FUNCTION_NAME, i, FUNCTION_COUNT))
        FUNCTION_OFFSET, = struct.unpack(MODE + "I", f.read(4))
        f.seek(BASE + 4 + 0x24 * FUNCTION_COUNT + FUNCTION_OFFSET)
        parse_bbscript_routine(f)
    '''
    if len(sys.argv) == 3:
        outpath = os.path.join(sys.argv[2], filename[:-4] + '.py')
    else:
        outpath = filename[:-4] + '.py'
    py = open(outpath, "wb")
    py.write(astor.to_source(astRoot))
    py.close()
    return filename, j
Example 15: execute
def execute(self, src, output_contains=None):
    src = utils.unindent(src)
    expected = self._run(src)

    node = ast.parse(src)
    node = saneitizer.Saneitizer().process(node)
    naming.MakeIdsValid().visit(node)
    transformed_code = astor.to_source(node)

    pydron_builtins = "from pydron.translation.builtins import *"
    transformed_code = pydron_builtins + "\n\n" + transformed_code

    try:
        # just to see if it compiles
        compile(node, "[string]", 'exec')

        # we actually use the source code to run
        actual = self._run(transformed_code)

        self.assertEqual(actual, expected)
        if output_contains:
            self.assertIn(output_contains, actual)
    except:
        sys.stderr.write(transformed_code)
        sys.stderr.write("\n\n")
        raise