當前位置: 首頁>>代碼示例>>Python>>正文


Python Logger.info方法代碼示例

本文整理匯總了Python中utilities.Logger.Logger.info方法的典型用法代碼示例。如果您正苦於以下問題:Python Logger.info方法的具體用法?Python Logger.info怎麽用?Python Logger.info使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在utilities.Logger.Logger的用法示例。


在下文中一共展示了Logger.info方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: delete_duplicates_recursively

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
 def delete_duplicates_recursively(self):
     """Delete duplicate files found below ``self.search_dir``.

     Files carrying the configured run-output extension are preserved,
     since those are analysis results rather than crash inputs.
     """
     Logger.info("Removing duplicates in", self.search_dir)
     for dup_path in self.find_duplicate_contents(self.search_dir):
         # Never delete run-output files, even if their content repeats.
         if dup_path.endswith(self.config.run_extension):
             continue
         Logger.info("Deleting the duplicate file:", dup_path)
         os.remove(dup_path)
開發者ID:andigena,項目名稱:afl-crash-analyzer,代碼行數:9,代碼來源:FileDuplicateFinder.py

示例2: remove_readmes

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
 def remove_readmes(self):
     """Walk ``self.search_dir`` and delete every README.txt found."""
     for dirpath, _, names in os.walk(self.search_dir):
         for name in names:
             if name != "README.txt":
                 continue
             target = os.path.join(dirpath, name)
             Logger.info("Deleting the file:", target)
             os.remove(target)
開發者ID:andigena,項目名稱:afl-crash-analyzer,代碼行數:9,代碼來源:FileDuplicateFinder.py

示例3: divide_by_signal

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
 def divide_by_signal(self, confirmation_loops=0, function=shutil.copyfile):
     """Run the target once per crash file and sort files into per-signal folders.

     confirmation_loops: number of additional identical runs required before
         a signal is trusted; if a rerun disagrees, the file is filed under
         SignalFinder.VARYING_SIGNAL.
     function: callable applied as function(src, dst) to place each file into
         its signal folder (defaults to shutil.copyfile).

     NOTE(review): ``confirmation_loops`` is decremented in place and never
     reset per file, so once the first file has been confirmed the remaining
     files get no confirmation reruns -- verify whether that is intended.
     """
     if self.output_dir is not None and not os.path.exists(self.output_dir):
         os.mkdir(self.output_dir)
     ex = Executer(self.config)
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             # Run-output files are results of earlier runs, not crash inputs.
             if filename.endswith(self.config.run_extension):
                 continue
             filepath = os.path.join( path, filename )
             command = self.config.get_command_line(self.binary_to_use, filepath)
             Logger.debug("Executing:", command, debug_level=4)
             Logger.busy()
             signal = ex.get_signal_for_run(command, env=self.config.env)
             # Re-run to confirm the signal is deterministic for this input.
             while confirmation_loops > 0:
                 Logger.busy()
                 new_signal = ex.get_signal_for_run(command, env=self.config.env)
                 if new_signal == signal:
                     signal = new_signal
                     confirmation_loops -= 1
                 else:
                     Logger.info("Detected varying return codes for exactly the same run")
                     signal = SignalFinder.VARYING_SIGNAL
                     break
             Logger.debug("We consider signal %i for input file %s" % (signal, filename), debug_level=5)
             destination_dir = self.get_folder_path_for_signal(signal)
             if not os.path.exists(destination_dir):
                 os.mkdir(destination_dir)
             function(filepath, os.path.join(destination_dir, filename))
開發者ID:chubbymaggie,項目名稱:afl-crash-analyzer,代碼行數:30,代碼來源:SignalFinder.py

示例4: _handle_alarm

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
 def _handle_alarm(self, signum, frame):
     """SIGALRM handler used while waiting on a child process.

     Marks the run as timed out and kills the child so that the pending
     communicate() call returns.
     """
     # Record the timeout before attempting the kill.
     self.timeout_flag = True
     try:
         self.current_process.kill()
     except OSError as err:
         # The child may have exited on its own just before the kill.
         Logger.info("Kill failed. Sometimes the process exactly exits before we try to kill it... coward. Nothing to worry about.", err)
開發者ID:andigena,項目名稱:afl-crash-analyzer,代碼行數:10,代碼來源:Executer.py

示例5: rename_all_files

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
 def rename_all_files(self, extension=""):
     """Rename every file below ``self.search_dir`` to a zero-padded
     sequence number starting at 1, optionally appending ``extension``.

     Fixes over the original:
     - the format string is computed once instead of once per file
     - an empty search directory no longer raises NameError (the original
       referenced ``new_filename`` after the loop without it being bound)
     """
     # max_digets is the (misspelled) project-config attribute; +4 gives headroom.
     formatstr = "%0" + str(self.config.max_digets + 4) + "d"
     counter = 1
     new_filename = None
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             new_filename = (formatstr % counter) + extension
             shutil.move(os.path.join(path, filename), os.path.join(path, new_filename))
             counter += 1
     if new_filename is None:
         Logger.info("No files found in", self.search_dir, "- nothing was renamed")
     else:
         Logger.info("Renamed all files starting from 1, last file was named", new_filename)
開發者ID:andigena,項目名稱:afl-crash-analyzer,代碼行數:11,代碼來源:FileDuplicateFinder.py

示例6: minimize_testcases

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
 def minimize_testcases(self):
     """Run afl-tmin on every crash file below the search directory,
     writing minimized versions into ``self.output_dir``."""
     runner = Executer(self.config)
     for dirpath, _, names in os.walk(self.search_dir):
         for name in names:
             # Skip files that are run outputs, not crash inputs.
             if name.endswith(self.config.run_extension):
                 continue
             Logger.info("Minimizing", name)
             src = os.path.join(dirpath, name)
             dst = os.path.join(self.output_dir, name)
             tmin_cmd = self.config.get_afl_tmin_command_line(src, dst)
             Logger.debug("Executing:", tmin_cmd)
             result = runner.get_signal_for_run(tmin_cmd, self.config.run_timeout_tmin, env=self.config.env)
             if result == SignalFinder.TIMEOUT_SIGNAL:
                 Logger.error("Minimizing this file took too long, aborted")
開發者ID:hotelzululima,項目名稱:afl-crash-analyzer,代碼行數:15,代碼來源:InputMinimizer.py

示例7: run_forest_run

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
    def run_forest_run(self):
        """Overwrite every offset of each crash file with the 'lucky' hex
        values, rerun the target under gdb, and record which offsets lead
        to new crashing EIPs; files where EIP lands inside the lucky range
        (i.e. we control EIP) are saved to ``self.output_dir``.

        Fixes over the original: file handles are now closed
        deterministically (the original used bare ``file(...).read()`` /
        ``.write()`` and leaked descriptors on every iteration), and the
        membership / bounds checks use idiomatic ``not in`` and chained
        comparison forms. Behavior is otherwise unchanged.
        """
        if self.output_dir is not None and not os.path.exists(self.output_dir):
            os.mkdir(self.output_dir)
        new_file_path = os.path.join(self.config.tmp_dir, "feelingLucky.txt")
        cmd = self.config.get_gdb_command_line(self.config.get_most_standard_binary(), new_file_path, self.gdb_script_path)
        for path, _, files in os.walk(self.search_dir):
            for filename in files:
                eips = []
                indexes = []
                # Run-output files are not crash inputs.
                if filename.endswith(self.config.run_extension):
                    continue
                Logger.info("Trying my luck with", filename)
                filepath = os.path.join(path, filename)
                with open(filepath, "rb") as crash_file:
                    orig_file = crash_file.read()
                Logger.debug(filepath, debug_level=4)
                for index in xrange(0, len(orig_file) - len(self.lucky_hex_values)):
                    # Splice the lucky hex values in at the current offset.
                    new_file = orig_file[:index] + self.lucky_hex_values + orig_file[index + len(self.lucky_hex_values):]
                    with open(new_file_path, "w") as tmp_file:
                        tmp_file.write(new_file)
                    crash_eip = self.get_crash_eip(cmd)
                    if crash_eip:
                        if crash_eip not in eips:
                            eips.append(crash_eip)
                            indexes.append(index)
                        # EIP within the lucky range means we control it.
                        if self.lucky_hex_values <= crash_eip <= self.lucky_hex_values_upper_bound:
                            o = os.path.join(self.output_dir, filename)
                            Logger.info("WTF, we actually were able to control EIP! See file ", o)
                            with open(o, "w") as out_file:
                                out_file.write(new_file)
                Logger.info("Seen the following crashing eips for this file:", list_as_intervals(eips, as_hex=True))
                Logger.info("File indexes that lead to different crashes for this file:", list_as_intervals(indexes))
開發者ID:chubbymaggie,項目名稱:afl-crash-analyzer,代碼行數:34,代碼來源:FeelingLuckyExploiter.py

示例8: rename_same_name_files

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
 def rename_same_name_files(self):
     """Make every filename below ``self.search_dir`` unique across all
     subdirectories by appending a zero-padded counter to collisions.

     Improvements over the original: seen names are tracked in a set
     (O(1) membership test) instead of a list (O(n) per test, O(n^2)
     overall), and the format string is computed once outside the loops.
     Behavior is otherwise unchanged.
     """
     # max_digets is the (misspelled) project-config attribute.
     formatstr = "%0" + str(self.config.max_digets) + "d"
     seen = set()
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             counter = 1
             new_filename = filename
             name, extension = os.path.splitext(filename)
             # Bump the counter until the candidate name is unused.
             while new_filename in seen:
                 new_filename = name + "_" + (formatstr % counter) + extension
                 counter += 1
             if new_filename != filename:
                 Logger.info("Found filename that is already taken, renaming", filename, "to", new_filename)
                 shutil.move(os.path.join(path, filename), os.path.join(path, new_filename))
             seen.add(new_filename)
開發者ID:andigena,項目名稱:afl-crash-analyzer,代碼行數:18,代碼來源:FileDuplicateFinder.py

示例9: _handle_sigttou

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
 def _handle_sigttou(self, signum, frame):
     """SIGTTOU handler: flag the event and kill the current child process,
     preventing the main Python process from being stopped (suspended)."""
     #Had some issues that when memory corruptions occured in a subprocess
     #(no matter if shielded by multiprocess and subprocess module), 
     #that a SIGTTOU was sent to the entire Python main process.
     #According to https://en.wikipedia.org/wiki/SIGTTOU this
     #results in the process being stopped (and it looks like SIGSTP on the cmd):
     #[1]+  Stopped                 ./AflCrashAnalyzer.py
     #Of course we don't want that. Debugging was hard but then
     #realized after this program was stopped:
     #$ echo $?
     #150
     #So that's SIGTTOU on Linux at least.
     #This handler will prevent the process to stop.
     self.sigttou_flag = True
     try:
         self.current_process.kill()
     except OSError as ose:
         # The child may already have exited between the signal and the kill.
         Logger.info("Kill failed. Sometimes the process exactly exits before we try to kill it... coward. Nothing to worry about.", ose)
開發者ID:andigena,項目名稱:afl-crash-analyzer,代碼行數:20,代碼來源:Executer.py

示例10: analyze_output_and_exploitability

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
def analyze_output_and_exploitability(config, signal_finder, uninteresting_signals, message_prefix=""):
    """For every interesting signal folder, gather program output (stdout,
    stderr, gdb, ASAN) and run the exploitability classification, skipping
    folders that already contain results from a previous run."""
    for signal, signal_folder in signal_finder.get_folder_paths_for_signals_if_exist(uninteresting_signals):
        already_done = False
        for classification in ExploitableGdbPlugin.get_classifications():
            result_dir = os.path.join(signal_folder, classification)
            if os.path.exists(result_dir):
                Logger.warning("Seems like there are already exploitability analysis results, skipping. If you want to rerun: rm -r %s" % result_dir)
                already_done = True
        if already_done:
            continue
        Logger.info(message_prefix, "Discover stdout, stderr, gdb and ASAN output (signal %s)" % signal)
        run_output_glob = os.path.join(signal_folder, "*" + config.run_extension)
        if glob.glob(run_output_glob):
            Logger.warning("Seems like there are already results from running the binaries, skipping. If you want to rerun: rm", run_output_glob)
        else:
            OutputFinder(config, signal_folder).do_sane_output_runs()
        Logger.info(message_prefix, "Analyzing exploitability (signal %s)" % signal)
        ExploitableGdbPlugin(config, signal_folder).divide_by_exploitability()
開發者ID:LucaBongiorni,項目名稱:afl-crash-analyzer,代碼行數:21,代碼來源:AflCrashAnalyzer.py

示例11: sanity_check

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
    def sanity_check(self):
        """Verify that the configured binaries and directories exist, create
        the output and tmp directories if missing, and write the gdb script."""
        def require_readable(binary, description):
            # Optional binaries: None means "not configured", which is fine.
            if binary is not None and not os.access(binary, os.R_OK):
                Logger.fatal(description + " not accessible:", binary + ". Did you configure the CrashAnalysisConfig class?")

        # The instrumented binary is mandatory, so it is checked unconditionally.
        if not os.access(self.target_binary_instrumented, os.R_OK):
            Logger.fatal("AFL target binary not accessible:", self.target_binary_instrumented + ". Did you configure the CrashAnalysisConfig class?")
        require_readable(self.target_binary_plain, "Target binary")
        require_readable(self.target_binary_asan, "ASAN target binary")
        if not os.access(self.main_dir, os.F_OK):
            Logger.fatal("Your main_dir doesn't exist:", self.main_dir)
        if not os.access(self.original_crashes_directory, os.F_OK):
            Logger.fatal("Your original_crashes_directory doesn't exist:", self.original_crashes_directory)

        if os.path.exists(self.output_dir):
            Logger.warning("Your output directory already exists, did you want to move it before running?", self.output_dir)
        else:
            Logger.info("Output folder will be:", self.output_dir)
            os.mkdir(self.output_dir)
        if not os.path.exists(self.tmp_dir):
            os.mkdir(self.tmp_dir)
        self.prepare_gdb_script()
開發者ID:andigena,項目名稱:afl-crash-analyzer,代碼行數:25,代碼來源:CrashAnalysisConfig.py

示例12: do_sane_output_runs

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
 def do_sane_output_runs(self):
     if self.output_dir is not None and not os.path.exists(self.output_dir):
         os.mkdir(self.output_dir)
     if self.config.target_binary_plain is None and self.config.target_binary_asan is None:
         Logger.warning("You didn't specify any non-instrumented binary, running tests with instrumented binaries")
         self.instrumented_combined_stdout_stderr()
         self.instrumented_combined_stdout_stderr(gdb_run=True)
     else:
         Logger.info("Plain run")
         self.plain_combined_stdout_stderr()
         Logger.info("Plain gdb run")
         self.plain_combined_stdout_stderr(gdb_run=True)
         Logger.info("ASAN run")
         self.asan_combined_stdout_stderr()
開發者ID:andigena,項目名稱:afl-crash-analyzer,代碼行數:16,代碼來源:OutputFinder.py

示例13: main

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
def main():
    #Read the README before you start.
    
    Logger.info("Setting up configuration")

    gdb_script_64bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $rip, $rip+16:\n"
disassemble $rip, $rip+16
"""
    gdb_script_32bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $eip, $eip+16:\n"
disassemble $eip, $eip+16
"""
    where_this_python_script_lives = os.path.dirname(os.path.realpath(__file__))
    
    gdb_command = "gdb"
    gdb_command_osx = "/opt/local/bin/gdb-apple"
    
    config_gm = CrashAnalysisConfig(where_this_python_script_lives, 
                            target_binary_instrumented=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-afl/utilities/gm", 
                            args_before="identify", 
                            args_after="", 
                            target_binary_plain=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-plain/utilities/gm", 
                            target_binary_asan=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-asan/utilities/gm",
                            env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1"},
                            crash_dir=where_this_python_script_lives+"/test-cases/gm/crashes",
                            gdb_script=gdb_script_32bit,
                            gdb_binary=gdb_command
                            )
    
#    config_ffmpeg = CrashAnalysisConfig(where_this_python_script_lives, 
#                        target_binary_instrumented=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-afl/ffmpeg", 
#                        args_before="-i", 
#                        args_after="-loglevel quiet", 
#                        target_binary_plain=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-plain/ffmpeg", 
##                        target_binary_asan=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-asan/ffmpeg",
#                        env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1"},
#                        crash_dir=where_this_python_script_lives+"/test-cases/ffmpeg/crashes",
#                        gdb_script=gdb_script_32bit,
#                        gdb_binary=gdb_command
#                        )

    #
    Logger.info("Input crashes directory operations")
    #
    
    Logger.info("Removing README.txt files")
    fdf = FileDuplicateFinder(config_gm, config_gm.original_crashes_directory)
    fdf.remove_readmes()
    
    Logger.info("Removing duplicates from original crashes folder (same file size + MD5)")
    fdf.delete_duplicates_recursively()
    
    Logger.info("Renaming files from original crashes folder so that the filename is a unique identifier. This allows us to copy all crash files into one directory (eg. for tmin output) if necessary, without name collisions")
    fdf.rename_same_name_files()
    
    #
    Logger.info("Finding interesting signals (all crashes)")
    #
    sf_all_crashes = SignalFinder(config_gm)
    if os.path.exists(config_gm.default_signal_directory):
        Logger.warning("Seems like all crashes were already categorized by signal, skipping. If you want to rerun: rm -r", config_gm.default_signal_directory)
    else:
        Logger.debug("Dividing files to output folder according to their signal")
        sf_all_crashes.divide_by_signal()
    
    #Interestings signals: negative on OSX, 129 and above for Linux
    #Uninteresting signals: We usually don't care about signals 0, 1, 2, etc. up to 128
    uninteresting_signals = range(0,129)
    
    analyze_output_and_exploitability(config_gm, sf_all_crashes, uninteresting_signals, message_prefix="Interesting signals /")
        
    Logger.info("Interesting signals / Minimizing input (afl-tmin)")
    if os.path.exists(config_gm.default_minimized_crashes_directory):
        Logger.warning("Seems like crashes were already minimized, skipping. If you want to rerun: rm -r", config_gm.default_minimized_crashes_directory)
    else:
        for signal, signal_folder in sf_all_crashes.get_folder_paths_for_signals_if_exist(uninteresting_signals):
            Logger.debug("Minimizing inputs resulting in signal %i" % signal)
            im = InputMinimizer(config_gm, signal_folder)
            im.minimize_testcases()
        
        Logger.info("Interesting signals / Minimized inputs / Deduplication")
        fdf_minimized = FileDuplicateFinder(config_gm, config_gm.default_minimized_crashes_directory)
        fdf_minimized.delete_duplicates_recursively()
#.........這裏部分代碼省略.........
開發者ID:LucaBongiorni,項目名稱:afl-crash-analyzer,代碼行數:103,代碼來源:AflCrashAnalyzer.py

示例14: main

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
def main():
    #Read the README before you start.
    
    Logger.info("Setting up configuration")

    gdb_script_64bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $rip, $rip+16:\n"
disassemble $rip, $rip+16
printf "[+] list\n"
list
"""
    gdb_script_32bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $eip, $eip+16:\n"
disassemble $eip, $eip+16
printf "[+] list\n"
list
"""

    #TODO: Make sure gdb script doesn't abort on error
    #ignoring errors in gdb scripts: http://stackoverflow.com/questions/17923865/gdb-stops-in-a-command-file-if-there-is-an-error-how-to-continue-despite-the-er
    gdb_script_32bit_noerror = r"""python
def my_ignore_errors(arg):
  try:
    gdb.execute("print \"" + "Executing command: " + arg + "\"")
    gdb.execute (arg)
  except:
    gdb.execute("print \"" + "ERROR: " + arg + "\"")

my_ignore_errors("p p")
my_ignore_errors("p p->v1")
gdb.execute("quit")
    """

    where_this_python_script_lives = os.path.dirname(os.path.realpath(__file__))
    
    gdb_command = "/usr/bin/gdb"
    #gdb_command_osx = "/opt/local/bin/gdb-apple"
    
    #TODO: For some reason the ASAN environment variables are not correctly set when given to the subprocess module... so let's just set it in parent process already:
    os.environ['ASAN_SYMBOLIZER_PATH'] = "/usr/bin/llvm-symbolizer-3.4"
    os.environ['ASAN_OPTIONS'] = "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1:abort_on_error=1"
    env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1:abort_on_error=1"}
    
    ###
    #This import decides which testcase/binary we want to run!
    ###
    from testcases.gm.Config import create_config
    #from testcases.ffmpeg.Config import create_config
    #see CrashAnalysisConfig for more options that get passed on by create_config
    chosen_config = create_config(where_this_python_script_lives, env=env, gdb_script=gdb_script_32bit, gdb_binary=gdb_command)
    chosen_config.sanity_check()
    
    #
    Logger.info("Input crashes directory operations")
    #
    
    Logger.info("Removing README.txt files")
    fdf = FileDuplicateFinder(chosen_config, chosen_config.original_crashes_directory)
    fdf.remove_readmes()
    
    Logger.info("Removing duplicates from original crashes folder (same file size + MD5)")
    fdf.delete_duplicates_recursively()
    
    Logger.info("Renaming files from original crashes folder so that the filename is a unique identifier. This allows us to copy all crash files into one directory (eg. for tmin output) if necessary, without name collisions")
    fdf.rename_same_name_files()
    #OR:
    #Logger.info("Renaming all files to numeric values, as some programs prefer no special chars in filenames and might require a specific file extension")
    #fdf.rename_all_files(".png")
    
    #
    Logger.info("Finding interesting signals (all crashes)")
    #
    sf_all_crashes = SignalFinder(chosen_config)
    if os.path.exists(chosen_config.default_signal_directory):
        Logger.warning("Seems like all crashes were already categorized by signal, skipping. If you want to rerun: rm -r", chosen_config.default_signal_directory)
    else:
        Logger.debug("Dividing files to output folder according to their signal")
        sf_all_crashes.divide_by_signal()
    
    #Interestings signals: negative on OSX, 129 and above sometimes for Linux on the shell (depending on used mechanism)
    #Uninteresting signals: We usually don't care about signals 0, 1, 2, etc. up to 128
    uninteresting_signals = range(0, 129)
    
    analyze_output_and_exploitability(chosen_config, sf_all_crashes, uninteresting_signals, message_prefix="Interesting signals /")
#.........這裏部分代碼省略.........
開發者ID:andigena,項目名稱:afl-crash-analyzer,代碼行數:103,代碼來源:AflCrashAnalyzer.py

示例15: main

# 需要導入模塊: from utilities.Logger import Logger [as 別名]
# 或者: from utilities.Logger.Logger import info [as 別名]
def main():
    #Read the README before you start.
    
    Logger.info("Setting up configuration")

    gdb_script_64bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $rip, $rip+16:\n"
disassemble $rip, $rip+16
"""
    gdb_script_32bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $eip, $eip+16:\n"
disassemble $eip, $eip+16
"""
    where_this_python_script_lives = os.path.dirname(os.path.realpath(__file__))
    
    gdb_command = "gdb"
    gdb_command_osx = "/opt/local/bin/gdb-apple"
    
    config_gm = CrashAnalysisConfig(where_this_python_script_lives, 
                            target_binary_instrumented=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-afl/utilities/gm", 
                            args_before="identify", 
                            args_after="", 
                            target_binary_plain=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-plain/utilities/gm", 
                            target_binary_asan=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-asan/utilities/gm",
                            env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1"},
                            crash_dir=where_this_python_script_lives+"/test-cases/gm/crashes",
                            gdb_script=gdb_script_32bit,
                            gdb_binary=gdb_command
                            )
    
#    config_ffmpeg = CrashAnalysisConfig(where_this_python_script_lives, 
#                        target_binary_instrumented=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-afl/ffmpeg", 
#                        args_before="-i", 
#                        args_after="-loglevel quiet", 
#                        target_binary_plain=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-plain/ffmpeg", 
##                        target_binary_asan=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-asan/ffmpeg",
#                        env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1"},
#                        crash_dir=where_this_python_script_lives+"/test-cases/ffmpeg/crashes",
#                        gdb_script=gdb_script_32bit,
#                        gdb_binary=gdb_command
#                        )

    #
    Logger.info("Input crashes directory operations")
    #
    
    Logger.info("Removing README.txt files")
    fdf = FileDuplicateFinder(config_gm)
    fdf.remove_readmes(config_gm.original_crashes_directory)
    
    Logger.info("Removing duplicates from original crashes folder (same file size + MD5)")
    fdf.delete_duplicates_recursively(config_gm.original_crashes_directory)
    
    Logger.info("Renaming files from original crashes folder so that the filename is a unique identifier. This allows us to copy all crash files into one directory (eg. for tmin output) if necessary, without name collisions")
    fdf.rename_same_name_files(config_gm.original_crashes_directory)
    
    #
    Logger.info("Finding signals for all crash files")
    #
    sf = SignalFinder(config_gm)
    if os.path.exists(sf.output_dir):
        Logger.warning("Seems like all crashes were already categorized by signal, skipping. Remove output directory or remove this folder if you want to rerun:", sf.output_dir)
    else:
        Logger.info("Dividing files to output folder according to their signal")
        os.mkdir(sf.output_dir)
        sf.divide_by_signal(0)
        
    
    #
    Logger.info("Running binaries to discover stdout/stderr, gdb and ASAN output for crash files that result in interesting signals")
    #
    #signals, negative on OSX, 129 and above for Linux. No harm if we go on with all of them.
    signals = (-4, -6, -11, 132, 134, 139)
    get_output_for_signals(config_gm, sf, signals)

    
    #
    Logger.info("Minimizing input files that result in interesting signals (and removing duplicates from the results)")
    #
    im = InputMinimizer(config_gm)
    if os.path.exists(im.output_dir):
        Logger.warning("Seems like minimized crashes were already categorized by signal, skipping. Remove output directory or remove this folder if you want to rerun:", im.output_dir)
    else:
        os.mkdir(im.output_dir)
#.........這裏部分代碼省略.........
開發者ID:hotelzululima,項目名稱:afl-crash-analyzer,代碼行數:103,代碼來源:AflCrashAnalyzer.py


注:本文中的utilities.Logger.Logger.info方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。