当前位置: 首页>>代码示例>>Python>>正文


Python Database.set_status方法代码示例

本文整理汇总了Python中lib.cuckoo.core.database.Database.set_status方法的典型用法代码示例。如果您正苦于以下问题:Python Database.set_status方法的具体用法?Python Database.set_status怎么用?Python Database.set_status使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在lib.cuckoo.core.database.Database的用法示例。


在下文中一共展示了Database.set_status方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: autoprocess

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]
def autoprocess(parallel=1):
    """Continuously pick completed tasks from the database and generate
    their reports on a pool of worker processes.

    Runs until cfg.cuckoo.max_analysis_count tasks have been queued
    (forever when that setting is zero/empty).

    @param parallel: number of worker processes and maximum number of
        in-flight report jobs.
    """
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()
    pool = multiprocessing.Pool(parallel)
    pending_results = []

    # CAUTION - big ugly loop ahead.
    while count < maxcount or not maxcount:

        # Pending_results maintenance: reap workers that have finished.
        for ar, tid, target, copy_path in list(pending_results):
            if ar.ready():
                if ar.successful():
                    log.info("Task #%d: reports generation completed", tid)
                else:
                    try:
                        ar.get()
                    except Exception:
                        # Narrowed from a bare "except:" so that
                        # KeyboardInterrupt/SystemExit still propagate.
                        log.exception("Exception when processing task ID %u.", tid)
                        db.set_status(tid, TASK_FAILED_PROCESSING)

                pending_results.remove((ar, tid, target, copy_path))

        # If still full, don't add more (necessary despite pool).
        if len(pending_results) >= parallel:
            time.sleep(1)
            continue

        # If we're here, getting parallel tasks should at least
        # have one we don't know.
        tasks = db.list_tasks(status=TASK_COMPLETED, limit=parallel,
                              order_by="completed_on asc")

        # For loop to add only one per outer iteration, so maxcount is
        # never overshot.
        for task in tasks:
            # Not-so-efficient lock: skip tasks already in flight.
            if task.id in [tid for ar, tid, target, copy_path
                           in pending_results]:
                continue

            log.info("Processing analysis data for Task #%d", task.id)

            sample = db.view_sample(task.sample_id)

            copy_path = os.path.join(CUCKOO_ROOT, "storage",
                                     "binaries", sample.sha256)

            args = task.id, task.target, copy_path
            kwargs = dict(report=True, auto=True)
            result = pool.apply_async(process, args, kwargs)

            pending_results.append((result, task.id, task.target, copy_path))

            count += 1
            break

        # If there wasn't anything to add, sleep tight.
        if not tasks:
            time.sleep(5)
开发者ID:drptbl,项目名称:cuckoo,代码行数:62,代码来源:process.py

示例2: instance

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]
def instance(instance):
    """Report completed tasks assigned to this processing instance.

    Loops until cfg.cuckoo.max_analysis_count tasks have been reported
    (when that setting is non-zero) or until this instance disappears
    from the scheduler's processing-task table.

    @param instance: name of this processing instance.
    """
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()

    try:
        while not maxcount or count != maxcount:
            if maxcount:
                limit = min(maxcount - count, 32)
            else:
                limit = 32

            tps = db.list_processing_tasks(instance=instance, count=limit)

            # No new tasks, we can wait a small while before we query again
            # for new tasks.
            if not tps:
                # Just make sure this instance is still available - it is not
                # if the scheduler has been restarted. In that case there will
                # be no records at all for this processing task.
                if not db.count_processing_tasks(instance):
                    log.info("This instance (%s) is not available anymore, "
                             "stopping.", instance)
                    break

                time.sleep(1)
                continue

            for tp in tps:
                task = db.view_task(tp.task_id)
                if task.status != TASK_COMPLETED:
                    log.warning("Task #%d: status (%s) is not completed, "
                                "ignoring", task.id, task.status)
                    continue

                log.info("Task #%d: reporting task", task.id)

                if task.category == "file":
                    sample = db.view_sample(task.sample_id)

                    copy_path = os.path.join(CUCKOO_ROOT, "storage",
                                             "binaries", sample.sha256)
                else:
                    copy_path = None

                try:
                    process(task.target, copy_path, task=task.to_dict(),
                            report=True, auto=True)
                    db.set_status(task.id, TASK_REPORTED)
                except Exception as e:
                    log.exception("Task #%d: error reporting: %s", task.id, e)
                    db.set_status(task.id, TASK_FAILED_PROCESSING)

                db.delete_processing_task(tp)

                # BUGFIX: count was never incremented, so the maxcount
                # limit (and the per-batch "limit" computation above)
                # never actually took effect.
                count += 1
    except KeyboardInterrupt:
        raise
    except Exception as e:
        log.exception("Caught unknown exception: %s", e)
开发者ID:LittleHann,项目名称:cuckoo-linux,代码行数:60,代码来源:process2.py

示例3: unschedule

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]
def unschedule(request, task_id):
    """Move a scheduled task back to the unscheduled state and render a
    confirmation page."""
    db = Database()
    if db.view_task(task_id).status == TASK_SCHEDULED:
        db.set_status(task_id, TASK_UNSCHEDULED)

    context = {"message": "Task unscheduled, thanks for all the fish."}
    return render_to_response("success.html", context,
            context_instance=RequestContext(request))
开发者ID:jbremer,项目名称:longcuckoo,代码行数:11,代码来源:views.py

示例4: autoprocess

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]
def autoprocess(parallel=1):
    cfg = Config()
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()
    pool = multiprocessing.Pool(parallel)
    pending_results = []

    # CAUTION - big ugly loop ahead
    while count < maxcount or not maxcount:

        # pending_results maintenance
        for ar, tid in list(pending_results):
            if ar.ready():
                if ar.successful():
                    print "subtask success", tid, "returnvalue", ar.get()
                    log.info("Task #%d: reports generation completed", tid)
                else:
                    try: ar.get()
                    except:
                        log.exception("Exception when processing task ID %u.", tid)
                        db.set_status(tid, TASK_FAILED_PROCESSING)

                pending_results.remove((ar, tid))

        # if still full, don't add more (necessary despite pool)
        if len(pending_results) >= parallel:
            time.sleep(1)
            continue

        # if we're here, getting #parallel tasks should at least have one we don't know
        tasks = db.list_tasks(status=TASK_COMPLETED, limit=parallel)

        # for loop to add only one, nice
        for task in tasks:
            # not-so-efficient lock
            if task.id in [tid for ar, tid in pending_results]:
                continue

            log.info("Processing analysis data for Task #%d", task.id)

            result = pool.apply_async(do, (task.id,), {"report": True})                
            pending_results.append((result, task.id))

            count += 1
            break

        # if there wasn't anything to add, sleep tight
        if not tasks:
            time.sleep(5)
开发者ID:DjDarthyGamer,项目名称:cuckoo,代码行数:52,代码来源:process.py

示例5: init_tasks

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]
def init_tasks():
    """Check tasks and reschedule uncompleted ones."""
    db = Database()
    cfg = Config()

    log.debug("Checking for locked tasks...")

    for stale in db.list_tasks(status=TASK_RUNNING):
        if not cfg.cuckoo.reschedule:
            # Rescheduling disabled: mark the interrupted run as failed.
            db.set_status(stale.id, TASK_FAILED_ANALYSIS)
            log.info("Updated running task ID {0} status to failed_analysis".format(stale.id))
        else:
            db.reschedule(stale.id)
            log.info("Rescheduled task with ID {0} and " "target {1}".format(stale.id, stale.target))
开发者ID:open-nsm,项目名称:dockoo-cuckoo,代码行数:17,代码来源:startup.py

示例6: init_tasks

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]
def init_tasks():
    """Check tasks and reschedule uncompleted ones."""
    db = Database()
    cfg = Config()

    log.debug("Checking for locked tasks..")
    stale_tasks = db.list_tasks(status=TASK_RUNNING)
    for task in stale_tasks:
        if not cfg.cuckoo.reschedule:
            # Rescheduling disabled: mark the interrupted run as failed.
            db.set_status(task.id, TASK_FAILED_ANALYSIS)
            log.info("Updated running task ID {0} status to failed_analysis".format(task.id))
            continue

        task_id = db.reschedule(task.id)
        log.info(
            "Rescheduled task with ID %s and target %s: task #%s",
            task.id, task.target, task_id
        )

    # Pending service tasks cannot be resumed after a restart; fail them.
    log.debug("Checking for pending service tasks..")
    pending_services = db.list_tasks(status=TASK_PENDING, category="service")
    for task in pending_services:
        db.set_status(task.id, TASK_FAILED_ANALYSIS)
开发者ID:awest1339,项目名称:cuckoo,代码行数:22,代码来源:startup.py

示例7: instance

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]
def instance(instance):
    """Report tasks fetched through the database-backed processing queue.

    Loops until cfg.cuckoo.max_analysis_count tasks have been reported
    (when that setting is non-zero). Requires PostgreSQL.

    @param instance: name of this processing instance.
    """
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()

    # There's a good chance MySQL also works, though.
    if db.engine.name != "postgresql":
        sys.exit("Due to SQL limitations utils/process2.py currently only "
                 "supports PostgreSQL.")

    try:
        while not maxcount or count != maxcount:
            task_id = db.processing_get_task(instance)

            # Wait a small while before trying to fetch a new task.
            if task_id is None:
                time.sleep(1)
                continue

            task = db.view_task(task_id)

            log.info("Task #%d: reporting task", task.id)

            if task.category == "file":
                sample = db.view_sample(task.sample_id)

                copy_path = os.path.join(CUCKOO_ROOT, "storage",
                                         "binaries", sample.sha256)
            else:
                copy_path = None

            try:
                process(task.target, copy_path, task=task.to_dict(),
                        report=True, auto=True)
                db.set_status(task.id, TASK_REPORTED)
            except Exception as e:
                log.exception("Task #%d: error reporting: %s", task.id, e)
                db.set_status(task.id, TASK_FAILED_PROCESSING)

            # BUGFIX: count was never incremented, so a configured
            # max_analysis_count was silently ignored and the loop
            # condition above could never terminate on it.
            count += 1
    except Exception as e:
        log.exception("Caught unknown exception: %s", e)
开发者ID:AntiRootkit,项目名称:cuckoo,代码行数:42,代码来源:process2.py

示例8: main

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]
def main():
    """Command-line entry point: process one analysis by ID, or loop over
    completed tasks when the special ID "auto" is given."""
    parser = argparse.ArgumentParser()
    parser.add_argument("id", type=str, help="ID of the analysis to process")
    parser.add_argument("-d", "--debug", help="Display debug messages", action="store_true", required=False)
    parser.add_argument("-r", "--report", help="Re-generate report", action="store_true", required=False)
    args = parser.parse_args()

    if args.debug:
        log.setLevel(logging.DEBUG)

    init_modules()

    if args.id == "auto":
        cfg = Config()
        maxcount = cfg.cuckoo.max_analysis_count
        count = 0
        db = Database()
        while count < maxcount or not maxcount:
            tasks = db.list_tasks(status=TASK_COMPLETED, limit=1)

            for task in tasks:
                log.info("Processing analysis data for Task #%d", task.id)
                try:
                    do(task.id, report=True)
                except Exception:
                    # Narrowed from a bare "except:" so Ctrl-C actually
                    # stops the auto-processing loop.
                    log.exception("Exception when processing a task.")
                    db.set_status(task.id, TASK_FAILED_PROCESSING)
                else:
                    log.info("Task #%d: reports generation completed", task.id)

                count += 1

            if not tasks:
                time.sleep(5)

    else:
        do(args.id, report=args.report)
开发者ID:1000rub,项目名称:cuckoo,代码行数:39,代码来源:process.py

示例9: AnalysisManager

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]

#.........这里部分代码省略.........
                # not turned dead yet.
                machinery.release(self.machine.label)
            except CuckooMachineError as e:
                log.error("Unable to release machine %s, reason %s. "
                          "You might need to restore it manually.",
                          self.machine.label, e)

        return succeeded

    def process_results(self):
        """Process the analysis results and generate the enabled reports."""
        results = RunProcessing(task=self.task.to_dict()).run()
        RunSignatures(results=results).run()
        RunReporting(task=self.task.to_dict(), results=results).run()

        target_is_file = self.task.category == "file"

        # If the target is a file and the user enabled the option,
        # delete the original copy.
        if target_is_file and self.cfg.cuckoo.delete_original:
            if os.path.exists(self.task.target):
                try:
                    os.remove(self.task.target)
                except OSError as e:
                    log.error("Unable to delete original file at path "
                              "\"%s\": %s", self.task.target, e)
            else:
                log.warning("Original file does not exist anymore: \"%s\": "
                            "File not found.", self.task.target)

        # If the target is a file and the user enabled the delete copy of
        # the binary option, then delete the copy.
        if target_is_file and self.cfg.cuckoo.delete_bin_copy:
            if os.path.exists(self.binary):
                try:
                    os.remove(self.binary)
                except OSError as e:
                    log.error("Unable to delete the copy of the original file at path \"%s\": %s", self.binary, e)
            else:
                log.warning("Copy of the original file does not exist anymore: \"%s\": File not found", self.binary)

        log.info("Task #%d: reports generation completed (path=%s)",
                 self.task.id, self.storage)

        return True

    def run(self):
        """Run manager thread: drive one analysis from launch to reporting."""
        global active_analysis_count
        active_analysis_count += 1
        try:
            while True:
                try:
                    success = self.launch_analysis()
                except CuckooDeadMachine:
                    # The machine turned dead; retry on another machine.
                    continue

                break

            self.db.set_status(self.task.id, TASK_COMPLETED)

            # If the task is still available in the database, update our task
            # variable with what's in the database, as otherwise we're missing
            # out on the status and completed_on change. This would then in
            # turn thrown an exception in the analysisinfo processing module.
            self.task = self.db.view_task(self.task.id) or self.task

            log.debug("Released database task #%d with status %s",
                      self.task.id, success)

            if self.cfg.cuckoo.process_results:
                self.process_results()
                self.db.set_status(self.task.id, TASK_REPORTED)

            # We make a symbolic link ("latest") which links to the latest
            # analysis - this is useful for debugging purposes. This is only
            # supported under systems that support symbolic links.
            if hasattr(os, "symlink"):
                latest = os.path.join(CUCKOO_ROOT, "storage",
                                      "analyses", "latest")

                # First we have to remove the existing symbolic link, then we
                # have to create the new one.
                # Deal with race conditions using a lock.
                latest_symlink_lock.acquire()
                try:
                    # As per documentation, lexists() returns True for dead
                    # symbolic links.
                    if os.path.lexists(latest):
                        os.remove(latest)

                    os.symlink(self.storage, latest)
                except OSError as e:
                    log.warning("Error pointing latest analysis symlink: %s" % e)
                finally:
                    latest_symlink_lock.release()

            log.info("Task #%d: analysis procedure completed", self.task.id)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # propagate instead of being swallowed.
            log.exception("Failure in AnalysisManager.run")
        finally:
            # BUGFIX: decrement in a finally block so the global counter
            # cannot leak if a BaseException escapes the handler above.
            active_analysis_count -= 1
开发者ID:Sunrel,项目名称:cuckoo,代码行数:104,代码来源:scheduler.py

示例10: AnalysisManager

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]

#.........这里部分代码省略.........
            log.error(
                "The network interface '%s' configured for this analysis is "
                "not available at the moment, switching to route=none mode.",
                self.interface
            )
            route = "none"
            self.task.options["route"] = "none"
            self.interface = None
            self.rt_table = None

        if self.interface:
            rooter("forward_enable", self.machine.interface,
                   self.interface, self.machine.ip)

        if self.rt_table:
            rooter("srcroute_enable", self.rt_table, self.machine.ip)

        # Propagate the taken route to the database.
        self.db.set_route(self.task.id, route)

    def unroute_network(self):
        """Disable the forwarding / source-routing rules that were enabled
        for this analysis (mirror of route_network)."""
        iface = self.interface
        if iface:
            rooter("forward_disable", self.machine.interface, iface,
                   self.machine.ip)

        table = self.rt_table
        if table:
            rooter("srcroute_disable", table, self.machine.ip)

    def wait_finish(self):
        """Some VMs don't have an actual agent. Mainly those that are used as
        assistance for an analysis through the services auxiliary module. This
        method just waits until the analysis is finished rather than actively
        trying to engage with the Cuckoo Agent."""
        task_id = self.task.id
        self.db.guest_set_status(task_id, "running")
        while True:
            if self.db.guest_get_status(task_id) != "running":
                break
            time.sleep(1)

    def guest_manage(self, options):
        """Drive the in-guest side of this analysis.

        @param options: dict of analysis options passed to the guest;
            "timeout" is read directly for baseline runs.
        """
        # Handle a special case where we're creating a baseline report of this
        # particular virtual machine - a report containing all the results
        # that are gathered if no additional samples are ran in the VM. These
        # results, such as loaded drivers and opened sockets in volatility, or
        # DNS requests to hostnames related to Microsoft Windows, etc may be
        # omitted or at the very least given less priority when creating a
        # report for an analysis that ran on this VM later on.
        if self.task.category == "baseline":
            # Baseline runs have no sample: just let the VM idle for the
            # configured timeout.
            time.sleep(options["timeout"])
        else:
            # Initialize the guest manager.
            guest = GuestManager(self.machine.name, self.machine.ip,
                                 self.machine.platform, self.task.id)

            # Start the analysis.
            self.db.guest_set_status(self.task.id, "starting")
            monitor = self.task.options.get("monitor", "latest")
            guest.start_analysis(options, monitor)

            # In case the Agent didn't respond and we force-quit the analysis
            # at some point while it was still starting the analysis the state
            # will be "stop" (or anything but "running", really).
            if self.db.guest_get_status(self.task.id) == "starting":
                self.db.guest_set_status(self.task.id, "running")
                guest.wait_for_completion()

            self.db.guest_set_status(self.task.id, "stopping")
开发者ID:0x71,项目名称:cuckoo,代码行数:69,代码来源:scheduler.py

示例11: import_analysis

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]

#.........这里部分代码省略.........
            # return render(request, "error.html", {
            #     "error": "You uploaded a file that exceeds that maximum allowed upload size.",
            # })

        if not analysis.name.endswith(".zip"):
            return render(request, "error.html", {
                "error": "You uploaded an analysis that wasn't a .zip.",
            })

        zf = zipfile.ZipFile(analysis)

        # As per Python documentation we have to make sure there are no
        # incorrect filenames.
        for filename in zf.namelist():
            if filename.startswith("/") or ".." in filename or ":" in filename:
                return render(request, "error.html", {
                    "error": "The zip file contains incorrect filenames, "
                             "please provide a legitimate .zip file.",
                })

        if "analysis.json" in zf.namelist():
            analysis_info = json.loads(zf.read("analysis.json"))
        elif "binary" in zf.namelist():
            analysis_info = {
                "target": {
                    "category": "file",
                },
            }
        else:
            analysis_info = {
                "target": {
                    "category": "url",
                    "url": "unknown",
                },
            }

        category = analysis_info["target"]["category"]
        info = analysis_info.get("info", {})

        if category == "file":
            binary = store_temp_file(zf.read("binary"), "binary")

            if os.path.isfile(binary):
                task_id = db.add_path(file_path=binary,
                                      package=info.get("package"),
                                      timeout=0,
                                      options=info.get("options"),
                                      priority=0,
                                      machine="",
                                      custom=info.get("custom"),
                                      memory=False,
                                      enforce_timeout=False,
                                      tags=info.get("tags"))
                if task_id:
                    task_ids.append(task_id)

        elif category == "url":
            url = analysis_info["target"]["url"]
            if not url:
                return render(request, "error.html", {
                    "error": "You specified an invalid URL!",
                })

            task_id = db.add_url(url=url,
                                 package=info.get("package"),
                                 timeout=0,
                                 options=info.get("options"),
                                 priority=0,
                                 machine="",
                                 custom=info.get("custom"),
                                 memory=False,
                                 enforce_timeout=False,
                                 tags=info.get("tags"))
            if task_id:
                task_ids.append(task_id)

        if not task_id:
            continue

        # Extract all of the files related to this analysis. This probably
        # requires some hacks depending on the user/group the Web
        # Interface is running under.
        analysis_path = os.path.join(
            CUCKOO_ROOT, "storage", "analyses", "%d" % task_id
        )

        if not os.path.exists(analysis_path):
            os.mkdir(analysis_path)

        zf.extractall(analysis_path)

        # We set this analysis as completed so that it will be processed
        # automatically (assuming process.py / process2.py is running).
        db.set_status(task_id, TASK_COMPLETED)

    if task_ids:
        return render(request, "submission/complete.html", {
            "tasks": task_ids,
            "baseurl": request.build_absolute_uri("/")[:-1],
        })
开发者ID:HarryR,项目名称:cuckoo,代码行数:104,代码来源:views.py

示例12: autoprocess

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]
def autoprocess(parallel=1):
    """Queue completed tasks for report generation on a worker pool.

    Keeps at most QUEUE_THRESHOLD report jobs in flight and stops after
    cfg.cuckoo.max_analysis_count tasks when that setting is non-zero.

    @param parallel: number of worker processes in the pool.
    """
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()
    pending_results = {}

    # Respawn a worker process every 1000 tasks just in case we
    # have any memory leaks.
    pool = multiprocessing.Pool(processes=parallel, initializer=init_worker,
                                maxtasksperchild=1000)

    try:
        while True:
            # Pending results maintenance.
            # BUGFIX: iterate over a snapshot of the items - the dict is
            # mutated (pop) inside this loop, which raises RuntimeError
            # on Python 3 when iterating the live items() view.
            for tid, ar in list(pending_results.items()):
                if not ar.ready():
                    continue

                if ar.successful():
                    log.info("Task #%d: reports generation completed", tid)
                    db.set_status(tid, TASK_REPORTED)
                else:
                    try:
                        ar.get()
                    except Exception as e:
                        log.critical("Task #%d: exception in reports generation: %s", tid, e)
                        if hasattr(e, "traceback"):
                            log.info(e.traceback)

                    db.set_status(tid, TASK_FAILED_PROCESSING)

                pending_results.pop(tid)
                count += 1

            # Make sure our queue has plenty of tasks in it.
            if len(pending_results) >= QUEUE_THRESHOLD:
                time.sleep(1)
                continue

            # End of processing?
            if maxcount and count == maxcount:
                break

            # No need to submit further tasks for reporting as we've already
            # gotten to our maximum.
            if maxcount and count + len(pending_results) == maxcount:
                time.sleep(1)
                continue

            # Get at most queue threshold new tasks. We skip the first N tasks
            # where N is the amount of entries in the pending results list.
            # Given we update a tasks status right before we pop it off the
            # pending results list it is guaranteed that we skip over all of
            # the pending tasks in the database and no further.
            if maxcount:
                limit = maxcount - count - len(pending_results)
            else:
                limit = QUEUE_THRESHOLD

            tasks = db.list_tasks(status=TASK_COMPLETED,
                                  offset=len(pending_results),
                                  limit=min(limit, QUEUE_THRESHOLD),
                                  order_by=Task.completed_on)

            # No new tasks, we can wait a small while before we query again
            # for new tasks.
            if not tasks:
                time.sleep(5)
                continue

            for task in tasks:
                # Ensure that this task is not already in the pending list.
                # This is really mostly for debugging and should never happen.
                assert task.id not in pending_results

                log.info("Task #%d: queueing for reporting", task.id)

                if task.category == "file":
                    sample = db.view_sample(task.sample_id)

                    copy_path = os.path.join(CUCKOO_ROOT, "storage",
                                             "binaries", sample.sha256)
                else:
                    copy_path = None

                args = task.target, copy_path
                kwargs = dict(report=True, auto=True, task=task.to_dict())
                result = pool.apply_async(process_wrapper, args, kwargs)
                pending_results[task.id] = result
    except KeyboardInterrupt:
        pool.terminate()
        raise
    except Exception:
        # Narrowed from a bare "except:" so SystemExit is not swallowed.
        log.exception("Caught unknown exception")
    finally:
        pool.join()
开发者ID:LittleHann,项目名称:cuckoo-linux,代码行数:98,代码来源:process.py

示例13: AnalysisManager

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]

#.........这里部分代码省略.........
        # If the target is a file and the user enabled the delete copy of
        # the binary option, then delete the copy.
        if self.task.category == "file" and self.cfg.cuckoo.delete_bin_copy:
            if not os.path.exists(self.binary):
                log.warning("Task #{0}: Copy of the original file does not exist anymore: '{1}': "
                            "File not found".format(self.task.id, self.binary)
                           )
            else:
                try:
                    os.remove(self.binary)
                except OSError as e:
                    log.error("Task #{0}: Unable to delete the copy of the original file at path "
                              "'{1}': {2}".format(self.task.id, self.binary, e))

        log.info("Task #{0}: reports generation completed (path={1})".format(
                    self.task.id, self.storage)
                )

        return True

    def run(self):
        """Run manager thread."""
        global active_analysis_count
        active_analysis_count += 1
        try:
            while True:
                try:
                    success = self.launch_analysis()
                except CuckooDeadMachine:
                    continue

                break

            self.db.set_status(self.task.id, TASK_COMPLETED)

            # If the task is still available in the database, update our task
            # variable with what's in the database, as otherwise we're missing
            # out on the status and completed_on change. This would then in
            # turn thrown an exception in the analysisinfo processing module.
            self.task = self.db.view_task(self.task.id) or self.task

            log.debug("Task #{0}: Released database task with status {1}".format(self.task.id, success))

            if self.cfg.cuckoo.process_results:
                self.process_results()
                self.db.set_status(self.task.id, TASK_REPORTED)

            # We make a symbolic link ("latest") which links to the latest
            # analysis - this is useful for debugging purposes. This is only
            # supported under systems that support symbolic links.
            if hasattr(os, "symlink"):
                latest = os.path.join(CUCKOO_ROOT, "storage",
                                      "analyses", "latest")

                # First we have to remove the existing symbolic link, then we
                # have to create the new one.
                # Deal with race conditions using a lock.
                latest_symlink_lock.acquire()
                try:
                    # As per documentation, lexists() returns True for dead
                    # symbolic links.
                    if os.path.lexists(latest):
                        os.remove(latest)

                    os.symlink(self.storage, latest)
                except OSError as e:
开发者ID:CIRCL,项目名称:cuckoo-modified,代码行数:70,代码来源:scheduler.py

示例14: autoprocess

# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import set_status [as 别名]
def autoprocess(parallel=1):
    """Generate reports for completed tasks on a pool of worker processes.

    @param parallel: number of worker processes and maximum number of
        in-flight report jobs.
    """
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()
    pool = multiprocessing.Pool(parallel, init_worker)
    pending_results = []

    try:
        # CAUTION - big ugly loop ahead.
        while count < maxcount or not maxcount:

            # Pending_results maintenance.
            for ar, tid, target, copy_path in list(pending_results):
                if ar.ready():
                    if ar.successful():
                        log.info("Task #%d: reports generation completed", tid)
                    else:
                        try:
                            ar.get()
                        except Exception:
                            # Narrowed from a bare "except:" so a Ctrl-C
                            # raised inside ar.get() reaches the dedicated
                            # KeyboardInterrupt handler below.
                            log.exception("Exception when processing task ID %u.", tid)
                            db.set_status(tid, TASK_FAILED_PROCESSING)

                    pending_results.remove((ar, tid, target, copy_path))

            # If still full, don't add more (necessary despite pool).
            if len(pending_results) >= parallel:
                time.sleep(5)
                continue

            # If we're here, getting parallel tasks should at least
            # have one we don't know.
            tasks = db.list_tasks(status=TASK_COMPLETED, limit=parallel,
                                  order_by="completed_on asc")

            added = False
            # For loop to add only one, nice. (reason is that we shouldn't overshoot maxcount)
            for task in tasks:
                # Not-so-efficient lock.
                if task.id in [tid for ar, tid, target, copy_path
                               in pending_results]:
                    continue

                log.info("Processing analysis data for Task #%d", task.id)

                if task.category == "file":
                    sample = db.view_sample(task.sample_id)

                    copy_path = os.path.join(CUCKOO_ROOT, "storage",
                                             "binaries", sample.sha256)
                else:
                    copy_path = None

                args = task.target, copy_path
                kwargs = dict(report=True, auto=True, task=task.to_dict())
                result = pool.apply_async(process, args, kwargs)

                pending_results.append((result, task.id, task.target, copy_path))

                count += 1
                added = True
                break

            if not added:
                # don't hog cpu
                time.sleep(5)

    except KeyboardInterrupt:
        pool.terminate()
        raise
    except Exception:
        # Narrowed from a bare "except:"; still best-effort - report the
        # failure and fall through to pool.join().
        import traceback
        traceback.print_exc()
    finally:
        pool.join()
开发者ID:niterain,项目名称:cuckoo-modified,代码行数:77,代码来源:process.py


注:本文中的lib.cuckoo.core.database.Database.set_status方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。