

Python JoinableQueue.full Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.JoinableQueue.full, gathered from open-source projects. If you are wondering what JoinableQueue.full does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore the broader usage of multiprocessing.JoinableQueue, the class this method belongs to.


Two code examples of JoinableQueue.full are shown below, ordered by popularity by default.
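
Before the full examples, here is a minimal, self-contained sketch (not from either project below) of what full() reports on a bounded JoinableQueue. Note the standard caveat from the multiprocessing documentation: full(), empty(), and qsize() are approximate, since another process may change the queue between the check and your next action.

from multiprocessing import JoinableQueue

q = JoinableQueue(2)    # maxsize=2 makes the queue bounded; full() is effectively always False on an unbounded queue
print(q.full())         # False: nothing has been put yet
q.put("a")
q.put("b")
print(q.full())         # True: maxsize reached; a further put() would block
item = q.get()          # consuming an item frees a slot...
q.task_done()           # ...and task_done() acknowledges it for join()
print(q.full())         # False again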

Example 1: AnalysisManager

# Required module: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import full [as alias]
class AnalysisManager:
    """Manage all analysis processes."""

    def __init__(self):
        # Processing pool.
        logger.debug("Using a pool of %i worker process(es)" % self.get_parallelism())
        # Load modules.
        self.modules = []
        self.load_modules()
        self.check_module_deps()
        # Start the worker pool; the task queue is bounded to the parallelism level.
        self.workers = []
        self.tasks = JoinableQueue(self.get_parallelism())
        self.workers_start()

    def workers_start(self):
        """Start the worker pool."""
        for _ in range(self.get_parallelism()):
            runner = AnalysisRunner(self.tasks, self.modules)
            runner.start()
            self.workers.append(runner)

    def workers_stop(self):
        """Stop the worker pool."""
        # Wait for every worker to finish.
        for worker in self.workers:
            worker.join()

    def get_parallelism(self):
        """Get the ghiro parallelism level for analysis processing."""
        # Check the database type. If we detect SQLite, drop to a single
        # process: SQLite does not handle concurrent writes.
        if settings.DATABASES["default"]["ENGINE"].endswith("sqlite3"):
            logger.warning("Detected SQLite database, decreasing parallelism to 1. SQLite doesn't support parallel writes.")
            return 1
        elif cpu_count() > 1:
            # Use all CPUs minus one, leaving one free for the DB and other processes.
            return cpu_count() - 1
        else:
            return 1

    def load_modules(self):
        """Load modules."""
        # Search for analysis modules; the module directory must be importable as a package named "modules".
        for loader_instance, module_name, is_pkg in pkgutil.iter_modules(modules.__path__, modules.__name__ + "."):
            # Skip packages.
            if is_pkg:
                continue
            # Load module.
            # NOTE: This code is inspired by the Cuckoo Sandbox module loading system.
            try:
                module = __import__(module_name, globals(), locals(), ["dummy"])
            except ImportError as e:
                logger.error("Unable to import module %s: %s" % (module_name, e))
            else:
                for class_name, class_pkg in inspect.getmembers(module):
                    if inspect.isclass(class_pkg):
                        # Load only modules which inherits BaseModule.
                        if issubclass(class_pkg, BaseProcessingModule) and class_pkg is not BaseProcessingModule:
                            self.modules.append(class_pkg)
                            logger.debug("Found module: %s" % class_name)

        # Sort modules by execution order.
        self.modules.sort(key=lambda x: x.order)

    def check_module_deps(self):
        """Check modules for required dependencies; remove modules whose dependencies are missing."""
        # Iterate over a copy: removing items while iterating over the same list skips elements.
        for plugin in self.modules[:]:
            # NOTE: instantiate the module class to run its dependency check.
            if not plugin().check_deps():
                self.modules.remove(plugin)
                logger.warning("Removed module, requirements not found: %s" % plugin.__name__)

    def run(self):
        """Start all analyses."""
        # Re-queue tasks left stale (state "P", processing) by previous runs.
        if Analysis.objects.filter(state="P").exists():
            logger.info("Found %i stale analyses, putting them back in the queue." % Analysis.objects.filter(state="P").count())
            Analysis.objects.filter(state="P").update(state="W")

        # Main processing loop.
        try:
            while True:
                # Fetch tasks waiting to be processed.
                tasks = Analysis.objects.filter(state="W").order_by("id")

                if tasks.exists() and not self.tasks.full():
                    # Using iterator() to avoid queryset caching.
                    for task in tasks.iterator():
                        self.tasks.put(task)
                        logger.debug("Processing task %s" % task.id)
                        task.state = "P"
                        task.save()
                elif self.tasks.full():
                    logger.debug("Queue full. Waiting...")
                    sleep(1)
                else:
                    logger.debug("No tasks. Waiting...")
                    sleep(1)
        except KeyboardInterrupt:
#......... (part of the code omitted here) .........
Developer: Scinawa; project: ghiro; lines of code: 103; source file: processing.py
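
The run() loop above is essentially a dispatcher: poll a backlog, push waiting tasks into the bounded JoinableQueue, and treat full() as a backpressure signal. Below is a simplified, hypothetical sketch of that pattern, with the Django ORM replaced by a plain list standing in for the pending-task table; dispatch(), worker(), and poll_interval are names invented for this illustration, not part of ghiro.

from multiprocessing import JoinableQueue, Process
from time import sleep

def dispatch(pending, tasks, poll_interval=0.1):
    """Push backlog items into a bounded queue, backing off while it is full."""
    while pending:
        if not tasks.full():
            tasks.put(pending.pop(0))   # hand the next task to the workers
        else:
            sleep(poll_interval)        # queue full: wait, then poll again

def worker(tasks):
    while True:
        task = tasks.get()
        if task is None:                # sentinel: shut down
            tasks.task_done()
            break
        sleep(0.2)                      # stand-in for real analysis work
        tasks.task_done()

if __name__ == "__main__":
    tasks = JoinableQueue(2)            # small bound, mirroring get_parallelism() above
    proc = Process(target=worker, args=(tasks,))
    proc.start()
    dispatch(list(range(5)), tasks)
    tasks.put(None)                     # stop the worker
    tasks.join()                        # wait for every put() to be task_done()
    proc.join()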

Example 2: while loop

# Required module: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import full [as alias]
     histogram_merge_worker.start()

     if args.top:
          # Track reader/worker CPU usage for the --top diagnostics below.
          reader_procs = [psutil.Process(reader.pid) for reader in readers]
          worker_procs = [psutil.Process(worker.pid) for worker in workers]

     pair_buffer = {}
     scaffold_count = {}
#     while (not inq.empty()) or sum(reader.is_alive() for reader in readers) > 0:
     while True:
          if args.debug: print("get")
          try:
               # Blocking read; the commented-out variant uses a 10 s timeout,
               # which is what makes the except branch below reachable.
               procid, scaffold, pairs = inq.get()
#               procid, scaffold, pairs = inq.get(True, 10)
               print("#got data from inq:", procid, scaffold, len(pairs), inq.empty(), inq.qsize(), inq.full(), strftime("%Y-%m-%d %H:%M:%S"), sum(reader.is_alive() for reader in readers), "q.size():", q.qsize(), file=sys.stderr, sep="\t")
               sys.stderr.flush()
               sys.stdout.flush()
          except Exception as e:
               print(e, file=sys.stderr)
               if args.top:
                    print("queue get timed out", [reader.cpu_percent() for reader in reader_procs], [worker.cpu_percent() for worker in worker_procs])
               print("#read from queue timed out:", inq.empty(), inq.qsize(), inq.full(), strftime("%Y-%m-%d %H:%M:%S"), sum(reader.is_alive() for reader in readers), file=sys.stderr, sep="\t")
               sys.stderr.flush()
               continue
          if args.debug: print("got")
          # Accumulate pairs per scaffold and count how many chunks each scaffold produced.
          if scaffold not in pair_buffer:
               pair_buffer[scaffold] = []
          pair_buffer[scaffold] += pairs
          scaffold_count[scaffold] = scaffold_count.get(scaffold, 0) + 1
Developer: alexharkess; project: HiRise_July2015_GR; lines of code: 33; source file: parallel_breaker.py
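
Example 2 drains the queue in a loop and, when a read fails, logs the queue's advisory state (empty(), qsize(), full()) for debugging. Here is a self-contained sketch of that pattern, invented for this article (producer(), inq, and the tuple layout are assumptions, not code from the project): a consumer that uses get() with a timeout and falls back to diagnostics on queue.Empty.

import queue          # multiprocessing raises queue.Empty on a timed-out get()
import sys
from multiprocessing import JoinableQueue, Process

def producer(inq):
    # Emit (procid, scaffold, pairs) tuples, mimicking Example 2's payload shape.
    for scaffold in ("scaffold_1", "scaffold_2"):
        inq.put((0, scaffold, [("read_a", "read_b")]))

if __name__ == "__main__":
    inq = JoinableQueue(4)
    Process(target=producer, args=(inq,)).start()
    pair_buffer = {}
    for _ in range(3):                                   # one more read than there are items
        try:
            procid, scaffold, pairs = inq.get(True, 2)   # block for up to 2 seconds
        except queue.Empty:
            # Diagnostics only: empty()/full() are approximate snapshots, and
            # qsize() (used in Example 2) raises NotImplementedError on macOS.
            print("#read from queue timed out:", inq.empty(), inq.full(), file=sys.stderr)
            continue
        pair_buffer.setdefault(scaffold, []).extend(pairs)
        inq.task_done()
    print(pair_buffer)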


Note: the multiprocessing.JoinableQueue.full examples in this article were collected by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets are taken from contributors' open-source projects; copyright in the source code remains with the original authors, and use and redistribution are governed by each project's license. Do not republish without permission.