

Python os.setpgid Method Code Examples

This article collects typical usage examples of the os.setpgid method in Python. If you are wondering what os.setpgid does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of other methods in the os module.


The following presents 15 code examples of os.setpgid, sorted by popularity by default.
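
Before the examples, here is a minimal sketch of the pattern most of them share, assuming a Unix-like platform (this snippet is not taken from any of the projects below): fork a child, move it into its own process group with os.setpgid, and then signal the entire group at once with os.killpg.

import os
import signal
import time

pid = os.fork()
if pid == 0:                          # child
    os.setpgid(0, 0)                  # become the leader of a new process group
    time.sleep(60)                    # stands in for real work
    os._exit(0)
else:                                 # parent
    try:
        os.setpgid(pid, pid)          # set it from the parent too, to avoid a race
    except OSError:
        pass                          # the child may have already set it
    os.killpg(pid, signal.SIGTERM)    # one call signals every process in the group
    os.waitpid(pid, 0)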

Example 1: _setPgid

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def _setPgid(pid, pgid):
    """set pgid of a process, ignored exception caused by race condition
    that occurs if already set by parent or child has already existed"""
    # Should just ignore on EACCES, as to handle race condition with parent
    # and child.  However some Linux kernels (seen in 2.6.18-53) report ESRCH
    # or EPERM.  To handle this is a straight-forward way, just check that the
    # change has been made.  However, in some cases the change didn't take,
    # retrying seems to make the problem go away.
    for i in range(0,5):
        try:
            os.setpgid(pid, pgid)
            return
        except OSError:
            if os.getpgid(pid) == pgid:
                return
            time.sleep(0.25) # sleep for retry
    # last try, let it return an error
    os.setpgid(pid, pgid)

# FIXME: why not use pipes.quote? 
Author: ComparativeGenomicsToolkit, Project: Comparative-Annotation-Toolkit, Lines: 22, Source: pipeline.py
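
As a hedged illustration (not part of pipeline.py), the race that _setPgid absorbs arises because the parent and the forked child typically both call setpgid, and either side can lose:

import os

pid = os.fork()
if pid == 0:                  # child: put itself into a new group keyed on its own pid
    _setPgid(0, os.getpid())
    # ... run the child's work here ...
    os._exit(0)
else:                         # parent: make the same call; whichever side loses the
    _setPgid(pid, pid)        # race gets an OSError, which _setPgid checks and ignores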

Example 2: start

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def start(self):
        """
        Use multiple processes to parse and generate tasks for the
        DAGs in parallel. By processing them in separate processes,
        we can get parallelism and isolation from potentially harmful
        user code.
        """

        self.register_exit_signals()

        # Start a new process group
        os.setpgid(0, 0)

        self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
        self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
        self.log.info(
            "Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
        )

        return self._run_parsing_loop() 
Author: apache, Project: airflow, Lines: 22, Source: dag_processing.py

Example 3: createGlobalSimulator

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def createGlobalSimulator(useUniqueFakeAwsDir=True):
        # Mark our process as the process leader
        os.setpgid(0, 0)

        if not os.path.exists(Setup.config().fakeAwsBaseDir):
            os.makedirs(Setup.config().fakeAwsBaseDir)
        Simulator._originalFakeAwsDir = Setup.config().fakeAwsBaseDir
        if useUniqueFakeAwsDir:
            newDirName = makeUniqueDir()
            fakeAwsBase = Setup.config().fakeAwsBaseDir
            Setup.config().fakeAwsBaseDir = newDirName
            Setup.config().logDir = newDirName
            latestLinkPath = os.path.join(fakeAwsBase, 'latest')
            if os.path.exists(latestLinkPath):
                os.unlink(latestLinkPath)
            os.symlink(newDirName, latestLinkPath)

        assert Simulator._globalSimulator is None
        Simulator._globalSimulator = Simulator()
        return Simulator._globalSimulator 
Author: ufora, Project: ufora, Lines: 22, Source: ClusterSimulation.py

Example 4: __init__

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def __init__(self, threads=1000, workers=0):
        os.umask(0o27)  # ensure files are created with the correct privileges
        self._logger = logging.getLogger("eventlet.wsgi.server")
        self._wsgi_logger = loggers.WritableLogger(self._logger)
        self.threads = threads
        self.children = set()
        self.stale_children = set()
        self.running = True
        self.pgid = os.getpid()
        self.workers = workers
        try:
            # NOTE(flaper87): Make sure this process
            # runs in its own process group.
            os.setpgid(self.pgid, self.pgid)
        except OSError:
            # NOTE(flaper87): When running searchlight-control,
            # (searchlight's functional tests, for example)
            # setpgid fails with EPERM as searchlight-control
            # creates a fresh session, of which the newly
            # launched service becomes the leader (session
            # leaders may not change process groups)
            #
            # Running searchlight-api is safe and
            # shouldn't raise any error here.
            self.pgid = 0 
Author: openstack, Project: searchlight, Lines: 27, Source: wsgi.py
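
The NOTE above describes standard POSIX behavior that is easy to reproduce: a session leader may not change its process group, so setpgid fails with EPERM after setsid. A minimal sketch (not searchlight code, Unix only):

import errno
import os

pid = os.fork()
if pid == 0:
    os.setsid()              # the child becomes a new session (and group) leader
    try:
        os.setpgid(0, 0)     # POSIX forbids this for a session leader
    except OSError as exc:
        print("setpgid failed as expected:", errno.errorcode[exc.errno])  # EPERM
    os._exit(0)
os.waitpid(pid, 0)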

Example 5: execute

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def execute(self, dt):
        if self.finished: return "finished"
        if not self.running:
            self.process = Process(target=executeInProcessGroup, args=(self,))
            self.process.start()
            print("timeshare child PID:", self.process.pid)
            os.setpgid(self.process.pid, self.process.pid)
            print("timeshare process group", os.getpgid(self.process.pid))
            assert os.getpgid(self.process.pid) == self.process.pid
            print("my process group", os.getpgrp(), "which should be", os.getpgid(0))
            assert os.getpgid(self.process.pid) != os.getpgid(0)
            self.running = True
        else:
            os.killpg(self.process.pid, signal.SIGCONT)

        self.process.join(dt)
        if self.process.is_alive():
            os.killpg(self.process.pid, signal.SIGSTOP)
            return "still running"
        else:
            self.finished = True
            return self.q.get()
Author: ellisk42, Project: TikZ, Lines: 24, Source: timeshare.py
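
A stripped-down sketch of the pause/resume idea above (outside the TikZ project, assuming the fork start method on Linux): once the worker sits in its own process group, SIGSTOP and SIGCONT delivered via os.killpg freeze and resume it as a unit.

import os
import signal
import time
from multiprocessing import Process

def worker():
    while True:
        time.sleep(0.1)

if __name__ == "__main__":
    p = Process(target=worker)
    p.start()
    os.setpgid(p.pid, p.pid)           # move the child into its own group
    os.killpg(p.pid, signal.SIGSTOP)   # freeze the whole group
    time.sleep(1)
    os.killpg(p.pid, signal.SIGCONT)   # let it run again
    p.terminate()
    p.join()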

Example 6: _run_processor_manager

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def _run_processor_manager(dag_directory,
                               max_runs,
                               processor_factory,
                               processor_timeout,
                               signal_conn,
                               dag_ids,
                               pickle_dags,
                               async_mode):

        # Make this process start as a new process group - that makes it easy
        # to kill all sub-process of this at the OS-level, rather than having
        # to iterate the child processes
        os.setpgid(0, 0)

        setproctitle("airflow scheduler -- DagFileProcessorManager")
        # Reload configurations and settings to avoid collision with parent process.
        # Because this process may need custom configurations that cannot be shared,
        # e.g. RotatingFileHandler. And it can cause connection corruption if we
        # do not recreate the SQLA connection pool.
        os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
        os.environ['AIRFLOW__LOGGING__COLORED_CONSOLE_LOG'] = 'False'
        # Replicating the behavior of how logging module was loaded
        # in logging_config.py
        importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0]))
        importlib.reload(airflow.settings)
        airflow.settings.initialize()
        del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
        processor_manager = DagFileProcessorManager(dag_directory,
                                                    max_runs,
                                                    processor_factory,
                                                    processor_timeout,
                                                    signal_conn,
                                                    dag_ids,
                                                    pickle_dags,
                                                    async_mode)

        processor_manager.start() 
Author: apache, Project: airflow, Lines: 39, Source: dag_processing.py
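
The comment about being able to kill all sub-processes at the OS level comes down to a single call from the parent side; a hedged sketch (the helper name is hypothetical, not part of Airflow):

import os
import signal

def stop_process_group(leader_pid):
    """Hypothetical helper: SIGTERM every process in leader_pid's group."""
    os.killpg(os.getpgid(leader_pid), signal.SIGTERM)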

Example 7: setpgid

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def setpgid(self, pgid):
        os.setpgid(self.childpid, pgid) 
Author: kdart, Project: pycopia, Lines: 4, Source: proctools.py

Example 8: __init__

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def __init__(self, *args, **kwargs):
            if len(args) >= 7:
                raise Exception("Arguments preexec_fn and after must be passed by keyword.")

            real_preexec_fn = kwargs.pop("preexec_fn", None)
            def setpgid_preexec_fn():
                os.setpgid(0, 0)
                if real_preexec_fn:
                    real_preexec_fn()

            kwargs['preexec_fn'] = setpgid_preexec_fn

            subprocess.Popen.__init__(self, *args, **kwargs) 
Author: Laharah, Project: deluge-FileBotTool, Lines: 15, Source: killableprocess.py
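
Wrapping setpgid in a preexec_fn, as above, works on both Python 2 and 3; on Python 3, subprocess.Popen also accepts start_new_session=True, which calls setsid in the child instead. A hedged sketch of both, assuming a Unix system with a sleep command:

import os
import signal
import subprocess

# Child joins a fresh process group before exec, via preexec_fn.
proc = subprocess.Popen(["sleep", "60"], preexec_fn=lambda: os.setpgid(0, 0))

# Python 3 alternative: start a whole new session (which is also a new group).
# proc = subprocess.Popen(["sleep", "60"], start_new_session=True)

os.killpg(proc.pid, signal.SIGTERM)    # terminate the command and any children it spawned
proc.wait()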

Example 9: executeInProcessGroup

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def executeInProcessGroup(task):
    os.setpgid(0,0)
    task.q.put(task.command(*task.arguments)) 
Author: ellisk42, Project: TikZ, Lines: 5, Source: timeshare.py

Example 10: parallel_execute

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def parallel_execute(commands: List[List[str]], cpus: Optional[int] = None,
                     timeout: Optional[int] = None, verbose: bool = True) -> List[int]:
    """ Limited return vals, only returns return codes
    """
    if verbose:
        runner = verbose_child_process
    else:
        runner = child_process
    os.setpgid(0, 0)
    if not cpus:
        cpus = get_config().cpus
    assert isinstance(cpus, int)
    pool = multiprocessing.Pool(cpus)
    jobs = pool.map_async(runner, commands)

    try:
        errors = jobs.get(timeout=timeout)
    except multiprocessing.TimeoutError:
        pool.terminate()
        assert isinstance(timeout, int)
        raise RuntimeError("One of %d child processes timed out after %d seconds" % (
                cpus, timeout))

    except KeyboardInterrupt:
        logging.error("Interrupted by user")
        pool.terminate()
        raise

    pool.close()

    return errors 
Author: antismash, Project: antismash, Lines: 33, Source: base.py

Example 11: restore_input_output

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def restore_input_output(self):
        if self.__old_stdout is not None:
            sys.stdout.flush()
            # now we reset stdout to be the whatever it was before
            sys.stdout = self.__old_stdout
        if self.__old_stdin is not None:
            sys.stdin = self.__old_stdin
        if self.__old_pgid is not None:
            os.setpgid(0, self.__old_pgid) 
Author: sassoftware, Project: conary, Lines: 11, Source: __init__.py

Example 12: switch_pgid

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def switch_pgid(self):
        try:
            if os.getpgrp() != os.tcgetpgrp(0):
                self.__old_pgid = os.getpgrp()
                os.setpgid(0, os.tcgetpgrp(0))
            else:
                self.__old_pgid = None
        except OSError:
            self.__old_pgid = None 
Author: sassoftware, Project: conary, Lines: 11, Source: __init__.py
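
switch_pgid compares the process's own group with the group that currently owns the controlling terminal. A small hedged sketch (outside conary) of that check in isolation:

import os
import sys

if sys.stdin.isatty():
    fg = os.tcgetpgrp(sys.stdin.fileno())   # group that owns the terminal
    if fg == os.getpgrp():
        print("this process group owns the terminal")
    else:
        print("the terminal is owned by process group", fg)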

Example 13: __init__

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def __init__(self, name, conf, threads=1000):
        os.umask(0o27)  # ensure files are created with the correct privileges
        self._logger = logging.getLogger("eventlet.wsgi.server")
        self.name = name
        self.threads = threads
        self.children = set()
        self.stale_children = set()
        self.running = True
        self.pgid = os.getpid()
        self.conf = conf
        try:
            os.setpgid(self.pgid, self.pgid)
        except OSError:
            self.pgid = 0 
Author: openstack, Project: senlin, Lines: 16, Source: wsgi.py

Example 14: _start_by_fork

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def _start_by_fork(self):  # pylint: disable=inconsistent-return-statements
        pid = os.fork()
        if pid:
            self.log.info("Started process %d to run task", pid)
            return psutil.Process(pid)
        else:
            from airflow.cli.cli_parser import get_parser
            from airflow.sentry import Sentry
            import signal
            import airflow.settings as settings

            signal.signal(signal.SIGINT, signal.SIG_DFL)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            # Start a new process group
            os.setpgid(0, 0)

            # Force a new SQLAlchemy session. We can't share open DB handles
            # between process. The cli code will re-create this as part of its
            # normal startup
            settings.engine.pool.dispose()
            settings.engine.dispose()

            parser = get_parser()
            # [1:] - remove "airflow" from the start of the command
            args = parser.parse_args(self._command[1:])

            self.log.info('Running: %s', self._command)
            self.log.info('Job %s: Subtask %s', self._task_instance.job_id, self._task_instance.task_id)

            proc_title = "airflow task runner: {0.dag_id} {0.task_id} {0.execution_date}"
            if hasattr(args, "job_id"):
                proc_title += " {0.job_id}"
            setproctitle(proc_title.format(args))

            try:
                args.func(args, dag=self.dag)
                return_code = 0
            except Exception:  # pylint: disable=broad-except
                return_code = 1
            finally:
                # Explicitly flush any pending exception to Sentry if enabled
                Sentry.flush()
                os._exit(return_code)  # pylint: disable=protected-access 
Author: apache, Project: airflow, Lines: 45, Source: standard_task_runner.py

Example 15: submethod

# Required import: import os [as alias]
# Or: from os import setpgid [as alias]
def submethod(self, _method, args=None, kwargs=None, pwent=None):
        args = args or ()
        kwargs = kwargs or {}
        signal.signal(SIGCHLD, SIG_DFL) # critical area
        proc = SubProcess(pwent=pwent)
        if proc.childpid == 0: # in child
            os.setpgid(0, self._pgid)
            sys.excepthook = sys.__excepthook__
            self._procs.clear()
            try:
                rv = _method(*args, **kwargs)
            except SystemExit as val:
                rv = val.code
            except:
                ex, val, tb = sys.exc_info()
                try:
                    import traceback
                    try:
                        fname = _method.__name__
                    except AttributeError:
                        try:
                            fname = _method.__class__.__name__
                        except AttributeError:
                            fname = str(_method)
                    with open("/tmp/" + fname + "_error.log", "w+") as errfile:
                        traceback.print_exception(ex, val, tb, None, errfile)
                finally:
                    ex = val = tb = None
                rv = 127
            if rv is None:
                rv = 0
            try:
                rv = int(rv)
            except:
                rv = 0
            os._exit(rv)
        else:
            self._procs[proc.childpid] = proc
            signal.signal(SIGCHLD, self._child_handler)
            signal.siginterrupt(SIGCHLD, False)
            return proc

    # introspection and query methods 
Author: kdart, Project: pycopia, Lines: 45, Source: proctools.py


Note: The os.setpgid examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.