当前位置: 首页>>代码示例>>Python>>正文


Python daemon.pidfile方法代码示例

本文整理汇总了Python中daemon.pidfile方法的典型用法代码示例。如果您正苦于以下问题:Python daemon.pidfile方法的具体用法?Python daemon.pidfile怎么用?Python daemon.pidfile使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在daemon的用法示例。


在下文中一共展示了daemon.pidfile方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: kerberos

# 需要导入模块: import daemon [as 别名]
# 或者: from daemon import pidfile [as 别名]
def kerberos(args):
    """Start a Kerberos ticket renewer.

    When ``args.daemon`` is set, detaches into a daemon process (with a
    pid lockfile and redirected stdout/stderr) before running the renewer;
    otherwise runs it in the foreground.
    """
    print(settings.HEADER)

    if args.daemon:
        pid, stdout, stderr, _ = setup_locations(
            "kerberos", args.pid, args.stdout, args.stderr, args.log_file
        )
        # Use context managers so the redirect files are closed even if the
        # daemon context or krb.run raises; the original only closed them on
        # the success path (same pattern as the `flower` command).
        with open(stdout, 'w+') as stdout_handle, open(stderr, 'w+') as stderr_handle:
            ctx = daemon.DaemonContext(
                # negative timeout -> fail fast instead of blocking on the
                # pid lock (lockfile semantics — confirm against lockfile docs)
                pidfile=TimeoutPIDLockFile(pid, -1),
                stdout=stdout_handle,
                stderr=stderr_handle,
            )
            with ctx:
                krb.run(principal=args.principal, keytab=args.keytab)
    else:
        krb.run(principal=args.principal, keytab=args.keytab)
开发者ID:apache,项目名称:airflow,代码行数:26,代码来源:kerberos_command.py

示例2: start_queue_monitor

# 需要导入模块: import daemon [as 别名]
# 或者: from daemon import pidfile [as 别名]
def start_queue_monitor(poll_frequency, max_workers, user, group, directory):
    """Daemonize and run a QueueMonitor as *user*/*group* in *directory*.

    The daemon drops privileges to the given account, records its pid in
    /var/run/queue_monitor/queue_monitor.pid, and keeps the root logger's
    stream open across the fork.
    """
    account = pwd.getpwnam(user)
    if group is None:
        gid = account.pw_gid
    else:
        gid = grp.getgrnam(group).gr_gid

    # This will capture stderr from this process as well as all child
    # energyPATHWAYS processes. Normally it will be empty, but it can
    # help capture model startup problems that would otherwise be hard to see.
    # NOTE: the handle is deliberately left open — the daemon owns it as
    # its stderr for the rest of its lifetime.
    err = open('/var/log/queue_monitor/qm_stderr_%s.log' % start_time, 'w+')

    context = daemon.DaemonContext(
        files_preserve=[logging.root.handlers[0].stream.fileno()],
        pidfile=daemon.pidfile.PIDLockFile('/var/run/queue_monitor/queue_monitor.pid'),
        uid=account.pw_uid,
        gid=gid,
        working_directory=directory,
        stderr=err,
    )
    with context:
        logger.info('My process id is %i' % os.getpid())
        monitor = QueueMonitor(poll_frequency, max_workers)
        monitor.start()
开发者ID:energyPATHWAYS,项目名称:EnergyPATHWAYS,代码行数:21,代码来源:queue_monitor.py

示例3: stop_endpoint

# 需要导入模块: import daemon [as 别名]
# 或者: from daemon import pidfile [as 别名]
def stop_endpoint(name: str = typer.Argument("default", autocompletion=complete_endpoint_name)):
    """ Stops an endpoint using the pidfile

    Sends SIGTERM, then SIGKILL, to the pid recorded in the endpoint's
    daemon.pid file. Reports the endpoint as inactive when no pid file
    exists; on an unexpected OS error, removes the stale pid file and
    exits with status -1.
    """

    endpoint_dir = os.path.join(State.FUNCX_DIR, name)
    pid_file = os.path.join(endpoint_dir, "daemon.pid")

    if os.path.exists(pid_file):
        logger.debug(f"{name} has a daemon.pid file")
        with open(pid_file, 'r') as f:
            pid = int(f.read())
        # Attempt terminating
        try:
            logger.debug("Signalling process: {}".format(pid))
            os.kill(pid, signal.SIGTERM)
            time.sleep(0.1)
            # BUGFIX: if SIGTERM already terminated the process, the
            # follow-up SIGKILL raises ProcessLookupError (a subclass of
            # OSError). The original code treated that as a failure and
            # exited -1 even though the stop had succeeded.
            try:
                os.kill(pid, signal.SIGKILL)
            except ProcessLookupError:
                pass  # process already gone — that is the desired outcome
            time.sleep(0.1)
            # Wait to confirm that the pid file disappears
            if not os.path.exists(pid_file):
                logger.info("Endpoint <{}> is now stopped".format(name))

        except OSError:
            logger.warning("Endpoint {} could not be terminated".format(name))
            logger.warning("Attempting Endpoint {} cleanup".format(name))
            os.remove(pid_file)
            sys.exit(-1)
    else:
        logger.info("Endpoint <{}> is not active.".format(name))
开发者ID:funcx-faas,项目名称:funcX,代码行数:33,代码来源:endpoint.py

示例4: scheduler

# 需要导入模块: import daemon [as 别名]
# 或者: from daemon import pidfile [as 别名]
def scheduler(args):
    """Starts Airflow Scheduler.

    Builds a SchedulerJob from the CLI arguments and either runs it in the
    foreground (with signal handlers installed) or detaches into a daemon
    with a pid lockfile and redirected stdout/stderr.
    """
    print(settings.HEADER)
    job = SchedulerJob(
        dag_id=args.dag_id,
        subdir=process_subdir(args.subdir),
        num_runs=args.num_runs,
        do_pickle=args.do_pickle)

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations(
            "scheduler", args.pid, args.stdout, args.stderr, args.log_file
        )
        handle = setup_logging(log_file)
        # Use context managers so the redirect files are closed even if the
        # daemon context or job.run() raises; the original only closed them
        # on the success path (same pattern as the `flower` command).
        with open(stdout, 'w+') as stdout_handle, open(stderr, 'w+') as stderr_handle:
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pid, -1),
                # keep the log handler's fd open across the daemon fork
                files_preserve=[handle],
                stdout=stdout_handle,
                stderr=stderr_handle,
            )
            with ctx:
                job.run()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        signal.signal(signal.SIGQUIT, sigquit_handler)
        job.run()
开发者ID:apache,项目名称:airflow,代码行数:37,代码来源:scheduler_command.py

示例5: flower

# 需要导入模块: import daemon [as 别名]
# 或者: from daemon import pidfile [as 别名]
def flower(args):
    """Starts Flower, Celery monitoring tool"""
    # Mandatory arguments first: broker URL, then bind address and port.
    options = [
        conf.get('celery', 'BROKER_URL'),
        f"--address={args.hostname}",
        f"--port={args.port}",
    ]

    # Optional flags are forwarded only when supplied on the command line.
    if args.broker_api:
        options += [f"--broker-api={args.broker_api}"]
    if args.url_prefix:
        options += [f"--url-prefix={args.url_prefix}"]
    if args.basic_auth:
        options += [f"--basic-auth={args.basic_auth}"]
    if args.flower_conf:
        options += [f"--conf={args.flower_conf}"]

    flower_cmd = FlowerCommand()

    # Foreground mode: hand control to Flower directly.
    if not args.daemon:
        flower_cmd.execute_from_commandline(argv=options)
        return

    # Daemon mode: redirect stdout/stderr and take the pid lock before
    # handing control to Flower.
    pidfile, stdout, stderr, _ = setup_locations(
        process="flower",
        pid=args.pid,
        stdout=args.stdout,
        stderr=args.stderr,
        log=args.log_file,
    )
    with open(stdout, "w+") as stdout_handle, open(stderr, "w+") as stderr_handle:
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pidfile, -1),
            stdout=stdout_handle,
            stderr=stderr_handle,
        )
        with ctx:
            flower_cmd.execute_from_commandline(argv=options)
开发者ID:apache,项目名称:airflow,代码行数:42,代码来源:celery_command.py

示例6: main

# 需要导入模块: import daemon [as 别名]
# 或者: from daemon import pidfile [as 别名]
def main():
    """Parse command-line arguments and start Needl, optionally as a daemon."""
    parser = argparse.ArgumentParser(description=needl.__description__)
    parser.add_argument('--datadir', default=os.getcwd() + '/data', help='Data directory')
    # Fixed user-facing typo in the help text ("deamon" -> "daemon").
    parser.add_argument('-d', '--daemon', action='store_true', help='Run as a daemon')
    parser.add_argument('-v', '--verbose', action='store_true', help='Increase logging')
    parser.add_argument('--logfile', type=argparse.FileType('a'), default=sys.stdout, help='Log to this file. Default is stdout.')
    parser.add_argument('--pidfile', default='/tmp/needl.pid', help='Save process PID to this file. Default is /tmp/needl.pid. Only valid when running as a daemon.')
    args = parser.parse_args()

    # stdout is useless once the process detaches, so fall back to a log
    # file when daemonizing without an explicit --logfile.
    if args.daemon and args.logfile is sys.stdout:
        args.logfile = open('/tmp/needl.log', 'a')

    needl.init(args)
    # Plain if/else instead of a conditional expression evaluated purely
    # for its side effects.
    if args.daemon:
        daemonize(args.logfile, args.pidfile)
    else:
        start()
开发者ID:eth0izzle,项目名称:Needl,代码行数:16,代码来源:needl.py

示例7: daemonize

# 需要导入模块: import daemon [as 别名]
# 或者: from daemon import pidfile [as 别名]
def daemonize(logfile, pidfile):
    """Detach into a daemon and run start().

    The daemon keeps the current working directory, writes stderr to
    *logfile*, applies a 0o002 umask, and records its pid via a
    PIDLockFile at *pidfile*.
    """
    needl.log.info('Daemonizing and logging to %s', logfile)

    # The context object was bound `as dc` in the original but never used;
    # the binding has been removed.
    with daemon.DaemonContext(working_directory=os.getcwd(),
                              stderr=logfile,
                              umask=0o002,
                              pidfile=daemon.pidfile.PIDLockFile(pidfile)):

        start()
开发者ID:eth0izzle,项目名称:Needl,代码行数:11,代码来源:needl.py


注:本文中的daemon.pidfile方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。