This article collects typical usage examples of Python's daemon.DaemonContext. If you have been wondering what daemon.DaemonContext does, how it is called, or what real-world code using it looks like, the curated examples below should help. You can also explore further usage of the daemon module it belongs to.
The following 15 code examples of daemon.DaemonContext are shown, ordered by popularity by default.
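Before working through the examples, here is a minimal sketch of the pattern they all share: construct a daemon.DaemonContext (optionally with a PID lock file and redirected output streams) and run the long-lived work inside the with block, which daemonizes the process on entry. The log path, PID file path, and run_forever function below are placeholders for illustration, not taken from any of the projects shown later.

import os
import time

import daemon
from daemon.pidfile import TimeoutPIDLockFile

def run_forever():
    # Placeholder for the real work the daemon performs.
    while True:
        time.sleep(60)

log = open('/tmp/mydaemon.log', 'w+')  # hypothetical log file path
with daemon.DaemonContext(
        working_directory=os.getcwd(),
        umask=0o002,
        pidfile=TimeoutPIDLockFile('/tmp/mydaemon.pid'),  # hypothetical PID file
        stdout=log,
        stderr=log):
    run_forever()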
Example 1: _start
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def _start(args):
    """Create a new process of ATM pointing the process to a certain ``pid`` file."""
    pid_path = _get_pid_path(args.pid)
    process = _get_atm_process(pid_path)

    if process:
        print('ATM is already running!')
    else:
        print('Starting ATM')
        if args.foreground:
            _start_background(args)
        else:
            pidfile = PIDLockFile(pid_path, timeout=1.0)
            with DaemonContext(pidfile=pidfile, working_directory=os.getcwd()):
                # Set up default log file if not already set
                if not args.logfile:
                    _logging_setup(args.verbose, 'atm.log')
                _start_background(args)
Example 2: executor_starter
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def executor_starter(htex, logdir, endpoint_id, logging_level=logging.DEBUG):
    from funcx import set_file_logger

    stdout = open(os.path.join(logdir, "executor.{}.stdout".format(endpoint_id)), 'w')
    stderr = open(os.path.join(logdir, "executor.{}.stderr".format(endpoint_id)), 'w')

    logdir = os.path.abspath(logdir)
    with daemon.DaemonContext(stdout=stdout, stderr=stderr):
        global logger
        print("cwd: ", os.getcwd())
        logger = set_file_logger(os.path.join(logdir, "executor.{}.log".format(endpoint_id)),
                                 level=logging_level)
        htex.start()

    stdout.close()
    stderr.close()
Example 3: _exec
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def _exec(self, detach=True):
    """
    daemonize and exec main()
    """
    kwargs = {
        'pidfile': self.pidfile,
        'working_directory': self.home_dir,
    }
    # FIXME - doesn't work
    if not detach:
        kwargs.update({
            'detach_process': False,
            'files_preserve': [0, 1, 2],
            'stdout': sys.stdout,
            'stderr': sys.stderr,
        })
    ctx = daemon.DaemonContext(**kwargs)
    with ctx:
        self._main()
Example 4: kerberos
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def kerberos(args):
    """Start a kerberos ticket renewer"""
    print(settings.HEADER)

    if args.daemon:
        pid, stdout, stderr, _ = setup_locations(
            "kerberos", args.pid, args.stdout, args.stderr, args.log_file
        )
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            krb.run(principal=args.principal, keytab=args.keytab)

        stdout.close()
        stderr.close()
    else:
        krb.run(principal=args.principal, keytab=args.keytab)
Example 5: start_daemon
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def start_daemon(pidf, logf):
    ### This launches the daemon in its context

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    handler = logging.FileHandler('/var/log/fiscalberry_daemon.log')
    logger.addHandler(handler)

    rootpath = os.path.dirname(os.path.abspath(__file__))

    ### XXX pidfile is a context
    with daemon.DaemonContext(
        stdout=handler.stream,
        stderr=handler.stream,
        working_directory=rootpath,
        umask=0o002,
        pidfile=pidfile.TimeoutPIDLockFile(pidf),
        files_preserve=[handler.stream]
    ) as context:
        do_something()
Example 6: start_queue_monitor
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def start_queue_monitor(poll_frequency, max_workers, user, group, directory):
    pw = pwd.getpwnam(user)
    gid = pw.pw_gid if group is None else grp.getgrnam(group).gr_gid

    # This will capture stderr from this process as well as all child
    # energyPATHWAYS processes. Normally it will be empty, but it can
    # help capture model startup problems that would otherwise be hard to see.
    err = open('/var/log/queue_monitor/qm_stderr_%s.log' % start_time, 'w+')

    with daemon.DaemonContext(
        files_preserve=[logging.root.handlers[0].stream.fileno()],
        pidfile=daemon.pidfile.PIDLockFile('/var/run/queue_monitor/queue_monitor.pid'),
        uid=pw.pw_uid,
        gid=gid,
        working_directory=directory,
        stderr=err
    ):
        logger.info('My process id is %i' % os.getpid())
        qm = QueueMonitor(poll_frequency, max_workers)
        qm.start()
Example 7: get_daemon_context
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def get_daemon_context():
    daemon_pid_file = get_pid_file_path('jms')
    context = daemon.DaemonContext(
        pidfile=pidfile.TimeoutPIDLockFile(daemon_pid_file),
        signal_map={
            signal.SIGTERM: lambda x, y: clean_up(),
            signal.SIGHUP: 'terminate',
        },
        files_preserve=files_preserve,
        detach_process=True,
    )
    return context
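Example 7 only builds and returns the context; as a rough sketch of how a caller might consume it (the start_services entry point here is hypothetical, not from the original project):

def start_jms_daemon():
    context = get_daemon_context()
    with context:
        # Runs in the detached daemon process; SIGTERM invokes clean_up()
        # through the signal_map configured above.
        start_services()  # hypothetical entry point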
Example 8: scheduler
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def scheduler(args):
    """Starts Airflow Scheduler"""
    print(settings.HEADER)
    job = SchedulerJob(
        dag_id=args.dag_id,
        subdir=process_subdir(args.subdir),
        num_runs=args.num_runs,
        do_pickle=args.do_pickle)

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations("scheduler",
                                                        args.pid,
                                                        args.stdout,
                                                        args.stderr,
                                                        args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            files_preserve=[handle],
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            job.run()

        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        signal.signal(signal.SIGQUIT, sigquit_handler)
        job.run()
Example 9: flower
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def flower(args):
    """Starts Flower, Celery monitoring tool"""
    options = [
        conf.get('celery', 'BROKER_URL'),
        f"--address={args.hostname}",
        f"--port={args.port}",
    ]

    if args.broker_api:
        options.append(f"--broker-api={args.broker_api}")

    if args.url_prefix:
        options.append(f"--url-prefix={args.url_prefix}")

    if args.basic_auth:
        options.append(f"--basic-auth={args.basic_auth}")

    if args.flower_conf:
        options.append(f"--conf={args.flower_conf}")

    flower_cmd = FlowerCommand()

    if args.daemon:
        pidfile, stdout, stderr, _ = setup_locations(
            process="flower",
            pid=args.pid,
            stdout=args.stdout,
            stderr=args.stderr,
            log=args.log_file,
        )
        with open(stdout, "w+") as stdout, open(stderr, "w+") as stderr:
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile, -1),
                stdout=stdout,
                stderr=stderr,
            )
            with ctx:
                flower_cmd.execute_from_commandline(argv=options)
    else:
        flower_cmd.execute_from_commandline(argv=options)
Example 10: main
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def main():
    if len(sys.argv) == 2:
        config = load_conf(sys.argv[1])
    else:
        config = load_conf('/opt/janus-cloud/conf/janus-sentinel.yml')

    if config['general']['daemonize']:
        with DaemonContext(stdin=sys.stdin,
                           stdout=sys.stdout,
                           # working_directory=os.getcwd(),
                           files_preserve=list(range(3, 100))):
            do_main(config)
    else:
        do_main(config)
Example 11: main
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def main():
    if len(sys.argv) == 2:
        config = load_conf(sys.argv[1])
    else:
        config = load_conf('/opt/janus-cloud/conf/janus-proxy.yml')

    if config['general']['daemonize']:
        with DaemonContext(stdin=sys.stdin,
                           stdout=sys.stdout,
                           # working_directory=os.getcwd(),
                           files_preserve=list(range(3, 100))):
            do_main(config)
    else:
        do_main(config)
Example 12: daemonize
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def daemonize(logfile, pidfile):
    needl.log.info('Daemonizing and logging to %s', logfile)
    with daemon.DaemonContext(working_directory=os.getcwd(),
                              stderr=logfile,
                              umask=0o002,
                              pidfile=daemon.pidfile.PIDLockFile(pidfile)) as dc:
        start()
Example 13: open
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def open(self):
    self._addLoggerFiles()
    daemon.DaemonContext.open(self)
    if self.stdout_logger:
        fileLikeObj = FileLikeLogger(self.stdout_logger)
        sys.stdout = fileLikeObj
    if self.stderr_logger:
        fileLikeObj = FileLikeLogger(self.stderr_logger)
        sys.stderr = fileLikeObj
#---------------------------------------------------------------
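Example 13 assumes a FileLikeLogger helper that wraps a logging.Logger in a minimal file-like interface so it can replace sys.stdout and sys.stderr after the context opens. The original class is not shown on this page; the following is one possible sketch of such a wrapper:

import logging

class FileLikeLogger:
    """Minimal file-like object that forwards writes to a logger."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, message):
        message = message.rstrip()
        if message:
            self.logger.log(self.level, message)

    def flush(self):
        for handler in self.logger.handlers:
            handler.flush()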
Example 14: __init__
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def __init__(self, app):
    """ Set up the parameters of a new runner.

        The `app` argument must have the following attributes:

        * `stdin_path`, `stdout_path`, `stderr_path`: Filesystem
          paths to open and replace the existing `sys.stdin`,
          `sys.stdout`, `sys.stderr`.

        * `pidfile_path`: Absolute filesystem path to a file that
          will be used as the PID file for the daemon. If
          ``None``, no PID file will be used.

        * `pidfile_timeout`: Used as the default acquisition
          timeout value supplied to the runner's PID lock file.

        * `run`: Callable that will be invoked when the daemon is
          started.

        """
    self.parse_args()
    self.app = app
    self.daemon_context = DaemonContext()
    self.daemon_context.stdin = open(app.stdin_path, 'r')
    self.daemon_context.stdout = open(app.stdout_path, 'w+')
    self.daemon_context.stderr = open(
        app.stderr_path, 'w+', buffering=0)

    self.pidfile = None
    if app.pidfile_path is not None:
        self.pidfile = make_pidlockfile(
            app.pidfile_path, app.pidfile_timeout)
    self.daemon_context.pidfile = self.pidfile
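To make the interface documented in the docstring concrete, here is a minimal sketch of an app object that satisfies those attributes; the class name, file paths, and sleep loop are illustrative only:

import time

class MyDaemonApp:
    stdin_path = '/dev/null'
    stdout_path = '/tmp/mydaemon.out'   # hypothetical paths
    stderr_path = '/tmp/mydaemon.err'
    pidfile_path = '/tmp/mydaemon.pid'
    pidfile_timeout = 5

    def run(self):
        # Long-running work; the runner invokes this after daemonizing.
        while True:
            time.sleep(60)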
Example 15: main
# Module to import: import daemon [as alias]
# Or: from daemon import DaemonContext [as alias]
def main(self, args):
    warnings.showwarning = self.showwarning
    warnings.filterwarnings('ignore', category=CompoundPiStaleSequence)
    warnings.filterwarnings('ignore', category=CompoundPiStaleClientTime)
    if args.debug:
        # Don't bother with daemon context in debug mode; we generally
        # want to debug protocol stuff anyway...
        signal.signal(signal.SIGINT, self.interrupt)
        signal.signal(signal.SIGTERM, self.terminate)
        self.privileged_setup(args)
        self.serve_forever()
    else:
        pidfile = daemon.runner.make_pidlockfile(args.pidfile, 5)
        if daemon.runner.is_pidfile_stale(pidfile):
            pidfile.break_lock()
        self.privileged_setup(args)
        # Ensure the server's socket, any log file, and stderr are preserved
        # (if not forking)
        files_preserve = [self.server.socket]
        for handler in logging.getLogger().handlers:
            if isinstance(handler, logging.FileHandler):
                files_preserve.append(handler.stream)
        logging.info('Entering daemon context')
        with daemon.DaemonContext(
                # The following odd construct is to ensure detachment only
                # where sensible (see default setting of detach_process)
                detach_process=None if args.daemon else False,
                stderr=None if args.daemon else sys.stderr,
                uid=args.user, gid=args.group,
                files_preserve=files_preserve,
                pidfile=pidfile,
                signal_map={
                    signal.SIGTERM: self.terminate,
                    signal.SIGINT: self.interrupt,
                }):
            self.serve_forever()
        logging.info('Exiting daemon context')